Diffstat (limited to 'gcc/wide-int.h'):
 gcc/wide-int.h | 683
 1 file changed, 607 insertions, 76 deletions
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 498d14d..73e431d 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -53,6 +53,10 @@ along with GCC; see the file COPYING3. If not see
multiply, division, shifts, comparisons, and operations that need
overflow detected), the signedness must be specified separately.
+ For precisions up to WIDE_INT_MAX_INL_PRECISION, it uses an inline
+ buffer in the type; for larger precisions up to WIDEST_INT_MAX_PRECISION
+ it uses a pointer to a heap-allocated buffer.
+
2) offset_int. This is a fixed-precision integer that can hold
any address offset, measured in either bits or bytes, with at
least one extra sign bit. At the moment the maximum address
@@ -79,8 +83,7 @@ along with GCC; see the file COPYING3. If not see
3) widest_int. This representation is an approximation of
infinite precision math. However, it is not really infinite
precision math as in the GMP library. It is really finite
- precision math where the precision is 4 times the size of the
- largest integer that the target port can represent.
+ precision math where the precision is WIDEST_INT_MAX_PRECISION.
Like offset_int, widest_int is wider than all the values that
it needs to represent, so the integers are logically signed.
@@ -231,17 +234,30 @@ along with GCC; see the file COPYING3. If not see
can be arbitrarily different from X. */
/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
- early examination of the target's mode file. The WIDE_INT_MAX_ELTS
+ early examination of the target's mode file. The WIDE_INT_MAX_INL_ELTS
can accommodate at least 1 more bit so that unsigned numbers of that
mode can be represented as a signed value. Note that it is still
possible to create fixed_wide_ints that have precisions greater than
MAX_BITSIZE_MODE_ANY_INT. This can be useful when representing a
double-width multiplication result, for example. */
-#define WIDE_INT_MAX_ELTS \
- ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)
+#define WIDE_INT_MAX_INL_ELTS \
+ ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) \
+ / HOST_BITS_PER_WIDE_INT)
+
+#define WIDE_INT_MAX_INL_PRECISION \
+ (WIDE_INT_MAX_INL_ELTS * HOST_BITS_PER_WIDE_INT)
+/* Maximum precision of wide_int; this is also one more than the largest
+ _BitInt precision we can support. */
+#define WIDE_INT_MAX_ELTS 1024
#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
+/* Precision of widest_int. */
+#define WIDEST_INT_MAX_ELTS 2048
+#define WIDEST_INT_MAX_PRECISION (WIDEST_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
+
+STATIC_ASSERT (WIDE_INT_MAX_INL_ELTS < WIDE_INT_MAX_ELTS);
+
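
As a concrete reading of these limits, assuming the usual 64-bit HOST_WIDE_INT (MAX_BITSIZE_MODE_ANY_INT is target-dependent, so the inline figures below are only an example):

/* Illustration only:
   WIDE_INT_MAX_ELTS   == 1024 -> WIDE_INT_MAX_PRECISION   == 65536 bits,
     i.e. one more than the largest _BitInt precision (65535), per the
     comment above.
   WIDEST_INT_MAX_ELTS == 2048 -> WIDEST_INT_MAX_PRECISION == 131072 bits.
   For a target where MAX_BITSIZE_MODE_ANY_INT is, say, 128:
   WIDE_INT_MAX_INL_ELTS      == (128 + 64) / 64 == 3 and
   WIDE_INT_MAX_INL_PRECISION == 3 * 64 == 192 bits held inline;
   anything wider uses the heap-allocated buffer described above.  */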
/* This is the max size of any pointer on any machine. It does not
seem to be as easy to sniff this out of the machine description as
it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
@@ -307,17 +323,18 @@ along with GCC; see the file COPYING3. If not see
#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
WI_BINARY_RESULT (T1, T2) RESULT = \
wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
- HOST_WIDE_INT *VAL = RESULT.write_val ()
+ HOST_WIDE_INT *VAL = RESULT.write_val (0)
/* Similar for the result of a unary operation on X, which has type T. */
#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
WI_UNARY_RESULT (T) RESULT = \
wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
- HOST_WIDE_INT *VAL = RESULT.write_val ()
+ HOST_WIDE_INT *VAL = RESULT.write_val (0)
template <typename T> class generic_wide_int;
template <int N> class fixed_wide_int_storage;
class wide_int_storage;
+template <int N> class widest_int_storage;
/* An N-bit integer. Until we can use typedef templates, use this instead. */
#define FIXED_WIDE_INT(N) \
@@ -325,10 +342,8 @@ class wide_int_storage;
typedef generic_wide_int <wide_int_storage> wide_int;
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
-typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
-/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
- so as not to confuse gengtype. */
-typedef generic_wide_int < fixed_wide_int_storage <WIDE_INT_MAX_PRECISION * 2> > widest2_int;
+typedef generic_wide_int <widest_int_storage <WIDEST_INT_MAX_PRECISION> > widest_int;
+typedef generic_wide_int <widest_int_storage <WIDEST_INT_MAX_PRECISION * 2> > widest2_int;
/* wi::storage_ref can be a reference to a primitive type,
so this is the conservatively-correct setting. */
@@ -378,8 +393,12 @@ namespace wi
/* The integer has a variable precision but no defined signedness. */
VAR_PRECISION,
- /* The integer has a constant precision (known at GCC compile time)
- and is signed. */
+ /* The integer has a constant precision (known at GCC compile time),
+ is signed and all elements fit in the inline buffer. */
+ INL_CONST_PRECISION,
+
+ /* Like INL_CONST_PRECISION, but elements can be heap-allocated for
+ larger lengths. */
CONST_PRECISION
};
@@ -390,7 +409,8 @@ namespace wi
Classifies the type of T.
static const unsigned int precision;
- Only defined if precision_type == CONST_PRECISION. Specifies the
+ Only defined if precision_type == INL_CONST_PRECISION or
+ precision_type == CONST_PRECISION. Specifies the
precision of all integers of type T.
static const bool host_dependent_precision;
@@ -415,9 +435,10 @@ namespace wi
struct binary_traits;
/* Specify the result type for each supported combination of binary
- inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be
- mixed, in order to give stronger type checking. When both inputs
- are CONST_PRECISION, they must have the same precision. */
+ inputs. Note that INL_CONST_PRECISION, CONST_PRECISION and
+ VAR_PRECISION cannot be mixed, in order to give stronger type
+ checking. When both inputs are INL_CONST_PRECISION or both are
+ CONST_PRECISION, they must have the same precision. */
template <typename T1, typename T2>
struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
{
@@ -434,7 +455,7 @@ namespace wi
};
template <typename T1, typename T2>
- struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, INL_CONST_PRECISION>
{
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
so as not to confuse gengtype. */
@@ -447,6 +468,17 @@ namespace wi
};
template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ {
+ typedef generic_wide_int < widest_int_storage
+ <int_traits <T2>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
{
typedef wide_int result_type;
@@ -455,7 +487,7 @@ namespace wi
};
template <typename T1, typename T2>
- struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ struct binary_traits <T1, T2, INL_CONST_PRECISION, FLEXIBLE_PRECISION>
{
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
so as not to confuse gengtype. */
@@ -468,7 +500,18 @@ namespace wi
};
template <typename T1, typename T2>
- struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef generic_wide_int < widest_int_storage
+ <int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, INL_CONST_PRECISION, INL_CONST_PRECISION>
{
STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
@@ -482,6 +525,18 @@ namespace wi
};
template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ {
+ STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
+ typedef generic_wide_int < widest_int_storage
+ <int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
{
typedef wide_int result_type;
@@ -709,8 +764,10 @@ wi::storage_ref::get_val () const
Although not required by generic_wide_int itself, writable storage
classes can also provide the following functions:
- HOST_WIDE_INT *write_val ()
- Get a modifiable version of get_val ()
+ HOST_WIDE_INT *write_val (unsigned int)
+ Get a modifiable version of get_val (). The argument should be
+ an upper bound for LEN (ignored by all storages but
+ widest_int_storage).
unsigned int set_len (unsigned int len)
Set the value returned by get_len () to LEN. */
@@ -777,6 +834,8 @@ public:
static const bool is_sign_extended
= wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
+ static const bool needs_write_val_arg
+ = wi::int_traits <generic_wide_int <storage> >::needs_write_val_arg;
};
template <typename storage>
@@ -1049,6 +1108,7 @@ namespace wi
static const enum precision_type precision_type = VAR_PRECISION;
static const bool host_dependent_precision = HDP;
static const bool is_sign_extended = SE;
+ static const bool needs_write_val_arg = false;
};
}
@@ -1065,7 +1125,11 @@ namespace wi
class GTY(()) wide_int_storage
{
private:
- HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
+ union
+ {
+ HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
+ HOST_WIDE_INT *valp;
+ } GTY((skip)) u;
unsigned int len;
unsigned int precision;
@@ -1073,14 +1137,17 @@ public:
wide_int_storage ();
template <typename T>
wide_int_storage (const T &);
+ wide_int_storage (const wide_int_storage &);
+ ~wide_int_storage ();
/* The standard generic_wide_int storage methods. */
unsigned int get_precision () const;
const HOST_WIDE_INT *get_val () const;
unsigned int get_len () const;
- HOST_WIDE_INT *write_val ();
+ HOST_WIDE_INT *write_val (unsigned int);
void set_len (unsigned int, bool = false);
+ wide_int_storage &operator = (const wide_int_storage &);
template <typename T>
wide_int_storage &operator = (const T &);
@@ -1099,12 +1166,15 @@ namespace wi
/* Guaranteed by a static assert in the wide_int_storage constructor. */
static const bool host_dependent_precision = false;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
template <typename T1, typename T2>
static wide_int get_binary_result (const T1 &, const T2 &);
+ template <typename T1, typename T2>
+ static unsigned int get_binary_precision (const T1 &, const T2 &);
};
}
-inline wide_int_storage::wide_int_storage () {}
+inline wide_int_storage::wide_int_storage () : precision (0) {}
/* Initialize the storage from integer X, in its natural precision.
Note that we do not allow integers with host-dependent precision
@@ -1113,21 +1183,67 @@ inline wide_int_storage::wide_int_storage () {}
template <typename T>
inline wide_int_storage::wide_int_storage (const T &x)
{
- { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
- { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
+ STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::INL_CONST_PRECISION);
WIDE_INT_REF_FOR (T) xi (x);
precision = xi.precision;
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT));
wi::copy (*this, xi);
}
+inline wide_int_storage::wide_int_storage (const wide_int_storage &x)
+{
+ memcpy (this, &x, sizeof (wide_int_storage));
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT));
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+}
+
+inline wide_int_storage::~wide_int_storage ()
+{
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ XDELETEVEC (u.valp);
+}
+
+inline wide_int_storage&
+wide_int_storage::operator = (const wide_int_storage &x)
+{
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ {
+ if (this == &x)
+ return *this;
+ XDELETEVEC (u.valp);
+ }
+ memcpy (this, &x, sizeof (wide_int_storage));
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT));
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+ return *this;
+}
+
template <typename T>
inline wide_int_storage&
wide_int_storage::operator = (const T &x)
{
- { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
- { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
+ STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION);
+ STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::INL_CONST_PRECISION);
WIDE_INT_REF_FOR (T) xi (x);
- precision = xi.precision;
+ if (UNLIKELY (precision != xi.precision))
+ {
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ XDELETEVEC (u.valp);
+ precision = xi.precision;
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ u.valp = XNEWVEC (HOST_WIDE_INT,
+ CEIL (precision, HOST_BITS_PER_WIDE_INT));
+ }
wi::copy (*this, xi);
return *this;
}
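
The storage layout and the constructor/assignment/destructor discipline in this hunk can be condensed into a small self-contained sketch. Everything below is a hypothetical illustration (the name sbo_int, inline_elts, plain new[]/delete[]) rather than GCC's actual class:

#include <cstring>

/* Minimal stand-in for the small-buffer scheme above: a union of an
   inline array and a heap pointer, where only values wider than the
   inline capacity own a heap buffer.  */
struct sbo_int
{
  static const unsigned inline_elts = 4;  /* stand-in for WIDE_INT_MAX_INL_ELTS */
  union { long long val[inline_elts]; long long *valp; } u;
  unsigned len;
  unsigned elts;                          /* stand-in for precision / 64 */

  bool on_heap () const { return elts > inline_elts; }
  long long *blocks () { return on_heap () ? u.valp : u.val; }
  const long long *blocks () const { return on_heap () ? u.valp : u.val; }

  explicit sbo_int (unsigned n) : len (1), elts (n)
  {
    if (on_heap ())
      u.valp = new long long[elts];
    blocks ()[0] = 0;
  }
  sbo_int (const sbo_int &x) : len (x.len), elts (x.elts)
  {
    if (on_heap ())
      u.valp = new long long[elts];
    std::memcpy (blocks (), x.blocks (), len * sizeof (long long));
  }
  sbo_int &operator = (const sbo_int &x)
  {
    if (this == &x)
      return *this;
    if (on_heap ())
      delete[] u.valp;                    /* drop the old heap buffer first */
    len = x.len;
    elts = x.elts;
    if (on_heap ())
      u.valp = new long long[elts];
    std::memcpy (blocks (), x.blocks (), len * sizeof (long long));
    return *this;
  }
  ~sbo_int ()
  {
    if (on_heap ())
      delete[] u.valp;
  }
};

The GCC versions in the hunk memcpy the whole object and then repoint u.valp, which works because the union is GTY((skip)) plain data; the sketch copies members individually for standalone clarity.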
@@ -1141,7 +1257,7 @@ wide_int_storage::get_precision () const
inline const HOST_WIDE_INT *
wide_int_storage::get_val () const
{
- return val;
+ return UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION) ? u.valp : u.val;
}
inline unsigned int
@@ -1151,9 +1267,9 @@ wide_int_storage::get_len () const
}
inline HOST_WIDE_INT *
-wide_int_storage::write_val ()
+wide_int_storage::write_val (unsigned int)
{
- return val;
+ return UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION) ? u.valp : u.val;
}
inline void
@@ -1161,8 +1277,10 @@ wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
{
len = l;
if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
- val[len - 1] = sext_hwi (val[len - 1],
- precision % HOST_BITS_PER_WIDE_INT);
+ {
+ HOST_WIDE_INT &v = write_val (len)[len - 1];
+ v = sext_hwi (v, precision % HOST_BITS_PER_WIDE_INT);
+ }
}
/* Treat X as having signedness SGN and convert it to a PRECISION-bit
@@ -1172,7 +1290,7 @@ wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
signop sgn)
{
wide_int result = wide_int::create (precision);
- result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ result.set_len (wi::force_to_size (result.write_val (x.len), x.val, x.len,
x.precision, precision, sgn));
return result;
}
@@ -1185,7 +1303,7 @@ wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
unsigned int precision, bool need_canon_p)
{
wide_int result = wide_int::create (precision);
- result.set_len (wi::from_array (result.write_val (), val, len, precision,
+ result.set_len (wi::from_array (result.write_val (len), val, len, precision,
need_canon_p));
return result;
}
@@ -1196,6 +1314,9 @@ wide_int_storage::create (unsigned int precision)
{
wide_int x;
x.precision = precision;
+ if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
+ x.u.valp = XNEWVEC (HOST_WIDE_INT,
+ CEIL (precision, HOST_BITS_PER_WIDE_INT));
return x;
}
@@ -1212,6 +1333,20 @@ wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
return wide_int::create (wi::get_precision (x));
}
+template <typename T1, typename T2>
+inline unsigned int
+wi::int_traits <wide_int_storage>::get_binary_precision (const T1 &x,
+ const T2 &y)
+{
+ /* This shouldn't be used for two flexible-precision inputs. */
+ STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
+ || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
+ if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
+ return wi::get_precision (y);
+ else
+ return wi::get_precision (x);
+}
+
/* The storage used by FIXED_WIDE_INT (N). */
template <int N>
class GTY(()) fixed_wide_int_storage
@@ -1221,7 +1356,7 @@ private:
unsigned int len;
public:
- fixed_wide_int_storage ();
+ fixed_wide_int_storage () = default;
template <typename T>
fixed_wide_int_storage (const T &);
@@ -1229,7 +1364,7 @@ public:
unsigned int get_precision () const;
const HOST_WIDE_INT *get_val () const;
unsigned int get_len () const;
- HOST_WIDE_INT *write_val ();
+ HOST_WIDE_INT *write_val (unsigned int);
void set_len (unsigned int, bool = false);
static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
@@ -1242,18 +1377,18 @@ namespace wi
template <int N>
struct int_traits < fixed_wide_int_storage <N> >
{
- static const enum precision_type precision_type = CONST_PRECISION;
+ static const enum precision_type precision_type = INL_CONST_PRECISION;
static const bool host_dependent_precision = false;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
static const unsigned int precision = N;
template <typename T1, typename T2>
static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
+ template <typename T1, typename T2>
+ static unsigned int get_binary_precision (const T1 &, const T2 &);
};
}
-template <int N>
-inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
-
/* Initialize the storage from integer X, in precision N. */
template <int N>
template <typename T>
@@ -1288,7 +1423,7 @@ fixed_wide_int_storage <N>::get_len () const
template <int N>
inline HOST_WIDE_INT *
-fixed_wide_int_storage <N>::write_val ()
+fixed_wide_int_storage <N>::write_val (unsigned int)
{
return val;
}
@@ -1308,7 +1443,7 @@ inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
{
FIXED_WIDE_INT (N) result;
- result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ result.set_len (wi::force_to_size (result.write_val (x.len), x.val, x.len,
x.precision, N, sgn));
return result;
}
@@ -1323,7 +1458,7 @@ fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
bool need_canon_p)
{
FIXED_WIDE_INT (N) result;
- result.set_len (wi::from_array (result.write_val (), val, len,
+ result.set_len (wi::from_array (result.write_val (len), val, len,
N, need_canon_p));
return result;
}
@@ -1337,6 +1472,241 @@ get_binary_result (const T1 &, const T2 &)
return FIXED_WIDE_INT (N) ();
}
+template <int N>
+template <typename T1, typename T2>
+inline unsigned int
+wi::int_traits < fixed_wide_int_storage <N> >::
+get_binary_precision (const T1 &, const T2 &)
+{
+ return N;
+}
+
+#define WIDEST_INT(N) generic_wide_int < widest_int_storage <N> >
+
+/* The storage used by widest_int. */
+template <int N>
+class GTY(()) widest_int_storage
+{
+private:
+ union
+ {
+ HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
+ HOST_WIDE_INT *valp;
+ } GTY((skip)) u;
+ unsigned int len;
+
+public:
+ widest_int_storage ();
+ widest_int_storage (const widest_int_storage &);
+ template <typename T>
+ widest_int_storage (const T &);
+ ~widest_int_storage ();
+ widest_int_storage &operator = (const widest_int_storage &);
+ template <typename T>
+ inline widest_int_storage& operator = (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val (unsigned int);
+ void set_len (unsigned int, bool = false);
+
+ static WIDEST_INT (N) from (const wide_int_ref &, signop);
+ static WIDEST_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
+ bool = true);
+};
+
+namespace wi
+{
+ template <int N>
+ struct int_traits < widest_int_storage <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = true;
+ static const unsigned int precision = N;
+ template <typename T1, typename T2>
+ static WIDEST_INT (N) get_binary_result (const T1 &, const T2 &);
+ template <typename T1, typename T2>
+ static unsigned int get_binary_precision (const T1 &, const T2 &);
+ };
+}
+
+template <int N>
+inline widest_int_storage <N>::widest_int_storage () : len (0) {}
+
+/* Initialize the storage from integer X, in precision N. */
+template <int N>
+template <typename T>
+inline widest_int_storage <N>::widest_int_storage (const T &x) : len (0)
+{
+ /* Check for type compatibility. We don't want to initialize a
+ widest integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, WIDEST_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
+}
+
+template <int N>
+inline
+widest_int_storage <N>::widest_int_storage (const widest_int_storage &x)
+{
+ memcpy (this, &x, sizeof (widest_int_storage));
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, len);
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+}
+
+template <int N>
+inline widest_int_storage <N>::~widest_int_storage ()
+{
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ XDELETEVEC (u.valp);
+}
+
+template <int N>
+inline widest_int_storage <N>&
+widest_int_storage <N>::operator = (const widest_int_storage &x)
+{
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ {
+ if (this == &x)
+ return *this;
+ XDELETEVEC (u.valp);
+ }
+ memcpy (this, &x, sizeof (widest_int_storage));
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, len);
+ memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
+ }
+ return *this;
+}
+
+template <int N>
+template <typename T>
+inline widest_int_storage <N>&
+widest_int_storage <N>::operator = (const T &x)
+{
+ /* Check for type compatibility. We don't want to assign a
+ widest integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, WIDEST_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ XDELETEVEC (u.valp);
+ len = 0;
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
+ return *this;
+}
+
+template <int N>
+inline unsigned int
+widest_int_storage <N>::get_precision () const
+{
+ return N;
+}
+
+template <int N>
+inline const HOST_WIDE_INT *
+widest_int_storage <N>::get_val () const
+{
+ return UNLIKELY (len > WIDE_INT_MAX_INL_ELTS) ? u.valp : u.val;
+}
+
+template <int N>
+inline unsigned int
+widest_int_storage <N>::get_len () const
+{
+ return len;
+}
+
+template <int N>
+inline HOST_WIDE_INT *
+widest_int_storage <N>::write_val (unsigned int l)
+{
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
+ XDELETEVEC (u.valp);
+ len = l;
+ if (UNLIKELY (l > WIDE_INT_MAX_INL_ELTS))
+ {
+ u.valp = XNEWVEC (HOST_WIDE_INT, l);
+ return u.valp;
+ }
+ else if (CHECKING_P && l < WIDE_INT_MAX_INL_ELTS)
+ u.val[l] = HOST_WIDE_INT_UC (0xbaaaaaaddeadbeef);
+ return u.val;
+}
+
+template <int N>
+inline void
+widest_int_storage <N>::set_len (unsigned int l, bool)
+{
+ gcc_checking_assert (l <= len);
+ if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS)
+ && l <= WIDE_INT_MAX_INL_ELTS)
+ {
+ HOST_WIDE_INT *valp = u.valp;
+ memcpy (u.val, valp, l * sizeof (u.val[0]));
+ XDELETEVEC (valp);
+ }
+ else if (len && len < WIDE_INT_MAX_INL_ELTS)
+ gcc_checking_assert ((unsigned HOST_WIDE_INT) u.val[len]
+ == HOST_WIDE_INT_UC (0xbaaaaaaddeadbeef));
+ len = l;
+ /* There are no excess bits in val[len - 1]. */
+ STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
+}
+
+/* Treat X as having signedness SGN and convert it to an N-bit number. */
+template <int N>
+inline WIDEST_INT (N)
+widest_int_storage <N>::from (const wide_int_ref &x, signop sgn)
+{
+ WIDEST_INT (N) result;
+ unsigned int exp_len = x.len;
+ unsigned int prec = result.get_precision ();
+ if (sgn == UNSIGNED && prec > x.precision && x.val[x.len - 1] < 0)
+ exp_len = CEIL (x.precision, HOST_BITS_PER_WIDE_INT) + 1;
+ result.set_len (wi::force_to_size (result.write_val (exp_len), x.val, x.len,
+ x.precision, prec, sgn));
+ return result;
+}
+
+/* Create a WIDEST_INT (N) from the explicit block encoding given by
+ VAL and LEN. NEED_CANON_P is true if the encoding may have redundant
+ trailing blocks. */
+template <int N>
+inline WIDEST_INT (N)
+widest_int_storage <N>::from_array (const HOST_WIDE_INT *val,
+ unsigned int len,
+ bool need_canon_p)
+{
+ WIDEST_INT (N) result;
+ result.set_len (wi::from_array (result.write_val (len), val, len,
+ result.get_precision (), need_canon_p));
+ return result;
+}
+
+template <int N>
+template <typename T1, typename T2>
+inline WIDEST_INT (N)
+wi::int_traits < widest_int_storage <N> >::
+get_binary_result (const T1 &, const T2 &)
+{
+ return WIDEST_INT (N) ();
+}
+
+template <int N>
+template <typename T1, typename T2>
+inline unsigned int
+wi::int_traits < widest_int_storage <N> >::
+get_binary_precision (const T1 &, const T2 &)
+{
+ return N;
+}
+
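
To see why widest_int_storage::from reserves the extra block in the UNSIGNED case, a small worked example (assuming 64-bit HOST_WIDE_INT):

/* A 128-bit x with all bits set is stored canonically as x.len == 1,
   x.val[0] == -1 (sign-extended).  Read as UNSIGNED and widened to
   widest_int, the value 2^128 - 1 needs the blocks { ~0, ~0, 0 },
   i.e. CEIL (128, 64) + 1 == 3 of them, which is exactly what exp_len
   reserves; for SIGNED conversions, or when the top block is already
   non-negative, x.len blocks suffice.  */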
/* A reference to one element of a trailing_wide_ints structure. */
class trailing_wide_int_storage
{
@@ -1346,20 +1716,20 @@ private:
unsigned int m_precision;
/* A pointer to the length field. */
- unsigned char *m_len;
+ unsigned short *m_len;
/* A pointer to the HWI array. There are enough elements to hold all
values of precision M_PRECISION. */
HOST_WIDE_INT *m_val;
public:
- trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);
+ trailing_wide_int_storage (unsigned int, unsigned short *, HOST_WIDE_INT *);
/* The standard generic_wide_int storage methods. */
unsigned int get_len () const;
unsigned int get_precision () const;
const HOST_WIDE_INT *get_val () const;
- HOST_WIDE_INT *write_val ();
+ HOST_WIDE_INT *write_val (unsigned int);
void set_len (unsigned int, bool = false);
template <typename T>
@@ -1391,15 +1761,13 @@ private:
unsigned short m_precision;
/* The shared maximum length of each number. */
- unsigned char m_max_len;
+ unsigned short m_max_len;
/* The number of elements. */
unsigned char m_num_elements;
- /* The current length of each number.
- Avoid char array so the whole structure is not a typeless storage
- that will, in turn, turn off TBAA on gimple, trees and RTL. */
- struct {unsigned char len;} m_len[N];
+ /* The current length of each number. */
+ unsigned short m_len[N];
/* The variable-length part of the structure, which always contains
at least one HWI. Element I starts at index I * M_MAX_LEN. */
@@ -1420,7 +1788,7 @@ public:
};
inline trailing_wide_int_storage::
-trailing_wide_int_storage (unsigned int precision, unsigned char *len,
+trailing_wide_int_storage (unsigned int precision, unsigned short *len,
HOST_WIDE_INT *val)
: m_precision (precision), m_len (len), m_val (val)
{
@@ -1445,7 +1813,7 @@ trailing_wide_int_storage::get_val () const
}
inline HOST_WIDE_INT *
-trailing_wide_int_storage::write_val ()
+trailing_wide_int_storage::write_val (unsigned int)
{
return m_val;
}
@@ -1486,7 +1854,7 @@ template <int N>
inline trailing_wide_int
trailing_wide_ints <N>::operator [] (unsigned int index)
{
- return trailing_wide_int_storage (m_precision, &m_len[index].len,
+ return trailing_wide_int_storage (m_precision, &m_len[index],
&m_val[index * m_max_len]);
}
@@ -1495,7 +1863,7 @@ inline typename trailing_wide_ints <N>::const_reference
trailing_wide_ints <N>::operator [] (unsigned int index) const
{
return wi::storage_ref (&m_val[index * m_max_len],
- m_len[index].len, m_precision);
+ m_len[index], m_precision);
}
/* Return how many extra bytes need to be added to the end of the
@@ -1528,6 +1896,7 @@ namespace wi
static const enum precision_type precision_type = FLEXIBLE_PRECISION;
static const bool host_dependent_precision = true;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
static unsigned int get_precision (T);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
};
@@ -1699,6 +2068,7 @@ namespace wi
precision of HOST_WIDE_INT. */
static const bool host_dependent_precision = false;
static const bool is_sign_extended = true;
+ static const bool needs_write_val_arg = false;
static unsigned int get_precision (const wi::hwi_with_prec &);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
const wi::hwi_with_prec &);
@@ -1804,8 +2174,8 @@ template <typename T1, typename T2>
inline unsigned int
wi::get_binary_precision (const T1 &x, const T2 &y)
{
- return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
- get_binary_result (x, y));
+ using res_traits = wi::int_traits <WI_BINARY_RESULT (T1, T2)>;
+ return res_traits::get_binary_precision (x, y);
}
/* Copy the contents of Y to X, but keeping X's current precision. */
@@ -1813,14 +2183,17 @@ template <typename T1, typename T2>
inline void
wi::copy (T1 &x, const T2 &y)
{
- HOST_WIDE_INT *xval = x.write_val ();
- const HOST_WIDE_INT *yval = y.get_val ();
unsigned int len = y.get_len ();
+ HOST_WIDE_INT *xval = x.write_val (len);
+ const HOST_WIDE_INT *yval = y.get_val ();
unsigned int i = 0;
do
xval[i] = yval[i];
while (++i < len);
- x.set_len (len, y.is_sign_extended);
+ /* For widest_int, write_val is called with the exact length rather
+ than an upper bound for LEN, so nothing further is needed here. */
+ if (!wi::int_traits <T1>::needs_write_val_arg)
+ x.set_len (len, y.is_sign_extended);
}
/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
@@ -2162,6 +2535,8 @@ wi::bit_not (const T &x)
{
WI_UNARY_RESULT_VAR (result, val, T, x);
WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len);
for (unsigned int i = 0; i < xi.len; ++i)
val[i] = ~xi.val[i];
result.set_len (xi.len);
@@ -2203,6 +2578,9 @@ wi::sext (const T &x, unsigned int offset)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len,
+ CEIL (offset, HOST_BITS_PER_WIDE_INT)));
if (offset <= HOST_BITS_PER_WIDE_INT)
{
val[0] = sext_hwi (xi.ulow (), offset);
@@ -2230,6 +2608,9 @@ wi::zext (const T &x, unsigned int offset)
return result;
}
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len,
+ offset / HOST_BITS_PER_WIDE_INT + 1));
/* In these cases we know that at least the top bit will be clear,
so no sign extension is necessary. */
if (offset < HOST_BITS_PER_WIDE_INT)
@@ -2259,6 +2640,9 @@ wi::set_bit (const T &x, unsigned int bit)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len,
+ bit / HOST_BITS_PER_WIDE_INT + 1));
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit);
@@ -2280,6 +2664,8 @@ wi::bswap (const T &x)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ static_assert (!result.needs_write_val_arg,
+ "bswap on widest_int makes no sense");
result.set_len (bswap_large (val, xi.val, xi.len, precision));
return result;
}
@@ -2292,6 +2678,8 @@ wi::bitreverse (const T &x)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T) xi (x, precision);
+ static_assert (!result.needs_write_val_arg,
+ "bitreverse on widest_int makes no sense");
result.set_len (bitreverse_large (val, xi.val, xi.len, precision));
return result;
}
@@ -2368,6 +2756,8 @@ wi::bit_and (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () & yi.ulow ();
@@ -2389,6 +2779,8 @@ wi::bit_and_not (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () & ~yi.ulow ();
@@ -2410,6 +2802,8 @@ wi::bit_or (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () | yi.ulow ();
@@ -2431,6 +2825,8 @@ wi::bit_or_not (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () | ~yi.ulow ();
@@ -2452,6 +2848,8 @@ wi::bit_xor (const T1 &x, const T2 &y)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len));
if (LIKELY (xi.len + yi.len == 2))
{
val[0] = xi.ulow () ^ yi.ulow ();
@@ -2472,6 +2870,8 @@ wi::add (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () + yi.ulow ();
@@ -2515,6 +2915,8 @@ wi::add (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.ulow ();
@@ -2558,6 +2960,8 @@ wi::sub (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () - yi.ulow ();
@@ -2601,6 +3005,8 @@ wi::sub (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (MAX (xi.len, yi.len) + 1);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.ulow ();
@@ -2643,6 +3049,8 @@ wi::mul (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len + yi.len + 2);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () * yi.ulow ();
@@ -2664,6 +3072,8 @@ wi::mul (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len + yi.len + 2);
result.set_len (mul_internal (val, xi.val, xi.len,
yi.val, yi.len, precision,
sgn, overflow, false));
@@ -2698,6 +3108,8 @@ wi::mul_high (const T1 &x, const T2 &y, signop sgn)
unsigned int precision = get_precision (result);
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y, precision);
+ static_assert (!result.needs_write_val_arg,
+ "mul_high on widest_int doesn't make sense");
result.set_len (mul_internal (val, xi.val, xi.len,
yi.val, yi.len, precision,
sgn, 0, true));
@@ -2716,6 +3128,12 @@ wi::div_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T1) xi (x, precision);
WIDE_INT_REF_FOR (T2) yi (y);
+ if (quotient.needs_write_val_arg)
+ quotient_val = quotient.write_val ((sgn == UNSIGNED
+ && xi.val[xi.len - 1] < 0)
+ ? CEIL (precision,
+ HOST_BITS_PER_WIDE_INT) + 1
+ : xi.len + 1);
quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
precision,
yi.val, yi.len, yi.precision,
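
The same length bound recurs in the div_floor/div_ceil/div_round/divmod/mod hunks below; a concrete case (64-bit HOST_WIDE_INT assumed) shows why it is needed:

/* With widest_int operands (so precision == WIDEST_INT_MAX_PRECISION,
   131072 here), x == -1 is stored as xi.len == 1, val[0] == -1, but
   under sgn == UNSIGNED it stands for 2^131072 - 1; dividing that by 2
   gives 2^131071 - 1, which needs 2048 blocks, so the estimate must
   grow to CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1 rather than the
   xi.len + 1 blocks reserved otherwise.  */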
@@ -2753,6 +3171,16 @@ wi::div_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2795,6 +3223,16 @@ wi::div_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2828,6 +3266,16 @@ wi::div_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2871,6 +3319,16 @@ wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2915,6 +3373,12 @@ wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (remainder.needs_write_val_arg)
+ remainder_val = remainder.write_val ((sgn == UNSIGNED
+ && xi.val[xi.len - 1] < 0)
+ ? CEIL (precision,
+ HOST_BITS_PER_WIDE_INT) + 1
+ : xi.len + 1);
divmod_internal (0, &remainder_len, remainder_val,
xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn, overflow);
@@ -2955,6 +3419,16 @@ wi::mod_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -2991,6 +3465,16 @@ wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -3017,6 +3501,16 @@ wi::mod_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
+ if (quotient.needs_write_val_arg)
+ {
+ unsigned int est_len;
+ if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
+ est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
+ else
+ est_len = xi.len + 1;
+ quotient_val = quotient.write_val (est_len);
+ remainder_val = remainder.write_val (est_len);
+ }
quotient.set_len (divmod_internal (quotient_val,
&remainder_len, remainder_val,
xi.val, xi.len, precision,
@@ -3086,12 +3580,16 @@ wi::lshift (const T1 &x, const T2 &y)
/* Handle the simple cases quickly. */
if (geu_p (yi, precision))
{
+ if (result.needs_write_val_arg)
+ val = result.write_val (1);
val[0] = 0;
result.set_len (1);
}
else
{
unsigned int shift = yi.to_uhwi ();
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len + shift / HOST_BITS_PER_WIDE_INT + 1);
/* For fixed-precision integers like offset_int and widest_int,
handle the case where the shift value is constant and the
result is a single nonnegative HWI (meaning that we don't
@@ -3130,12 +3628,23 @@ wi::lrshift (const T1 &x, const T2 &y)
/* Handle the simple cases quickly. */
if (geu_p (yi, xi.precision))
{
+ if (result.needs_write_val_arg)
+ val = result.write_val (1);
val[0] = 0;
result.set_len (1);
}
else
{
unsigned int shift = yi.to_uhwi ();
+ if (result.needs_write_val_arg)
+ {
+ unsigned int est_len = xi.len;
+ if (xi.val[xi.len - 1] < 0 && shift)
+ /* Logical right shift of a sign-extended value might need a very
+ large precision, e.g. for widest_int. */
+ est_len = CEIL (xi.precision - shift, HOST_BITS_PER_WIDE_INT) + 1;
+ val = result.write_val (est_len);
+ }
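
A concrete case of that "very large" bound (64-bit HOST_WIDE_INT, widest_int operand, so xi.precision == WIDEST_INT_MAX_PRECISION == 131072):

/* x == -1 is stored as a single block, but lrshift (x, 1) has bits
   0..131070 set and therefore needs CEIL (131072 - 1, 64) == 2048
   blocks; the estimate reserves one more than that.  */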
/* For fixed-precision integers like offset_int and widest_int,
handle the case where the shift value is constant and the
shifted value is a single nonnegative HWI (meaning that all
@@ -3171,6 +3680,8 @@ wi::arshift (const T1 &x, const T2 &y)
since the result can be no larger than that. */
WIDE_INT_REF_FOR (T1) xi (x);
WIDE_INT_REF_FOR (T2) yi (y);
+ if (result.needs_write_val_arg)
+ val = result.write_val (xi.len);
/* Handle the simple cases quickly. */
if (geu_p (yi, xi.precision))
{
@@ -3374,25 +3885,41 @@ operator % (const T1 &x, const T2 &y)
return wi::smod_trunc (x, y);
}
-template<typename T>
+void gt_ggc_mx (generic_wide_int <wide_int_storage> *) = delete;
+void gt_pch_nx (generic_wide_int <wide_int_storage> *) = delete;
+void gt_pch_nx (generic_wide_int <wide_int_storage> *,
+ gt_pointer_operator, void *) = delete;
+
+template<int N>
void
-gt_ggc_mx (generic_wide_int <T> *)
+gt_ggc_mx (generic_wide_int <fixed_wide_int_storage <N> > *)
{
}
-template<typename T>
+template<int N>
void
-gt_pch_nx (generic_wide_int <T> *)
+gt_pch_nx (generic_wide_int <fixed_wide_int_storage <N> > *)
{
}
-template<typename T>
+template<int N>
void
-gt_pch_nx (generic_wide_int <T> *, gt_pointer_operator, void *)
+gt_pch_nx (generic_wide_int <fixed_wide_int_storage <N> > *,
+ gt_pointer_operator, void *)
{
}
template<int N>
+void gt_ggc_mx (generic_wide_int <widest_int_storage <N> > *) = delete;
+
+template<int N>
+void gt_pch_nx (generic_wide_int <widest_int_storage <N> > *) = delete;
+
+template<int N>
+void gt_pch_nx (generic_wide_int <widest_int_storage <N> > *,
+ gt_pointer_operator, void *) = delete;
+
+template<int N>
void
gt_ggc_mx (trailing_wide_ints <N> *)
{
@@ -3465,7 +3992,7 @@ inline wide_int
wi::mask (unsigned int width, bool negate_p, unsigned int precision)
{
wide_int result = wide_int::create (precision);
- result.set_len (mask (result.write_val (), width, negate_p, precision));
+ result.set_len (mask (result.write_val (0), width, negate_p, precision));
return result;
}
@@ -3477,7 +4004,7 @@ wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
unsigned int precision)
{
wide_int result = wide_int::create (precision);
- result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
+ result.set_len (shifted_mask (result.write_val (0), start, width, negate_p,
precision));
return result;
}
@@ -3498,8 +4025,8 @@ wi::mask (unsigned int width, bool negate_p)
{
STATIC_ASSERT (wi::int_traits<T>::precision);
T result;
- result.set_len (mask (result.write_val (), width, negate_p,
- wi::int_traits <T>::precision));
+ result.set_len (mask (result.write_val (width / HOST_BITS_PER_WIDE_INT + 1),
+ width, negate_p, wi::int_traits <T>::precision));
return result;
}
@@ -3512,9 +4039,13 @@ wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
STATIC_ASSERT (wi::int_traits<T>::precision);
T result;
- result.set_len (shifted_mask (result.write_val (), start, width,
- negate_p,
- wi::int_traits <T>::precision));
+ unsigned int prec = wi::int_traits <T>::precision;
+ unsigned int est_len
+ = result.needs_write_val_arg
+ ? ((start + (width > prec - start ? prec - start : width))
+ / HOST_BITS_PER_WIDE_INT + 1) : 0;
+ result.set_len (shifted_mask (result.write_val (est_len), start, width,
+ negate_p, prec));
return result;
}
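
A worked example of the est_len bound above (64-bit HOST_WIDE_INT assumed, negate_p false):

/* For a widest_int mask with start == 64 and width == 64, bits 64..127
   are set, so block 1 is all ones; keeping the canonical value
   non-negative needs a zero block 2 as well, and indeed
   (64 + 64) / 64 + 1 == 3 blocks are reserved.  */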