author     Trevor Saunders <tsaunders@mozilla.com>   2014-11-11 02:31:00 +0000
committer  Trevor Saunders <tbsaunde@gcc.gnu.org>    2014-11-11 02:31:00 +0000
commit     618b7f293745ac92cc4fef113239e84f79c4d4ff (patch)
tree       45432b15918ada70945d621144d6decabaa36832
parent     43722f9fa69d4cc9a369b468552d5612674a576f (diff)
download   gcc-618b7f293745ac92cc4fef113239e84f79c4d4ff.zip
           gcc-618b7f293745ac92cc4fef113239e84f79c4d4ff.tar.gz
           gcc-618b7f293745ac92cc4fef113239e84f79c4d4ff.tar.bz2
c++ify sreal
gcc/ChangeLog:

2014-11-10  Trevor Saunders  <tsaunders@mozilla.com>

        * ipa-inline.c (edge_badness): Adjust.
        (inline_small_functions): Likewise.
        * predict.c (propagate_freq): Likewise.
        (estimate_bb_frequencies): Likewise.
        * sreal.c (sreal::dump): Rename from dump_sreal.
        (debug): Adjust.
        (copy): Remove function.
        (sreal::shift_right): Rename from sreal_sift_right.
        (sreal::normalize): Rename from normalize.
        (sreal_init): Remove function.
        (sreal::to_int): Rename from sreal_to_int.
        (sreal_compare): Remove function.
        (sreal::operator+): Rename from sreal_add.
        (sreal::operator-): Rename from sreal_sub.
        (sreal::operator*): Rename from sreal_mul.
        (sreal::operator/): Rename from sreal_div.
        * sreal.h (class sreal): Adjust.
        (inline sreal &operator+=): New operator.
        (inline sreal &operator-=): Likewise.
        (inline sreal &operator/=): Likewise.
        (inline sreal &operator*=): Likewise.
        (inline bool operator!=): Likewise.
        (inline bool operator>): Likewise.
        (inline bool operator<=): Likewise.
        (inline bool operator>=): Likewise.

From-SVN: r217332
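For readers skimming the diff, the practical effect on callers is that pointer-threading through the sreal_* helpers becomes constructors plus overloaded operators. A minimal before/after sketch (not part of the patch; the variable names prob, freq and total are invented here, but the two patterns mirror the predict.c hunks below):

    /* Before: C-style helpers writing through result pointers.  */
    sreal tmp;
    sreal_init (&tmp, prob, 0);
    sreal_mul (&tmp, &tmp, &freq);
    sreal_add (&total, &total, &tmp);

    /* After: constructor plus overloaded operators.  */
    sreal tmp (prob, 0);
    tmp *= freq;
    total += tmp;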
-rw-r--r--  gcc/ChangeLog       28
-rw-r--r--  gcc/ipa-inline.c    25
-rw-r--r--  gcc/predict.c       82
-rw-r--r--  gcc/sreal.c        487
-rw-r--r--  gcc/sreal.h        106
5 files changed, 247 insertions, 481 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 1ddd36a..2b7f3a1 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,31 @@
+2014-11-10 Trevor Saunders <tsaunders@mozilla.com>
+
+ * ipa-inline.c (edge_badness): Adjust.
+ (inline_small_functions): Likewise.
+ * predict.c (propagate_freq): Likewise.
+ (estimate_bb_frequencies): Likewise.
+ * sreal.c (sreal::dump): Rename from dump_sreal.
+ (debug): Adjust.
+ (copy): Remove function.
+ (sreal::shift_right): Rename from sreal_sift_right.
+ (sreal::normalize): Rename from normalize.
+ (sreal_init): Remove function.
+ (sreal::to_int): Rename from sreal_to_int.
+ (sreal_compare): Remove function.
+ (sreal::operator+): Rename from sreal_add.
+ (sreal::operator-): Rename from sreal_sub.
+ (sreal::operator*): Rename from sreal_mul.
+ (sreal::operator/): Rename from sreal_div.
+ * sreal.h (class sreal): Adjust.
+ (inline sreal &operator+=): New operator.
+ (inline sreal &operator-=): Likewise.
+ (inline sreal &operator/=): Likewise.
+ (inline sreal &operator*=): Likewise.
+ (inline bool operator!=): Likewise.
+ (inline bool operator>): Likewise.
+ (inline bool operator<=): Likewise.
+ (inline bool operator>=): Likewise.
+
2014-11-11 Bin Cheng <bin.cheng@arm.com>
* sched-deps.c (sched_analyze_1): Check pending list if it is not
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 534b330..5c97815 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -962,29 +962,28 @@ edge_badness (struct cgraph_edge *edge, bool dump)
else if (max_count)
{
- sreal tmp, relbenefit_real, growth_real;
int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
/* Capping edge->count to max_count. edge->count can be larger than
max_count if an inline adds new edges which increase max_count
after max_count is computed. */
gcov_type edge_count = edge->count > max_count ? max_count : edge->count;
- sreal_init (&relbenefit_real, relbenefit, 0);
- sreal_init (&growth_real, growth, 0);
+ sreal relbenefit_real (relbenefit, 0);
+ sreal growth_real (growth, 0);
/* relative_edge_count. */
- sreal_init (&tmp, edge_count, 0);
- sreal_div (&tmp, &tmp, &max_count_real);
+ sreal tmp (edge_count, 0);
+ tmp /= max_count_real;
/* relative_time_benefit. */
- sreal_mul (&tmp, &tmp, &relbenefit_real);
- sreal_div (&tmp, &tmp, &max_relbenefit_real);
+ tmp *= relbenefit_real;
+ tmp /= max_relbenefit_real;
/* growth_f_caller. */
- sreal_mul (&tmp, &tmp, &half_int_min_real);
- sreal_div (&tmp, &tmp, &growth_real);
+ tmp *= half_int_min_real;
+ tmp /= growth_real;
- badness = -1 * sreal_to_int (&tmp);
+ badness = -1 * tmp.to_int ();
if (dump)
{
@@ -1627,9 +1626,9 @@ inline_small_functions (void)
if (max_count < edge->count)
max_count = edge->count;
}
- sreal_init (&max_count_real, max_count, 0);
- sreal_init (&max_relbenefit_real, RELATIVE_TIME_BENEFIT_RANGE, 0);
- sreal_init (&half_int_min_real, INT_MAX / 2, 0);
+ max_count_real = sreal (max_count, 0);
+ max_relbenefit_real = sreal (RELATIVE_TIME_BENEFIT_RANGE, 0);
+ half_int_min_real = sreal (INT_MAX / 2, 0);
ipa_free_postorder_info ();
initialize_growth_caches ();
diff --git a/gcc/predict.c b/gcc/predict.c
index 714a88d..0215e91 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -2528,15 +2528,13 @@ propagate_freq (basic_block head, bitmap tovisit)
bb->count = bb->frequency = 0;
}
- memcpy (&BLOCK_INFO (head)->frequency, &real_one, sizeof (real_one));
+ BLOCK_INFO (head)->frequency = real_one;
last = head;
for (bb = head; bb; bb = nextbb)
{
edge_iterator ei;
- sreal cyclic_probability, frequency;
-
- memcpy (&cyclic_probability, &real_zero, sizeof (real_zero));
- memcpy (&frequency, &real_zero, sizeof (real_zero));
+ sreal cyclic_probability = real_zero;
+ sreal frequency = real_zero;
nextbb = BLOCK_INFO (bb)->next;
BLOCK_INFO (bb)->next = NULL;
@@ -2553,42 +2551,34 @@ propagate_freq (basic_block head, bitmap tovisit)
FOR_EACH_EDGE (e, ei, bb->preds)
if (EDGE_INFO (e)->back_edge)
{
- sreal_add (&cyclic_probability, &cyclic_probability,
- &EDGE_INFO (e)->back_edge_prob);
+ cyclic_probability += EDGE_INFO (e)->back_edge_prob;
}
else if (!(e->flags & EDGE_DFS_BACK))
{
- sreal tmp;
-
/* frequency += (e->probability
* BLOCK_INFO (e->src)->frequency /
REG_BR_PROB_BASE); */
- sreal_init (&tmp, e->probability, 0);
- sreal_mul (&tmp, &tmp, &BLOCK_INFO (e->src)->frequency);
- sreal_mul (&tmp, &tmp, &real_inv_br_prob_base);
- sreal_add (&frequency, &frequency, &tmp);
+ sreal tmp (e->probability, 0);
+ tmp *= BLOCK_INFO (e->src)->frequency;
+ tmp *= real_inv_br_prob_base;
+ frequency += tmp;
}
- if (sreal_compare (&cyclic_probability, &real_zero) == 0)
+ if (cyclic_probability == real_zero)
{
- memcpy (&BLOCK_INFO (bb)->frequency, &frequency,
- sizeof (frequency));
+ BLOCK_INFO (bb)->frequency = frequency;
}
else
{
- if (sreal_compare (&cyclic_probability, &real_almost_one) > 0)
- {
- memcpy (&cyclic_probability, &real_almost_one,
- sizeof (real_almost_one));
- }
+ if (cyclic_probability > real_almost_one)
+ cyclic_probability = real_almost_one;
/* BLOCK_INFO (bb)->frequency = frequency
/ (1 - cyclic_probability) */
- sreal_sub (&cyclic_probability, &real_one, &cyclic_probability);
- sreal_div (&BLOCK_INFO (bb)->frequency,
- &frequency, &cyclic_probability);
+ cyclic_probability = real_one - cyclic_probability;
+ BLOCK_INFO (bb)->frequency = frequency / cyclic_probability;
}
}
@@ -2597,16 +2587,13 @@ propagate_freq (basic_block head, bitmap tovisit)
e = find_edge (bb, head);
if (e)
{
- sreal tmp;
-
/* EDGE_INFO (e)->back_edge_prob
= ((e->probability * BLOCK_INFO (bb)->frequency)
/ REG_BR_PROB_BASE); */
- sreal_init (&tmp, e->probability, 0);
- sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency);
- sreal_mul (&EDGE_INFO (e)->back_edge_prob,
- &tmp, &real_inv_br_prob_base);
+ sreal tmp (e->probability, 0);
+ tmp *= BLOCK_INFO (bb)->frequency;
+ EDGE_INFO (e)->back_edge_prob = tmp * real_inv_br_prob_base;
}
/* Propagate to successor blocks. */
@@ -2886,13 +2873,13 @@ estimate_bb_frequencies (bool force)
if (!real_values_initialized)
{
real_values_initialized = 1;
- sreal_init (&real_zero, 0, 0);
- sreal_init (&real_one, 1, 0);
- sreal_init (&real_br_prob_base, REG_BR_PROB_BASE, 0);
- sreal_init (&real_bb_freq_max, BB_FREQ_MAX, 0);
- sreal_init (&real_one_half, 1, -1);
- sreal_div (&real_inv_br_prob_base, &real_one, &real_br_prob_base);
- sreal_sub (&real_almost_one, &real_one, &real_inv_br_prob_base);
+ real_zero = sreal (0, 0);
+ real_one = sreal (1, 0);
+ real_br_prob_base = sreal (REG_BR_PROB_BASE, 0);
+ real_bb_freq_max = sreal (BB_FREQ_MAX, 0);
+ real_one_half = sreal (1, -1);
+ real_inv_br_prob_base = real_one / real_br_prob_base;
+ real_almost_one = real_one - real_inv_br_prob_base;
}
mark_dfs_back_edges ();
@@ -2910,10 +2897,8 @@ estimate_bb_frequencies (bool force)
FOR_EACH_EDGE (e, ei, bb->succs)
{
- sreal_init (&EDGE_INFO (e)->back_edge_prob, e->probability, 0);
- sreal_mul (&EDGE_INFO (e)->back_edge_prob,
- &EDGE_INFO (e)->back_edge_prob,
- &real_inv_br_prob_base);
+ EDGE_INFO (e)->back_edge_prob = sreal (e->probability, 0);
+ EDGE_INFO (e)->back_edge_prob *= real_inv_br_prob_base;
}
}
@@ -2921,19 +2906,16 @@ estimate_bb_frequencies (bool force)
to outermost to examine frequencies for back edges. */
estimate_loops ();
- memcpy (&freq_max, &real_zero, sizeof (real_zero));
+ freq_max = real_zero;
FOR_EACH_BB_FN (bb, cfun)
- if (sreal_compare (&freq_max, &BLOCK_INFO (bb)->frequency) < 0)
- memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max));
+ if (freq_max < BLOCK_INFO (bb)->frequency)
+ freq_max = BLOCK_INFO (bb)->frequency;
- sreal_div (&freq_max, &real_bb_freq_max, &freq_max);
+ freq_max = real_bb_freq_max / freq_max;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
- sreal tmp;
-
- sreal_mul (&tmp, &BLOCK_INFO (bb)->frequency, &freq_max);
- sreal_add (&tmp, &tmp, &real_one_half);
- bb->frequency = sreal_to_int (&tmp);
+ sreal tmp = BLOCK_INFO (bb)->frequency * freq_max + real_one_half;
+ bb->frequency = tmp.to_int ();
}
free_aux_for_blocks ();
diff --git a/gcc/sreal.c b/gcc/sreal.c
index 5c429c5..efde068 100644
--- a/gcc/sreal.c
+++ b/gcc/sreal.c
@@ -28,12 +28,10 @@ along with GCC; see the file COPYING3. If not see
(for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
exp = exponent
- One HOST_WIDE_INT is used for the significant on 64-bit (and more than
- 64-bit) machines,
- otherwise two HOST_WIDE_INTs are used for the significant.
+ One uint64_t is used for the significant.
Only a half of significant bits is used (in normalized sreals) so that we do
not have problems with overflow, for example when c->sig = a->sig * b->sig.
- So the precision for 64-bit and 32-bit machines is 32-bit.
+ So the precision is 32-bit.
Invariant: The numbers are normalized before and after each call of sreal_*.
@@ -54,28 +52,18 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "sreal.h"
-static inline void copy (sreal *, sreal *);
-static inline void shift_right (sreal *, int);
-static void normalize (sreal *);
-
/* Print the content of struct sreal. */
void
-dump_sreal (FILE *file, sreal *x)
+sreal::dump (FILE *file) const
{
-#if SREAL_PART_BITS < 32
- fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
- HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
- x->sig_hi, x->sig_lo, x->exp);
-#else
- fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
-#endif
+ fprintf (file, "(%" PRIu64 " * 2^%d)", m_sig, m_exp);
}
DEBUG_FUNCTION void
debug (sreal &ref)
{
- dump_sreal (stderr, &ref);
+ ref.dump (stderr);
}
DEBUG_FUNCTION void
@@ -87,472 +75,195 @@ debug (sreal *ptr)
fprintf (stderr, "<nil>\n");
}
+/* Shift this right by S bits. Needed: 0 < S <= SREAL_BITS.
+ When the most significant bit shifted out is 1, add 1 to this (rounding).
+ */
-/* Copy the sreal number. */
-
-static inline void
-copy (sreal *r, sreal *a)
-{
-#if SREAL_PART_BITS < 32
- r->sig_lo = a->sig_lo;
- r->sig_hi = a->sig_hi;
-#else
- r->sig = a->sig;
-#endif
- r->exp = a->exp;
-}
-
-/* Shift X right by S bits. Needed: 0 < S <= SREAL_BITS.
- When the most significant bit shifted out is 1, add 1 to X (rounding). */
-
-static inline void
-shift_right (sreal *x, int s)
+void
+sreal::shift_right (int s)
{
gcc_assert (s > 0);
gcc_assert (s <= SREAL_BITS);
/* Exponent should never be so large because shift_right is used only by
sreal_add and sreal_sub and thus the number cannot be shifted out from
exponent range. */
- gcc_assert (x->exp + s <= SREAL_MAX_EXP);
+ gcc_assert (m_exp + s <= SREAL_MAX_EXP);
- x->exp += s;
+ m_exp += s;
-#if SREAL_PART_BITS < 32
- if (s > SREAL_PART_BITS)
- {
- s -= SREAL_PART_BITS;
- x->sig_hi += (uhwi) 1 << (s - 1);
- x->sig_lo = x->sig_hi >> s;
- x->sig_hi = 0;
- }
- else
- {
- x->sig_lo += (uhwi) 1 << (s - 1);
- if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
- {
- x->sig_hi++;
- x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
- }
- x->sig_lo >>= s;
- x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
- x->sig_hi >>= s;
- }
-#else
- x->sig += (uhwi) 1 << (s - 1);
- x->sig >>= s;
-#endif
+ m_sig += (uint64_t) 1 << (s - 1);
+ m_sig >>= s;
}
-/* Normalize *X. */
+/* Normalize *this. */
-static void
-normalize (sreal *x)
+void
+sreal::normalize ()
{
-#if SREAL_PART_BITS < 32
- int shift;
- HOST_WIDE_INT mask;
-
- if (x->sig_lo == 0 && x->sig_hi == 0)
+ if (m_sig == 0)
{
- x->exp = -SREAL_MAX_EXP;
- }
- else if (x->sig_hi < SREAL_MIN_SIG)
- {
- if (x->sig_hi == 0)
- {
- /* Move lower part of significant to higher part. */
- x->sig_hi = x->sig_lo;
- x->sig_lo = 0;
- x->exp -= SREAL_PART_BITS;
- }
- shift = 0;
- while (x->sig_hi < SREAL_MIN_SIG)
- {
- x->sig_hi <<= 1;
- x->exp--;
- shift++;
- }
- /* Check underflow. */
- if (x->exp < -SREAL_MAX_EXP)
- {
- x->exp = -SREAL_MAX_EXP;
- x->sig_hi = 0;
- x->sig_lo = 0;
- }
- else if (shift)
- {
- mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
- x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
- x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
- }
+ m_exp = -SREAL_MAX_EXP;
}
- else if (x->sig_hi > SREAL_MAX_SIG)
+ else if (m_sig < SREAL_MIN_SIG)
{
- unsigned HOST_WIDE_INT tmp = x->sig_hi;
-
- /* Find out how many bits will be shifted. */
- shift = 0;
do
{
- tmp >>= 1;
- shift++;
- }
- while (tmp > SREAL_MAX_SIG);
-
- /* Round the number. */
- x->sig_lo += (uhwi) 1 << (shift - 1);
-
- x->sig_lo >>= shift;
- x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
- << (SREAL_PART_BITS - shift));
- x->sig_hi >>= shift;
- x->exp += shift;
- if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
- {
- x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
- x->sig_hi++;
- if (x->sig_hi > SREAL_MAX_SIG)
- {
- /* x->sig_hi was SREAL_MAX_SIG before increment
- so now last bit is zero. */
- x->sig_hi >>= 1;
- x->sig_lo >>= 1;
- x->exp++;
- }
+ m_sig <<= 1;
+ m_exp--;
}
-
- /* Check overflow. */
- if (x->exp > SREAL_MAX_EXP)
- {
- x->exp = SREAL_MAX_EXP;
- x->sig_hi = SREAL_MAX_SIG;
- x->sig_lo = SREAL_MAX_SIG;
- }
- }
-#else
- if (x->sig == 0)
- {
- x->exp = -SREAL_MAX_EXP;
- }
- else if (x->sig < SREAL_MIN_SIG)
- {
- do
- {
- x->sig <<= 1;
- x->exp--;
- }
- while (x->sig < SREAL_MIN_SIG);
+ while (m_sig < SREAL_MIN_SIG);
/* Check underflow. */
- if (x->exp < -SREAL_MAX_EXP)
+ if (m_exp < -SREAL_MAX_EXP)
{
- x->exp = -SREAL_MAX_EXP;
- x->sig = 0;
+ m_exp = -SREAL_MAX_EXP;
+ m_sig = 0;
}
}
- else if (x->sig > SREAL_MAX_SIG)
+ else if (m_sig > SREAL_MAX_SIG)
{
int last_bit;
do
{
- last_bit = x->sig & 1;
- x->sig >>= 1;
- x->exp++;
+ last_bit = m_sig & 1;
+ m_sig >>= 1;
+ m_exp++;
}
- while (x->sig > SREAL_MAX_SIG);
+ while (m_sig > SREAL_MAX_SIG);
/* Round the number. */
- x->sig += last_bit;
- if (x->sig > SREAL_MAX_SIG)
+ m_sig += last_bit;
+ if (m_sig > SREAL_MAX_SIG)
{
- x->sig >>= 1;
- x->exp++;
+ m_sig >>= 1;
+ m_exp++;
}
/* Check overflow. */
- if (x->exp > SREAL_MAX_EXP)
+ if (m_exp > SREAL_MAX_EXP)
{
- x->exp = SREAL_MAX_EXP;
- x->sig = SREAL_MAX_SIG;
+ m_exp = SREAL_MAX_EXP;
+ m_sig = SREAL_MAX_SIG;
}
}
-#endif
-}
-
-/* Set *R to SIG * 2 ^ EXP. Return R. */
-
-sreal *
-sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
-{
-#if SREAL_PART_BITS < 32
- r->sig_lo = 0;
- r->sig_hi = sig;
- r->exp = exp - 16;
-#else
- r->sig = sig;
- r->exp = exp;
-#endif
- normalize (r);
- return r;
}
-/* Return integer value of *R. */
+/* Return integer value of *this. */
-HOST_WIDE_INT
-sreal_to_int (sreal *r)
+int64_t
+sreal::to_int () const
{
-#if SREAL_PART_BITS < 32
- if (r->exp <= -SREAL_BITS)
+ if (m_exp <= -SREAL_BITS)
return 0;
- if (r->exp >= 0)
- return MAX_HOST_WIDE_INT;
- return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
-#else
- if (r->exp <= -SREAL_BITS)
- return 0;
- if (r->exp >= SREAL_PART_BITS)
- return MAX_HOST_WIDE_INT;
- if (r->exp > 0)
- return r->sig << r->exp;
- if (r->exp < 0)
- return r->sig >> -r->exp;
- return r->sig;
-#endif
-}
-
-/* Compare *A and *B. Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B. */
-
-int
-sreal_compare (sreal *a, sreal *b)
-{
- if (a->exp > b->exp)
- return 1;
- if (a->exp < b->exp)
- return -1;
-#if SREAL_PART_BITS < 32
- if (a->sig_hi > b->sig_hi)
- return 1;
- if (a->sig_hi < b->sig_hi)
- return -1;
- if (a->sig_lo > b->sig_lo)
- return 1;
- if (a->sig_lo < b->sig_lo)
- return -1;
-#else
- if (a->sig > b->sig)
- return 1;
- if (a->sig < b->sig)
- return -1;
-#endif
- return 0;
+ if (m_exp >= SREAL_PART_BITS)
+ return INT64_MAX;
+ if (m_exp > 0)
+ return m_sig << m_exp;
+ if (m_exp < 0)
+ return m_sig >> -m_exp;
+ return m_sig;
}
-/* *R = *A + *B. Return R. */
+/* Return *this + other. */
-sreal *
-sreal_add (sreal *r, sreal *a, sreal *b)
+sreal
+sreal::operator+ (const sreal &other) const
{
int dexp;
- sreal tmp;
- sreal *bb;
+ sreal tmp, r;
+ const sreal *a_p = this, *b_p = &other, *bb;
- if (sreal_compare (a, b) < 0)
+ if (*a_p < *b_p)
{
- sreal *swap;
- swap = a;
- a = b;
- b = swap;
+ const sreal *swap;
+ swap = a_p;
+ a_p = b_p;
+ b_p = swap;
}
- dexp = a->exp - b->exp;
- r->exp = a->exp;
+ dexp = a_p->m_exp - b_p->m_exp;
+ r.m_exp = a_p->m_exp;
if (dexp > SREAL_BITS)
{
-#if SREAL_PART_BITS < 32
- r->sig_hi = a->sig_hi;
- r->sig_lo = a->sig_lo;
-#else
- r->sig = a->sig;
-#endif
+ r.m_sig = a_p->m_sig;
return r;
}
if (dexp == 0)
- bb = b;
+ bb = b_p;
else
{
- copy (&tmp, b);
- shift_right (&tmp, dexp);
+ tmp = *b_p;
+ tmp.shift_right (dexp);
bb = &tmp;
}
-#if SREAL_PART_BITS < 32
- r->sig_hi = a->sig_hi + bb->sig_hi;
- r->sig_lo = a->sig_lo + bb->sig_lo;
- if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
- {
- r->sig_hi++;
- r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
- }
-#else
- r->sig = a->sig + bb->sig;
-#endif
- normalize (r);
+ r.m_sig = a_p->m_sig + bb->m_sig;
+ r.normalize ();
return r;
}
-/* *R = *A - *B. Return R. */
+/* Return *this - other. */
-sreal *
-sreal_sub (sreal *r, sreal *a, sreal *b)
+sreal
+sreal::operator- (const sreal &other) const
{
int dexp;
- sreal tmp;
- sreal *bb;
+ sreal tmp, r;
+ const sreal *bb;
- gcc_assert (sreal_compare (a, b) >= 0);
+ gcc_assert (*this >= other);
- dexp = a->exp - b->exp;
- r->exp = a->exp;
+ dexp = m_exp - other.m_exp;
+ r.m_exp = m_exp;
if (dexp > SREAL_BITS)
{
-#if SREAL_PART_BITS < 32
- r->sig_hi = a->sig_hi;
- r->sig_lo = a->sig_lo;
-#else
- r->sig = a->sig;
-#endif
+ r.m_sig = m_sig;
return r;
}
if (dexp == 0)
- bb = b;
+ bb = &other;
else
{
- copy (&tmp, b);
- shift_right (&tmp, dexp);
+ tmp = other;
+ tmp.shift_right (dexp);
bb = &tmp;
}
-#if SREAL_PART_BITS < 32
- if (a->sig_lo < bb->sig_lo)
- {
- r->sig_hi = a->sig_hi - bb->sig_hi - 1;
- r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
- }
- else
- {
- r->sig_hi = a->sig_hi - bb->sig_hi;
- r->sig_lo = a->sig_lo - bb->sig_lo;
- }
-#else
- r->sig = a->sig - bb->sig;
-#endif
- normalize (r);
+ r.m_sig = m_sig - bb->m_sig;
+ r.normalize ();
return r;
}
-/* *R = *A * *B. Return R. */
+/* Return *this * other. */
-sreal *
-sreal_mul (sreal *r, sreal *a, sreal *b)
+sreal
+sreal::operator* (const sreal &other) const
{
-#if SREAL_PART_BITS < 32
- if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
+ sreal r;
+ if (m_sig < SREAL_MIN_SIG || other.m_sig < SREAL_MIN_SIG)
{
- r->sig_lo = 0;
- r->sig_hi = 0;
- r->exp = -SREAL_MAX_EXP;
+ r.m_sig = 0;
+ r.m_exp = -SREAL_MAX_EXP;
}
else
{
- unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
- if (sreal_compare (a, b) < 0)
- {
- sreal *swap;
- swap = a;
- a = b;
- b = swap;
- }
-
- r->exp = a->exp + b->exp + SREAL_PART_BITS;
-
- tmp1 = a->sig_lo * b->sig_lo;
- tmp2 = a->sig_lo * b->sig_hi;
- tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);
-
- r->sig_hi = a->sig_hi * b->sig_hi;
- r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
- tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
- tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
- tmp1 = tmp2 + tmp3;
-
- r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
- r->sig_hi += tmp1 >> SREAL_PART_BITS;
-
- normalize (r);
- }
-#else
- if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
- {
- r->sig = 0;
- r->exp = -SREAL_MAX_EXP;
+ r.m_sig = m_sig * other.m_sig;
+ r.m_exp = m_exp + other.m_exp;
+ r.normalize ();
}
- else
- {
- r->sig = a->sig * b->sig;
- r->exp = a->exp + b->exp;
- normalize (r);
- }
-#endif
return r;
}
-/* *R = *A / *B. Return R. */
+/* Return *this / other. */
-sreal *
-sreal_div (sreal *r, sreal *a, sreal *b)
+sreal
+sreal::operator/ (const sreal &other) const
{
-#if SREAL_PART_BITS < 32
- unsigned HOST_WIDE_INT tmp, tmp1, tmp2;
-
- gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
- if (a->sig_hi < SREAL_MIN_SIG)
- {
- r->sig_hi = 0;
- r->sig_lo = 0;
- r->exp = -SREAL_MAX_EXP;
- }
- else
- {
- /* Since division by the whole number is pretty ugly to write
- we are dividing by first 3/4 of bits of number. */
-
- tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
- tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
- + (b->sig_lo >> (SREAL_PART_BITS / 2)));
- if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
- tmp2++;
-
- r->sig_lo = 0;
- tmp = tmp1 / tmp2;
- tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
- r->sig_hi = tmp << SREAL_PART_BITS;
-
- tmp = tmp1 / tmp2;
- tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
- r->sig_hi += tmp << (SREAL_PART_BITS / 2);
-
- tmp = tmp1 / tmp2;
- r->sig_hi += tmp;
-
- r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
- normalize (r);
- }
-#else
- gcc_assert (b->sig != 0);
- r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
- r->exp = a->exp - b->exp - SREAL_PART_BITS;
- normalize (r);
-#endif
+ gcc_assert (other.m_sig != 0);
+ sreal r;
+ r.m_sig = (m_sig << SREAL_PART_BITS) / other.m_sig;
+ r.m_exp = m_exp - other.m_exp - SREAL_PART_BITS;
+ r.normalize ();
return r;
}
diff --git a/gcc/sreal.h b/gcc/sreal.h
index 08d577a..461e28b 100644
--- a/gcc/sreal.h
+++ b/gcc/sreal.h
@@ -21,46 +21,92 @@ along with GCC; see the file COPYING3. If not see
#define GCC_SREAL_H
/* SREAL_PART_BITS has to be an even number. */
-#if (HOST_BITS_PER_WIDE_INT / 2) % 2 == 1
-#define SREAL_PART_BITS (HOST_BITS_PER_WIDE_INT / 2 - 1)
-#else
-#define SREAL_PART_BITS (HOST_BITS_PER_WIDE_INT / 2)
-#endif
-
-#define uhwi unsigned HOST_WIDE_INT
-#define MAX_HOST_WIDE_INT (((uhwi) 1 << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
+#define SREAL_PART_BITS 32
-#define SREAL_MIN_SIG ((uhwi) 1 << (SREAL_PART_BITS - 1))
-#define SREAL_MAX_SIG (((uhwi) 1 << SREAL_PART_BITS) - 1)
+#define SREAL_MIN_SIG ((uint64_t) 1 << (SREAL_PART_BITS - 1))
+#define SREAL_MAX_SIG (((uint64_t) 1 << SREAL_PART_BITS) - 1)
#define SREAL_MAX_EXP (INT_MAX / 4)
-#if SREAL_PART_BITS < 32
-#define SREAL_BITS (SREAL_PART_BITS * 2)
-#else
#define SREAL_BITS SREAL_PART_BITS
-#endif
/* Structure for holding a simple real number. */
-struct sreal
+class sreal
{
-#if SREAL_PART_BITS < 32
- unsigned HOST_WIDE_INT sig_lo; /* Significant (lower part). */
- unsigned HOST_WIDE_INT sig_hi; /* Significant (higher part). */
-#else
- unsigned HOST_WIDE_INT sig; /* Significant. */
-#endif
- signed int exp; /* Exponent. */
+public:
+ /* Construct an uninitialized sreal. */
+ sreal () : m_sig (-1), m_exp (-1) {}
+
+ /* Construct a sreal. */
+ sreal (uint64_t sig, int exp) : m_sig (sig), m_exp (exp) { normalize (); }
+
+ void dump (FILE *) const;
+ int64_t to_int () const;
+
+ sreal operator+ (const sreal &other) const;
+ sreal operator- (const sreal &other) const;
+ sreal operator* (const sreal &other) const;
+ sreal operator/ (const sreal &other) const;
+
+ bool operator< (const sreal &other) const
+ {
+ return m_exp < other.m_exp
+ || (m_exp == other.m_exp && m_sig < other.m_sig);
+ }
+
+ bool operator== (const sreal &other) const
+ {
+ return m_exp == other.m_exp && m_sig == other.m_sig;
+ }
+
+private:
+ void normalize ();
+ void shift_right (int amount);
+
+ uint64_t m_sig; /* Significant. */
+ signed int m_exp; /* Exponent. */
};
-extern void dump_sreal (FILE *, sreal *);
extern void debug (sreal &ref);
extern void debug (sreal *ptr);
-extern sreal *sreal_init (sreal *, unsigned HOST_WIDE_INT, signed int);
-extern HOST_WIDE_INT sreal_to_int (sreal *);
-extern int sreal_compare (sreal *, sreal *);
-extern sreal *sreal_add (sreal *, sreal *, sreal *);
-extern sreal *sreal_sub (sreal *, sreal *, sreal *);
-extern sreal *sreal_mul (sreal *, sreal *, sreal *);
-extern sreal *sreal_div (sreal *, sreal *, sreal *);
+
+inline sreal &operator+= (sreal &a, const sreal &b)
+{
+ return a = a + b;
+}
+
+inline sreal &operator-= (sreal &a, const sreal &b)
+{
+ return a = a - b;
+}
+
+inline sreal &operator/= (sreal &a, const sreal &b)
+{
+ return a = a / b;
+}
+
+inline sreal &operator*= (sreal &a, const sreal &b)
+{
+ return a = a * b;
+}
+
+inline bool operator!= (const sreal &a, const sreal &b)
+{
+ return !(a == b);
+}
+
+inline bool operator> (const sreal &a, const sreal &b)
+{
+ return !(a == b || a < b);
+}
+
+inline bool operator<= (const sreal &a, const sreal &b)
+{
+ return a < b || a == b;
+}
+
+inline bool operator>= (const sreal &a, const sreal &b)
+{
+ return a == b || a > b;
+}
#endif
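
As a closing aside, the arithmetic above is easier to follow with a toy model of the representation described at the top of sreal.c: a value is m_sig * 2^m_exp, and normalization keeps the significand between SREAL_MIN_SIG and SREAL_MAX_SIG, which is why operator* can simply multiply the 32-bit significands inside a uint64_t and why operator< can compare (exp, sig) lexicographically. The sketch below is illustrative only; it is not GCC code, the toy_real type, the TOY_* constants and value () are invented, and the rounding plus overflow/underflow clamping done by sreal::normalize are omitted.

/* Toy sketch, not part of GCC: models the normalized sig * 2^exp
   representation used by sreal.  */
#include <cstdint>
#include <cstdio>
#include <cmath>

static const uint64_t TOY_MIN_SIG = (uint64_t) 1 << 31;        /* like SREAL_MIN_SIG */
static const uint64_t TOY_MAX_SIG = ((uint64_t) 1 << 32) - 1;  /* like SREAL_MAX_SIG */

struct toy_real
{
  uint64_t sig;  /* significand, kept in [TOY_MIN_SIG, TOY_MAX_SIG] */
  int exp;       /* exponent */

  toy_real (uint64_t s, int e) : sig (s), exp (e) { normalize (); }

  void normalize ()
  {
    if (sig == 0)
      return;
    while (sig < TOY_MIN_SIG)   /* too small: scale the significand up */
      {
        sig <<= 1;
        exp--;
      }
    while (sig > TOY_MAX_SIG)   /* too large: drop low bits (no rounding here) */
      {
        sig >>= 1;
        exp++;
      }
  }

  /* Both significands fit in 32 bits, so the product fits in 64 bits;
     this is the overflow argument made in the sreal.c comment.  */
  toy_real operator* (const toy_real &other) const
  { return toy_real (sig * other.sig, exp + other.exp); }

  /* Correct only because both operands are normalized.  */
  bool operator< (const toy_real &other) const
  { return exp < other.exp || (exp == other.exp && sig < other.sig); }

  double value () const { return std::ldexp ((double) sig, exp); }
};

int
main ()
{
  toy_real three (3, 0);          /* 3 * 2^0 */
  toy_real half (1, -1);          /* 1 * 2^-1, like real_one_half in predict.c */
  toy_real product = three * half;
  printf ("%g %d\n", product.value (), half < product);  /* prints "1.5 1" */
  return 0;
}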