author     Johannes Singler <singler@ira.uka.de>    2009-09-16 13:50:17 +0000
committer  Johannes Singler <singler@gcc.gnu.org>   2009-09-16 13:50:17 +0000
commit     54384f7f0ac2aae9b2da7cf32e22f8d3418fa836 (patch)
tree       c9d0da1d3abba12bdb115d975ce34cdddd306255
parent     e038c37bb97c516378e39af6b04625dd6751ec00 (diff)
base.h (_EqualFromLess): Correct uglification of member variables and method arguments.
2009-09-16  Johannes Singler  <singler@ira.uka.de>

        * include/parallel/base.h (_EqualFromLess):
        Correct uglification of member variables and method arguments.
        * include/parallel/balanced_quicksort.h (_QSBThreadLocal): Likewise.
        * include/parallel/find_selectors.h (__find_first_of_selector):
        Likewise.
        * include/parallel/iterator.h (_IteratorTriple): Likewise.
        * include/parallel/multiseq_selection.h
        (_Lexicographic, _LexicographicReverse): Likewise.
        * include/parallel/multiway_mergesort.h (_Piece, _PMWMSSortingData):
        Likewise.
        * include/parallel/random_number.h (_RandomNumber): Likewise.
        * include/parallel/random_shuffle.h (_DRandomShufflingGlobalData):
        Likewise.
        * include/parallel/set_operations.h (__symmetric_difference_func,
        __difference_func, __intersection_func, __union_func,
        parallel_set_union, parallel_set_intersection,
        parallel_set_difference, parallel_set_symmetric_difference):
        Likewise.
        * include/parallel/tags.h (parallel_tag): Likewise.
        * include/parallel/workstealing.h (_Job): Likewise.
        * include/parallel/multiway_merge.h
        (__multiway_merge_k_variant_sentinel_switch:operator())
        correct uglification of _*LoserTree*.
        * include/parallel/losertree.h (_*LoserTree*): Likewise; correct
        uglification of member variables and method arguments.
        * include/parallel/par_loop.h: Correct uglification of
        finish_iterator.
        * include/parallel/for_each_selectors.h: Likewise.
        * include/parallel/omp_loop.h: Likewise.
        * include/parallel/algo.h: Likewise; uglify c_rand_number.

From-SVN: r151754
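For context, the "uglification" this entry refers to is the libstdc++ internal naming convention: data members carry an _M_ prefix, while function parameters and other local names use a leading double underscore; both forms are identifiers reserved to the implementation, so user macros cannot clash with them. A minimal before/after illustration with hypothetical names (not code from this patch):

#include <cassert>

// Hypothetical class, for illustration only: the member is wrongly named
// in the double-underscore style that the convention reserves for
// parameters and locals.
struct example_before
{
  int __count;                          // looks like a parameter name
  example_before(int _count) : __count(_count) { }
};

// The style this patch enforces: members get _M_*, parameters get __*.
struct _ExampleAfter
{
  int _M_count;
  _ExampleAfter(int __count) : _M_count(__count) { }
};

int main()
{
  _ExampleAfter e(42);
  assert(e._M_count == 42);
  return 0;
}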
-rw-r--r--   libstdc++-v3/ChangeLog                               |  31
-rw-r--r--   libstdc++-v3/include/parallel/algo.h                 |   8
-rw-r--r--   libstdc++-v3/include/parallel/balanced_quicksort.h   |   6
-rw-r--r--   libstdc++-v3/include/parallel/base.h                 |   6
-rw-r--r--   libstdc++-v3/include/parallel/find_selectors.h       |  12
-rw-r--r--   libstdc++-v3/include/parallel/for_each_selectors.h   |   4
-rw-r--r--   libstdc++-v3/include/parallel/iterator.h             |  44
-rw-r--r--   libstdc++-v3/include/parallel/losertree.h            | 568
-rw-r--r--   libstdc++-v3/include/parallel/multiseq_selection.h   |  16
-rw-r--r--   libstdc++-v3/include/parallel/multiway_merge.h       |   2
-rw-r--r--   libstdc++-v3/include/parallel/multiway_mergesort.h   |  60
-rw-r--r--   libstdc++-v3/include/parallel/omp_loop.h             |   2
-rw-r--r--   libstdc++-v3/include/parallel/par_loop.h             |   2
-rw-r--r--   libstdc++-v3/include/parallel/random_number.h        |  26
-rw-r--r--   libstdc++-v3/include/parallel/random_shuffle.h       |  28
-rw-r--r--   libstdc++-v3/include/parallel/set_operations.h       |  70
-rw-r--r--   libstdc++-v3/include/parallel/tags.h                 |  12
-rw-r--r--   libstdc++-v3/include/parallel/workstealing.h         |  52
18 files changed, 490 insertions, 459 deletions
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 600e2a8..ce55b2f 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,5 +1,36 @@
2009-09-16 Johannes Singler <singler@ira.uka.de>
+ * include/parallel/base.h (_EqualFromLess):
+ Correct uglification of member variables and method arguments.
+ * include/parallel/balanced_quicksort.h (_QSBThreadLocal): Likewise.
+ * include/parallel/find_selectors.h (__find_first_of_selector):
+ Likewise.
+ * include/parallel/iterator.h (_IteratorTriple): Likewise.
+ * include/parallel/multiseq_selection.h
+ (_Lexicographic, _LexicographicReverse): Likewise.
+ * include/parallel/multiway_mergesort.h (_Piece, _PMWMSSortingData):
+ Likewise.
+ * include/parallel/random_number.h (_RandomNumber): Likewise.
+ * include/parallel/random_shuffle.h (_DRandomShufflingGlobalData):
+ Likewise.
+ * include/parallel/set_operations.h (__symmetric_difference_func,
+ __difference_func, __intersection_func, __union_func,
+ parallel_set_union, parallel_set_intersection, parallel_set_difference,
+ parallel_set_symmetric_difference): Likewise.
+ * include/parallel/tags.h (parallel_tag): Likewise.
+ * include/parallel/workstealing.h (_Job): Likewise.
+ * include/parallel/multiway_merge.h
+ (__multiway_merge_k_variant_sentinel_switch:operator())
+ correct uglification of _*LoserTree*.
+ * include/parallel/losertree.h (_*LoserTree*): Likewise; correct
+ uglification of member variables and method arguments.
+ * include/parallel/par_loop.h: Correct uglification of finish_iterator.
+ * include/parallel/for_each_selectors.h: Likewise.
+ * include/parallel/omp_loop.h: Likewise.
+ * include/parallel/algo.h: Likewise; uglify c_rand_number.
+
+2009-09-16 Johannes Singler <singler@ira.uka.de>
+
* include/parallel/base.h (_PseudoSequenceIterator, _PseudoSequence):
Replace redundant _Self.
* include/parallel/iterator.h (_IteratorPair, _IteratorTriple):
diff --git a/libstdc++-v3/include/parallel/algo.h b/libstdc++-v3/include/parallel/algo.h
index cb04221..a94962d 100644
--- a/libstdc++-v3/include/parallel/algo.h
+++ b/libstdc++-v3/include/parallel/algo.h
@@ -1229,7 +1229,7 @@ namespace __parallel
unary_op, __functionality,
__gnu_parallel::_DummyReduct(),
__dummy, __dummy, -1, __parallelism_tag);
- return __functionality.finish_iterator;
+ return __functionality._M_finish_iterator;
}
else
return transform(__begin, __end, __result, unary_op,
@@ -1322,7 +1322,7 @@ namespace __parallel
__gnu_parallel::_DummyReduct(),
__dummy, __dummy, -1,
__parallelism_tag);
- return __functionality.finish_iterator;
+ return __functionality._M_finish_iterator;
}
else
return transform(__begin1, __end1, __begin2, __result, __binary_op,
@@ -1629,7 +1629,7 @@ namespace __parallel
/** @brief Functor wrapper for std::rand(). */
template<typename _MustBeInt = int>
- struct c_rand_number
+ struct _CRandNumber
{
int
operator()(int __limit)
@@ -1641,7 +1641,7 @@ namespace __parallel
inline void
random_shuffle(_RAIter __begin, _RAIter __end)
{
- c_rand_number<> __r;
+ _CRandNumber<> __r;
// Parallelization still possible.
__gnu_parallel::random_shuffle(__begin, __end, __r);
}
diff --git a/libstdc++-v3/include/parallel/balanced_quicksort.h b/libstdc++-v3/include/parallel/balanced_quicksort.h
index 2e93914..6a6f6a2 100644
--- a/libstdc++-v3/include/parallel/balanced_quicksort.h
+++ b/libstdc++-v3/include/parallel/balanced_quicksort.h
@@ -75,7 +75,7 @@ template<typename _RAIter>
_RestrictedBoundedConcurrentQueue<_Piece> _M_leftover_parts;
/** @brief Number of threads involved in this algorithm. */
- _ThreadIndex __num_threads;
+ _ThreadIndex _M_num_threads;
/** @brief Pointer to a counter of elements left over to sort. */
volatile _DifferenceType* _M_elements_leftover;
@@ -250,7 +250,7 @@ template<typename _RAIter, typename _Compare>
_Settings::get().sort_qsb_base_case_maximal_n;
if (__base_case_n < 2)
__base_case_n = 2;
- _ThreadIndex __num_threads = __tl.__num_threads;
+ _ThreadIndex __num_threads = __tl._M_num_threads;
// Every thread has its own random number generator.
_RandomNumber __rng(__iam + 1);
@@ -451,7 +451,7 @@ template<typename _RAIter, typename _Compare>
for (int __i = 0; __i < __num_threads; ++__i)
{
__tls[__i]->_M_elements_leftover = &_M_elements_leftover;
- __tls[__i]->__num_threads = __num_threads;
+ __tls[__i]->_M_num_threads = __num_threads;
__tls[__i]->_M_global = std::make_pair(__begin, __end);
// Just in case nothing is left to assign.
diff --git a/libstdc++-v3/include/parallel/base.h b/libstdc++-v3/include/parallel/base.h
index 5edc2138..80232bc 100644
--- a/libstdc++-v3/include/parallel/base.h
+++ b/libstdc++-v3/include/parallel/base.h
@@ -159,14 +159,14 @@ template<typename _Compare, typename _T1, typename _T2>
class _EqualFromLess : public std::binary_function<_T1, _T2, bool>
{
private:
- _Compare& __comp;
+ _Compare& _M_comp;
public:
- _EqualFromLess(_Compare& _comp) : __comp(_comp) { }
+ _EqualFromLess(_Compare& __comp) : _M_comp(__comp) { }
bool operator()(const _T1& __a, const _T2& __b)
{
- return !__comp(__a, __b) && !__comp(__b, __a);
+ return !_M_comp(__a, __b) && !_M_comp(__b, __a);
}
};
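As context for the base.h hunk above: _EqualFromLess derives an equivalence test from a strict-weak-ordering comparator, treating two values as equal exactly when neither compares less than the other. A standalone sketch with hypothetical names, mirroring the renamed code:

#include <cassert>
#include <functional>

// Sketch only; the class name is hypothetical, not the library's.
template<typename _Compare, typename _T1, typename _T2>
  struct EqualFromLessSketch
  {
    _Compare& _M_comp;                  // stored comparator, _M_ member style

    explicit EqualFromLessSketch(_Compare& __comp) : _M_comp(__comp) { }

    bool
    operator()(const _T1& __a, const _T2& __b) const
    { return !_M_comp(__a, __b) && !_M_comp(__b, __a); }  // neither is less
  };

int main()
{
  std::less<int> lt;
  EqualFromLessSketch<std::less<int>, int, int> eq(lt);
  assert(eq(3, 3));
  assert(!eq(3, 4));
  return 0;
}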
diff --git a/libstdc++-v3/include/parallel/find_selectors.h b/libstdc++-v3/include/parallel/find_selectors.h
index 8f6db75..8dda9c6 100644
--- a/libstdc++-v3/include/parallel/find_selectors.h
+++ b/libstdc++-v3/include/parallel/find_selectors.h
@@ -151,11 +151,11 @@ namespace __gnu_parallel
template<typename _ForwardIterator>
struct __find_first_of_selector : public __generic_find_selector
{
- _ForwardIterator __begin;
- _ForwardIterator __end;
+ _ForwardIterator _M_begin;
+ _ForwardIterator _M_end;
explicit __find_first_of_selector(_ForwardIterator __begin, _ForwardIterator __end)
- : __begin(__begin), __end(__end) { }
+ : _M_begin(__begin), _M_end(__end) { }
/** @brief Test on one __position.
* @param __i1 _Iterator on first sequence.
@@ -166,8 +166,8 @@ namespace __gnu_parallel
bool
operator()(_RAIter1 __i1, _RAIter2 __i2, _Pred __pred)
{
- for (_ForwardIterator __pos_in_candidates = __begin;
- __pos_in_candidates != __end; ++__pos_in_candidates)
+ for (_ForwardIterator __pos_in_candidates = _M_begin;
+ __pos_in_candidates != _M_end; ++__pos_in_candidates)
if (__pred(*__i1, *__pos_in_candidates))
return true;
return false;
@@ -184,7 +184,7 @@ namespace __gnu_parallel
_M_sequential_algorithm(_RAIter1 __begin1,
_RAIter1 __end1,
_RAIter2 __begin2, _Pred __pred)
- { return std::make_pair(find_first_of(__begin1, __end1, __begin, __end, __pred,
+ { return std::make_pair(find_first_of(__begin1, __end1, _M_begin, _M_end, __pred,
sequential_tag()), __begin2); }
};
}
diff --git a/libstdc++-v3/include/parallel/for_each_selectors.h b/libstdc++-v3/include/parallel/for_each_selectors.h
index ed8728d..ae12c94 100644
--- a/libstdc++-v3/include/parallel/for_each_selectors.h
+++ b/libstdc++-v3/include/parallel/for_each_selectors.h
@@ -45,7 +45,7 @@ namespace __gnu_parallel
/** @brief _Iterator on last element processed; needed for some
* algorithms (e. g. std::transform()).
*/
- _It finish_iterator;
+ _It _M_finish_iterator;
};
@@ -124,7 +124,7 @@ namespace __gnu_parallel
bool
operator()(_Op& __o, _It __i)
{
- *__i.__third = __o(*__i.__first, *__i.__second);
+ *__i._M_third = __o(*__i._M_first, *__i._M_second);
return true;
}
};
diff --git a/libstdc++-v3/include/parallel/iterator.h b/libstdc++-v3/include/parallel/iterator.h
index 0b7cbf2..c49ade2 100644
--- a/libstdc++-v3/include/parallel/iterator.h
+++ b/libstdc++-v3/include/parallel/iterator.h
@@ -125,70 +125,70 @@ namespace __gnu_parallel
typedef _IteratorTriple* pointer;
typedef _IteratorTriple& reference;
- _Iterator1 __first;
- _Iterator2 __second;
- _Iterator3 __third;
+ _Iterator1 _M_first;
+ _Iterator2 _M_second;
+ _Iterator3 _M_third;
_IteratorTriple() { }
- _IteratorTriple(const _Iterator1& _first, const _Iterator2& _second,
- const _Iterator3& _third)
+ _IteratorTriple(const _Iterator1& __first, const _Iterator2& __second,
+ const _Iterator3& __third)
{
- __first = _first;
- __second = _second;
- __third = _third;
+ _M_first = __first;
+ _M_second = __second;
+ _M_third = __third;
}
// Pre-increment operator.
_IteratorTriple&
operator++()
{
- ++__first;
- ++__second;
- ++__third;
+ ++_M_first;
+ ++_M_second;
+ ++_M_third;
return *this;
}
// Post-increment operator.
const _IteratorTriple
operator++(int)
- { return _IteratorTriple(__first++, __second++, __third++); }
+ { return _IteratorTriple(_M_first++, _M_second++, _M_third++); }
// Pre-decrement operator.
_IteratorTriple&
operator--()
{
- --__first;
- --__second;
- --__third;
+ --_M_first;
+ --_M_second;
+ --_M_third;
return *this;
}
// Post-decrement operator.
const _IteratorTriple
operator--(int)
- { return _IteratorTriple(__first--, __second--, __third--); }
+ { return _IteratorTriple(_M_first--, _M_second--, _M_third--); }
// Type conversion.
operator _Iterator3() const
- { return __third; }
+ { return _M_third; }
_IteratorTriple&
operator=(const _IteratorTriple& __other)
{
- __first = __other.__first;
- __second = __other.__second;
- __third = __other.__third;
+ _M_first = __other._M_first;
+ _M_second = __other._M_second;
+ _M_third = __other._M_third;
return *this;
}
_IteratorTriple
operator+(difference_type __delta) const
- { return _IteratorTriple(__first + __delta, __second + __delta, __third + __delta); }
+ { return _IteratorTriple(_M_first + __delta, _M_second + __delta, _M_third + __delta); }
difference_type
operator-(const _IteratorTriple& __other) const
- { return __first - __other.__first; }
+ { return _M_first - __other._M_first; }
};
}
diff --git a/libstdc++-v3/include/parallel/losertree.h b/libstdc++-v3/include/parallel/losertree.h
index b98608d..33a4070 100644
--- a/libstdc++-v3/include/parallel/losertree.h
+++ b/libstdc++-v3/include/parallel/losertree.h
@@ -54,70 +54,70 @@ namespace __gnu_parallel
* @param _Compare the comparator to use, defaults to std::less<_Tp>
*/
template<typename _Tp, typename _Compare>
-class LoserTreeBase
+class _LoserTreeBase
{
protected:
- /** @brief Internal representation of a LoserTree element. */
+ /** @brief Internal representation of a _LoserTree element. */
struct _Loser
{
/** @brief flag, true iff this is a "maximum" __sentinel. */
bool _M_sup;
/** @brief __index of the _M_source __sequence. */
int _M_source;
- /** @brief _M_key of the element in the LoserTree. */
+ /** @brief _M_key of the element in the _LoserTree. */
_Tp _M_key;
};
- unsigned int __ik, __k, __offset;
+ unsigned int _M_ik, _M_k, _M_offset;
- /** log_2{__k} */
+ /** log_2{_M_k} */
unsigned int _M_log_k;
- /** @brief LoserTree __elements. */
- _Loser* __losers;
+ /** @brief _LoserTree __elements. */
+ _Loser* _M_losers;
/** @brief _Compare to use. */
- _Compare __comp;
+ _Compare _M_comp;
/**
- * @brief State flag that determines whether the LoserTree is empty.
+ * @brief State flag that determines whether the _LoserTree is empty.
*
- * Only used for building the LoserTree.
+ * Only used for building the _LoserTree.
*/
- bool __first_insert;
+ bool _M_first_insert;
public:
/**
* @brief The constructor.
*
- * @param _k The number of sequences to merge.
- * @param _comp The comparator to use.
+ * @param __k The number of sequences to merge.
+ * @param __comp The comparator to use.
*/
- LoserTreeBase(unsigned int _k, _Compare _comp)
- : __comp(_comp)
+ _LoserTreeBase(unsigned int __k, _Compare __comp)
+ : _M_comp(__comp)
{
- __ik = _k;
+ _M_ik = __k;
- // Compute log_2{__k} for the _Loser Tree
- _M_log_k = __log2(__ik - 1) + 1;
+ // Compute log_2{_M_k} for the _Loser Tree
+ _M_log_k = __log2(_M_ik - 1) + 1;
// Next greater power of 2.
- __k = 1 << _M_log_k;
- __offset = __k;
+ _M_k = 1 << _M_log_k;
+ _M_offset = _M_k;
- // Avoid default-constructing __losers[]._M_key
- __losers = static_cast<_Loser*>(::operator new(2 * __k * sizeof(_Loser)));
- for (unsigned int __i = __ik - 1; __i < __k; ++__i)
- __losers[__i + __k]._M_sup = true;
+ // Avoid default-constructing _M_losers[]._M_key
+ _M_losers = static_cast<_Loser*>(::operator new(2 * _M_k * sizeof(_Loser)));
+ for (unsigned int __i = _M_ik - 1; __i < _M_k; ++__i)
+ _M_losers[__i + _M_k]._M_sup = true;
- __first_insert = true;
+ _M_first_insert = true;
}
/**
* @brief The destructor.
*/
- ~LoserTreeBase()
- { ::operator delete(__losers); }
+ ~_LoserTreeBase()
+ { ::operator delete(_M_losers); }
/**
* @brief Initializes the sequence "_M_source" with the element "_M_key".
@@ -130,31 +130,31 @@ public:
inline void
__insert_start(const _Tp& _M_key, int _M_source, bool _M_sup)
{
- unsigned int __pos = __k + _M_source;
+ unsigned int __pos = _M_k + _M_source;
- if(__first_insert)
+ if(_M_first_insert)
{
// Construct all keys, so we can easily deconstruct them.
- for (unsigned int __i = 0; __i < (2 * __k); ++__i)
- new(&(__losers[__i]._M_key)) _Tp(_M_key);
- __first_insert = false;
+ for (unsigned int __i = 0; __i < (2 * _M_k); ++__i)
+ new(&(_M_losers[__i]._M_key)) _Tp(_M_key);
+ _M_first_insert = false;
}
else
- new(&(__losers[__pos]._M_key)) _Tp(_M_key);
+ new(&(_M_losers[__pos]._M_key)) _Tp(_M_key);
- __losers[__pos]._M_sup = _M_sup;
- __losers[__pos]._M_source = _M_source;
+ _M_losers[__pos]._M_sup = _M_sup;
+ _M_losers[__pos]._M_source = _M_source;
}
/**
* @return the index of the sequence with the smallest element.
*/
int __get_min_source()
- { return __losers[0]._M_source; }
+ { return _M_losers[0]._M_source; }
};
/**
- * @brief Stable LoserTree variant.
+ * @brief Stable _LoserTree variant.
*
* Provides the stable implementations of insert_start, __init_winner,
* __init and __delete_min_insert.
@@ -162,22 +162,22 @@ public:
* Unstable variant is done using partial specialisation below.
*/
template<bool __stable/* default == true */, typename _Tp, typename _Compare>
-class LoserTree : public LoserTreeBase<_Tp, _Compare>
+class _LoserTree : public _LoserTreeBase<_Tp, _Compare>
{
- typedef LoserTreeBase<_Tp, _Compare> Base;
- using Base::__k;
- using Base::__losers;
- using Base::__first_insert;
+ typedef _LoserTreeBase<_Tp, _Compare> Base;
+ using Base::_M_k;
+ using Base::_M_losers;
+ using Base::_M_first_insert;
public:
- LoserTree(unsigned int _k, _Compare _comp)
- : Base::LoserTreeBase(_k, _comp)
+ _LoserTree(unsigned int __k, _Compare __comp)
+ : Base::_LoserTreeBase(__k, __comp)
{}
unsigned int
__init_winner(unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -185,25 +185,25 @@ public:
{
unsigned int __left = __init_winner (2 * __root);
unsigned int __right = __init_winner (2 * __root + 1);
- if (__losers[__right]._M_sup
- || (!__losers[__left]._M_sup
- && !__comp(__losers[__right]._M_key, __losers[__left]._M_key)))
+ if (_M_losers[__right]._M_sup
+ || (!_M_losers[__left]._M_sup
+ && !_M_comp(_M_losers[__right]._M_key, _M_losers[__left]._M_key)))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
}
void __init()
- { __losers[0] = __losers[__init_winner(1)]; }
+ { _M_losers[0] = _M_losers[__init_winner(1)]; }
/**
* @brief Delete the smallest element and insert a new element from
@@ -216,50 +216,50 @@ public:
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted, ties are broken by _M_source.
- if ((_M_sup && (!__losers[__pos]._M_sup || __losers[__pos]._M_source < _M_source))
- || (!_M_sup && !__losers[__pos]._M_sup
- && ((__comp(__losers[__pos]._M_key, _M_key))
- || (!__comp(_M_key, __losers[__pos]._M_key)
- && __losers[__pos]._M_source < _M_source))))
+ if ((_M_sup && (!_M_losers[__pos]._M_sup || _M_losers[__pos]._M_source < _M_source))
+ || (!_M_sup && !_M_losers[__pos]._M_sup
+ && ((_M_comp(_M_losers[__pos]._M_key, _M_key))
+ || (!_M_comp(_M_key, _M_losers[__pos]._M_key)
+ && _M_losers[__pos]._M_source < _M_source))))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_sup, _M_sup);
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_key, _M_key);
+ std::swap(_M_losers[__pos]._M_sup, _M_sup);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_key, _M_key);
}
}
- __losers[0]._M_sup = _M_sup;
- __losers[0]._M_source = _M_source;
- __losers[0]._M_key = _M_key;
+ _M_losers[0]._M_sup = _M_sup;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_key = _M_key;
}
};
/**
- * @brief Unstable LoserTree variant.
+ * @brief Unstable _LoserTree variant.
*
* Stability (non-stable here) is selected with partial specialization.
*/
template<typename _Tp, typename _Compare>
-class LoserTree</* __stable == */false, _Tp, _Compare> :
- public LoserTreeBase<_Tp, _Compare>
+class _LoserTree</* __stable == */false, _Tp, _Compare> :
+ public _LoserTreeBase<_Tp, _Compare>
{
- typedef LoserTreeBase<_Tp, _Compare> Base;
+ typedef _LoserTreeBase<_Tp, _Compare> Base;
using Base::_M_log_k;
- using Base::__k;
- using Base::__losers;
- using Base::__first_insert;
+ using Base::_M_k;
+ using Base::_M_losers;
+ using Base::_M_first_insert;
public:
- LoserTree(unsigned int _k, _Compare _comp)
- : Base::LoserTreeBase(_k, _comp)
+ _LoserTree(unsigned int __k, _Compare __comp)
+ : Base::_LoserTreeBase(__k, __comp)
{}
/**
@@ -272,7 +272,7 @@ public:
unsigned int
__init_winner (unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -280,18 +280,18 @@ public:
{
unsigned int __left = __init_winner (2 * __root);
unsigned int __right = __init_winner (2 * __root + 1);
- if (__losers[__right]._M_sup ||
- (!__losers[__left]._M_sup
- && !__comp(__losers[__right]._M_key, __losers[__left]._M_key)))
+ if (_M_losers[__right]._M_sup ||
+ (!_M_losers[__left]._M_sup
+ && !_M_comp(_M_losers[__right]._M_key, _M_losers[__left]._M_key)))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
@@ -299,7 +299,7 @@ public:
inline void
__init()
- { __losers[0] = __losers[__init_winner(1)]; }
+ { _M_losers[0] = _M_losers[__init_winner(1)]; }
/**
* Delete the _M_key smallest element and insert the element _M_key instead.
@@ -313,25 +313,25 @@ public:
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted.
- if (_M_sup || (!__losers[__pos]._M_sup && __comp(__losers[__pos]._M_key, _M_key)))
+ if (_M_sup || (!_M_losers[__pos]._M_sup && _M_comp(_M_losers[__pos]._M_key, _M_key)))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_sup, _M_sup);
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_key, _M_key);
+ std::swap(_M_losers[__pos]._M_sup, _M_sup);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_key, _M_key);
}
}
- __losers[0]._M_sup = _M_sup;
- __losers[0]._M_source = _M_source;
- __losers[0]._M_key = _M_key;
+ _M_losers[0]._M_sup = _M_sup;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_key = _M_key;
}
};
@@ -343,7 +343,7 @@ template<typename _Tp, typename _Compare>
class _LoserTreePointerBase
{
protected:
- /** @brief Internal representation of LoserTree __elements. */
+ /** @brief Internal representation of _LoserTree __elements. */
struct _Loser
{
bool _M_sup;
@@ -351,42 +351,42 @@ protected:
const _Tp* _M_keyp;
};
- unsigned int __ik, __k, __offset;
- _Loser* __losers;
- _Compare __comp;
+ unsigned int _M_ik, _M_k, _M_offset;
+ _Loser* _M_losers;
+ _Compare _M_comp;
public:
- _LoserTreePointerBase(unsigned int _k, _Compare _comp = std::less<_Tp>())
- : __comp(_comp)
+ _LoserTreePointerBase(unsigned int __k, _Compare __comp = std::less<_Tp>())
+ : _M_comp(__comp)
{
- __ik = _k;
+ _M_ik = __k;
// Next greater power of 2.
- __k = 1 << (__log2(__ik - 1) + 1);
- __offset = __k;
- __losers = new _Loser[__k * 2];
- for (unsigned int __i = __ik - 1; __i < __k; __i++)
- __losers[__i + __k]._M_sup = true;
+ _M_k = 1 << (__log2(_M_ik - 1) + 1);
+ _M_offset = _M_k;
+ _M_losers = new _Loser[_M_k * 2];
+ for (unsigned int __i = _M_ik - 1; __i < _M_k; __i++)
+ _M_losers[__i + _M_k]._M_sup = true;
}
~_LoserTreePointerBase()
- { ::operator delete[](__losers); }
+ { ::operator delete[](_M_losers); }
int __get_min_source()
- { return __losers[0]._M_source; }
+ { return _M_losers[0]._M_source; }
void __insert_start(const _Tp& _M_key, int _M_source, bool _M_sup)
{
- unsigned int __pos = __k + _M_source;
+ unsigned int __pos = _M_k + _M_source;
- __losers[__pos]._M_sup = _M_sup;
- __losers[__pos]._M_source = _M_source;
- __losers[__pos]._M_keyp = &_M_key;
+ _M_losers[__pos]._M_sup = _M_sup;
+ _M_losers[__pos]._M_source = _M_source;
+ _M_losers[__pos]._M_keyp = &_M_key;
}
};
/**
- * @brief Stable LoserTree implementation.
+ * @brief Stable _LoserTree implementation.
*
* The unstable variant is implemented using partial instantiation below.
*/
@@ -394,18 +394,18 @@ template<bool __stable/* default == true */, typename _Tp, typename _Compare>
class _LoserTreePointer : public _LoserTreePointerBase<_Tp, _Compare>
{
typedef _LoserTreePointerBase<_Tp, _Compare> Base;
- using Base::__k;
- using Base::__losers;
+ using Base::_M_k;
+ using Base::_M_losers;
public:
- _LoserTreePointer(unsigned int _k, _Compare _comp = std::less<_Tp>())
- : Base::_LoserTreePointerBase(_k, _comp)
+ _LoserTreePointer(unsigned int __k, _Compare __comp = std::less<_Tp>())
+ : Base::_LoserTreePointerBase(__k, __comp)
{}
unsigned int
__init_winner(unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -413,59 +413,59 @@ public:
{
unsigned int __left = __init_winner (2 * __root);
unsigned int __right = __init_winner (2 * __root + 1);
- if (__losers[__right]._M_sup
- || (!__losers[__left]._M_sup && !__comp(*__losers[__right]._M_keyp,
- *__losers[__left]._M_keyp)))
+ if (_M_losers[__right]._M_sup
+ || (!_M_losers[__left]._M_sup && !_M_comp(*_M_losers[__right]._M_keyp,
+ *_M_losers[__left]._M_keyp)))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
}
void __init()
- { __losers[0] = __losers[__init_winner(1)]; }
+ { _M_losers[0] = _M_losers[__init_winner(1)]; }
void __delete_min_insert(const _Tp& _M_key, bool _M_sup)
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
const _Tp* _M_keyp = &_M_key;
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted, ties are broken by _M_source.
- if ((_M_sup && (!__losers[__pos]._M_sup || __losers[__pos]._M_source < _M_source)) ||
- (!_M_sup && !__losers[__pos]._M_sup &&
- ((__comp(*__losers[__pos]._M_keyp, *_M_keyp)) ||
- (!__comp(*_M_keyp, *__losers[__pos]._M_keyp)
- && __losers[__pos]._M_source < _M_source))))
+ if ((_M_sup && (!_M_losers[__pos]._M_sup || _M_losers[__pos]._M_source < _M_source)) ||
+ (!_M_sup && !_M_losers[__pos]._M_sup &&
+ ((_M_comp(*_M_losers[__pos]._M_keyp, *_M_keyp)) ||
+ (!_M_comp(*_M_keyp, *_M_losers[__pos]._M_keyp)
+ && _M_losers[__pos]._M_source < _M_source))))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_sup, _M_sup);
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_keyp, _M_keyp);
+ std::swap(_M_losers[__pos]._M_sup, _M_sup);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_keyp, _M_keyp);
}
}
- __losers[0]._M_sup = _M_sup;
- __losers[0]._M_source = _M_source;
- __losers[0]._M_keyp = _M_keyp;
+ _M_losers[0]._M_sup = _M_sup;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_keyp = _M_keyp;
}
};
/**
- * @brief Unstable LoserTree implementation.
+ * @brief Unstable _LoserTree implementation.
*
* The stable variant is above.
*/
@@ -474,18 +474,18 @@ class _LoserTreePointer</* __stable == */false, _Tp, _Compare> :
public _LoserTreePointerBase<_Tp, _Compare>
{
typedef _LoserTreePointerBase<_Tp, _Compare> Base;
- using Base::__k;
- using Base::__losers;
+ using Base::_M_k;
+ using Base::_M_losers;
public:
- _LoserTreePointer(unsigned int _k, _Compare _comp = std::less<_Tp>())
- : Base::_LoserTreePointerBase(_k, _comp)
+ _LoserTreePointer(unsigned int __k, _Compare __comp = std::less<_Tp>())
+ : Base::_LoserTreePointerBase(__k, __comp)
{}
unsigned int
__init_winner(unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -493,54 +493,54 @@ public:
{
unsigned int __left = __init_winner (2 * __root);
unsigned int __right = __init_winner (2 * __root + 1);
- if (__losers[__right]._M_sup
- || (!__losers[__left]._M_sup
- && !__comp(*__losers[__right]._M_keyp, *__losers[__left]._M_keyp)))
+ if (_M_losers[__right]._M_sup
+ || (!_M_losers[__left]._M_sup
+ && !_M_comp(*_M_losers[__right]._M_keyp, *_M_losers[__left]._M_keyp)))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
}
void __init()
- { __losers[0] = __losers[__init_winner(1)]; }
+ { _M_losers[0] = _M_losers[__init_winner(1)]; }
void __delete_min_insert(const _Tp& _M_key, bool _M_sup)
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
const _Tp* _M_keyp = &_M_key;
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted.
- if (_M_sup || (!__losers[__pos]._M_sup && __comp(*__losers[__pos]._M_keyp, *_M_keyp)))
+ if (_M_sup || (!_M_losers[__pos]._M_sup && _M_comp(*_M_losers[__pos]._M_keyp, *_M_keyp)))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_sup, _M_sup);
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_keyp, _M_keyp);
+ std::swap(_M_losers[__pos]._M_sup, _M_sup);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_keyp, _M_keyp);
}
}
- __losers[0]._M_sup = _M_sup;
- __losers[0]._M_source = _M_source;
- __losers[0]._M_keyp = _M_keyp;
+ _M_losers[0]._M_sup = _M_sup;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_keyp = _M_keyp;
}
};
-/** @brief Base class for unguarded LoserTree implementation.
+/** @brief Base class for unguarded _LoserTree implementation.
*
* The whole element is copied into the tree structure.
*
@@ -560,56 +560,56 @@ protected:
_Tp _M_key;
};
- unsigned int __ik, __k, __offset;
- _Loser* __losers;
- _Compare __comp;
+ unsigned int _M_ik, _M_k, _M_offset;
+ _Loser* _M_losers;
+ _Compare _M_comp;
public:
inline
- _LoserTreeUnguardedBase(unsigned int _k, const _Tp _sentinel,
- _Compare _comp = std::less<_Tp>())
- : __comp(_comp)
+ _LoserTreeUnguardedBase(unsigned int __k, const _Tp _sentinel,
+ _Compare __comp = std::less<_Tp>())
+ : _M_comp(__comp)
{
- __ik = _k;
+ _M_ik = __k;
// Next greater power of 2.
- __k = 1 << (__log2(__ik - 1) + 1);
- __offset = __k;
- // Avoid default-constructing __losers[]._M_key
- __losers = static_cast<_Loser*>(::operator new(2 * __k * sizeof(_Loser)));
+ _M_k = 1 << (__log2(_M_ik - 1) + 1);
+ _M_offset = _M_k;
+ // Avoid default-constructing _M_losers[]._M_key
+ _M_losers = static_cast<_Loser*>(::operator new(2 * _M_k * sizeof(_Loser)));
- for (unsigned int __i = __k + __ik - 1; __i < (2 * __k); ++__i)
+ for (unsigned int __i = _M_k + _M_ik - 1; __i < (2 * _M_k); ++__i)
{
- __losers[__i]._M_key = _sentinel;
- __losers[__i]._M_source = -1;
+ _M_losers[__i]._M_key = _sentinel;
+ _M_losers[__i]._M_source = -1;
}
}
inline ~_LoserTreeUnguardedBase()
- { ::operator delete(__losers); }
+ { ::operator delete(_M_losers); }
inline int
__get_min_source()
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
- return __losers[0]._M_source;
+ return _M_losers[0]._M_source;
}
inline void
__insert_start(const _Tp& _M_key, int _M_source, bool)
{
- unsigned int __pos = __k + _M_source;
+ unsigned int __pos = _M_k + _M_source;
- new(&(__losers[__pos]._M_key)) _Tp(_M_key);
- __losers[__pos]._M_source = _M_source;
+ new(&(_M_losers[__pos]._M_key)) _Tp(_M_key);
+ _M_losers[__pos]._M_source = _M_source;
}
};
/**
- * @brief Stable implementation of unguarded LoserTree.
+ * @brief Stable implementation of unguarded _LoserTree.
*
* Unstable variant is selected below with partial specialization.
*/
@@ -617,19 +617,19 @@ template<bool __stable/* default == true */, typename _Tp, typename _Compare>
class _LoserTreeUnguarded : public _LoserTreeUnguardedBase<_Tp, _Compare>
{
typedef _LoserTreeUnguardedBase<_Tp, _Compare> Base;
- using Base::__k;
- using Base::__losers;
+ using Base::_M_k;
+ using Base::_M_losers;
public:
- _LoserTreeUnguarded(unsigned int _k, const _Tp _sentinel,
- _Compare _comp = std::less<_Tp>())
- : Base::_LoserTreeUnguardedBase(_k, _sentinel, _comp)
+ _LoserTreeUnguarded(unsigned int __k, const _Tp _sentinel,
+ _Compare __comp = std::less<_Tp>())
+ : Base::_LoserTreeUnguardedBase(__k, _sentinel, __comp)
{}
unsigned int
__init_winner(unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -637,16 +637,16 @@ public:
{
unsigned int __left = __init_winner (2 * __root);
unsigned int __right = __init_winner (2 * __root + 1);
- if (!__comp(__losers[__right]._M_key, __losers[__left]._M_key))
+ if (!_M_comp(_M_losers[__right]._M_key, _M_losers[__left]._M_key))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
@@ -655,11 +655,11 @@ public:
inline void
__init()
{
- __losers[0] = __losers[__init_winner(1)];
+ _M_losers[0] = _M_losers[__init_winner(1)];
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top at the beginning (0 sequences!)
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
}
@@ -669,29 +669,29 @@ public:
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted, ties are broken by _M_source.
- if (__comp(__losers[__pos]._M_key, _M_key)
- || (!__comp(_M_key, __losers[__pos]._M_key) && __losers[__pos]._M_source < _M_source))
+ if (_M_comp(_M_losers[__pos]._M_key, _M_key)
+ || (!_M_comp(_M_key, _M_losers[__pos]._M_key) && _M_losers[__pos]._M_source < _M_source))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_key, _M_key);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_key, _M_key);
}
}
- __losers[0]._M_source = _M_source;
- __losers[0]._M_key = _M_key;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_key = _M_key;
}
};
/**
- * @brief Non-Stable implementation of unguarded LoserTree.
+ * @brief Non-Stable implementation of unguarded _LoserTree.
*
* Stable implementation is above.
*/
@@ -700,19 +700,19 @@ class _LoserTreeUnguarded</* __stable == */false, _Tp, _Compare> :
public _LoserTreeUnguardedBase<_Tp, _Compare>
{
typedef _LoserTreeUnguardedBase<_Tp, _Compare> Base;
- using Base::__k;
- using Base::__losers;
+ using Base::_M_k;
+ using Base::_M_losers;
public:
- _LoserTreeUnguarded(unsigned int _k, const _Tp _sentinel,
- _Compare _comp = std::less<_Tp>())
- : Base::_LoserTreeUnguardedBase(_k, _sentinel, _comp)
+ _LoserTreeUnguarded(unsigned int __k, const _Tp _sentinel,
+ _Compare __comp = std::less<_Tp>())
+ : Base::_LoserTreeUnguardedBase(__k, _sentinel, __comp)
{}
unsigned int
__init_winner (unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -723,20 +723,20 @@ public:
#if _GLIBCXX_ASSERTIONS
// If __left one is sentinel then __right one must be, too.
- if (__losers[__left]._M_source == -1)
- _GLIBCXX_PARALLEL_ASSERT(__losers[__right]._M_source == -1);
+ if (_M_losers[__left]._M_source == -1)
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[__right]._M_source == -1);
#endif
- if (!__comp(__losers[__right]._M_key, __losers[__left]._M_key))
+ if (!_M_comp(_M_losers[__right]._M_key, _M_losers[__left]._M_key))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
@@ -745,11 +745,11 @@ public:
inline void
__init()
{
- __losers[0] = __losers[__init_winner(1)];
+ _M_losers[0] = _M_losers[__init_winner(1)];
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top at the beginning (0 sequences!)
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
}
@@ -759,23 +759,23 @@ public:
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted.
- if (__comp(__losers[__pos]._M_key, _M_key))
+ if (_M_comp(_M_losers[__pos]._M_key, _M_key))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_key, _M_key);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_key, _M_key);
}
}
- __losers[0]._M_source = _M_source;
- __losers[0]._M_key = _M_key;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_key = _M_key;
}
};
@@ -795,57 +795,57 @@ protected:
const _Tp* _M_keyp;
};
- unsigned int __ik, __k, __offset;
- _Loser* __losers;
- _Compare __comp;
+ unsigned int _M_ik, _M_k, _M_offset;
+ _Loser* _M_losers;
+ _Compare _M_comp;
public:
inline
- LoserTreePointerUnguardedBase(unsigned int _k, const _Tp& _sentinel,
- _Compare _comp = std::less<_Tp>())
- : __comp(_comp)
+ LoserTreePointerUnguardedBase(unsigned int __k, const _Tp& _sentinel,
+ _Compare __comp = std::less<_Tp>())
+ : _M_comp(__comp)
{
- __ik = _k;
+ _M_ik = __k;
// Next greater power of 2.
- __k = 1 << (__log2(__ik - 1) + 1);
- __offset = __k;
- // Avoid default-constructing __losers[]._M_key
- __losers = new _Loser[2 * __k];
+ _M_k = 1 << (__log2(_M_ik - 1) + 1);
+ _M_offset = _M_k;
+ // Avoid default-constructing _M_losers[]._M_key
+ _M_losers = new _Loser[2 * _M_k];
- for (unsigned int __i = __k + __ik - 1; __i < (2 * __k); ++__i)
+ for (unsigned int __i = _M_k + _M_ik - 1; __i < (2 * _M_k); ++__i)
{
- __losers[__i]._M_keyp = &_sentinel;
- __losers[__i]._M_source = -1;
+ _M_losers[__i]._M_keyp = &_sentinel;
+ _M_losers[__i]._M_source = -1;
}
}
inline ~LoserTreePointerUnguardedBase()
- { delete[] __losers; }
+ { delete[] _M_losers; }
inline int
__get_min_source()
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
- return __losers[0]._M_source;
+ return _M_losers[0]._M_source;
}
inline void
__insert_start(const _Tp& _M_key, int _M_source, bool)
{
- unsigned int __pos = __k + _M_source;
+ unsigned int __pos = _M_k + _M_source;
- __losers[__pos]._M_keyp = &_M_key;
- __losers[__pos]._M_source = _M_source;
+ _M_losers[__pos]._M_keyp = &_M_key;
+ _M_losers[__pos]._M_source = _M_source;
}
};
/**
- * @brief Stable unguarded LoserTree variant storing pointers.
+ * @brief Stable unguarded _LoserTree variant storing pointers.
*
* Unstable variant is implemented below using partial specialization.
*/
@@ -854,19 +854,19 @@ class LoserTreePointerUnguarded :
public LoserTreePointerUnguardedBase<_Tp, _Compare>
{
typedef LoserTreePointerUnguardedBase<_Tp, _Compare> Base;
- using Base::__k;
- using Base::__losers;
+ using Base::_M_k;
+ using Base::_M_losers;
public:
- LoserTreePointerUnguarded(unsigned int _k, const _Tp& _sentinel,
- _Compare _comp = std::less<_Tp>())
- : Base::LoserTreePointerUnguardedBase(_k, _sentinel, _comp)
+ LoserTreePointerUnguarded(unsigned int __k, const _Tp& _sentinel,
+ _Compare __comp = std::less<_Tp>())
+ : Base::LoserTreePointerUnguardedBase(__k, _sentinel, __comp)
{}
unsigned int
__init_winner(unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -874,16 +874,16 @@ public:
{
unsigned int __left = __init_winner (2 * __root);
unsigned int __right = __init_winner (2 * __root + 1);
- if (!__comp(*__losers[__right]._M_keyp, *__losers[__left]._M_keyp))
+ if (!_M_comp(*_M_losers[__right]._M_keyp, *_M_losers[__left]._M_keyp))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
@@ -892,11 +892,11 @@ public:
inline void
__init()
{
- __losers[0] = __losers[__init_winner(1)];
+ _M_losers[0] = _M_losers[__init_winner(1)];
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top at the beginning (0 sequences!)
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
}
@@ -905,30 +905,30 @@ public:
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
const _Tp* _M_keyp = &_M_key;
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted, ties are broken by _M_source.
- if (__comp(*__losers[__pos]._M_keyp, *_M_keyp)
- || (!__comp(*_M_keyp, *__losers[__pos]._M_keyp) && __losers[__pos]._M_source < _M_source))
+ if (_M_comp(*_M_losers[__pos]._M_keyp, *_M_keyp)
+ || (!_M_comp(*_M_keyp, *_M_losers[__pos]._M_keyp) && _M_losers[__pos]._M_source < _M_source))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_keyp, _M_keyp);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_keyp, _M_keyp);
}
}
- __losers[0]._M_source = _M_source;
- __losers[0]._M_keyp = _M_keyp;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_keyp = _M_keyp;
}
};
/**
- * @brief Unstable unguarded LoserTree variant storing pointers.
+ * @brief Unstable unguarded _LoserTree variant storing pointers.
*
* Stable variant is above.
*/
@@ -937,19 +937,19 @@ class LoserTreePointerUnguarded</* __stable == */false, _Tp, _Compare> :
public LoserTreePointerUnguardedBase<_Tp, _Compare>
{
typedef LoserTreePointerUnguardedBase<_Tp, _Compare> Base;
- using Base::__k;
- using Base::__losers;
+ using Base::_M_k;
+ using Base::_M_losers;
public:
- LoserTreePointerUnguarded(unsigned int _k, const _Tp& _sentinel,
- _Compare _comp = std::less<_Tp>())
- : Base::LoserTreePointerUnguardedBase(_k, _sentinel, _comp)
+ LoserTreePointerUnguarded(unsigned int __k, const _Tp& _sentinel,
+ _Compare __comp = std::less<_Tp>())
+ : Base::LoserTreePointerUnguardedBase(__k, _sentinel, __comp)
{}
unsigned int
__init_winner(unsigned int __root)
{
- if (__root >= __k)
+ if (__root >= _M_k)
{
return __root;
}
@@ -960,20 +960,20 @@ public:
#if _GLIBCXX_ASSERTIONS
// If __left one is sentinel then __right one must be, too.
- if (__losers[__left]._M_source == -1)
- _GLIBCXX_PARALLEL_ASSERT(__losers[__right]._M_source == -1);
+ if (_M_losers[__left]._M_source == -1)
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[__right]._M_source == -1);
#endif
- if (!__comp(*__losers[__right]._M_keyp, *__losers[__left]._M_keyp))
+ if (!_M_comp(*_M_losers[__right]._M_keyp, *_M_losers[__left]._M_keyp))
{
// Left one is less or equal.
- __losers[__root] = __losers[__right];
+ _M_losers[__root] = _M_losers[__right];
return __left;
}
else
{
// Right one is less.
- __losers[__root] = __losers[__left];
+ _M_losers[__root] = _M_losers[__left];
return __right;
}
}
@@ -982,11 +982,11 @@ public:
inline void
__init()
{
- __losers[0] = __losers[__init_winner(1)];
+ _M_losers[0] = _M_losers[__init_winner(1)];
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top at the beginning (0 sequences!)
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
}
@@ -995,24 +995,24 @@ public:
{
#if _GLIBCXX_ASSERTIONS
// no dummy sequence can ever be at the top!
- _GLIBCXX_PARALLEL_ASSERT(__losers[0]._M_source != -1);
+ _GLIBCXX_PARALLEL_ASSERT(_M_losers[0]._M_source != -1);
#endif
const _Tp* _M_keyp = &_M_key;
- int _M_source = __losers[0]._M_source;
- for (unsigned int __pos = (__k + _M_source) / 2; __pos > 0; __pos /= 2)
+ int _M_source = _M_losers[0]._M_source;
+ for (unsigned int __pos = (_M_k + _M_source) / 2; __pos > 0; __pos /= 2)
{
// The smaller one gets promoted.
- if (__comp(*(__losers[__pos]._M_keyp), *_M_keyp))
+ if (_M_comp(*(_M_losers[__pos]._M_keyp), *_M_keyp))
{
// The other one is smaller.
- std::swap(__losers[__pos]._M_source, _M_source);
- std::swap(__losers[__pos]._M_keyp, _M_keyp);
+ std::swap(_M_losers[__pos]._M_source, _M_source);
+ std::swap(_M_losers[__pos]._M_keyp, _M_keyp);
}
}
- __losers[0]._M_source = _M_source;
- __losers[0]._M_keyp = _M_keyp;
+ _M_losers[0]._M_source = _M_source;
+ _M_losers[0]._M_keyp = _M_keyp;
}
};
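The losertree.h changes above are pure renames; for orientation, these classes implement a tournament ("loser") tree used to pick the smallest head element among __k sorted sequences during multiway merging: __insert_start seeds the leaves, __init plays the first tournament, __get_min_source reads the winner's source at the root, and __delete_min_insert replays one leaf-to-root path after the winner is replaced. A much simplified k-way merge sketch, using std::priority_queue in place of the loser tree (hypothetical helper, not the library algorithm):

#include <cassert>
#include <cstddef>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

// Merges k sorted vectors.  The priority queue plays the role the loser
// tree plays in the library: track the smallest current head element and
// remember which sequence it came from.
std::vector<int>
kway_merge_sketch(const std::vector<std::vector<int> >& seqs)
{
  typedef std::pair<int, std::size_t> entry;       // (key, source index)
  std::priority_queue<entry, std::vector<entry>, std::greater<entry> > heap;
  std::vector<std::size_t> pos(seqs.size(), 0);
  std::vector<int> out;

  for (std::size_t s = 0; s < seqs.size(); ++s)    // analogue of __insert_start
    if (!seqs[s].empty())
      heap.push(std::make_pair(seqs[s][0], s));

  while (!heap.empty())
    {
      entry top = heap.top();                      // analogue of __get_min_source
      heap.pop();
      out.push_back(top.first);
      std::size_t s = top.second;
      if (++pos[s] < seqs[s].size())               // analogue of __delete_min_insert
        heap.push(std::make_pair(seqs[s][pos[s]], s));
    }
  return out;
}

int main()
{
  std::vector<std::vector<int> > seqs(2);
  int a[] = { 1, 4, 7 };
  int b[] = { 2, 3, 9 };
  seqs[0].assign(a, a + 3);
  seqs[1].assign(b, b + 3);
  std::vector<int> merged = kway_merge_sketch(seqs);
  assert(merged.size() == 6 && merged[0] == 1 && merged[5] == 9);
  return 0;
}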
diff --git a/libstdc++-v3/include/parallel/multiseq_selection.h b/libstdc++-v3/include/parallel/multiseq_selection.h
index 353330e..148b4ab 100644
--- a/libstdc++-v3/include/parallel/multiseq_selection.h
+++ b/libstdc++-v3/include/parallel/multiseq_selection.h
@@ -56,19 +56,19 @@ namespace __gnu_parallel
: public std::binary_function<std::pair<_T1, _T2>, std::pair<_T1, _T2>, bool>
{
private:
- _Compare& __comp;
+ _Compare& _M_comp;
public:
- _Lexicographic(_Compare& _comp) : __comp(_comp) { }
+ _Lexicographic(_Compare& __comp) : _M_comp(__comp) { }
bool
operator()(const std::pair<_T1, _T2>& __p1,
const std::pair<_T1, _T2>& __p2) const
{
- if (__comp(__p1.first, __p2.first))
+ if (_M_comp(__p1.first, __p2.first))
return true;
- if (__comp(__p2.first, __p1.first))
+ if (_M_comp(__p2.first, __p1.first))
return false;
// Firsts are equal.
@@ -81,19 +81,19 @@ namespace __gnu_parallel
class _LexicographicReverse : public std::binary_function<_T1, _T2, bool>
{
private:
- _Compare& __comp;
+ _Compare& _M_comp;
public:
- _LexicographicReverse(_Compare& _comp) : __comp(_comp) { }
+ _LexicographicReverse(_Compare& __comp) : _M_comp(__comp) { }
bool
operator()(const std::pair<_T1, _T2>& __p1,
const std::pair<_T1, _T2>& __p2) const
{
- if (__comp(__p2.first, __p1.first))
+ if (_M_comp(__p2.first, __p1.first))
return true;
- if (__comp(__p1.first, __p2.first))
+ if (_M_comp(__p1.first, __p2.first))
return false;
// Firsts are equal.
diff --git a/libstdc++-v3/include/parallel/multiway_merge.h b/libstdc++-v3/include/parallel/multiway_merge.h
index bc64db4..2604d3a 100644
--- a/libstdc++-v3/include/parallel/multiway_merge.h
+++ b/libstdc++-v3/include/parallel/multiway_merge.h
@@ -950,7 +950,7 @@ struct __multiway_merge_k_variant_sentinel_switch
typename __gnu_cxx::__conditional_type<
_LoserTreeTraits<_ValueType>::_M_use_pointer
, _LoserTreePointer<__stable, _ValueType, _Compare>
- , LoserTree<__stable, _ValueType, _Compare>
+ , _LoserTree<__stable, _ValueType, _Compare>
>::__type >(__seqs_begin, __seqs_end, __target, __length, __comp);
}
};
diff --git a/libstdc++-v3/include/parallel/multiway_mergesort.h b/libstdc++-v3/include/parallel/multiway_mergesort.h
index 1c2c7e4..3a6c7e9 100644
--- a/libstdc++-v3/include/parallel/multiway_mergesort.h
+++ b/libstdc++-v3/include/parallel/multiway_mergesort.h
@@ -49,10 +49,10 @@ template<typename _DifferenceTp>
typedef _DifferenceTp _DifferenceType;
/** @brief Begin of subsequence. */
- _DifferenceType __begin;
+ _DifferenceType _M_begin;
/** @brief End of subsequence. */
- _DifferenceType __end;
+ _DifferenceType _M_end;
};
/** @brief Data accessed by all threads.
@@ -66,7 +66,7 @@ template<typename _RAIter>
typedef typename _TraitsType::difference_type _DifferenceType;
/** @brief Number of threads involved. */
- _ThreadIndex __num_threads;
+ _ThreadIndex _M_num_threads;
/** @brief Input __begin. */
_RAIter _M_source;
@@ -141,40 +141,40 @@ template<typename _RAIter, typename _Compare,
# pragma omp barrier
std::vector<std::pair<_SortingPlacesIterator, _SortingPlacesIterator> >
- seqs(__sd->__num_threads);
- for (_ThreadIndex __s = 0; __s < __sd->__num_threads; __s++)
+ seqs(__sd->_M_num_threads);
+ for (_ThreadIndex __s = 0; __s < __sd->_M_num_threads; __s++)
seqs[__s] = std::make_pair(__sd->_M_temporary[__s],
__sd->_M_temporary[__s]
+ (__sd->_M_starts[__s + 1] - __sd->_M_starts[__s]));
- std::vector<_SortingPlacesIterator> _M_offsets(__sd->__num_threads);
+ std::vector<_SortingPlacesIterator> _M_offsets(__sd->_M_num_threads);
// if not last thread
- if (__iam < __sd->__num_threads - 1)
+ if (__iam < __sd->_M_num_threads - 1)
multiseq_partition(seqs.begin(), seqs.end(),
__sd->_M_starts[__iam + 1], _M_offsets.begin(), __comp);
- for (int __seq = 0; __seq < __sd->__num_threads; __seq++)
+ for (int __seq = 0; __seq < __sd->_M_num_threads; __seq++)
{
// for each sequence
- if (__iam < (__sd->__num_threads - 1))
- __sd->_M_pieces[__iam][__seq].__end = _M_offsets[__seq] - seqs[__seq].first;
+ if (__iam < (__sd->_M_num_threads - 1))
+ __sd->_M_pieces[__iam][__seq]._M_end = _M_offsets[__seq] - seqs[__seq].first;
else
// very end of this sequence
- __sd->_M_pieces[__iam][__seq].__end =
+ __sd->_M_pieces[__iam][__seq]._M_end =
__sd->_M_starts[__seq + 1] - __sd->_M_starts[__seq];
}
# pragma omp barrier
- for (_ThreadIndex __seq = 0; __seq < __sd->__num_threads; __seq++)
+ for (_ThreadIndex __seq = 0; __seq < __sd->_M_num_threads; __seq++)
{
// For each sequence.
if (__iam > 0)
- __sd->_M_pieces[__iam][__seq].__begin = __sd->_M_pieces[__iam - 1][__seq].__end;
+ __sd->_M_pieces[__iam][__seq]._M_begin = __sd->_M_pieces[__iam - 1][__seq]._M_end;
else
// Absolute beginning.
- __sd->_M_pieces[__iam][__seq].__begin = 0;
+ __sd->_M_pieces[__iam][__seq]._M_begin = 0;
}
}
};
@@ -204,16 +204,16 @@ template<typename _RAIter, typename _Compare,
# pragma omp single
__gnu_sequential::sort(__sd->_M_samples,
- __sd->_M_samples + (__num_samples * __sd->__num_threads),
+ __sd->_M_samples + (__num_samples * __sd->_M_num_threads),
__comp);
# pragma omp barrier
- for (_ThreadIndex __s = 0; __s < __sd->__num_threads; ++__s)
+ for (_ThreadIndex __s = 0; __s < __sd->_M_num_threads; ++__s)
{
// For each sequence.
if (__num_samples * __iam > 0)
- __sd->_M_pieces[__iam][__s].__begin =
+ __sd->_M_pieces[__iam][__s]._M_begin =
std::lower_bound(__sd->_M_temporary[__s],
__sd->_M_temporary[__s]
+ (__sd->_M_starts[__s + 1] - __sd->_M_starts[__s]),
@@ -222,10 +222,10 @@ template<typename _RAIter, typename _Compare,
- __sd->_M_temporary[__s];
else
// Absolute beginning.
- __sd->_M_pieces[__iam][__s].__begin = 0;
+ __sd->_M_pieces[__iam][__s]._M_begin = 0;
- if ((__num_samples * (__iam + 1)) < (__num_samples * __sd->__num_threads))
- __sd->_M_pieces[__iam][__s].__end =
+ if ((__num_samples * (__iam + 1)) < (__num_samples * __sd->_M_num_threads))
+ __sd->_M_pieces[__iam][__s]._M_end =
std::lower_bound(__sd->_M_temporary[__s],
__sd->_M_temporary[__s]
+ (__sd->_M_starts[__s + 1] - __sd->_M_starts[__s]),
@@ -234,7 +234,7 @@ template<typename _RAIter, typename _Compare,
- __sd->_M_temporary[__s];
else
// Absolute __end.
- __sd->_M_pieces[__iam][__s].__end = __sd->_M_starts[__s + 1] - __sd->_M_starts[__s];
+ __sd->_M_pieces[__iam][__s]._M_end = __sd->_M_starts[__s + 1] - __sd->_M_starts[__s];
}
}
};
@@ -346,29 +346,29 @@ template<bool __stable, bool __exact, typename _RAIter,
// No barrier here: Synchronization is done by the splitting routine.
_DifferenceType __num_samples =
- _Settings::get().sort_mwms_oversampling * __sd->__num_threads - 1;
+ _Settings::get().sort_mwms_oversampling * __sd->_M_num_threads - 1;
_SplitConsistently
<__exact, _RAIter, _Compare, _SortingPlacesIterator>()
(__iam, __sd, __comp, __num_samples);
// Offset from __target __begin, __length after merging.
_DifferenceType __offset = 0, __length_am = 0;
- for (_ThreadIndex __s = 0; __s < __sd->__num_threads; __s++)
+ for (_ThreadIndex __s = 0; __s < __sd->_M_num_threads; __s++)
{
- __length_am += __sd->_M_pieces[__iam][__s].__end - __sd->_M_pieces[__iam][__s].__begin;
- __offset += __sd->_M_pieces[__iam][__s].__begin;
+ __length_am += __sd->_M_pieces[__iam][__s]._M_end - __sd->_M_pieces[__iam][__s]._M_begin;
+ __offset += __sd->_M_pieces[__iam][__s]._M_begin;
}
typedef std::vector<
std::pair<_SortingPlacesIterator, _SortingPlacesIterator> >
seq_vector_type;
- seq_vector_type seqs(__sd->__num_threads);
+ seq_vector_type seqs(__sd->_M_num_threads);
- for (int __s = 0; __s < __sd->__num_threads; ++__s)
+ for (int __s = 0; __s < __sd->_M_num_threads; ++__s)
{
seqs[__s] =
- std::make_pair(__sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s].__begin,
- __sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s].__end);
+ std::make_pair(__sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s]._M_begin,
+ __sd->_M_temporary[__s] + __sd->_M_pieces[__iam][__s]._M_end);
}
__possibly_stable_multiway_merge<
@@ -424,7 +424,7 @@ template<bool __stable, bool __exact, typename _RAIter,
# pragma omp single
{
- __sd.__num_threads = __num_threads;
+ __sd._M_num_threads = __num_threads;
__sd._M_source = __begin;
__sd._M_temporary = new _ValueType*[__num_threads];
diff --git a/libstdc++-v3/include/parallel/omp_loop.h b/libstdc++-v3/include/parallel/omp_loop.h
index 55191d4..5f5df67 100644
--- a/libstdc++-v3/include/parallel/omp_loop.h
+++ b/libstdc++-v3/include/parallel/omp_loop.h
@@ -108,7 +108,7 @@ template<typename _RAIter,
// Points to last element processed (needed as return value for
// some algorithms like transform).
- __f.finish_iterator = __begin + __length;
+ __f._M_finish_iterator = __begin + __length;
return __o;
}
diff --git a/libstdc++-v3/include/parallel/par_loop.h b/libstdc++-v3/include/parallel/par_loop.h
index 0b5e47a..68dee42 100644
--- a/libstdc++-v3/include/parallel/par_loop.h
+++ b/libstdc++-v3/include/parallel/par_loop.h
@@ -122,7 +122,7 @@ template<typename _RAIter,
// Points to last element processed (needed as return value for
// some algorithms like transform).
- __f.finish_iterator = __begin + __length;
+ __f._M_finish_iterator = __begin + __length;
delete[] __thread_results;
delete[] __constructed;
diff --git a/libstdc++-v3/include/parallel/random_number.h b/libstdc++-v3/include/parallel/random_number.h
index 69cbc94..12f646b 100644
--- a/libstdc++-v3/include/parallel/random_number.h
+++ b/libstdc++-v3/include/parallel/random_number.h
@@ -43,9 +43,9 @@ namespace __gnu_parallel
private:
std::tr1::mt19937 _M_mt;
uint64 _M_supremum;
- uint64 _RAND_SUP;
+ uint64 _M_rand_sup;
double _M_supremum_reciprocal;
- double _RAND_SUP_REC;
+ double _M_rand_sup_reciprocal;
// Assumed to be twice as long as the usual random number.
uint64 __cache;
@@ -72,9 +72,9 @@ namespace __gnu_parallel
/** @brief Default constructor. Seed with 0. */
_RandomNumber()
: _M_mt(0), _M_supremum(0x100000000ULL),
- _RAND_SUP(1ULL << (sizeof(uint32) * 8)),
- _M_supremum_reciprocal(double(_M_supremum) / double(_RAND_SUP)),
- _RAND_SUP_REC(1.0 / double(_RAND_SUP)),
+ _M_rand_sup(1ULL << (sizeof(uint32) * 8)),
+ _M_supremum_reciprocal(double(_M_supremum) / double(_M_rand_sup)),
+ _M_rand_sup_reciprocal(1.0 / double(_M_rand_sup)),
__cache(0), __bits_left(0) { }
/** @brief Constructor.
@@ -83,9 +83,9 @@ namespace __gnu_parallel
* interval @__c [0,_M_supremum). */
_RandomNumber(uint32 __seed, uint64 _M_supremum = 0x100000000ULL)
: _M_mt(__seed), _M_supremum(_M_supremum),
- _RAND_SUP(1ULL << (sizeof(uint32) * 8)),
- _M_supremum_reciprocal(double(_M_supremum) / double(_RAND_SUP)),
- _RAND_SUP_REC(1.0 / double(_RAND_SUP)),
+ _M_rand_sup(1ULL << (sizeof(uint32) * 8)),
+ _M_supremum_reciprocal(double(_M_supremum) / double(_M_rand_sup)),
+ _M_rand_sup_reciprocal(1.0 / double(_M_rand_sup)),
__cache(0), __bits_left(0) { }
/** @brief Generate unsigned random 32-bit integer. */
@@ -99,17 +99,17 @@ namespace __gnu_parallel
operator()(uint64 local_supremum)
{
return __scale_down(_M_mt(), local_supremum,
- double(local_supremum * _RAND_SUP_REC));
+ double(local_supremum * _M_rand_sup_reciprocal));
}
/** @brief Generate a number of random bits, run-time parameter.
* @param bits Number of bits to generate. */
unsigned long
- __genrand_bits(int bits)
+ __genrand_bits(int __bits)
{
- unsigned long __res = __cache & ((1 << bits) - 1);
- __cache = __cache >> bits;
- __bits_left -= bits;
+ unsigned long __res = __cache & ((1 << __bits) - 1);
+ __cache = __cache >> __bits;
+ __bits_left -= __bits;
if (__bits_left < 32)
{
__cache |= ((uint64(_M_mt())) << __bits_left);
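
The random_number.h hunk renames the bookkeeping that __genrand_bits relies on: small bit counts are served from a 64-bit cache, which is topped up from the Mersenne Twister engine whenever fewer than 32 bits remain. A simplified standalone analogue of that bit-serving scheme; the names are illustrative, it prefills the cache in its constructor, and std::mt19937 stands in for the std::tr1 engine used by the header:

#include <random>
#include <cstdint>
#include <iostream>

// Simplified analogue of the bit cache: serve __bits low bits from a 64-bit
// cache and refill with a fresh 32-bit draw once fewer than 32 bits remain.
class _BitCacheRng
{
  std::mt19937  _M_mt;
  std::uint64_t _M_cache;
  int           _M_bits_left;

public:
  explicit
  _BitCacheRng(std::uint32_t __seed)
  : _M_mt(__seed), _M_cache(0), _M_bits_left(0)
  {
    // Prefill with two 32-bit draws so the first request is already served
    // from random data.
    _M_cache = std::uint64_t(_M_mt()) | (std::uint64_t(_M_mt()) << 32);
    _M_bits_left = 64;
  }

  unsigned long
  __genrand_bits(int __bits)       // requires 0 < __bits < 32
  {
    unsigned long __res = _M_cache & ((std::uint64_t(1) << __bits) - 1);
    _M_cache >>= __bits;
    _M_bits_left -= __bits;
    if (_M_bits_left < 32)
      {
        // Top up: append a fresh 32-bit draw above the bits still cached.
        _M_cache |= (std::uint64_t(_M_mt()) << _M_bits_left);
        _M_bits_left += 32;
      }
    return __res;
  }
};

int main()
{
  _BitCacheRng __rng(0);
  for (int __i = 0; __i < 4; ++__i)
    std::cout << __rng.__genrand_bits(10) << '\n';   // four 10-bit values
  return 0;
}
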
diff --git a/libstdc++-v3/include/parallel/random_shuffle.h b/libstdc++-v3/include/parallel/random_shuffle.h
index 77ac639..5994190 100644
--- a/libstdc++-v3/include/parallel/random_shuffle.h
+++ b/libstdc++-v3/include/parallel/random_shuffle.h
@@ -63,7 +63,7 @@ template<typename _RAIter>
/** @brief Two-dimensional array to hold the thread-bin distribution.
*
- * Dimensions (__num_threads + 1) __x (_M_num_bins + 1). */
+ * Dimensions (_M_num_threads + 1) __x (_M_num_bins + 1). */
_DifferenceType** _M_dist;
/** @brief Start indexes of the threads' __chunks. */
@@ -91,7 +91,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
struct _DRSSorterPU
{
/** @brief Number of threads participating in total. */
- int __num_threads;
+ int _M_num_threads;
/** @brief Begin __index for bins taken care of by this thread. */
_BinIndex _M_bins_begin;
@@ -135,7 +135,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
_BinIndex* __oracles = new _BinIndex[__length];
_DifferenceType* _M_dist = new _DifferenceType[_M_sd->_M_num_bins + 1];
_BinIndex* _M_bin_proc = new _BinIndex[_M_sd->_M_num_bins];
- _ValueType** _M_temporaries = new _ValueType*[d->__num_threads];
+ _ValueType** _M_temporaries = new _ValueType*[d->_M_num_threads];
// Compute oracles and count appearances.
for (_BinIndex __b = 0; __b < _M_sd->_M_num_bins + 1; ++__b)
@@ -161,11 +161,11 @@ template<typename _RAIter, typename RandomNumberGenerator>
# pragma omp single
{
- // Sum up bins, _M_sd->_M_dist[__s + 1][d->__num_threads] now contains the
+ // Sum up bins, _M_sd->_M_dist[__s + 1][d->_M_num_threads] now contains the
// total number of items in bin __s
for (_BinIndex __s = 0; __s < _M_sd->_M_num_bins; ++__s)
__gnu_sequential::partial_sum(_M_sd->_M_dist[__s + 1],
- _M_sd->_M_dist[__s + 1] + d->__num_threads + 1,
+ _M_sd->_M_dist[__s + 1] + d->_M_num_threads + 1,
_M_sd->_M_dist[__s + 1]);
}
@@ -173,15 +173,15 @@ template<typename _RAIter, typename RandomNumberGenerator>
_SequenceIndex __offset = 0, __global_offset = 0;
for (_BinIndex __s = 0; __s < d->_M_bins_begin; ++__s)
- __global_offset += _M_sd->_M_dist[__s + 1][d->__num_threads];
+ __global_offset += _M_sd->_M_dist[__s + 1][d->_M_num_threads];
# pragma omp barrier
for (_BinIndex __s = d->_M_bins_begin; __s < d->__bins_end; ++__s)
{
- for (int __t = 0; __t < d->__num_threads + 1; ++__t)
+ for (int __t = 0; __t < d->_M_num_threads + 1; ++__t)
_M_sd->_M_dist[__s + 1][__t] += __offset;
- __offset = _M_sd->_M_dist[__s + 1][d->__num_threads];
+ __offset = _M_sd->_M_dist[__s + 1][d->_M_num_threads];
}
_M_sd->_M_temporaries[__iam] = static_cast<_ValueType*>(
@@ -194,7 +194,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
_M_dist[__b] = _M_sd->_M_dist[__b][__iam];
for (_BinIndex __b = 0; __b < _M_sd->_M_num_bins; ++__b)
_M_bin_proc[__b] = _M_sd->_M_bin_proc[__b];
- for (_ThreadIndex __t = 0; __t < d->__num_threads; ++__t)
+ for (_ThreadIndex __t = 0; __t < d->_M_num_threads; ++__t)
_M_temporaries[__t] = _M_sd->_M_temporaries[__t];
_RAIter _M_source = _M_sd->_M_source;
@@ -206,7 +206,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
_BinIndex target_bin = __oracles[__i];
_ThreadIndex target_p = _M_bin_proc[target_bin];
- // Last column [d->__num_threads] stays unchanged.
+ // Last column [d->_M_num_threads] stays unchanged.
::new(&(_M_temporaries[target_p][_M_dist[target_bin + 1]++]))
_ValueType(*(_M_source + __i + __start));
}
@@ -223,12 +223,12 @@ template<typename _RAIter, typename RandomNumberGenerator>
{
_ValueType* __begin =
_M_sd->_M_temporaries[__iam] +
- ((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->__num_threads]),
+ ((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->_M_num_threads]),
* __end =
- _M_sd->_M_temporaries[__iam] + _M_sd->_M_dist[__b + 1][d->__num_threads];
+ _M_sd->_M_temporaries[__iam] + _M_sd->_M_dist[__b + 1][d->_M_num_threads];
__sequential_random_shuffle(__begin, __end, __rng);
std::copy(__begin, __end, _M_sd->_M_source + __global_offset +
- ((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->__num_threads]));
+ ((__b == d->_M_bins_begin) ? 0 : _M_sd->_M_dist[__b][d->_M_num_threads]));
}
::operator delete(_M_sd->_M_temporaries[__iam]);
@@ -364,7 +364,7 @@ template<typename _RAIter, typename RandomNumberGenerator>
__pus[__i].__bins_end = bin_cursor;
for (; __j < bin_cursor; ++__j)
_M_sd._M_bin_proc[__j] = __i;
- __pus[__i].__num_threads = __num_threads;
+ __pus[__i]._M_num_threads = __num_threads;
__pus[__i]._M_seed = __rng(std::numeric_limits<uint32>::max());
__pus[__i]._M_sd = &_M_sd;
}
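
The random_shuffle.h hunks revolve around the _M_dist matrix described in the first of them: one row per bin, one column per thread, with a per-row partial sum turning raw counts into per-thread write offsets and a bin total in the last column. A small single-threaded sketch of that bookkeeping with made-up counts (the real code does this across OpenMP threads):

#include <numeric>
#include <vector>
#include <iostream>

int main()
{
  // Hypothetical setup: 2 bins, 3 threads.  __counts[__b][__t] is how many
  // elements thread __t placed in bin __b (these numbers are made up).
  const int __num_bins = 2, __num_threads = 3;
  long __counts[__num_bins][__num_threads] = { { 3, 1, 2 }, { 0, 4, 1 } };

  // Row __b + 1, column __t + 1 holds thread __t's count for bin __b,
  // mirroring the row layout used by _M_dist in these hunks.
  std::vector<std::vector<long> > __dist(__num_bins + 1,
                                         std::vector<long>(__num_threads + 1, 0));
  for (int __b = 0; __b < __num_bins; ++__b)
    for (int __t = 0; __t < __num_threads; ++__t)
      __dist[__b + 1][__t + 1] = __counts[__b][__t];

  // Prefix sum over threads: afterwards __dist[__b + 1][__num_threads] holds
  // the total number of items in bin __b, and __dist[__b + 1][__t] the write
  // offset of thread __t inside that bin.
  for (int __b = 0; __b < __num_bins; ++__b)
    std::partial_sum(__dist[__b + 1].begin(), __dist[__b + 1].end(),
                     __dist[__b + 1].begin());

  std::cout << "bin 0 total: " << __dist[1][__num_threads] << '\n';   // 6
  std::cout << "bin 1 total: " << __dist[2][__num_threads] << '\n';   // 5
  return 0;
}
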
diff --git a/libstdc++-v3/include/parallel/set_operations.h b/libstdc++-v3/include/parallel/set_operations.h
index a726d30..4bbbf08 100644
--- a/libstdc++-v3/include/parallel/set_operations.h
+++ b/libstdc++-v3/include/parallel/set_operations.h
@@ -71,9 +71,9 @@ template<typename _IIter,
typedef typename _TraitsType::difference_type _DifferenceType;
typedef typename std::pair<_IIter, _IIter> _IteratorPair;
- symmetric_difference_func(_Compare __c) : __comp(__c) {}
+ symmetric_difference_func(_Compare __comp) : _M_comp(__comp) {}
- _Compare __comp;
+ _Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, _IIter __b,
@@ -82,13 +82,13 @@ template<typename _IIter,
{
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{
*__r = *__a;
++__a;
++__r;
}
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{
*__r = *__c;
++__c;
@@ -111,12 +111,12 @@ template<typename _IIter,
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{
++__a;
++__counter;
}
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{
++__c;
++__counter;
@@ -150,9 +150,9 @@ template<typename _IIter,
typedef typename _TraitsType::difference_type _DifferenceType;
typedef typename std::pair<_IIter, _IIter> _IteratorPair;
- __difference_func(_Compare __c) : __comp(__c) {}
+ __difference_func(_Compare __comp) : _M_comp(__comp) {}
- _Compare __comp;
+ _Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, _IIter __b, _IIter __c, _IIter d,
@@ -160,13 +160,13 @@ template<typename _IIter,
{
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{
*__r = *__a;
++__a;
++__r;
}
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
@@ -185,12 +185,12 @@ template<typename _IIter,
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{
++__a;
++__counter;
}
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{ ++__a; ++__c; }
@@ -218,9 +218,9 @@ template<typename _IIter,
typedef typename _TraitsType::difference_type _DifferenceType;
typedef typename std::pair<_IIter, _IIter> _IteratorPair;
- __intersection_func(_Compare __c) : __comp(__c) {}
+ __intersection_func(_Compare __comp) : _M_comp(__comp) {}
- _Compare __comp;
+ _Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, _IIter __b, _IIter __c, _IIter d,
@@ -228,9 +228,9 @@ template<typename _IIter,
{
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{ ++__a; }
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
@@ -252,9 +252,9 @@ template<typename _IIter,
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{ ++__a; }
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
@@ -282,9 +282,9 @@ template<class _IIter, class _OutputIterator, class _Compare>
typedef typename std::iterator_traits<_IIter>::difference_type
_DifferenceType;
- __union_func(_Compare __c) : __comp(__c) {}
+ __union_func(_Compare __comp) : _M_comp(__comp) {}
- _Compare __comp;
+ _Compare _M_comp;
_OutputIterator
_M_invoke(_IIter __a, const _IIter __b, _IIter __c,
@@ -292,12 +292,12 @@ template<class _IIter, class _OutputIterator, class _Compare>
{
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{
*__r = *__a;
++__a;
}
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{
*__r = *__c;
++__c;
@@ -321,9 +321,9 @@ template<class _IIter, class _OutputIterator, class _Compare>
while (__a != __b && __c != d)
{
- if (__comp(*__a, *__c))
+ if (_M_comp(*__a, *__c))
{ ++__a; }
- else if (__comp(*__c, *__a))
+ else if (_M_comp(*__c, *__a))
{ ++__c; }
else
{
@@ -400,14 +400,14 @@ template<typename _IIter,
_IIter __offset[2];
const _DifferenceType __rank = __borders[__iam + 1];
- multiseq_partition(__sequence, __sequence + 2, __rank, __offset, __op.__comp);
+ multiseq_partition(__sequence, __sequence + 2, __rank, __offset, __op._M_comp);
// allowed to read?
// together
// *(__offset[ 0 ] - 1) == *__offset[ 1 ]
if (__offset[ 0 ] != __begin1 && __offset[ 1 ] != __end2
- && !__op.__comp(*(__offset[ 0 ] - 1), *__offset[ 1 ])
- && !__op.__comp(*__offset[ 1 ], *(__offset[ 0 ] - 1)))
+ && !__op._M_comp(*(__offset[ 0 ] - 1), *__offset[ 1 ])
+ && !__op._M_comp(*__offset[ 1 ], *(__offset[ 0 ] - 1)))
{
// Avoid split between globally equal elements: move one to
// front in first sequence.
@@ -476,10 +476,10 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_union(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
- _OutputIterator __result, _Compare __comp)
+ _OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
- __union_func< _IIter, _OutputIterator, _Compare>(__comp));
+ __union_func< _IIter, _OutputIterator, _Compare>(_M_comp));
}
template<typename _IIter,
@@ -488,10 +488,10 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_intersection(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
- _OutputIterator __result, _Compare __comp)
+ _OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
- __intersection_func<_IIter, _OutputIterator, _Compare>(__comp));
+ __intersection_func<_IIter, _OutputIterator, _Compare>(_M_comp));
}
template<typename _IIter,
@@ -500,10 +500,10 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_difference(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
- _OutputIterator __result, _Compare __comp)
+ _OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
- __difference_func<_IIter, _OutputIterator, _Compare>(__comp));
+ __difference_func<_IIter, _OutputIterator, _Compare>(_M_comp));
}
template<typename _IIter,
@@ -512,11 +512,11 @@ template<typename _IIter,
inline _OutputIterator
__parallel_set_symmetric_difference(_IIter __begin1, _IIter __end1,
_IIter __begin2, _IIter __end2,
- _OutputIterator __result, _Compare __comp)
+ _OutputIterator __result, _Compare _M_comp)
{
return __parallel_set_operation(__begin1, __end1, __begin2, __end2, __result,
symmetric_difference_func<_IIter, _OutputIterator, _Compare>
- (__comp));
+ (_M_comp));
}
}
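
All four functors renamed in set_operations.h share the pattern visible in these hunks: the comparator is stored as _M_comp and equivalence is detected as "neither element is less than the other". A simplified standalone union functor in that style; it is not the real __union_func (whose remaining branches fall outside the hunks shown), and the iterator handling is deliberately minimal:

#include <vector>
#include <functional>
#include <iostream>

// Simplified analogue of the functor pattern: the comparator lives in _M_comp
// and drives both the "a before c" and "c before a" tests.
template<typename _It, typename _Out, typename _Compare>
  struct _UnionFunc
  {
    _Compare _M_comp;

    explicit
    _UnionFunc(_Compare __comp) : _M_comp(__comp) { }

    _Out
    _M_invoke(_It __a, _It __b, _It __c, _It __d, _Out __r) const
    {
      while (__a != __b && __c != __d)
        {
          if (_M_comp(*__a, *__c))
            { *__r++ = *__a++; }
          else if (_M_comp(*__c, *__a))
            { *__r++ = *__c++; }
          else                        // equivalent: emit once, advance both
            { *__r++ = *__a++; ++__c; }
        }
      // Copy whatever remains of either input.
      while (__a != __b) *__r++ = *__a++;
      while (__c != __d) *__r++ = *__c++;
      return __r;
    }
  };

int main()
{
  std::vector<int> __x(3), __y(2), __out(5);
  __x[0] = 1; __x[1] = 3; __x[2] = 5;
  __y[0] = 3; __y[1] = 4;
  typedef std::vector<int>::iterator _It;
  _UnionFunc<_It, _It, std::less<int> > __f((std::less<int>()));
  _It __end = __f._M_invoke(__x.begin(), __x.end(),
                            __y.begin(), __y.end(), __out.begin());
  for (_It __i = __out.begin(); __i != __end; ++__i)
    std::cout << *__i << ' ';                         // prints: 1 3 4 5
  std::cout << '\n';
  return 0;
}
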
diff --git a/libstdc++-v3/include/parallel/tags.h b/libstdc++-v3/include/parallel/tags.h
index ae0c202..5dfc7f3 100644
--- a/libstdc++-v3/include/parallel/tags.h
+++ b/libstdc++-v3/include/parallel/tags.h
@@ -46,37 +46,37 @@ namespace __gnu_parallel
struct parallel_tag
{
private:
- _ThreadIndex __num_threads;
+ _ThreadIndex _M_num_threads;
public:
/** @brief Default constructor. Use default number of threads. */
parallel_tag()
{
- this->__num_threads = 0;
+ this->_M_num_threads = 0;
}
/** @brief Default constructor. Recommend number of threads to use.
* @param __num_threads Desired number of threads. */
parallel_tag(_ThreadIndex __num_threads)
{
- this->__num_threads = __num_threads;
+ this->_M_num_threads = __num_threads;
}
/** @brief Find __out desired number of threads.
* @return Desired number of threads. */
inline _ThreadIndex __get_num_threads()
{
- if(__num_threads == 0)
+ if(_M_num_threads == 0)
return omp_get_max_threads();
else
- return __num_threads;
+ return _M_num_threads;
}
/** @brief Set the desired number of threads.
* @param __num_threads Desired number of threads. */
inline void set_num_threads(_ThreadIndex __num_threads)
{
- this->__num_threads = __num_threads;
+ this->_M_num_threads = __num_threads;
}
};
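
The tags.h hunk shows the whole thread-selection policy of parallel_tag: a stored count of zero defers to omp_get_max_threads(), anything else is an explicit request. A standalone analogue of that behavior, using a hypothetical _TagSketch rather than including the real <parallel/tags.h>, buildable with -fopenmp:

#include <omp.h>
#include <iostream>

// Standalone analogue of parallel_tag's thread-count handling: 0 defers to
// the OpenMP maximum, anything else is an explicit request.
struct _TagSketch
{
  int _M_num_threads;

  _TagSketch() : _M_num_threads(0) { }

  explicit
  _TagSketch(int __num_threads) : _M_num_threads(__num_threads) { }

  int
  __get_num_threads() const
  {
    if (_M_num_threads == 0)
      return omp_get_max_threads();
    else
      return _M_num_threads;
  }
};

int main()
{
  _TagSketch __default_tag;        // 0: fall back to the OpenMP maximum
  _TagSketch __explicit_tag(4);    // explicit request for 4 threads
  std::cout << __default_tag.__get_num_threads() << ' '
            << __explicit_tag.__get_num_threads() << '\n';
  return 0;
}
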
diff --git a/libstdc++-v3/include/parallel/workstealing.h b/libstdc++-v3/include/parallel/workstealing.h
index a43c261..3fb71f5 100644
--- a/libstdc++-v3/include/parallel/workstealing.h
+++ b/libstdc++-v3/include/parallel/workstealing.h
@@ -59,17 +59,17 @@ template<typename _DifferenceTp>
*
* Changed by owning and stealing thread. By stealing thread,
* always incremented. */
- _GLIBCXX_JOB_VOLATILE _DifferenceType __first;
+ _GLIBCXX_JOB_VOLATILE _DifferenceType _M_first;
/** @brief Last element.
*
* Changed by owning thread only. */
- _GLIBCXX_JOB_VOLATILE _DifferenceType __last;
+ _GLIBCXX_JOB_VOLATILE _DifferenceType _M_last;
- /** @brief Number of elements, i.e. @__c __last-__first+1.
+ /** @brief Number of elements, i.e. @__c _M_last-_M_first+1.
*
* Changed by owning thread only. */
- _GLIBCXX_JOB_VOLATILE _DifferenceType __load;
+ _GLIBCXX_JOB_VOLATILE _DifferenceType _M_load;
};
/** @brief Work stealing algorithm for random access iterators.
@@ -177,21 +177,21 @@ template<typename _RAIter,
__iam_working = true;
// How many jobs per thread? last thread gets the rest.
- __my_job.__first =
+ __my_job._M_first =
static_cast<_DifferenceType>(__iam * (__length / __num_threads));
- __my_job.__last = (__iam == (__num_threads - 1)) ?
+ __my_job._M_last = (__iam == (__num_threads - 1)) ?
(__length - 1) : ((__iam + 1) * (__length / __num_threads) - 1);
- __my_job.__load = __my_job.__last - __my_job.__first + 1;
+ __my_job._M_load = __my_job._M_last - __my_job._M_first + 1;
- // Init __result with __first __value (to have a base value for reduction).
- if (__my_job.__first <= __my_job.__last)
+ // Init __result with _M_first __value (to have a base value for reduction).
+ if (__my_job._M_first <= __my_job._M_last)
{
// Cannot use volatile variable directly.
- _DifferenceType __my_first = __my_job.__first;
+ _DifferenceType __my_first = __my_job._M_first;
__result = __f(__op, __begin + __my_first);
- ++__my_job.__first;
- --__my_job.__load;
+ ++__my_job._M_first;
+ --__my_job._M_load;
}
_RAIter __current;
@@ -206,18 +206,18 @@ template<typename _RAIter,
# pragma omp flush(__busy)
// Thread has own work to do
- while (__my_job.__first <= __my_job.__last)
+ while (__my_job._M_first <= __my_job._M_last)
{
// fetch-and-add call
// Reserve __current __job block (size __chunk_size) in my queue.
_DifferenceType current_job =
- __fetch_and_add<_DifferenceType>(&(__my_job.__first), __chunk_size);
+ __fetch_and_add<_DifferenceType>(&(__my_job._M_first), __chunk_size);
- // Update __load, to make the three values consistent,
- // __first might have been changed in the meantime
- __my_job.__load = __my_job.__last - __my_job.__first + 1;
+ // Update _M_load, to make the three values consistent,
+ // _M_first might have been changed in the meantime
+ __my_job._M_load = __my_job._M_last - __my_job._M_first + 1;
for (_DifferenceType job_counter = 0;
- job_counter < __chunk_size && current_job <= __my_job.__last;
+ job_counter < __chunk_size && current_job <= __my_job._M_last;
++job_counter)
{
// Yes: process it!
@@ -248,9 +248,9 @@ template<typename _RAIter,
__yield();
# pragma omp flush(__busy)
__victim = rand_gen();
- __supposed_first = __job[__victim * __stride].__first;
- __supposed_last = __job[__victim * __stride].__last;
- __supposed_load = __job[__victim * __stride].__load;
+ __supposed_first = __job[__victim * __stride]._M_first;
+ __supposed_last = __job[__victim * __stride]._M_last;
+ __supposed_load = __job[__victim * __stride]._M_load;
}
while (__busy > 0
&& ((__supposed_load <= 0)
@@ -268,13 +268,13 @@ template<typename _RAIter,
// Push __victim's __start forward.
_DifferenceType __stolen_first =
__fetch_and_add<_DifferenceType>(
- &(__job[__victim * __stride].__first), __steal);
+ &(__job[__victim * __stride]._M_first), __steal);
_DifferenceType stolen_try =
__stolen_first + __steal - _DifferenceType(1);
- __my_job.__first = __stolen_first;
- __my_job.__last = __gnu_parallel::min(stolen_try, __supposed_last);
- __my_job.__load = __my_job.__last - __my_job.__first + 1;
+ __my_job._M_first = __stolen_first;
+ __my_job._M_last = __gnu_parallel::min(stolen_try, __supposed_last);
+ __my_job._M_load = __my_job._M_last - __my_job._M_first + 1;
// Has potential work again.
# pragma omp atomic
@@ -295,7 +295,7 @@ template<typename _RAIter,
// Points to last element processed (needed as return value for
// some algorithms like transform)
- __f.finish_iterator = __begin + __length;
+ __f._M_finish_iterator = __begin + __length;
omp_destroy_lock(&__output_lock);
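
The workstealing.h hunks rename the three fields that drive chunk reservation: a fetch-and-add on _M_first hands each caller a disjoint block of at most __chunk_size indices, whether the caller is the owning thread or a stealer, and _M_load is recomputed from _M_last and _M_first afterwards. A simplified single-threaded sketch of that reservation step, with std::atomic standing in for the internal __fetch_and_add primitive and a hypothetical _JobSketch in place of _Job:

#include <atomic>
#include <iostream>

// Simplified analogue of the job descriptor: _M_first is bumped atomically to
// reserve a chunk of indices up to _M_last.
struct _JobSketch
{
  std::atomic<long> _M_first;
  long              _M_last;
};

int main()
{
  const long __chunk_size = 4;
  _JobSketch __job;
  __job._M_first = 0;
  __job._M_last = 9;                  // indices 0..9

  long __done = 0;
  for (;;)
    {
      // Reserve the next chunk; concurrent callers doing the same
      // fetch-and-add would each obtain a disjoint range.
      long __current = __job._M_first.fetch_add(__chunk_size);
      if (__current > __job._M_last)
        break;
      for (long __i = __current;
           __i <= __job._M_last && __i < __current + __chunk_size; ++__i)
        ++__done;                     // "process" index __i
    }
  std::cout << __done << '\n';        // prints 10
  return 0;
}
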