Diffstat (limited to 'gcc')
-rw-r--r--  gcc/basic-block.h    14
-rw-r--r--  gcc/coverage.c       12
-rw-r--r--  gcc/gcov-dump.c      22
-rw-r--r--  gcc/gcov-io.c       259
-rw-r--r--  gcc/gcov-io.h        41
-rw-r--r--  gcc/profile.c       157
6 files changed, 495 insertions, 10 deletions
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 09b5eb0..288127f 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see
flow graph is manipulated by various optimizations. A signed type
makes those easy to detect. */
typedef HOST_WIDEST_INT gcov_type;
+typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
/* Control flow edge information. */
struct GTY((user)) edge_def {
@@ -91,6 +92,16 @@ enum cfg_edge_flags {
profile.c. */
extern const struct gcov_ctr_summary *profile_info;
+/* Working set size statistics for a given percentage of the entire
+ profile (sum_all from the counter summary). */
+typedef struct gcov_working_set_info
+{
+ /* Number of hot counters included in this working set. */
+ unsigned num_counters;
+ /* Smallest counter included in this working set. */
+ gcov_type min_counter;
+} gcov_working_set_t;
+
/* Declared in cfgloop.h. */
struct loop;
@@ -897,4 +908,7 @@ extern void rtl_profile_for_bb (basic_block);
extern void rtl_profile_for_edge (edge);
extern void default_rtl_profile (void);
+/* In profile.c. */
+extern gcov_working_set_t *find_working_set (unsigned pct_times_10);
+
#endif /* GCC_BASIC_BLOCK_H */
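The basic-block.h hunk only declares the new query interface; profile.c (below) computes the working set array from the counter histogram. As a rough sketch of how a consumer of the interface might use it, assuming profile_info has been populated from the counter summary (the helper name and the 99.9% cutoff are illustrative, not part of the patch):

    /* Illustrative only: pick a counter threshold covering roughly 99.9%
       of the total profile (sum_all).  */
    static gcov_type
    hot_counter_threshold (void)
    {
      /* Percentages are passed multiplied by 10 to avoid floats.  */
      gcov_working_set_t *ws = find_working_set (999);
      if (!ws)
        return 0;  /* No profile summary was read.  */
      return ws->min_counter;
    }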
diff --git a/gcc/coverage.c b/gcc/coverage.c
index b4d22df..f9b12e8 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -248,6 +248,13 @@ read_counts_file (void)
summary.ctrs[ix].run_max = sum.ctrs[ix].run_max;
summary.ctrs[ix].sum_max += sum.ctrs[ix].sum_max;
}
+ if (new_summary)
+ memcpy (summary.ctrs[GCOV_COUNTER_ARCS].histogram,
+ sum.ctrs[GCOV_COUNTER_ARCS].histogram,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+ else
+ gcov_histogram_merge (summary.ctrs[GCOV_COUNTER_ARCS].histogram,
+ sum.ctrs[GCOV_COUNTER_ARCS].histogram);
new_summary = 0;
}
else if (GCOV_TAG_IS_COUNTER (tag) && fn_ident)
@@ -268,8 +275,9 @@ read_counts_file (void)
entry->ctr = elt.ctr;
entry->lineno_checksum = lineno_checksum;
entry->cfg_checksum = cfg_checksum;
- entry->summary = summary.ctrs[elt.ctr];
- entry->summary.num = n_counts;
+ if (elt.ctr < GCOV_COUNTERS_SUMMABLE)
+ entry->summary = summary.ctrs[elt.ctr];
+ entry->summary.num = n_counts;
entry->counts = XCNEWVEC (gcov_type, n_counts);
}
else if (entry->lineno_checksum != lineno_checksum
diff --git a/gcc/gcov-dump.c b/gcc/gcov-dump.c
index 59b8380..fb01108 100644
--- a/gcc/gcov-dump.c
+++ b/gcc/gcov-dump.c
@@ -447,7 +447,8 @@ tag_summary (const char *filename ATTRIBUTE_UNUSED,
unsigned tag ATTRIBUTE_UNUSED, unsigned length ATTRIBUTE_UNUSED)
{
struct gcov_summary summary;
- unsigned ix;
+ unsigned ix, h_ix;
+ gcov_bucket_type *histo_bucket;
gcov_read_summary (&summary);
printf (" checksum=0x%08x", summary.checksum);
@@ -465,5 +466,24 @@ tag_summary (const char *filename ATTRIBUTE_UNUSED,
(HOST_WIDEST_INT)summary.ctrs[ix].run_max);
printf (", sum_max=" HOST_WIDEST_INT_PRINT_DEC,
(HOST_WIDEST_INT)summary.ctrs[ix].sum_max);
+ if (ix != GCOV_COUNTER_ARCS)
+ continue;
+ printf ("\n");
+ print_prefix (filename, 0, 0);
+ printf ("\t\tcounter histogram:");
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ histo_bucket = &summary.ctrs[ix].histogram[h_ix];
+ if (!histo_bucket->num_counters)
+ continue;
+ printf ("\n");
+ print_prefix (filename, 0, 0);
+ printf ("\t\t%d: num counts=%u, min counter="
+ HOST_WIDEST_INT_PRINT_DEC ", cum_counter="
+ HOST_WIDEST_INT_PRINT_DEC,
+ h_ix, histo_bucket->num_counters,
+ (HOST_WIDEST_INT)histo_bucket->min_value,
+ (HOST_WIDEST_INT)histo_bucket->cum_value);
+ }
}
}
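With this change gcov-dump prints one line per non-empty histogram bucket under the program summary. A hypothetical excerpt following the format strings above, with invented but internally consistent numbers (the per-line filename prefix written by print_prefix is omitted):

    		counter histogram:
    		8: num counts=12, min counter=8, cum_counter=101
    		22: num counts=3, min counter=96, cum_counter=310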
diff --git a/gcc/gcov-io.c b/gcc/gcov-io.c
index 37c1c3e..d64fb42 100644
--- a/gcc/gcov-io.c
+++ b/gcc/gcov-io.c
@@ -368,10 +368,25 @@ gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
- unsigned ix;
+ unsigned ix, h_ix, bv_ix, h_cnt = 0;
const struct gcov_ctr_summary *csum;
-
- gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH);
+ unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
+
+ /* Count number of non-zero histogram entries, and fill in a bit vector
+ of non-zero indices. The histogram is only currently computed for arc
+ counters. */
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ histo_bitvector[bv_ix] = 0;
+ csum = &summary->ctrs[GCOV_COUNTER_ARCS];
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ if (csum->histogram[h_ix].num_counters > 0)
+ {
+ histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
+ h_cnt++;
+ }
+ }
+ gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH(h_cnt));
gcov_write_unsigned (summary->checksum);
for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
{
@@ -380,6 +395,22 @@ gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
gcov_write_counter (csum->sum_all);
gcov_write_counter (csum->run_max);
gcov_write_counter (csum->sum_max);
+ if (ix != GCOV_COUNTER_ARCS)
+ {
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ gcov_write_unsigned (0);
+ continue;
+ }
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ gcov_write_unsigned (histo_bitvector[bv_ix]);
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ if (!csum->histogram[h_ix].num_counters)
+ continue;
+ gcov_write_unsigned (csum->histogram[h_ix].num_counters);
+ gcov_write_counter (csum->histogram[h_ix].min_value);
+ gcov_write_counter (csum->histogram[h_ix].cum_value);
+ }
}
}
#endif /* IN_LIBGCOV */
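The summary record now has a variable length: which of the 252 histogram buckets are present is encoded in an 8-word bit vector, and only non-empty buckets are written out (5 words each: one unsigned count plus two 64-bit counters), which is what GCOV_TAG_SUMMARY_LENGTH(h_cnt) accounts for. A standalone sketch of the bit-vector index math used above (illustrative, not part of the patch):

    /* Mark histogram bucket H_IX as present in an 8-word bit vector.  */
    static void
    set_histogram_bit (unsigned *bitvector, unsigned h_ix)
    {
      bitvector[h_ix / 32] |= 1u << (h_ix % 32);
    }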
@@ -488,8 +519,10 @@ gcov_read_string (void)
GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
- unsigned ix;
+ unsigned ix, h_ix, bv_ix, h_cnt = 0;
struct gcov_ctr_summary *csum;
+ unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
+ unsigned cur_bitvector;
summary->checksum = gcov_read_unsigned ();
for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
@@ -499,6 +532,43 @@ gcov_read_summary (struct gcov_summary *summary)
csum->sum_all = gcov_read_counter ();
csum->run_max = gcov_read_counter ();
csum->sum_max = gcov_read_counter ();
+ memset (csum->histogram, 0,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ {
+ histo_bitvector[bv_ix] = gcov_read_unsigned ();
+ h_cnt += __builtin_popcountll (histo_bitvector[bv_ix]);
+ }
+ bv_ix = 0;
+ h_ix = 0;
+ cur_bitvector = 0;
+ while (h_cnt--)
+ {
+ /* Find the index corresponding to the next entry we will read in.
+ First find the next non-zero bitvector and re-initialize
+ the histogram index accordingly, then right shift and increment
+ the index until we find a set bit. */
+ while (!cur_bitvector)
+ {
+ h_ix = bv_ix * 32;
+ cur_bitvector = histo_bitvector[bv_ix++];
+ gcc_assert(bv_ix <= GCOV_HISTOGRAM_BITVECTOR_SIZE);
+ }
+ while (!(cur_bitvector & 0x1))
+ {
+ h_ix++;
+ cur_bitvector >>= 1;
+ }
+ gcc_assert(h_ix < GCOV_HISTOGRAM_SIZE);
+
+ csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
+ csum->histogram[h_ix].min_value = gcov_read_counter ();
+ csum->histogram[h_ix].cum_value = gcov_read_counter ();
+ /* Shift off the index we are done with and increment to the
+ corresponding next histogram entry. */
+ cur_bitvector >>= 1;
+ h_ix++;
+ }
}
}
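On the read side the same bit vector is consulted: the number of set bits gives the number of bucket records that follow, and the walk above recovers each bucket index in increasing order. An equivalent, more direct sketch of that walk (illustrative only; visit stands in for the three gcov_read_* calls):

    /* Call VISIT (h_ix) once for each set bit, in increasing bucket order.  */
    static void
    for_each_present_bucket (const unsigned *bitvector,
                             void (*visit) (unsigned h_ix))
    {
      unsigned bv_ix, bit;
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        for (bit = 0; bit < 32; bit++)
          if (bitvector[bv_ix] & (1u << bit))
            visit (bv_ix * 32 + bit);
    }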
@@ -550,3 +620,184 @@ gcov_time (void)
return status.st_mtime;
}
#endif /* IN_GCOV */
+
+#if IN_LIBGCOV || !IN_GCOV
+/* Determine the index into histogram for VALUE. */
+
+static unsigned
+gcov_histo_index(gcov_type value)
+{
+ gcov_type_unsigned v = (gcov_type_unsigned)value;
+ unsigned r = 0;
+ unsigned prev2bits = 0;
+
+ /* Find index into log2 scale histogram, where each of the log2
+ sized buckets is divided into 4 linear sub-buckets for better
+ focus in the higher buckets. */
+
+ /* Find the place of the most-significant bit set. */
+ if (v > 0)
+ r = 63 - __builtin_clzll (v);
+
+ /* If at most the 2 least significant bits are set (value is
+ 0 - 3) then that value is our index into the lowest set of
+ four buckets. */
+ if (r < 2)
+ return (unsigned)value;
+
+ gcc_assert (r < 64);
+
+ /* Find the two next most significant bits to determine which
+ of the four linear sub-buckets to select. */
+ prev2bits = (v >> (r - 2)) & 0x3;
+ /* Finally, compose the final bucket index from the log2 index and
+ the next 2 bits. The minimum r value at this point is 2 since we
+ returned above when r was less than 2, so the minimum bucket at
+ this point is 4. */
+ return (r - 1) * 4 + prev2bits;
+}
+
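A few worked values may make the bucketing scheme concrete (sample inputs chosen purely for illustration):

    /* Expected results of gcov_histo_index for some sample counter values:
         value 3    -> bucket 3   (r < 2, the value itself indexes the
                                   lowest four buckets)
         value 100  -> bucket 22  (r = 6, next two bits = 2: (6-1)*4 + 2)
         value 1000 -> bucket 35  (r = 9, next two bits = 3: (9-1)*4 + 3)
       Each power-of-two range at or above 4 is thus split into four linear
       sub-buckets selected by the two bits after the leading one.  */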
+/* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
+ the same relative order in both histograms, and are matched up
+ and merged in reverse order. Each counter is assigned an equal portion of
+ its entry's original cumulative counter value when computing the
+ new merged cum_value. */
+
+static void
+gcov_histogram_merge (gcov_bucket_type *tgt_histo,
+ gcov_bucket_type *src_histo)
+{
+ int src_i, tgt_i, tmp_i = 0;
+ unsigned src_num, tgt_num, merge_num;
+ gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
+ gcov_type merge_min;
+ gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
+ int src_done = 0;
+
+ memset(tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+
+ /* Assume that the counters are in the same relative order in both
+ histograms. Walk the histograms from largest to smallest entry,
+ matching up and combining counters in order. */
+ src_num = 0;
+ src_cum = 0;
+ src_i = GCOV_HISTOGRAM_SIZE - 1;
+ for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
+ {
+ tgt_num = tgt_histo[tgt_i].num_counters;
+ tgt_cum = tgt_histo[tgt_i].cum_value;
+ /* Keep going until all of the target histogram's counters at this
+ position have been matched and merged with counters from the
+ source histogram. */
+ while (tgt_num > 0 && !src_done)
+ {
+ /* If this is either the first time through this loop or we just
+ exhausted the previous non-zero source histogram entry, look
+ for the next non-zero source histogram entry. */
+ if (!src_num)
+ {
+ /* Locate the next non-zero entry. */
+ while (src_i >= 0 && !src_histo[src_i].num_counters)
+ src_i--;
+ /* If source histogram has fewer counters, then just copy over the
+ remaining target counters and quit. */
+ if (src_i < 0)
+ {
+ tmp_histo[tgt_i].num_counters += tgt_num;
+ tmp_histo[tgt_i].cum_value += tgt_cum;
+ if (!tmp_histo[tgt_i].min_value ||
+ tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
+ tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
+ while (--tgt_i >= 0)
+ {
+ tmp_histo[tgt_i].num_counters
+ += tgt_histo[tgt_i].num_counters;
+ tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
+ if (!tmp_histo[tgt_i].min_value ||
+ tgt_histo[tgt_i].min_value
+ < tmp_histo[tgt_i].min_value)
+ tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
+ }
+
+ src_done = 1;
+ break;
+ }
+
+ src_num = src_histo[src_i].num_counters;
+ src_cum = src_histo[src_i].cum_value;
+ }
+
+ /* The number of counters to merge on this pass is the minimum
+ of the remaining counters from the current target and source
+ histogram entries. */
+ merge_num = tgt_num;
+ if (src_num < merge_num)
+ merge_num = src_num;
+
+ /* The merged min_value is the sum of the min_values from target
+ and source. */
+ merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;
+
+ /* Compute the portion of source and target entries' cum_value
+ that will be apportioned to the counters being merged.
+ The total remaining cum_value from each entry is divided
+ equally among the counters from that histogram entry if we
+ are not merging all of them. */
+ merge_src_cum = src_cum;
+ if (merge_num < src_num)
+ merge_src_cum = merge_num * src_cum / src_num;
+ merge_tgt_cum = tgt_cum;
+ if (merge_num < tgt_num)
+ merge_tgt_cum = merge_num * tgt_cum / tgt_num;
+ /* The merged cum_value is the sum of the source and target
+ components. */
+ merge_cum = merge_src_cum + merge_tgt_cum;
+
+ /* Update the remaining number of counters and cum_value left
+ to be merged from this source and target entry. */
+ src_cum -= merge_src_cum;
+ tgt_cum -= merge_tgt_cum;
+ src_num -= merge_num;
+ tgt_num -= merge_num;
+
+ /* The merged counters get placed in the new merged histogram
+ at the entry for the merged min_value. */
+ tmp_i = gcov_histo_index(merge_min);
+ gcc_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
+ tmp_histo[tmp_i].num_counters += merge_num;
+ tmp_histo[tmp_i].cum_value += merge_cum;
+ if (!tmp_histo[tmp_i].min_value ||
+ merge_min < tmp_histo[tmp_i].min_value)
+ tmp_histo[tmp_i].min_value = merge_min;
+
+ /* Ensure the search for the next non-zero src_histo entry starts
+ at the next smallest histogram bucket. */
+ if (!src_num)
+ src_i--;
+ }
+ }
+
+ gcc_assert (tgt_i < 0);
+
+ /* In the case where there were more counters in the source histogram,
+ accumulate the remaining unmerged cumulative counter values. Add
+ those to the smallest non-zero target histogram entry. Otherwise,
+ the total cumulative counter values in the histogram will be smaller
+ than the sum_all stored in the summary, which will complicate
+ computing the working set information from the histogram later on. */
+ if (src_num)
+ src_i--;
+ while (src_i >= 0)
+ {
+ src_cum += src_histo[src_i].cum_value;
+ src_i--;
+ }
+ /* At this point, tmp_i should be the smallest non-zero entry in the
+ tmp_histo. */
+ gcc_assert(tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
+ && tmp_histo[tmp_i].num_counters > 0);
+ tmp_histo[tmp_i].cum_value += src_cum;
+
+ /* Finally, copy the merged histogram into tgt_histo. */
+ memcpy(tgt_histo, tmp_histo, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+}
+#endif /* IN_LIBGCOV || !IN_GCOV */
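To illustrate the merge semantics, here is a sketch under the assumption that it is compiled somewhere the static gcov_histogram_merge is visible (for example a unit that includes gcov-io.c, as coverage.c does); the counter values are hypothetical:

    static void
    histogram_merge_example (void)
    {
      gcov_bucket_type tgt[GCOV_HISTOGRAM_SIZE];
      gcov_bucket_type src[GCOV_HISTOGRAM_SIZE];

      memset (tgt, 0, sizeof (tgt));
      memset (src, 0, sizeof (src));

      /* One counter of value 8 in the target run, one of value 24 in the
         source run, each recorded in its own bucket.  */
      tgt[gcov_histo_index (8)].num_counters = 1;   /* bucket 8 */
      tgt[gcov_histo_index (8)].min_value = 8;
      tgt[gcov_histo_index (8)].cum_value = 8;
      src[gcov_histo_index (24)].num_counters = 1;  /* bucket 14 */
      src[gcov_histo_index (24)].min_value = 24;
      src[gcov_histo_index (24)].cum_value = 24;

      gcov_histogram_merge (tgt, src);

      /* The single target counter is matched with the single source counter,
         so merge_num = 1, merge_min = 8 + 24 = 32 and merge_cum = 32; the
         merged counter lands in bucket gcov_histo_index (32) == 16, which
         now holds num_counters = 1, min_value = 32, cum_value = 32.  */
    }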
diff --git a/gcc/gcov-io.h b/gcc/gcov-io.h
index 972dc93..e1532d7 100644
--- a/gcc/gcov-io.h
+++ b/gcc/gcov-io.h
@@ -139,7 +139,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
counts: header int64:count*
summary: int32:checksum {count-summary}GCOV_COUNTERS_SUMMABLE
count-summary: int32:num int32:runs int64:sum
- int64:max int64:sum_max
+ int64:max int64:sum_max histogram
+ histogram: {int32:bitvector}8 histogram-buckets*
+ histogram-buckets: int32:num int64:min int64:sum
The ANNOUNCE_FUNCTION record is the same as that in the note file,
but without the source location. The COUNTS gives the
@@ -171,8 +173,10 @@ typedef unsigned gcov_unsigned_t __attribute__ ((mode (SI)));
typedef unsigned gcov_position_t __attribute__ ((mode (SI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (DI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (DI)));
#else
typedef signed gcov_type __attribute__ ((mode (SI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (SI)));
#endif
#else
#if BITS_PER_UNIT == 16
@@ -180,16 +184,20 @@ typedef unsigned gcov_unsigned_t __attribute__ ((mode (HI)));
typedef unsigned gcov_position_t __attribute__ ((mode (HI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (SI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (SI)));
#else
typedef signed gcov_type __attribute__ ((mode (HI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (HI)));
#endif
#else
typedef unsigned gcov_unsigned_t __attribute__ ((mode (QI)));
typedef unsigned gcov_position_t __attribute__ ((mode (QI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (HI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (HI)));
#else
typedef signed gcov_type __attribute__ ((mode (QI)));
+typedef unsigned gcov_type_unsigned __attribute__ ((mode (QI)));
#endif
#endif
#endif
@@ -210,6 +218,7 @@ typedef unsigned gcov_position_t;
#if IN_GCOV
#define GCOV_LINKAGE static
typedef HOST_WIDEST_INT gcov_type;
+typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
#if IN_GCOV > 0
#include <sys/types.h>
#endif
@@ -309,8 +318,9 @@ typedef HOST_WIDEST_INT gcov_type;
#define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH) / 2)
#define GCOV_TAG_OBJECT_SUMMARY ((gcov_unsigned_t)0xa1000000) /* Obsolete */
#define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000)
-#define GCOV_TAG_SUMMARY_LENGTH \
- (1 + GCOV_COUNTERS_SUMMABLE * (2 + 3 * 2))
+#define GCOV_TAG_SUMMARY_LENGTH(NUM) \
+ (1 + GCOV_COUNTERS_SUMMABLE * (10 + 3 * 2) + (NUM) * 5)
+
/* Counters that are collected. */
#define GCOV_COUNTER_ARCS 0 /* Arc transitions. */
@@ -389,6 +399,29 @@ typedef HOST_WIDEST_INT gcov_type;
/* Structured records. */
+/* Structure used for each bucket of the log2 histogram of counter values. */
+typedef struct
+{
+ /* Number of counters whose profile count falls within the bucket. */
+ gcov_unsigned_t num_counters;
+ /* Smallest profile count included in this bucket. */
+ gcov_type min_value;
+ /* Cumulative value of the profile counts in this bucket. */
+ gcov_type cum_value;
+} gcov_bucket_type;
+
+/* For a log2 scale histogram with each range split into 4
+ linear sub-ranges, there will be at most 64 (max gcov_type bit size) - 1
+ log2 ranges, since the lowest 2 log2 values share the lowest set of
+ 4 linear sub-ranges (values 0 - 3). This is 252 total entries (63*4). */
+
+#define GCOV_HISTOGRAM_SIZE 252
+
+/* How many unsigned ints are required to hold a bit vector of non-zero
+ histogram entries when the histogram is written to the gcov file.
+ This is essentially a ceiling divide by 32 bits. */
+#define GCOV_HISTOGRAM_BITVECTOR_SIZE ((GCOV_HISTOGRAM_SIZE + 31) / 32)
+
/* Cumulative counter data. */
struct gcov_ctr_summary
{
@@ -397,6 +430,8 @@ struct gcov_ctr_summary
gcov_type sum_all; /* sum of all counters accumulated. */
gcov_type run_max; /* maximum value on a single run. */
gcov_type sum_max; /* sum of individual run max values. */
+ gcov_bucket_type histogram[GCOV_HISTOGRAM_SIZE]; /* histogram of
+ counter values. */
};
/* Object & program summary record. */
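As a quick check of the new constants: there are 63 usable log2 ranges (the lowest two share the bottom four buckets), each split into 4 linear sub-buckets, giving 252 entries; 252 buckets need ceil(252/32) = 8 bit-vector words; and each non-empty bucket contributes 1 + 2 + 2 = 5 words to the summary record, which is the (NUM) * 5 term in GCOV_TAG_SUMMARY_LENGTH. A compile-time restatement of the first two facts (illustrative only, not part of the patch):

    /* C89-style static assertions: a negative array size is an error.  */
    typedef char check_histogram_size[GCOV_HISTOGRAM_SIZE == 63 * 4 ? 1 : -1];
    typedef char check_bitvector_size[GCOV_HISTOGRAM_BITVECTOR_SIZE == 8 ? 1 : -1];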
diff --git a/gcc/profile.c b/gcc/profile.c
index 3d0689a..a5029a1 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -84,6 +84,15 @@ struct bb_info {
const struct gcov_ctr_summary *profile_info;
+/* Number of data points in the working set summary array. Using 128
+ provides information for at least every 1% increment of the total
+ profile size. The last entry is hardwired to 99.9% of the total. */
+#define NUM_GCOV_WORKING_SETS 128
+
+/* Counter working set information computed from the current counter
+ summary. Not initialized unless profile_info summary is non-NULL. */
+static gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];
+
/* Collect statistics on the performance of this pass for the entire source
file. */
@@ -192,6 +201,152 @@ instrument_values (histogram_values values)
}
+/* Compute the working set information from the counter histogram in
+ the profile summary. This is an array of information corresponding to a
+ range of percentages of the total execution count (sum_all), and includes
+ the number of counters required to cover that working set percentage and
+ the minimum counter value in that working set. */
+
+static void
+compute_working_sets (void)
+{
+ gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
+ gcov_type ws_cum_hotness_incr;
+ gcov_type cum, tmp_cum;
+ const gcov_bucket_type *histo_bucket;
+ unsigned ws_ix, c_num, count, pctinc, pct;
+ int h_ix;
+ gcov_working_set_t *ws_info;
+
+ if (!profile_info)
+ return;
+
+ /* Compute the amount of sum_all that the cumulative hotness grows
+ by in each successive working set entry, which depends on the
+ number of working set entries. */
+ ws_cum_hotness_incr = profile_info->sum_all / NUM_GCOV_WORKING_SETS;
+
+ /* Next fill in an array of the cumulative hotness values corresponding
+ to each working set summary entry we are going to compute below.
+ Skip 0% statistics, which can be extrapolated from the
+ rest of the summary data. */
+ cum = ws_cum_hotness_incr;
+ for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
+ ws_ix++, cum += ws_cum_hotness_incr)
+ working_set_cum_values[ws_ix] = cum;
+ /* The last summary entry is reserved for (roughly) 99.9% of the
+ working set. Divide by 1024 so it becomes a shift, which gives
+ almost exactly 99.9%. */
+ working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
+ = profile_info->sum_all - profile_info->sum_all/1024;
+
+ /* Next, walk through the histogram in descending order of hotness
+ and compute the statistics for the working set summary array.
+ As histogram entries are accumulated, we check to see which
+ working set entries have had their expected cum_value reached
+ and fill them in, walking the working set entries in increasing
+ size of cum_value. */
+ ws_ix = 0; /* The current entry into the working set array. */
+ cum = 0; /* The current accumulated counter sum. */
+ count = 0; /* The current accumulated count of block counters. */
+ for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
+ h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
+ {
+ histo_bucket = &profile_info->histogram[h_ix];
+
+ /* If we haven't reached the required cumulative counter value for
+ the current working set percentage, simply accumulate this histogram
+ entry into the running sums and continue to the next histogram
+ entry. */
+ if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
+ {
+ cum += histo_bucket->cum_value;
+ count += histo_bucket->num_counters;
+ continue;
+ }
+
+ /* If adding the current histogram entry's cumulative counter value
+ causes us to exceed the current working set size, then estimate
+ how many of this histogram entry's counter values are required to
+ reach the working set size, and fill in working set entries
+ as we reach their expected cumulative value. */
+ for (c_num = 0, tmp_cum = cum;
+ c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
+ c_num++)
+ {
+ count++;
+ /* If we haven't reached the last histogram entry counter, add
+ in the minimum value again. This will underestimate the
+ cumulative sum so far, because many of the counter values in this
+ entry may have been larger than the minimum. We could add in the
+ average value every time, but that would require an expensive
+ divide operation. */
+ if (c_num + 1 < histo_bucket->num_counters)
+ tmp_cum += histo_bucket->min_value;
+ /* If we have reached the last histogram entry counter, then add
+ in the entire cumulative value. */
+ else
+ tmp_cum = cum + histo_bucket->cum_value;
+
+ /* Next walk through successive working set entries and fill in
+ the statistics for any whose size we have reached by accumulating
+ this histogram counter. */
+ while (ws_ix < NUM_GCOV_WORKING_SETS
+ && tmp_cum >= working_set_cum_values[ws_ix])
+ {
+ gcov_working_sets[ws_ix].num_counters = count;
+ gcov_working_sets[ws_ix].min_counter
+ = histo_bucket->min_value;
+ ws_ix++;
+ }
+ }
+ /* Finally, update the running cumulative value since we were
+ using a temporary above. */
+ cum += histo_bucket->cum_value;
+ }
+ gcc_assert (ws_ix == NUM_GCOV_WORKING_SETS);
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "Counter working sets:\n");
+ /* Multiply the percentage by 100 to avoid float. */
+ pctinc = 100 * 100 / NUM_GCOV_WORKING_SETS;
+ for (ws_ix = 0, pct = pctinc; ws_ix < NUM_GCOV_WORKING_SETS;
+ ws_ix++, pct += pctinc)
+ {
+ if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
+ pct = 9990;
+ ws_info = &gcov_working_sets[ws_ix];
+ /* Print out the percentage using int arithmetic to avoid float. */
+ fprintf (dump_file, "\t\t%u.%02u%%: num counts=%u, min counter="
+ HOST_WIDEST_INT_PRINT_DEC "\n",
+ pct / 100, pct - (pct / 100 * 100),
+ ws_info->num_counters,
+ (HOST_WIDEST_INT)ws_info->min_counter);
+ }
+ }
+}
+
+/* Given the desired percentage of the full profile (sum_all from the
+ summary), multiplied by 10 to avoid float in PCT_TIMES_10, returns
+ the corresponding working set information. If an exact match for
+ the percentage isn't found, the closest value is used. */
+
+gcov_working_set_t *
+find_working_set (unsigned pct_times_10)
+{
+ unsigned i;
+ if (!profile_info)
+ return NULL;
+ gcc_assert (pct_times_10 <= 1000);
+ if (pct_times_10 >= 999)
+ return &gcov_working_sets[NUM_GCOV_WORKING_SETS - 1];
+ i = pct_times_10 * NUM_GCOV_WORKING_SETS / 1000;
+ if (!i)
+ return &gcov_working_sets[0];
+ return &gcov_working_sets[i - 1];
+}
+
/* Computes hybrid profile for all matching entries in da_file.
CFG_CHECKSUM is the precomputed checksum for the CFG. */
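Since entry ws_ix of gcov_working_sets covers roughly the first (ws_ix + 1)/128 of sum_all, the index arithmetic in find_working_set maps a requested percentage to the nearest precomputed entry. Some worked examples (the inputs are illustrative):

    /* find_working_set (500) -> &gcov_working_sets[63]   covers 64/128 = 50%
       find_working_set (250) -> &gcov_working_sets[31]   covers 32/128 = 25%
       find_working_set (7)   -> &gcov_working_sets[0]    covers 1/128 ~ 0.78%
       find_working_set (999) -> &gcov_working_sets[127]  hardwired to ~99.9%  */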
@@ -219,6 +374,8 @@ get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
if (!counts)
return NULL;
+ compute_working_sets();
+
if (dump_file && profile_info)
fprintf(dump_file, "Merged %u profiles with maximal count %u.\n",
profile_info->runs, (unsigned) profile_info->sum_max);