author    Tim Wiederhake <tim.wiederhake@intel.com>    2017-05-30 12:47:37 +0200
committer Tim Wiederhake <tim.wiederhake@intel.com>    2017-05-30 12:49:25 +0200
commit    f158f208759703b94dbfc6de2222ae7740420faf (patch)
tree      d0018075f05ed63d7f54083d22b99f73d5a18adb
parent    521103fd00e593c08a6bedb619c5a9f8f7cc5a91 (diff)
btrace: Use function segment index in call iterator.
Remove FUNCTION pointer in struct btrace_call_iterator and use an index into the list of function segments instead.
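
For illustration only (this sketch is not part of the patch), the iterator change amounts to replacing the function segment pointer with a vector index; the end iterator is now encoded as an index equal to the vector's size rather than as a NULL pointer:

  /* Before: the end iterator was represented by FUNCTION == NULL.  */
  struct btrace_call_iterator
  {
    const struct btrace_thread_info *btinfo;
    const struct btrace_function *function;
  };

  /* After: the end iterator is represented by
     INDEX == BTINFO->FUNCTIONS.size ().  */
  struct btrace_call_iterator
  {
    const struct btrace_thread_info *btinfo;
    unsigned int index;
  };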
-rw-r--r--  gdb/ChangeLog        |  13
-rw-r--r--  gdb/btrace.c         | 198
-rw-r--r--  gdb/btrace.h         |   7
-rw-r--r--  gdb/record-btrace.c  |   2
4 files changed, 101 insertions(+), 119 deletions(-)
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index d6e5c03..d04197a 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,5 +1,18 @@
2017-05-30 Tim Wiederhake <tim.wiederhake@intel.com>
+ * btrace.c (btrace_ends_with_single_insn): New function.
+ (btrace_call_get, btrace_call_number, btrace_call_begin,
+ btrace_call_end, btrace_call_next, btrace_call_prev,
+ btrace_find_call_by_number): Use index into call segment vector
+ instead of pointer.
+ (btrace_call_cmp): Simplify.
+ * btrace.h (struct btrace_call_iterator): Replace function call segment
+ pointer with index into vector.
+ * record-btrace.c (record_btrace_call_history): Use index instead of
+ pointer.
+
+2017-05-30 Tim Wiederhake <tim.wiederhake@intel.com>
+
* btrace.c (btrace_insn_begin, btrace_insn_end,
btrace_find_insn_by_number): Add btinfo to iterator.
* btrace.h (struct btrace_insn_iterator): Add btinfo.
diff --git a/gdb/btrace.c b/gdb/btrace.c
index c2d3730..42ab33d 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -2527,12 +2527,33 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
return 1;
}
+/* Returns true if the recording ends with a function segment that
+ contains only a single (i.e. the current) instruction. */
+
+static bool
+btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
+{
+ const btrace_function *bfun;
+
+ if (btinfo->functions.empty ())
+ return false;
+
+ bfun = btinfo->functions.back ();
+ if (bfun->errcode != 0)
+ return false;
+
+ return ftrace_call_num_insn (bfun) == 1;
+}
+
/* See btrace.h. */
const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
- return it->function;
+ if (it->index >= it->btinfo->functions.size ())
+ return NULL;
+
+ return it->btinfo->functions[it->index];
}
/* See btrace.h. */
@@ -2540,28 +2561,14 @@ btrace_call_get (const struct btrace_call_iterator *it)
unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
- const struct btrace_thread_info *btinfo;
- const struct btrace_function *bfun;
- unsigned int insns;
+ const unsigned int length = it->btinfo->functions.size ();
- btinfo = it->btinfo;
- bfun = it->function;
- if (bfun != NULL)
- return bfun->number;
-
- /* For the end iterator, i.e. bfun == NULL, we return one more than the
- number of the last function. */
- bfun = btinfo->end;
- insns = VEC_length (btrace_insn_s, bfun->insn);
+ /* If the last function segment contains only a single instruction (i.e. the
+ current instruction), skip it. */
+ if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
+ return length;
- /* If the function contains only a single instruction (i.e. the current
- instruction), it will be skipped and its number is already the number
- we seek. */
- if (insns == 1)
- return bfun->number;
-
- /* Otherwise, return one more than the number of the last function. */
- return bfun->number + 1;
+ return it->index + 1;
}
/* See btrace.h. */
@@ -2570,14 +2577,11 @@ void
btrace_call_begin (struct btrace_call_iterator *it,
const struct btrace_thread_info *btinfo)
{
- const struct btrace_function *bfun;
-
- bfun = btinfo->begin;
- if (bfun == NULL)
+ if (btinfo->functions.empty ())
error (_("No trace."));
it->btinfo = btinfo;
- it->function = bfun;
+ it->index = 0;
}
/* See btrace.h. */
@@ -2586,14 +2590,11 @@ void
btrace_call_end (struct btrace_call_iterator *it,
const struct btrace_thread_info *btinfo)
{
- const struct btrace_function *bfun;
-
- bfun = btinfo->end;
- if (bfun == NULL)
+ if (btinfo->functions.empty ())
error (_("No trace."));
it->btinfo = btinfo;
- it->function = NULL;
+ it->index = btinfo->functions.size ();
}
/* See btrace.h. */
@@ -2601,35 +2602,35 @@ btrace_call_end (struct btrace_call_iterator *it,
unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
- const struct btrace_function *bfun;
- unsigned int steps;
+ const unsigned int length = it->btinfo->functions.size ();
- bfun = it->function;
- steps = 0;
- while (bfun != NULL)
+ if (it->index + stride < length - 1)
+ /* Default case: Simply advance the iterator. */
+ it->index += stride;
+ else if (it->index + stride == length - 1)
{
- const struct btrace_function *next;
- unsigned int insns;
-
- next = bfun->flow.next;
- if (next == NULL)
- {
- /* Ignore the last function if it only contains a single
- (i.e. the current) instruction. */
- insns = VEC_length (btrace_insn_s, bfun->insn);
- if (insns == 1)
- steps -= 1;
- }
-
- if (stride == steps)
- break;
+ /* We land exactly at the last function segment. If it contains only one
+ instruction (i.e. the current instruction) it is not actually part of
+ the trace. */
+ if (btrace_ends_with_single_insn (it->btinfo))
+ it->index = length;
+ else
+ it->index = length - 1;
+ }
+ else
+ {
+ /* We land past the last function segment and have to adjust the stride.
+ If the last function segment contains only one instruction (i.e. the
+ current instruction) it is not actually part of the trace. */
+ if (btrace_ends_with_single_insn (it->btinfo))
+ stride = length - it->index - 1;
+ else
+ stride = length - it->index;
- bfun = next;
- steps += 1;
+ it->index = length;
}
- it->function = bfun;
- return steps;
+ return stride;
}
/* See btrace.h. */
@@ -2637,48 +2638,33 @@ btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
- const struct btrace_thread_info *btinfo;
- const struct btrace_function *bfun;
- unsigned int steps;
+ const unsigned int length = it->btinfo->functions.size ();
+ int steps = 0;
- bfun = it->function;
- steps = 0;
+ gdb_assert (it->index <= length);
- if (bfun == NULL)
- {
- unsigned int insns;
-
- btinfo = it->btinfo;
- bfun = btinfo->end;
- if (bfun == NULL)
- return 0;
-
- /* Ignore the last function if it only contains a single
- (i.e. the current) instruction. */
- insns = VEC_length (btrace_insn_s, bfun->insn);
- if (insns == 1)
- bfun = bfun->flow.prev;
-
- if (bfun == NULL)
- return 0;
-
- steps += 1;
- }
+ if (stride == 0 || it->index == 0)
+ return 0;
- while (steps < stride)
+ /* If we are at the end, the first step is a special case. If the last
+ function segment contains only one instruction (i.e. the current
+ instruction) it is not actually part of the trace. To be able to step
+ over this instruction, we need at least one more function segment. */
+ if ((it->index == length) && (length > 1))
{
- const struct btrace_function *prev;
-
- prev = bfun->flow.prev;
- if (prev == NULL)
- break;
+ if (btrace_ends_with_single_insn (it->btinfo))
+ it->index = length - 2;
+ else
+ it->index = length - 1;
- bfun = prev;
- steps += 1;
+ steps = 1;
+ stride -= 1;
}
- it->function = bfun;
- return steps;
+ stride = std::min (stride, it->index);
+
+ it->index -= stride;
+ return steps + stride;
}
/* See btrace.h. */
@@ -2687,12 +2673,8 @@ int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
const struct btrace_call_iterator *rhs)
{
- unsigned int lnum, rnum;
-
- lnum = btrace_call_number (lhs);
- rnum = btrace_call_number (rhs);
-
- return (int) (lnum - rnum);
+ gdb_assert (lhs->btinfo == rhs->btinfo);
+ return (int) (lhs->index - rhs->index);
}
/* See btrace.h. */
@@ -2702,26 +2684,14 @@ btrace_find_call_by_number (struct btrace_call_iterator *it,
const struct btrace_thread_info *btinfo,
unsigned int number)
{
- const struct btrace_function *bfun;
+ const unsigned int length = btinfo->functions.size ();
- for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
- {
- unsigned int bnum;
-
- bnum = bfun->number;
- if (number == bnum)
- {
- it->btinfo = btinfo;
- it->function = bfun;
- return 1;
- }
-
- /* Functions are ordered and numbered consecutively. We could bail out
- earlier. On the other hand, it is very unlikely that we search for
- a nonexistent function. */
- }
+ if ((number == 0) || (number > length))
+ return 0;
- return 0;
+ it->btinfo = btinfo;
+ it->index = number - 1;
+ return 1;
}
/* See btrace.h. */
diff --git a/gdb/btrace.h b/gdb/btrace.h
index e567ef7..8fefc84 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -172,7 +172,7 @@ struct btrace_function
segment in control-flow order. */
unsigned int insn_offset;
- /* The function number in control-flow order.
+ /* The 1-based function number in control-flow order.
If INSN is empty indicating a gap in the trace due to a decode error,
we still count the gap as a function. */
unsigned int number;
@@ -209,9 +209,8 @@ struct btrace_call_iterator
/* The branch trace information for this thread. Will never be NULL. */
const struct btrace_thread_info *btinfo;
- /* The branch trace function segment.
- This will be NULL for the iterator pointing to the end of the trace. */
- const struct btrace_function *function;
+ /* The index of the function segment in BTINFO->FUNCTIONS. */
+ unsigned int index;
};
/* Branch trace iteration state for "record instruction-history". */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index d4f1bcf..86a4b1e 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1101,8 +1101,8 @@ record_btrace_call_history (struct target_ops *self, int size, int int_flags)
replay = btinfo->replay;
if (replay != NULL)
{
- begin.function = replay->function;
begin.btinfo = btinfo;
+ begin.index = replay->function->number - 1;
}
else
btrace_call_end (&begin, btinfo);
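
A hypothetical usage sketch (not from the GDB sources; dump_call_numbers is an invented helper) showing how the index-based iterator is driven through the btrace_call_* functions touched by this patch:

  /* Hypothetical example, for illustration only: print the 1-based
     number of every recorded function call segment of BTINFO.  */

  static void
  dump_call_numbers (const struct btrace_thread_info *btinfo)
  {
    struct btrace_call_iterator it;

    /* Throws an error if there is no trace; IT.INDEX starts at 0.  */
    btrace_call_begin (&it, btinfo);

    /* btrace_call_get returns NULL once IT.INDEX reaches the end,
       i.e. the size of BTINFO->FUNCTIONS.  */
    while (btrace_call_get (&it) != NULL)
      {
        printf_unfiltered ("call #%u\n", btrace_call_number (&it));

        /* Advance by one segment; a return value of 0 means the
           iterator could not move any further.  */
        if (btrace_call_next (&it, 1) == 0)
          break;
      }
  }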