aboutsummaryrefslogtreecommitdiff
path: root/gdb/btrace.c
diff options
context:
space:
mode:
authorTim Wiederhake <tim.wiederhake@intel.com>2016-11-21 16:39:57 +0100
committerTim Wiederhake <tim.wiederhake@intel.com>2017-02-14 10:57:56 +0100
commitfdd2bd920bd67e6a1e877baf52b9c138c00da13f (patch)
treed922fe98c14de1b2831fe4958b221eff5a298941 /gdb/btrace.c
parent508352a9bf3f84f2d731397bb0d9382c84f27f25 (diff)
downloadgdb-fdd2bd920bd67e6a1e877baf52b9c138c00da13f.zip
gdb-fdd2bd920bd67e6a1e877baf52b9c138c00da13f.tar.gz
gdb-fdd2bd920bd67e6a1e877baf52b9c138c00da13f.tar.bz2
btrace: Use binary search to find instruction.
Currently, btrace_find_insn_by_number will iterate over all function call segments to find the one that contains the needed instruction. This linear search is too slow for the upcoming Python bindings that will use this function to access instructions. This patch introduces a vector in struct btrace_thread_info that holds pointers to all recorded function segments and allows using binary search. The proper solution is to turn the underlying tree into a vector of objects and use indices for access. This requires more work. A patch set is currently being worked on and will be published later. Signed-off-by: Tim Wiederhake <tim.wiederhake@intel.com> gdb/ChangeLog: * btrace.c (btrace_fetch): Copy function call segments pointer into a vector. (btrace_clear): Clear the vector. (btrace_find_insn_by_number): Use binary search to find the correct function call segment. * btrace.h (btrace_fun_p): New typedef. (struct btrace_thread_info) <functions>: New field. Change-Id: I8a7f67e80bfe4ff62c4192f74a2153a70bf2a035
Diffstat (limited to 'gdb/btrace.c')
-rw-r--r-- gdb/btrace.c | 45
1 file changed, 39 insertions(+), 6 deletions(-)
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 06122cd..95dc7ab 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -1839,13 +1839,19 @@ btrace_fetch (struct thread_info *tp)
/* Compute the trace, provided we have any. */
if (!btrace_data_empty (&btrace))
{
+ struct btrace_function *bfun;
+
/* Store the raw trace data. The stored data will be cleared in
btrace_clear, so we always append the new trace. */
btrace_data_append (&btinfo->data, &btrace);
btrace_maint_clear (btinfo);
+ VEC_truncate (btrace_fun_p, btinfo->functions, 0);
btrace_clear_history (btinfo);
btrace_compute_ftrace (tp, &btrace);
+
+ for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
+ VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
}
do_cleanups (cleanup);
@@ -1868,6 +1874,8 @@ btrace_clear (struct thread_info *tp)
btinfo = &tp->btrace;
+ VEC_free (btrace_fun_p, btinfo->functions);
+
it = btinfo->begin;
while (it != NULL)
{
@@ -2458,20 +2466,45 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
unsigned int number)
{
const struct btrace_function *bfun;
+ unsigned int upper, lower;
- for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
- if (bfun->insn_offset <= number)
- break;
+ if (VEC_empty (btrace_fun_p, btinfo->functions))
+ return 0;
- if (bfun == NULL)
+ lower = 0;
+ bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
+ if (number < bfun->insn_offset)
return 0;
- if (bfun->insn_offset + ftrace_call_num_insn (bfun) <= number)
+ upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
+ bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
+ if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
return 0;
+ /* We assume that there are no holes in the numbering. */
+ for (;;)
+ {
+ const unsigned int average = lower + (upper - lower) / 2;
+
+ bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
+
+ if (number < bfun->insn_offset)
+ {
+ upper = average - 1;
+ continue;
+ }
+
+ if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
+ {
+ lower = average + 1;
+ continue;
+ }
+
+ break;
+ }
+
it->function = bfun;
it->index = number - bfun->insn_offset;
-
return 1;
}