author     Markus Metzger <markus.t.metzger@intel.com>  2016-01-21 15:02:27 +0100
committer  Markus Metzger <markus.t.metzger@intel.com>  2016-10-28 11:05:07 +0200
commit     d87fdac3591d57c1a667b9d2dfeb05e3198b0b53 (patch)
tree       ee8859aae4f88bac1c789c569387f8e3316bdf0e
parent     259ba1e8acfd5ade4b6fa81e68a5e694e438fa28 (diff)
btrace: bridge gaps

Most of the time, the trace should be in one piece.  This case is handled
fine by GDB.  In some cases, however, there may be gaps in the trace.  They
result from trace decode errors or from overflows.

A gap in the trace means we lost an unknown amount of trace.  Gaps can be
very small, such as a few instructions in the same function, or they can be
rather big.  We may, for example, lose a few function calls or returns.
The trace may continue in a different function and we likely don't know how
we got there.

Even though we can't say how the program executed across a gap, higher
levels may not be impacted too much by it.  Let's assume we have functions
a-e and a trace that looks roughly like this:

  a
   \
    b           b
     \         /
      c <gap> c
             /
        d   d
         \ /
          e

Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap.  This patch is trying to
connect the c and b function segments across the gap.  This will add a to
the back trace of b on the right hand side.

The changes are reflected in GDB's internal representation of the trace and
will improve:

  - the output of "record function-call-history /c"
  - the output of "backtrace" in replay mode
  - source stepping in replay mode will be improved indirectly via the
    improved back trace

I don't have an automated test for this patch; decode errors will be fixed
and overflows occur sporadically and are quite rare.  I tested it by
hacking GDB to provoke a decode error and on the expected gap in the
gdb.btrace/dlopen.exp test.  The issue is that we can't predict where we
will be able to re-sync in case of errors.  For the expected decode error
in gdb.btrace/dlopen.exp, for example, we may be able to re-sync somewhere
in dlclose, in test, in main, or not at all.

Here's one example run of gdb.btrace/dlopen.exp with and without this patch.

  (gdb) info record
  Active record target: record-btrace
  Recording format: Intel Processor Trace.
  Buffer size: 16kB.
  warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
  warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
  warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
  warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
  warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
  warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
  warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
  warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
  warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
  warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
  warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
  warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
  Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
  (gdb) record instruction-history 83876, +2
  83876      => 0xb7fec46f <call_init.part.0+95>:      call   *%eax
  [decode error (-13): no memory mapped at this address]
  [disabled]
  83877         0xb7ff1c70 <_dl_close_worker.part.0+1584>:      nop

Without the patch, the trace is disconnected and the backtrace is short:

  (gdb) record goto 83876
  #0  0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
  (gdb) backtrace
  #0  0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
  #1  0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
  #2  0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
  Backtrace stopped: not enough registers or memory available to unwind further
  (gdb) record goto 83877
  #0  0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
  (gdb) backtrace
  #0  0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
  #1  0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
  #2  0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
  #3  0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
  #4  0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
  #5  0xb7fc3d98 in dlclose () from /lib/libdl.so.2
  #6  0x0804860a in test ()
  #7  0x08048628 in main ()

With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.

  (gdb) record goto 83876
  #0  0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
  (gdb) backtrace
  #0  0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
  #1  0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
  #2  0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
  #3  0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
  #4  0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
  #5  0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
  #6  0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
  #7  0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
  #8  0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
  #9  0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
  #10 0x0804841c in ?? ()
  #11 0x08048470 in dlopen@plt ()
  #12 0x080485a3 in test ()
  #13 0x08048628 in main ()
  (gdb) record goto 83877
  #0  0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
  (gdb) backtrace
  #0  0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
  #1  0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
  #2  0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
  #3  0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
  #4  0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
  #5  0xb7fc3d98 in dlclose () from /lib/libdl.so.2
  #6  0x0804860a in test ()
  #7  0x08048628 in main ()

It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.  It works best when the gap is
small and the trace pieces are long.

gdb/
        * btrace.c (bfun_s): New typedef.
        (ftrace_update_caller): Print caller in debug dump.
        (ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
        (ftrace_compute_global_level_offset, ftrace_connect_bfun)
        (ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
        (btrace_compute_ftrace_bts): Pass vector of gaps.  Collect gaps.
        (btrace_compute_ftrace_pt): Likewise.
        (btrace_compute_ftrace): Split into this, ...
        (btrace_compute_ftrace_1): ... this, and ...
        (btrace_finalize_ftrace): ... this.  Call btrace_bridge_gaps.
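
[Editor's note] The heart of the heuristic is the back-trace comparison: two
function segments left and right of a gap look like a plausible connection
when their caller chains agree.  The following standalone sketch is not GDB
code; the segment struct, the names, and the strcmp-based comparison are
simplifications of what ftrace_match_backtrace does with minimal/full symbols
and tail-call skipping.  It only models the counting step:

  /* Standalone sketch, not GDB code: model a function segment as a node
     with an "up" (caller) pointer and count how many caller levels two
     back traces share.  */
  #include <stdio.h>
  #include <string.h>

  struct segment
  {
    const char *name;     /* stands in for the segment's msym/sym */
    struct segment *up;   /* caller; NULL at the top of the back trace */
  };

  /* Return the number of matching caller levels, or 0 if the innermost
     segments already differ.  */
  static int
  match_backtrace (struct segment *lhs, struct segment *rhs)
  {
    int matches;

    for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
      {
        if (strcmp (lhs->name, rhs->name) != 0)
          return 0;

        lhs = lhs->up;
        rhs = rhs->up;
      }

    return matches;
  }

  int
  main (void)
  {
    /* Left of the gap: c, called by b, called by a.  */
    struct segment a = { "a", NULL };
    struct segment b = { "b", &a };
    struct segment c = { "c", &b };

    /* Right of the gap: another c whose known back trace is only b.  */
    struct segment b2 = { "b", NULL };
    struct segment c2 = { "c", &b2 };

    /* Two levels (c and b) agree, so connecting the segments looks
       plausible; bridging would then also give the right-hand b the
       caller a.  */
    printf ("matching levels: %d\n", match_backtrace (&c, &c2));
    return 0;
  }

In the patch itself, ftrace_bridge_gap runs this comparison for every pair of
candidate segments in the two back traces and connects the pair with the most
matches, provided the count reaches the current minimum.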
-rw-r--r--   gdb/ChangeLog  |  13
-rw-r--r--   gdb/btrace.c   | 430
2 files changed, 425 insertions, 18 deletions
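
[Editor's note] As an illustration of the control flow added in
btrace_bridge_gaps below: gaps are attempted with a high match requirement
first, unbridged gaps are kept on a separate list, and the requirement is
lowered only after everything that can be bridged at the current level has
been bridged.  The following standalone sketch is not GDB code; the gap
struct and its achievable_matches field are invented toy data standing in
for what the real code computes from the trace:

  /* Standalone sketch, not GDB code: the retry strategy of
     btrace_bridge_gaps on toy data.  */
  #include <stdio.h>

  struct gap
  {
    int achievable_matches;   /* toy stand-in for the best match count */
    int bridged;
  };

  int
  main (void)
  {
    struct gap gaps[] = { { 5, 0 }, { 2, 0 }, { 0, 0 } };
    const int ngaps = sizeof (gaps) / sizeof (gaps[0]);
    int min_matches;

    /* Demand many matches first; lower the requirement step by step.  */
    for (min_matches = 5; min_matches > 0; --min_matches)
      {
        int progress = 1;

        /* Keep revisiting unbridged gaps as long as we make progress.  In
           the real code this matters because closing one gap can lengthen
           the back trace next to another gap; with fixed toy values it
           simply stops after one pass.  */
        while (progress)
          {
            int i;

            progress = 0;
            for (i = 0; i < ngaps; ++i)
              if (!gaps[i].bridged
                  && gaps[i].achievable_matches >= min_matches)
                {
                  gaps[i].bridged = 1;
                  progress = 1;
                  printf ("bridged gap %d with %d matches (required %d)\n",
                          i, gaps[i].achievable_matches, min_matches);
                }
          }
      }

    /* The last gap (0 achievable matches) is never bridged; the real code
       likewise gives up on gaps it cannot bridge even with a minimum of
       one match.  */
    return 0;
  }

Keeping the unbridged gaps on a separate list instead of re-pushing them onto
the work list is what prevents an infinite loop when a gap simply cannot be
bridged, as the comment in btrace_bridge_gaps notes.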
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index 2a601a9..7b5ff1a 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,5 +1,18 @@
2016-10-28 Markus Metzger <markus.t.metzger@intel.com>
+ * btrace.c (bfun_s): New typedef.
+ (ftrace_update_caller): Print caller in debug dump.
+ (ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
+ (ftrace_compute_global_level_offset, ftrace_connect_bfun)
+ (ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
+ (btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
+ (btrace_compute_ftrace_pt): Likewise.
+ (btrace_compute_ftrace): Split into this, ...
+ (btrace_compute_ftrace_1): ... this, and ...
+ (btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
+
+2016-10-28 Markus Metzger <markus.t.metzger@intel.com>
+
* btrace.c (ftrace_new_return): Start from the previous function's
level if we can't find a matching call for a return.
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 10b6db4..39d537c 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -49,6 +49,10 @@ static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
/* Control whether to skip PAD packets when computing the packet history. */
static int maint_btrace_pt_skip_pad = 1;
+/* A vector of function segments. */
+typedef struct btrace_function * bfun_s;
+DEF_VEC_P (bfun_s);
+
static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message. Use do ... while (0) to avoid ambiguities
@@ -234,6 +238,7 @@ ftrace_update_caller (struct btrace_function *bfun,
bfun->flags = flags;
ftrace_debug (bfun, "set caller");
+ ftrace_debug (caller, "..to");
}
/* Fix up the caller for all segments of a function. */
@@ -296,6 +301,18 @@ ftrace_new_tailcall (struct btrace_function *caller,
return bfun;
}
+/* Return the caller of BFUN or NULL if there is none. This function skips
+ tail calls in the call chain. */
+static struct btrace_function *
+ftrace_get_caller (struct btrace_function *bfun)
+{
+ for (; bfun != NULL; bfun = bfun->up)
+ if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
+ return bfun->up;
+
+ return NULL;
+}
+
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
symbol information. */
@@ -599,23 +616,359 @@ ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
return iclass;
}
+/* Try to match the back trace at LHS to the back trace at RHS. Returns the
+ number of matching function segments or zero if the back traces do not
+ match. */
+
+static int
+ftrace_match_backtrace (struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ int matches;
+
+ for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
+ {
+ if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
+ return 0;
+
+ lhs = ftrace_get_caller (lhs);
+ rhs = ftrace_get_caller (rhs);
+ }
+
+ return matches;
+}
+
+/* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
+
+static void
+ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
+{
+ if (adjustment == 0)
+ return;
+
+ DEBUG_FTRACE ("fixup level (%+d)", adjustment);
+ ftrace_debug (bfun, "..bfun");
+
+ for (; bfun != NULL; bfun = bfun->flow.next)
+ bfun->level += adjustment;
+}
+
+/* Recompute the global level offset. Traverse the function trace and compute
+ the global level offset as the negative of the minimal function level. */
+
+static void
+ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
+{
+ struct btrace_function *bfun, *end;
+ int level;
+
+ if (btinfo == NULL)
+ return;
+
+ bfun = btinfo->begin;
+ if (bfun == NULL)
+ return;
+
+ /* The last function segment contains the current instruction, which is not
+ really part of the trace. If it contains just this one instruction, we
+ stop when we reach it; otherwise, we let the below loop run to the end. */
+ end = btinfo->end;
+ if (VEC_length (btrace_insn_s, end->insn) > 1)
+ end = NULL;
+
+ level = INT_MAX;
+ for (; bfun != end; bfun = bfun->flow.next)
+ level = std::min (level, bfun->level);
+
+ DEBUG_FTRACE ("setting global level offset: %d", -level);
+ btinfo->level = -level;
+}
+
+/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
+ ftrace_connect_backtrace. */
+
+static void
+ftrace_connect_bfun (struct btrace_function *prev,
+ struct btrace_function *next)
+{
+ DEBUG_FTRACE ("connecting...");
+ ftrace_debug (prev, "..prev");
+ ftrace_debug (next, "..next");
+
+ /* The function segments are not yet connected. */
+ gdb_assert (prev->segment.next == NULL);
+ gdb_assert (next->segment.prev == NULL);
+
+ prev->segment.next = next;
+ next->segment.prev = prev;
+
+ /* We may have moved NEXT to a different function level. */
+ ftrace_fixup_level (next, prev->level - next->level);
+
+ /* If we run out of back trace for one, let's use the other's. */
+ if (prev->up == NULL)
+ {
+ if (next->up != NULL)
+ {
+ DEBUG_FTRACE ("using next's callers");
+ ftrace_fixup_caller (prev, next->up, next->flags);
+ }
+ }
+ else if (next->up == NULL)
+ {
+ if (prev->up != NULL)
+ {
+ DEBUG_FTRACE ("using prev's callers");
+ ftrace_fixup_caller (next, prev->up, prev->flags);
+ }
+ }
+ else
+ {
+ /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
+ link to add the tail callers to NEXT's back trace.
+
+ This removes NEXT->UP from NEXT's back trace. It will be added back
+ when connecting NEXT and PREV's callers - provided they exist.
+
+ If PREV's back trace consists of a series of tail calls without an
+ actual call, there will be no further connection and NEXT's caller will
+ be removed for good. To catch this case, we handle it here and connect
+ the top of PREV's back trace to NEXT's caller. */
+ if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
+ {
+ struct btrace_function *caller;
+ btrace_function_flags flags;
+
+ /* We checked NEXT->UP above so CALLER can't be NULL. */
+ caller = next->up;
+ flags = next->flags;
+
+ DEBUG_FTRACE ("adding prev's tail calls to next");
+
+ ftrace_fixup_caller (next, prev->up, prev->flags);
+
+ for (prev = prev->up; prev != NULL; prev = prev->up)
+ {
+ /* At the end of PREV's back trace, continue with CALLER. */
+ if (prev->up == NULL)
+ {
+ DEBUG_FTRACE ("fixing up link for tailcall chain");
+ ftrace_debug (prev, "..top");
+ ftrace_debug (caller, "..up");
+
+ ftrace_fixup_caller (prev, caller, flags);
+
+ /* If we skipped any tail calls, this may move CALLER to a
+ different function level.
+
+ Note that changing CALLER's level is only OK because we
+ know that this is the last iteration of the bottom-to-top
+ walk in ftrace_connect_backtrace.
+
+ Otherwise we will fix up CALLER's level when we connect it
+ to PREV's caller in the next iteration. */
+ ftrace_fixup_level (caller, prev->level - caller->level - 1);
+ break;
+ }
+
+ /* There's nothing to do if we find a real call. */
+ if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
+ {
+ DEBUG_FTRACE ("will fix up link in next iteration");
+ break;
+ }
+ }
+ }
+ }
+}
+
+/* Connect function segments on the same level in the back trace at LHS and RHS.
+ The back traces at LHS and RHS are expected to match according to
+ ftrace_match_backtrace. */
+
+static void
+ftrace_connect_backtrace (struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ while (lhs != NULL && rhs != NULL)
+ {
+ struct btrace_function *prev, *next;
+
+ gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
+
+ /* Connecting LHS and RHS may change the up link. */
+ prev = lhs;
+ next = rhs;
+
+ lhs = ftrace_get_caller (lhs);
+ rhs = ftrace_get_caller (rhs);
+
+ ftrace_connect_bfun (prev, next);
+ }
+}
+
+/* Bridge the gap between two function segments left and right of a gap if their
+ respective back traces match in at least MIN_MATCHES functions.
+
+ Returns non-zero if the gap could be bridged, zero otherwise. */
+
+static int
+ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
+ int min_matches)
+{
+ struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
+ int best_matches;
+
+ DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
+ rhs->insn_offset - 1, min_matches);
+
+ best_matches = 0;
+ best_l = NULL;
+ best_r = NULL;
+
+ /* We search the back traces of LHS and RHS for valid connections and connect
+ the two function segments that give the longest combined back trace. */
+
+ for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
+ for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
+ {
+ int matches;
+
+ matches = ftrace_match_backtrace (cand_l, cand_r);
+ if (best_matches < matches)
+ {
+ best_matches = matches;
+ best_l = cand_l;
+ best_r = cand_r;
+ }
+ }
+
+ /* We need at least MIN_MATCHES matches. */
+ gdb_assert (min_matches > 0);
+ if (best_matches < min_matches)
+ return 0;
+
+ DEBUG_FTRACE ("..matches: %d", best_matches);
+
+ /* We will fix up the level of BEST_R and succeeding function segments such
+ that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
+
+ This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
+ BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
+
+ To catch this, we already fix up the level here where we can start at RHS
+ instead of at BEST_R. We will ignore the level fixup when connecting
+ BEST_L to BEST_R as they will already be on the same level. */
+ ftrace_fixup_level (rhs, best_l->level - best_r->level);
+
+ ftrace_connect_backtrace (best_l, best_r);
+
+ return best_matches;
+}
+
+/* Try to bridge gaps due to overflow or decode errors by connecting the
+ function segments that are separated by the gap. */
+
+static void
+btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
+{
+ VEC (bfun_s) *remaining;
+ struct cleanup *old_chain;
+ int min_matches;
+
+ DEBUG ("bridge gaps");
+
+ remaining = NULL;
+ old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
+
+ /* We require a minimum amount of matches for bridging a gap. The number of
+ required matches will be lowered with each iteration.
+
+ The more matches the higher our confidence that the bridging is correct.
+ For big gaps or small traces, however, it may not be feasible to require a
+ high number of matches. */
+ for (min_matches = 5; min_matches > 0; --min_matches)
+ {
+ /* Let's try to bridge as many gaps as we can. In some cases, we need to
+ skip a gap and revisit it again after we closed later gaps. */
+ while (!VEC_empty (bfun_s, *gaps))
+ {
+ struct btrace_function *gap;
+ unsigned int idx;
+
+ for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
+ {
+ struct btrace_function *lhs, *rhs;
+ int bridged;
+
+ /* We may have a sequence of gaps if we run from one error into
+ the next as we try to re-sync onto the trace stream. Ignore
+ all but the leftmost gap in such a sequence.
+
+ Also ignore gaps at the beginning of the trace. */
+ lhs = gap->flow.prev;
+ if (lhs == NULL || lhs->errcode != 0)
+ continue;
+
+ /* Skip gaps to the right. */
+ for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
+ if (rhs->errcode == 0)
+ break;
+
+ /* Ignore gaps at the end of the trace. */
+ if (rhs == NULL)
+ continue;
+
+ bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
+
+ /* Keep track of gaps we were not able to bridge and try again.
+ If we just pushed them to the end of GAPS we would risk an
+ infinite loop in case we simply cannot bridge a gap. */
+ if (bridged == 0)
+ VEC_safe_push (bfun_s, remaining, gap);
+ }
+
+ /* Let's see if we made any progress. */
+ if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
+ break;
+
+ VEC_free (bfun_s, *gaps);
+
+ *gaps = remaining;
+ remaining = NULL;
+ }
+
+ /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
+ if (VEC_empty (bfun_s, *gaps))
+ break;
+
+ VEC_free (bfun_s, remaining);
+ }
+
+ do_cleanups (old_chain);
+
+ /* We may omit this in some cases. Not sure it is worth the extra
+ complication, though. */
+ ftrace_compute_global_level_offset (&tp->btrace);
+}
+
/* Compute the function branch trace from BTS trace. */
static void
btrace_compute_ftrace_bts (struct thread_info *tp,
- const struct btrace_data_bts *btrace)
+ const struct btrace_data_bts *btrace,
+ VEC (bfun_s) **gaps)
{
struct btrace_thread_info *btinfo;
struct btrace_function *begin, *end;
struct gdbarch *gdbarch;
- unsigned int blk, ngaps;
+ unsigned int blk;
int level;
gdbarch = target_gdbarch ();
btinfo = &tp->btrace;
begin = btinfo->begin;
end = btinfo->end;
- ngaps = btinfo->ngaps;
level = begin != NULL ? -btinfo->level : INT_MAX;
blk = VEC_length (btrace_block_s, btrace->blocks);
@@ -642,7 +995,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
if (begin == NULL)
begin = end;
- ngaps += 1;
+ VEC_safe_push (bfun_s, *gaps, end);
warning (_("Recorded trace may be corrupted at instruction "
"%u (pc = %s)."), end->insn_offset - 1,
@@ -687,7 +1040,8 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
/* Indicate the gap in the trace. We just added INSN so we're
not at the beginning. */
end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
- ngaps += 1;
+
+ VEC_safe_push (bfun_s, *gaps, end);
warning (_("Recorded trace may be incomplete at instruction %u "
"(pc = %s)."), end->insn_offset - 1,
@@ -711,7 +1065,6 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
btinfo->begin = begin;
btinfo->end = end;
- btinfo->ngaps = ngaps;
/* LEVEL is the minimal function level of all btrace function segments.
Define the global level offset to -LEVEL so all function levels are
@@ -759,7 +1112,7 @@ static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
struct btrace_function **pbegin,
struct btrace_function **pend, int *plevel,
- unsigned int *ngaps)
+ VEC (bfun_s) **gaps)
{
struct btrace_function *begin, *end, *upd;
uint64_t offset;
@@ -799,7 +1152,8 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
if (insn.enabled)
{
*pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
- *ngaps += 1;
+
+ VEC_safe_push (bfun_s, *gaps, end);
pt_insn_get_offset (decoder, &offset);
@@ -816,7 +1170,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
if (begin == NULL)
*pbegin = begin = end;
- *ngaps += 1;
+ VEC_safe_push (bfun_s, *gaps, end);
pt_insn_get_offset (decoder, &offset);
@@ -852,7 +1206,8 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
*pend = end = ftrace_new_gap (end, errcode);
if (begin == NULL)
*pbegin = begin = end;
- *ngaps += 1;
+
+ VEC_safe_push (bfun_s, *gaps, end);
pt_insn_get_offset (decoder, &offset);
@@ -927,7 +1282,8 @@ static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
static void
btrace_compute_ftrace_pt (struct thread_info *tp,
- const struct btrace_data_pt *btrace)
+ const struct btrace_data_pt *btrace,
+ VEC (bfun_s) **gaps)
{
struct btrace_thread_info *btinfo;
struct pt_insn_decoder *decoder;
@@ -971,8 +1327,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
error (_("Failed to configure the Intel Processor Trace decoder: "
"%s."), pt_errstr (pt_errcode (errcode)));
- ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
- &btinfo->ngaps);
+ ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
}
CATCH (error, RETURN_MASK_ALL)
{
@@ -980,7 +1335,8 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
if (error.reason == RETURN_QUIT && btinfo->end != NULL)
{
btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
- btinfo->ngaps++;
+
+ VEC_safe_push (bfun_s, *gaps, btinfo->end);
}
btrace_finalize_ftrace_pt (decoder, tp, level);
@@ -996,7 +1352,8 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
static void
btrace_compute_ftrace_pt (struct thread_info *tp,
- const struct btrace_data_pt *btrace)
+ const struct btrace_data_pt *btrace,
+ VEC (bfun_s) **gaps)
{
internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}
@@ -1007,7 +1364,8 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
a thread given by BTINFO. */
static void
-btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
+btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
+ VEC (bfun_s) **gaps)
{
DEBUG ("compute ftrace");
@@ -1017,17 +1375,53 @@ btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
return;
case BTRACE_FORMAT_BTS:
- btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
+ btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
return;
case BTRACE_FORMAT_PT:
- btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
+ btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
return;
}
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
}
+static void
+btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
+{
+ if (!VEC_empty (bfun_s, *gaps))
+ {
+ tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
+ btrace_bridge_gaps (tp, gaps);
+ }
+}
+
+static void
+btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
+{
+ VEC (bfun_s) *gaps;
+ struct cleanup *old_chain;
+
+ gaps = NULL;
+ old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
+
+ TRY
+ {
+ btrace_compute_ftrace_1 (tp, btrace, &gaps);
+ }
+ CATCH (error, RETURN_MASK_ALL)
+ {
+ btrace_finalize_ftrace (tp, &gaps);
+
+ throw_exception (error);
+ }
+ END_CATCH
+
+ btrace_finalize_ftrace (tp, &gaps);
+
+ do_cleanups (old_chain);
+}
+
/* Add an entry for the current PC. */
static void