-rw-r--r--   gdb/ChangeLog                                        |   67
-rw-r--r--   gdb/btrace.c                                         | 1151
-rw-r--r--   gdb/btrace.h                                         |  237
-rw-r--r--   gdb/record-btrace.c                                  |  342
-rw-r--r--   gdb/testsuite/ChangeLog                              |    7
-rw-r--r--   gdb/testsuite/gdb.btrace/function_call_history.exp   |   28
-rw-r--r--   gdb/testsuite/gdb.btrace/instruction_history.exp     |   14
7 files changed, 1448 insertions, 398 deletions
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index 4a6ef7f..c84ae2d 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,5 +1,72 @@
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
+ * btrace.h (struct btrace_func_link): New.
+ (enum btrace_function_flag): New.
+ (struct btrace_inst): Rename to ...
+ (struct btrace_insn): ...this. Update all users.
+ (struct btrace_func) <ibegin, iend>: Remove.
+ (struct btrace_func_link): New.
+ (struct btrace_func): Rename to ...
+ (struct btrace_function): ...this. Update all users.
+ (struct btrace_function) <segment, flow, up, insn, insn_offset)
+ (number, level, flags>: New.
+ (struct btrace_insn_iterator): Rename to ...
+ (struct btrace_insn_history): ...this.
+ Update all users.
+ (struct btrace_insn_iterator, btrace_call_iterator): New.
+ (struct btrace_thread_info) <btrace, itrace, ftrace>: Remove.
+ (struct btrace_thread_info) <begin, end, level>
+ <insn_history, call_history>: New.
+ (btrace_insn_get, btrace_insn_number, btrace_insn_begin)
+ (btrace_insn_end, btrace_insn_prev, btrace_insn_next)
+ (btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
+ (btrace_call_number, btrace_call_begin, btrace_call_end)
+ (btrace_call_prev, btrace_call_next, btrace_call_cmp)
+ (btrace_find_call_by_number, btrace_set_insn_history)
+ (btrace_set_call_history): New.
+ * btrace.c (btrace_init_insn_iterator)
+ (btrace_init_func_iterator, compute_itrace): Remove.
+ (ftrace_print_function_name, ftrace_print_filename)
+ (ftrace_skip_file): Change
+ parameter to const.
+ (ftrace_init_func): Remove.
+ (ftrace_debug): Use new btrace_function fields.
+ (ftrace_function_switched): Also consider gaining and
+ losing symbol information.
+ (ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
+ (ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
+ (ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
+ New.
+ (ftrace_new_function): Move. Remove debug print.
+ (ftrace_update_lines, ftrace_update_insns): New.
+ (ftrace_update_function): Check for call, ret, and jump.
+ (compute_ftrace): Renamed to ...
+ (btrace_compute_ftrace): ...this. Rewritten to compute call
+ stack.
+ (btrace_fetch, btrace_clear): Updated.
+ (btrace_insn_get, btrace_insn_number, btrace_insn_begin)
+ (btrace_insn_end, btrace_insn_prev, btrace_insn_next)
+ (btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
+ (btrace_call_number, btrace_call_begin, btrace_call_end)
+ (btrace_call_prev, btrace_call_next, btrace_call_cmp)
+ (btrace_find_call_by_number, btrace_set_insn_history)
+ (btrace_set_call_history): New.
+ * record-btrace.c (require_btrace): Use new btrace thread
+ info fields.
+ (record_btrace_info, btrace_insn_history)
+ (record_btrace_insn_history, record_btrace_insn_history_range):
+ Use new btrace thread info fields and new iterator.
+ (btrace_func_history_src_line): Rename to ...
+ (btrace_call_history_src_line): ...this. Use new btrace
+ thread info fields.
+ (btrace_func_history): Rename to ...
+ (btrace_call_history): ...this. Use new btrace thread info
+ fields and new iterator.
+ (record_btrace_call_history, record_btrace_call_history_range):
+ Use new btrace thread info fields and new iterator.
+
+2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
+
* frame.h (frame_id_build_unavailable_stack_special): New.
* frame.c (frame_id_build_unavailable_stack_special): New.
diff --git a/gdb/btrace.c b/gdb/btrace.c
index df62da8..5bda127 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -45,92 +45,11 @@
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
-/* Initialize the instruction iterator. */
-
-static void
-btrace_init_insn_iterator (struct btrace_thread_info *btinfo)
-{
- DEBUG ("init insn iterator");
-
- btinfo->insn_iterator.begin = 1;
- btinfo->insn_iterator.end = 0;
-}
-
-/* Initialize the function iterator. */
-
-static void
-btrace_init_func_iterator (struct btrace_thread_info *btinfo)
-{
- DEBUG ("init func iterator");
-
- btinfo->func_iterator.begin = 1;
- btinfo->func_iterator.end = 0;
-}
-
-/* Compute the instruction trace from the block trace. */
-
-static VEC (btrace_inst_s) *
-compute_itrace (VEC (btrace_block_s) *btrace)
-{
- VEC (btrace_inst_s) *itrace;
- struct gdbarch *gdbarch;
- unsigned int b;
-
- DEBUG ("compute itrace");
-
- itrace = NULL;
- gdbarch = target_gdbarch ();
- b = VEC_length (btrace_block_s, btrace);
-
- while (b-- != 0)
- {
- btrace_block_s *block;
- CORE_ADDR pc;
-
- block = VEC_index (btrace_block_s, btrace, b);
- pc = block->begin;
-
- /* Add instructions for this block. */
- for (;;)
- {
- btrace_inst_s *inst;
- int size;
-
- /* We should hit the end of the block. Warn if we went too far. */
- if (block->end < pc)
- {
- warning (_("Recorded trace may be corrupted."));
- break;
- }
-
- inst = VEC_safe_push (btrace_inst_s, itrace, NULL);
- inst->pc = pc;
-
- /* We're done once we pushed the instruction at the end. */
- if (block->end == pc)
- break;
-
- size = gdb_insn_length (gdbarch, pc);
-
- /* Make sure we terminate if we fail to compute the size. */
- if (size <= 0)
- {
- warning (_("Recorded trace may be incomplete."));
- break;
- }
-
- pc += size;
- }
- }
-
- return itrace;
-}
-
/* Return the function name of a recorded function segment for printing.
This function never returns NULL. */
static const char *
-ftrace_print_function_name (struct btrace_func *bfun)
+ftrace_print_function_name (const struct btrace_function *bfun)
{
struct minimal_symbol *msym;
struct symbol *sym;
@@ -151,7 +70,7 @@ ftrace_print_function_name (struct btrace_func *bfun)
This function never returns NULL. */
static const char *
-ftrace_print_filename (struct btrace_func *bfun)
+ftrace_print_filename (const struct btrace_function *bfun)
{
struct symbol *sym;
const char *filename;
@@ -166,44 +85,53 @@ ftrace_print_filename (struct btrace_func *bfun)
return filename;
}
-/* Print an ftrace debug status message. */
+/* Return a string representation of the address of an instruction.
+ This function never returns NULL. */
-static void
-ftrace_debug (struct btrace_func *bfun, const char *prefix)
+static const char *
+ftrace_print_insn_addr (const struct btrace_insn *insn)
{
- DEBUG_FTRACE ("%s: fun = %s, file = %s, lines = [%d; %d], insn = [%u; %u]",
- prefix, ftrace_print_function_name (bfun),
- ftrace_print_filename (bfun), bfun->lbegin, bfun->lend,
- bfun->ibegin, bfun->iend);
+ if (insn == NULL)
+ return "<nil>";
+
+ return core_addr_to_string_nz (insn->pc);
}
-/* Initialize a recorded function segment. */
+/* Print an ftrace debug status message. */
static void
-ftrace_init_func (struct btrace_func *bfun, struct minimal_symbol *mfun,
- struct symbol *fun, unsigned int idx)
+ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
- bfun->msym = mfun;
- bfun->sym = fun;
- bfun->lbegin = INT_MAX;
- bfun->lend = 0;
- bfun->ibegin = idx;
- bfun->iend = idx;
+ const char *fun, *file;
+ unsigned int ibegin, iend;
+ int lbegin, lend, level;
+
+ fun = ftrace_print_function_name (bfun);
+ file = ftrace_print_filename (bfun);
+ level = bfun->level;
+
+ lbegin = bfun->lbegin;
+ lend = bfun->lend;
+
+ ibegin = bfun->insn_offset;
+ iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
+
+ DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
+ "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
+ ibegin, iend);
}
-/* Check whether the function has changed. */
+/* Return non-zero if BFUN does not match MFUN and FUN,
+ return zero otherwise. */
static int
-ftrace_function_switched (struct btrace_func *bfun,
- struct minimal_symbol *mfun, struct symbol *fun)
+ftrace_function_switched (const struct btrace_function *bfun,
+ const struct minimal_symbol *mfun,
+ const struct symbol *fun)
{
struct minimal_symbol *msym;
struct symbol *sym;
- /* The function changed if we did not have one before. */
- if (bfun == NULL)
- return 1;
-
msym = bfun->msym;
sym = bfun->sym;
@@ -228,109 +156,505 @@ ftrace_function_switched (struct btrace_func *bfun,
return 1;
}
+ /* If we lost symbol information, we switched functions. */
+ if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
+ return 1;
+
+ /* If we gained symbol information, we switched functions. */
+ if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
+ return 1;
+
return 0;
}
-/* Check if we should skip this file when generating the function call
- history. We would want to do that if, say, a macro that is defined
- in another file is expanded in this function. */
+/* Return non-zero if we should skip this file when generating the function
+ call history, zero otherwise.
+ We would want to do that if, say, a macro that is defined in another file
+ is expanded in this function. */
static int
-ftrace_skip_file (struct btrace_func *bfun, const char *filename)
+ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
{
struct symbol *sym;
const char *bfile;
sym = bfun->sym;
+ if (sym == NULL)
+ return 1;
- if (sym != NULL)
- bfile = symtab_to_fullname (sym->symtab);
- else
- bfile = "";
+ bfile = symtab_to_fullname (sym->symtab);
+
+ return (filename_cmp (bfile, fullname) != 0);
+}
+
+/* Allocate and initialize a new branch trace function segment.
+ PREV is the chronologically preceding function segment.
+ MFUN and FUN are the symbol information we have for this function. */
+
+static struct btrace_function *
+ftrace_new_function (struct btrace_function *prev,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun;
+
+ bfun = xzalloc (sizeof (*bfun));
+
+ bfun->msym = mfun;
+ bfun->sym = fun;
+ bfun->flow.prev = prev;
+
+ /* We start with the identities of min and max, respectively. */
+ bfun->lbegin = INT_MAX;
+ bfun->lend = INT_MIN;
- if (filename == NULL)
- filename = "";
+ if (prev != NULL)
+ {
+ gdb_assert (prev->flow.next == NULL);
+ prev->flow.next = bfun;
- return (filename_cmp (bfile, filename) != 0);
+ bfun->number = prev->number + 1;
+ bfun->insn_offset = (prev->insn_offset
+ + VEC_length (btrace_insn_s, prev->insn));
+ }
+
+ return bfun;
}
-/* Compute the function trace from the instruction trace. */
+/* Update the UP field of a function segment. */
-static VEC (btrace_func_s) *
-compute_ftrace (VEC (btrace_inst_s) *itrace)
+static void
+ftrace_update_caller (struct btrace_function *bfun,
+ struct btrace_function *caller,
+ enum btrace_function_flag flags)
{
- VEC (btrace_func_s) *ftrace;
- struct btrace_inst *binst;
- struct btrace_func *bfun;
- unsigned int idx;
+ if (bfun->up != NULL)
+ ftrace_debug (bfun, "updating caller");
- DEBUG ("compute ftrace");
+ bfun->up = caller;
+ bfun->flags = flags;
+
+ ftrace_debug (bfun, "set caller");
+}
+
+/* Fix up the caller for all segments of a function. */
+
+static void
+ftrace_fixup_caller (struct btrace_function *bfun,
+ struct btrace_function *caller,
+ enum btrace_function_flag flags)
+{
+ struct btrace_function *prev, *next;
+
+ ftrace_update_caller (bfun, caller, flags);
+
+ /* Update all function segments belonging to the same function. */
+ for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
+ ftrace_update_caller (prev, caller, flags);
+
+ for (next = bfun->segment.next; next != NULL; next = next->segment.next)
+ ftrace_update_caller (next, caller, flags);
+}
+
+/* Add a new function segment for a call.
+ CALLER is the chronologically preceding function segment.
+ MFUN and FUN are the symbol information we have for this function. */
+
+static struct btrace_function *
+ftrace_new_call (struct btrace_function *caller,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun;
+
+ bfun = ftrace_new_function (caller, mfun, fun);
+ bfun->up = caller;
+ bfun->level = caller->level + 1;
+
+ ftrace_debug (bfun, "new call");
+
+ return bfun;
+}
+
+/* Add a new function segment for a tail call.
+ CALLER is the chronologically preceding function segment.
+ MFUN and FUN are the symbol information we have for this function. */
+
+static struct btrace_function *
+ftrace_new_tailcall (struct btrace_function *caller,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun;
- ftrace = NULL;
- bfun = NULL;
+ bfun = ftrace_new_function (caller, mfun, fun);
+ bfun->up = caller;
+ bfun->level = caller->level + 1;
+ bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
- for (idx = 0; VEC_iterate (btrace_inst_s, itrace, idx, binst); ++idx)
+ ftrace_debug (bfun, "new tail call");
+
+ return bfun;
+}
+
+/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
+ symbol information. */
+
+static struct btrace_function *
+ftrace_find_caller (struct btrace_function *bfun,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ for (; bfun != NULL; bfun = bfun->up)
+ {
+ /* Skip functions with incompatible symbol information. */
+ if (ftrace_function_switched (bfun, mfun, fun))
+ continue;
+
+ /* This is the function segment we're looking for. */
+ break;
+ }
+
+ return bfun;
+}
+
+/* Find the innermost caller in the back trace of BFUN, skipping all
+ function segments that do not end with a call instruction (e.g.
+ tail calls ending with a jump). */
+
+static struct btrace_function *
+ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
+{
+ for (; bfun != NULL; bfun = bfun->up)
{
- struct symtab_and_line sal;
- struct bound_minimal_symbol mfun;
- struct symbol *fun;
- const char *filename;
+ struct btrace_insn *last;
CORE_ADDR pc;
- pc = binst->pc;
+ /* We do not allow empty function segments. */
+ gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
+
+ last = VEC_last (btrace_insn_s, bfun->insn);
+ pc = last->pc;
- /* Try to determine the function we're in. We use both types of symbols
- to avoid surprises when we sometimes get a full symbol and sometimes
- only a minimal symbol. */
- fun = find_pc_function (pc);
- mfun = lookup_minimal_symbol_by_pc (pc);
+ if (gdbarch_insn_is_call (gdbarch, pc))
+ break;
+ }
+
+ return bfun;
+}
+
+/* Add a continuation segment for a function into which we return.
+ PREV is the chronologically preceding function segment.
+ MFUN and FUN are the symbol information we have for this function. */
+
+static struct btrace_function *
+ftrace_new_return (struct gdbarch *gdbarch,
+ struct btrace_function *prev,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun, *caller;
+
+ bfun = ftrace_new_function (prev, mfun, fun);
+
+ /* It is important to start at PREV's caller. Otherwise, we might find
+ PREV itself, if PREV is a recursive function. */
+ caller = ftrace_find_caller (prev->up, mfun, fun);
+ if (caller != NULL)
+ {
+ /* The caller of PREV is the preceding btrace function segment in this
+ function instance. */
+ gdb_assert (caller->segment.next == NULL);
+
+ caller->segment.next = bfun;
+ bfun->segment.prev = caller;
+
+ /* Maintain the function level. */
+ bfun->level = caller->level;
+
+ /* Maintain the call stack. */
+ bfun->up = caller->up;
+ bfun->flags = caller->flags;
+
+ ftrace_debug (bfun, "new return");
+ }
+ else
+ {
+ /* We did not find a caller. This could mean that something went
+ wrong or that the call is simply not included in the trace. */
- if (fun == NULL && mfun.minsym == NULL)
+ /* Let's search for some actual call. */
+ caller = ftrace_find_call (gdbarch, prev->up);
+ if (caller == NULL)
{
- DEBUG_FTRACE ("no symbol at %u, pc=%s", idx,
- core_addr_to_string_nz (pc));
- continue;
- }
+ /* There is no call in PREV's back trace. We assume that the
+ branch trace did not include it. */
+
+ /* Let's find the topmost call function - this skips tail calls. */
+ while (prev->up != NULL)
+ prev = prev->up;
- /* If we're switching functions, we start over. */
- if (ftrace_function_switched (bfun, mfun.minsym, fun))
+ /* We maintain levels for a series of returns for which we have
+ not seen the calls.
+ We start at the preceding function's level in case this has
+ already been a return for which we have not seen the call.
+ We start at level 0 otherwise, to handle tail calls correctly. */
+ bfun->level = min (0, prev->level) - 1;
+
+ /* Fix up the call stack for PREV. */
+ ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
+
+ ftrace_debug (bfun, "new return - no caller");
+ }
+ else
{
- bfun = VEC_safe_push (btrace_func_s, ftrace, NULL);
+ /* There is a call in PREV's back trace to which we should have
+ returned. Let's remain at this level. */
+ bfun->level = prev->level;
- ftrace_init_func (bfun, mfun.minsym, fun, idx);
- ftrace_debug (bfun, "init");
+ ftrace_debug (bfun, "new return - unknown caller");
}
+ }
+
+ return bfun;
+}
+
+/* Add a new function segment for a function switch.
+ PREV is the chronologically preceding function segment.
+ MFUN and FUN are the symbol information we have for this function. */
+
+static struct btrace_function *
+ftrace_new_switch (struct btrace_function *prev,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun;
+
+ /* This is an unexplained function switch. The call stack will likely
+ be wrong at this point. */
+ bfun = ftrace_new_function (prev, mfun, fun);
- /* Update the instruction range. */
- bfun->iend = idx;
- ftrace_debug (bfun, "update insns");
+ /* We keep the function level. */
+ bfun->level = prev->level;
- /* Let's see if we have source correlation, as well. */
- sal = find_pc_line (pc, 0);
- if (sal.symtab == NULL || sal.line == 0)
+ ftrace_debug (bfun, "new switch");
+
+ return bfun;
+}
+
+/* Update BFUN with respect to the instruction at PC. This may create new
+ function segments.
+ Return the chronologically latest function segment, never NULL. */
+
+static struct btrace_function *
+ftrace_update_function (struct gdbarch *gdbarch,
+ struct btrace_function *bfun, CORE_ADDR pc)
+{
+ struct bound_minimal_symbol bmfun;
+ struct minimal_symbol *mfun;
+ struct symbol *fun;
+ struct btrace_insn *last;
+
+ /* Try to determine the function we're in. We use both types of symbols
+ to avoid surprises when we sometimes get a full symbol and sometimes
+ only a minimal symbol. */
+ fun = find_pc_function (pc);
+ bmfun = lookup_minimal_symbol_by_pc (pc);
+ mfun = bmfun.minsym;
+
+ if (fun == NULL && mfun == NULL)
+ DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
+
+ /* If we didn't have a function before, we create one. */
+ if (bfun == NULL)
+ return ftrace_new_function (bfun, mfun, fun);
+
+ /* Check the last instruction, if we have one.
+ We do this check first, since it allows us to fill in the call stack
+ links in addition to the normal flow links. */
+ last = NULL;
+ if (!VEC_empty (btrace_insn_s, bfun->insn))
+ last = VEC_last (btrace_insn_s, bfun->insn);
+
+ if (last != NULL)
+ {
+ CORE_ADDR lpc;
+
+ lpc = last->pc;
+
+ /* Check for returns. */
+ if (gdbarch_insn_is_ret (gdbarch, lpc))
+ return ftrace_new_return (gdbarch, bfun, mfun, fun);
+
+ /* Check for calls. */
+ if (gdbarch_insn_is_call (gdbarch, lpc))
{
- DEBUG_FTRACE ("no lines at %u, pc=%s", idx,
- core_addr_to_string_nz (pc));
- continue;
+ int size;
+
+ size = gdb_insn_length (gdbarch, lpc);
+
+ /* Ignore calls to the next instruction. They are used for PIC. */
+ if (lpc + size != pc)
+ return ftrace_new_call (bfun, mfun, fun);
}
+ }
+
+ /* Check if we're switching functions for some other reason. */
+ if (ftrace_function_switched (bfun, mfun, fun))
+ {
+ DEBUG_FTRACE ("switching from %s in %s at %s",
+ ftrace_print_insn_addr (last),
+ ftrace_print_function_name (bfun),
+ ftrace_print_filename (bfun));
- /* Check if we switched files. This could happen if, say, a macro that
- is defined in another file is expanded here. */
- filename = symtab_to_fullname (sal.symtab);
- if (ftrace_skip_file (bfun, filename))
+ if (last != NULL)
{
- DEBUG_FTRACE ("ignoring file at %u, pc=%s, file=%s", idx,
- core_addr_to_string_nz (pc), filename);
- continue;
+ CORE_ADDR start, lpc;
+
+ start = get_pc_function_start (pc);
+
+ /* If we can't determine the function for PC, we treat a jump at
+ the end of the block as tail call. */
+ if (start == 0)
+ start = pc;
+
+ lpc = last->pc;
+
+ /* Jumps indicate optimized tail calls. */
+ if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
+ return ftrace_new_tailcall (bfun, mfun, fun);
}
- /* Update the line range. */
- bfun->lbegin = min (bfun->lbegin, sal.line);
- bfun->lend = max (bfun->lend, sal.line);
- ftrace_debug (bfun, "update lines");
+ return ftrace_new_switch (bfun, mfun, fun);
+ }
+
+ return bfun;
+}
+
+/* Update BFUN's source range with respect to the instruction at PC. */
+
+static void
+ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
+{
+ struct symtab_and_line sal;
+ const char *fullname;
+
+ sal = find_pc_line (pc, 0);
+ if (sal.symtab == NULL || sal.line == 0)
+ {
+ DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
+ return;
+ }
+
+ /* Check if we switched files. This could happen if, say, a macro that
+ is defined in another file is expanded here. */
+ fullname = symtab_to_fullname (sal.symtab);
+ if (ftrace_skip_file (bfun, fullname))
+ {
+ DEBUG_FTRACE ("ignoring file at %s, file=%s",
+ core_addr_to_string_nz (pc), fullname);
+ return;
+ }
+
+ /* Update the line range. */
+ bfun->lbegin = min (bfun->lbegin, sal.line);
+ bfun->lend = max (bfun->lend, sal.line);
+
+ if (record_debug > 1)
+ ftrace_debug (bfun, "update lines");
+}
+
+/* Add the instruction at PC to BFUN's instructions. */
+
+static void
+ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
+{
+ struct btrace_insn *insn;
+
+ insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
+ insn->pc = pc;
+
+ if (record_debug > 1)
+ ftrace_debug (bfun, "update insn");
+}
+
+/* Compute the function branch trace from a block branch trace BTRACE for
+ a thread given by BTINFO. */
+
+static void
+btrace_compute_ftrace (struct btrace_thread_info *btinfo,
+ VEC (btrace_block_s) *btrace)
+{
+ struct btrace_function *begin, *end;
+ struct gdbarch *gdbarch;
+ unsigned int blk;
+ int level;
+
+ DEBUG ("compute ftrace");
+
+ gdbarch = target_gdbarch ();
+ begin = NULL;
+ end = NULL;
+ level = INT_MAX;
+ blk = VEC_length (btrace_block_s, btrace);
+
+ while (blk != 0)
+ {
+ btrace_block_s *block;
+ CORE_ADDR pc;
+
+ blk -= 1;
+
+ block = VEC_index (btrace_block_s, btrace, blk);
+ pc = block->begin;
+
+ for (;;)
+ {
+ int size;
+
+ /* We should hit the end of the block. Warn if we went too far. */
+ if (block->end < pc)
+ {
+ warning (_("Recorded trace may be corrupted around %s."),
+ core_addr_to_string_nz (pc));
+ break;
+ }
+
+ end = ftrace_update_function (gdbarch, end, pc);
+ if (begin == NULL)
+ begin = end;
+
+ /* Maintain the function level offset. */
+ level = min (level, end->level);
+
+ ftrace_update_insns (end, pc);
+ ftrace_update_lines (end, pc);
+
+ /* We're done once we pushed the instruction at the end. */
+ if (block->end == pc)
+ break;
+
+ size = gdb_insn_length (gdbarch, pc);
+
+ /* Make sure we terminate if we fail to compute the size. */
+ if (size <= 0)
+ {
+ warning (_("Recorded trace may be incomplete around %s."),
+ core_addr_to_string_nz (pc));
+ break;
+ }
+
+ pc += size;
+ }
}
- return ftrace;
+ btinfo->begin = begin;
+ btinfo->end = end;
+
+ /* LEVEL is the minimal function level of all btrace function segments.
+ Define the global level offset to -LEVEL so all function levels are
+ normalized to start at zero. */
+ btinfo->level = -level;
}
/* See btrace.h. */
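The level bookkeeping above is easiest to see from the consumer's side: each function segment stores a LEVEL relative to the first traced segment, and BTINFO->level is set to -LEVEL so that display code can normalize. A minimal sketch, not part of the patch (the helper name is made up):

static int
btrace_call_depth (const struct btrace_thread_info *btinfo,
		   const struct btrace_function *bfun)
{
  /* BFUN->level may be negative if the trace contains returns for which we
     did not see the calls; adding the per-thread offset makes the smallest
     depth zero.  */
  return btinfo->level + bfun->level;
}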
@@ -394,6 +718,7 @@ btrace_fetch (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
VEC (btrace_block_s) *btrace;
+ struct cleanup *cleanup;
DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
@@ -402,18 +727,15 @@ btrace_fetch (struct thread_info *tp)
return;
btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
- if (VEC_empty (btrace_block_s, btrace))
- return;
-
- btrace_clear (tp);
+ cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
- btinfo->btrace = btrace;
- btinfo->itrace = compute_itrace (btinfo->btrace);
- btinfo->ftrace = compute_ftrace (btinfo->itrace);
+ if (!VEC_empty (btrace_block_s, btrace))
+ {
+ btrace_clear (tp);
+ btrace_compute_ftrace (btinfo, btrace);
+ }
- /* Initialize branch trace iterators. */
- btrace_init_insn_iterator (btinfo);
- btrace_init_func_iterator (btinfo);
+ do_cleanups (cleanup);
}
/* See btrace.h. */
@@ -422,18 +744,29 @@ void
btrace_clear (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
+ struct btrace_function *it, *trash;
DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
btinfo = &tp->btrace;
- VEC_free (btrace_block_s, btinfo->btrace);
- VEC_free (btrace_inst_s, btinfo->itrace);
- VEC_free (btrace_func_s, btinfo->ftrace);
+ it = btinfo->begin;
+ while (it != NULL)
+ {
+ trash = it;
+ it = it->flow.next;
- btinfo->btrace = NULL;
- btinfo->itrace = NULL;
- btinfo->ftrace = NULL;
+ xfree (trash);
+ }
+
+ btinfo->begin = NULL;
+ btinfo->end = NULL;
+
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
}
/* See btrace.h. */
@@ -541,3 +874,451 @@ parse_xml_btrace (const char *buffer)
return btrace;
}
+
+/* See btrace.h. */
+
+const struct btrace_insn *
+btrace_insn_get (const struct btrace_insn_iterator *it)
+{
+ const struct btrace_function *bfun;
+ unsigned int index, end;
+
+ index = it->index;
+ bfun = it->function;
+
+ /* The index is within the bounds of this function's instruction vector. */
+ end = VEC_length (btrace_insn_s, bfun->insn);
+ gdb_assert (0 < end);
+ gdb_assert (index < end);
+
+ return VEC_index (btrace_insn_s, bfun->insn, index);
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_number (const struct btrace_insn_iterator *it)
+{
+ const struct btrace_function *bfun;
+
+ bfun = it->function;
+ return bfun->insn_offset + it->index;
+}
+
+/* See btrace.h. */
+
+void
+btrace_insn_begin (struct btrace_insn_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ const struct btrace_function *bfun;
+
+ bfun = btinfo->begin;
+ if (bfun == NULL)
+ error (_("No trace."));
+
+ it->function = bfun;
+ it->index = 0;
+}
+
+/* See btrace.h. */
+
+void
+btrace_insn_end (struct btrace_insn_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ const struct btrace_function *bfun;
+ unsigned int length;
+
+ bfun = btinfo->end;
+ if (bfun == NULL)
+ error (_("No trace."));
+
+ /* The last instruction in the last function is the current instruction.
+ We point to it - it is one past the end of the execution trace. */
+ length = VEC_length (btrace_insn_s, bfun->insn);
+
+ it->function = bfun;
+ it->index = length - 1;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
+{
+ const struct btrace_function *bfun;
+ unsigned int index, steps;
+
+ bfun = it->function;
+ steps = 0;
+ index = it->index;
+
+ while (stride != 0)
+ {
+ unsigned int end, space, adv;
+
+ end = VEC_length (btrace_insn_s, bfun->insn);
+
+ gdb_assert (0 < end);
+ gdb_assert (index < end);
+
+ /* Compute the number of instructions remaining in this segment. */
+ space = end - index;
+
+ /* Advance the iterator as far as possible within this segment. */
+ adv = min (space, stride);
+ stride -= adv;
+ index += adv;
+ steps += adv;
+
+ /* Move to the next function if we're at the end of this one. */
+ if (index == end)
+ {
+ const struct btrace_function *next;
+
+ next = bfun->flow.next;
+ if (next == NULL)
+ {
+ /* We stepped past the last function.
+
+ Let's adjust the index to point to the last instruction in
+ the previous function. */
+ index -= 1;
+ steps -= 1;
+ break;
+ }
+
+ /* We now point to the first instruction in the new function. */
+ bfun = next;
+ index = 0;
+ }
+
+ /* We did make progress. */
+ gdb_assert (adv > 0);
+ }
+
+ /* Update the iterator. */
+ it->function = bfun;
+ it->index = index;
+
+ return steps;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
+{
+ const struct btrace_function *bfun;
+ unsigned int index, steps;
+
+ bfun = it->function;
+ steps = 0;
+ index = it->index;
+
+ while (stride != 0)
+ {
+ unsigned int adv;
+
+ /* Move to the previous function if we're at the start of this one. */
+ if (index == 0)
+ {
+ const struct btrace_function *prev;
+
+ prev = bfun->flow.prev;
+ if (prev == NULL)
+ break;
+
+ /* We point to one after the last instruction in the new function. */
+ bfun = prev;
+ index = VEC_length (btrace_insn_s, bfun->insn);
+
+ /* There is at least one instruction in this function segment. */
+ gdb_assert (index > 0);
+ }
+
+ /* Advance the iterator as far as possible within this segment. */
+ adv = min (index, stride);
+ stride -= adv;
+ index -= adv;
+ steps += adv;
+
+ /* We did make progress. */
+ gdb_assert (adv > 0);
+ }
+
+ /* Update the iterator. */
+ it->function = bfun;
+ it->index = index;
+
+ return steps;
+}
+
+/* See btrace.h. */
+
+int
+btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
+ const struct btrace_insn_iterator *rhs)
+{
+ unsigned int lnum, rnum;
+
+ lnum = btrace_insn_number (lhs);
+ rnum = btrace_insn_number (rhs);
+
+ return (int) (lnum - rnum);
+}
+
+/* See btrace.h. */
+
+int
+btrace_find_insn_by_number (struct btrace_insn_iterator *it,
+ const struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ const struct btrace_function *bfun;
+ unsigned int end;
+
+ for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
+ if (bfun->insn_offset <= number)
+ break;
+
+ if (bfun == NULL)
+ return 0;
+
+ end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
+ if (end <= number)
+ return 0;
+
+ it->function = bfun;
+ it->index = number - bfun->insn_offset;
+
+ return 1;
+}
+
+/* See btrace.h. */
+
+const struct btrace_function *
+btrace_call_get (const struct btrace_call_iterator *it)
+{
+ return it->function;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_call_number (const struct btrace_call_iterator *it)
+{
+ const struct btrace_thread_info *btinfo;
+ const struct btrace_function *bfun;
+ unsigned int insns;
+
+ btinfo = it->btinfo;
+ bfun = it->function;
+ if (bfun != NULL)
+ return bfun->number;
+
+ /* For the end iterator, i.e. bfun == NULL, we return one more than the
+ number of the last function. */
+ bfun = btinfo->end;
+ insns = VEC_length (btrace_insn_s, bfun->insn);
+
+ /* If the function contains only a single instruction (i.e. the current
+ instruction), it will be skipped and its number is already the number
+ we seek. */
+ if (insns == 1)
+ return bfun->number;
+
+ /* Otherwise, return one more than the number of the last function. */
+ return bfun->number + 1;
+}
+
+/* See btrace.h. */
+
+void
+btrace_call_begin (struct btrace_call_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ const struct btrace_function *bfun;
+
+ bfun = btinfo->begin;
+ if (bfun == NULL)
+ error (_("No trace."));
+
+ it->btinfo = btinfo;
+ it->function = bfun;
+}
+
+/* See btrace.h. */
+
+void
+btrace_call_end (struct btrace_call_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ const struct btrace_function *bfun;
+
+ bfun = btinfo->end;
+ if (bfun == NULL)
+ error (_("No trace."));
+
+ it->btinfo = btinfo;
+ it->function = NULL;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
+{
+ const struct btrace_function *bfun;
+ unsigned int steps;
+
+ bfun = it->function;
+ steps = 0;
+ while (bfun != NULL)
+ {
+ const struct btrace_function *next;
+ unsigned int insns;
+
+ next = bfun->flow.next;
+ if (next == NULL)
+ {
+ /* Ignore the last function if it only contains a single
+ (i.e. the current) instruction. */
+ insns = VEC_length (btrace_insn_s, bfun->insn);
+ if (insns == 1)
+ steps -= 1;
+ }
+
+ if (stride == steps)
+ break;
+
+ bfun = next;
+ steps += 1;
+ }
+
+ it->function = bfun;
+ return steps;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
+{
+ const struct btrace_thread_info *btinfo;
+ const struct btrace_function *bfun;
+ unsigned int steps;
+
+ bfun = it->function;
+ steps = 0;
+
+ if (bfun == NULL)
+ {
+ unsigned int insns;
+
+ btinfo = it->btinfo;
+ bfun = btinfo->end;
+ if (bfun == NULL)
+ return 0;
+
+ /* Ignore the last function if it only contains a single
+ (i.e. the current) instruction. */
+ insns = VEC_length (btrace_insn_s, bfun->insn);
+ if (insns == 1)
+ bfun = bfun->flow.prev;
+
+ if (bfun == NULL)
+ return 0;
+
+ steps += 1;
+ }
+
+ while (steps < stride)
+ {
+ const struct btrace_function *prev;
+
+ prev = bfun->flow.prev;
+ if (prev == NULL)
+ break;
+
+ bfun = prev;
+ steps += 1;
+ }
+
+ it->function = bfun;
+ return steps;
+}
+
+/* See btrace.h. */
+
+int
+btrace_call_cmp (const struct btrace_call_iterator *lhs,
+ const struct btrace_call_iterator *rhs)
+{
+ unsigned int lnum, rnum;
+
+ lnum = btrace_call_number (lhs);
+ rnum = btrace_call_number (rhs);
+
+ return (int) (lnum - rnum);
+}
+
+/* See btrace.h. */
+
+int
+btrace_find_call_by_number (struct btrace_call_iterator *it,
+ const struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ const struct btrace_function *bfun;
+
+ for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
+ {
+ unsigned int bnum;
+
+ bnum = bfun->number;
+ if (number == bnum)
+ {
+ it->btinfo = btinfo;
+ it->function = bfun;
+ return 1;
+ }
+
+ /* Functions are ordered and numbered consecutively. We could bail out
+ earlier. On the other hand, it is very unlikely that we search for
+ a nonexistent function. */
+ }
+
+ return 0;
+}
+
+/* See btrace.h. */
+
+void
+btrace_set_insn_history (struct btrace_thread_info *btinfo,
+ const struct btrace_insn_iterator *begin,
+ const struct btrace_insn_iterator *end)
+{
+ if (btinfo->insn_history == NULL)
+ btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
+
+ btinfo->insn_history->begin = *begin;
+ btinfo->insn_history->end = *end;
+}
+
+/* See btrace.h. */
+
+void
+btrace_set_call_history (struct btrace_thread_info *btinfo,
+ const struct btrace_call_iterator *begin,
+ const struct btrace_call_iterator *end)
+{
+ gdb_assert (begin->btinfo == end->btinfo);
+
+ if (btinfo->call_history == NULL)
+ btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
+
+ btinfo->call_history->begin = *begin;
+ btinfo->call_history->end = *end;
+}
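Taken together, the new iterator functions replace direct indexing into the old flat instruction vector. A minimal usage sketch, not part of the patch (assumes the thread already has trace; the helper name and output format are made up):

static void
dump_insn_history (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it, end;

  /* Both calls throw an error if there is no trace.  */
  btrace_insn_begin (&it, btinfo);
  btrace_insn_end (&end, btinfo);

  /* END points at the current instruction, which is one past the end of
     the execution history, so stop before printing it.  */
  while (btrace_insn_cmp (&it, &end) != 0)
    {
      const struct btrace_insn *insn = btrace_insn_get (&it);

      printf_unfiltered ("%u\t%s\n", btrace_insn_number (&it),
			 core_addr_to_string_nz (insn->pc));

      if (btrace_insn_next (&it, 1) == 0)
	break;
    }
}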
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 9bd7176..d219f69 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -29,63 +29,128 @@
#include "btrace-common.h"
struct thread_info;
+struct btrace_function;
/* A branch trace instruction.
This represents a single instruction in a branch trace. */
-struct btrace_inst
+struct btrace_insn
{
/* The address of this instruction. */
CORE_ADDR pc;
};
-/* A branch trace function.
+/* A vector of branch trace instructions. */
+typedef struct btrace_insn btrace_insn_s;
+DEF_VEC_O (btrace_insn_s);
+
+/* A doubly-linked list of branch trace function segments. */
+struct btrace_func_link
+{
+ struct btrace_function *prev;
+ struct btrace_function *next;
+};
+
+/* Flags for btrace function segments. */
+enum btrace_function_flag
+{
+ /* The 'up' link interpretation.
+ If set, it points to the function segment we returned to.
+ If clear, it points to the function segment we called from. */
+ BFUN_UP_LINKS_TO_RET = (1 << 0),
+
+ /* The 'up' link points to a tail call. This obviously only makes sense
+ if bfun_up_links_to_ret is clear. */
+ BFUN_UP_LINKS_TO_TAILCALL = (1 << 1)
+};
+
+/* A branch trace function segment.
This represents a function segment in a branch trace, i.e. a consecutive
- number of instructions belonging to the same function. */
-struct btrace_func
+ number of instructions belonging to the same function.
+
+ We do not allow function segments without any instructions. */
+struct btrace_function
{
- /* The full and minimal symbol for the function. One of them may be NULL. */
+ /* The full and minimal symbol for the function. Both may be NULL. */
struct minimal_symbol *msym;
struct symbol *sym;
+ /* The previous and next segment belonging to the same function.
+ If a function calls another function, the former will have at least
+ two segments: one before the call and another after the return. */
+ struct btrace_func_link segment;
+
+ /* The previous and next function in control flow order. */
+ struct btrace_func_link flow;
+
+ /* The directly preceding function segment in a (fake) call stack. */
+ struct btrace_function *up;
+
+ /* The instructions in this function segment.
+ The instruction vector will never be empty. */
+ VEC (btrace_insn_s) *insn;
+
+ /* The instruction number offset for the first instruction in this
+ function segment. */
+ unsigned int insn_offset;
+
+ /* The function number in control-flow order. */
+ unsigned int number;
+
+ /* The function level in a back trace across the entire branch trace.
+ A caller's level is one lower than the level of its callee.
+
+ Levels can be negative if we see returns for which we have not seen
+ the corresponding calls. The branch trace thread information provides
+ a fixup to normalize function levels so the smallest level is zero. */
+ int level;
+
/* The source line range of this function segment (both inclusive). */
int lbegin, lend;
- /* The instruction number range in the instruction trace corresponding
- to this function segment (both inclusive). */
- unsigned int ibegin, iend;
+ /* A bit-vector of btrace_function_flag. */
+ enum btrace_function_flag flags;
};
-/* Branch trace may also be represented as a vector of:
+/* A branch trace instruction iterator. */
+struct btrace_insn_iterator
+{
+ /* The branch trace function segment containing the instruction.
+ Will never be NULL. */
+ const struct btrace_function *function;
- - branch trace instructions starting with the oldest instruction.
- - branch trace functions starting with the oldest function. */
-typedef struct btrace_inst btrace_inst_s;
-typedef struct btrace_func btrace_func_s;
+ /* The index into the function segment's instruction vector. */
+ unsigned int index;
+};
-/* Define functions operating on branch trace vectors. */
-DEF_VEC_O (btrace_inst_s);
-DEF_VEC_O (btrace_func_s);
+/* A branch trace function call iterator. */
+struct btrace_call_iterator
+{
+ /* The branch trace information for this thread. Will never be NULL. */
+ const struct btrace_thread_info *btinfo;
+
+ /* The branch trace function segment.
+ This will be NULL for the iterator pointing to the end of the trace. */
+ const struct btrace_function *function;
+};
/* Branch trace iteration state for "record instruction-history". */
-struct btrace_insn_iterator
+struct btrace_insn_history
{
- /* The instruction index range from begin (inclusive) to end (exclusive)
- that has been covered last time.
- If end < begin, the branch trace has just been updated. */
- unsigned int begin;
- unsigned int end;
+ /* The branch trace instruction range from BEGIN (inclusive) to
+ END (exclusive) that has been covered last time. */
+ struct btrace_insn_iterator begin;
+ struct btrace_insn_iterator end;
};
/* Branch trace iteration state for "record function-call-history". */
-struct btrace_func_iterator
+struct btrace_call_history
{
- /* The function index range from begin (inclusive) to end (exclusive)
- that has been covered last time.
- If end < begin, the branch trace has just been updated. */
- unsigned int begin;
- unsigned int end;
+ /* The branch trace function range from BEGIN (inclusive) to END (exclusive)
+ that has been covered last time. */
+ struct btrace_call_iterator begin;
+ struct btrace_call_iterator end;
};
/* Branch trace information per thread.
@@ -103,16 +168,25 @@ struct btrace_thread_info
the underlying architecture. */
struct btrace_target_info *target;
- /* The current branch trace for this thread. */
- VEC (btrace_block_s) *btrace;
- VEC (btrace_inst_s) *itrace;
- VEC (btrace_func_s) *ftrace;
+ /* The current branch trace for this thread (both inclusive).
+
+ The last instruction of END is the current instruction, which is not
+ part of the execution history.
+ Both will be NULL if there is no branch trace available. If there is
+ branch trace available, both will be non-NULL. */
+ struct btrace_function *begin;
+ struct btrace_function *end;
+
+ /* The function level offset. When added to each function's LEVEL,
+ this normalizes the function levels such that the smallest level
+ becomes zero. */
+ int level;
/* The instruction history iterator. */
- struct btrace_insn_iterator insn_iterator;
+ struct btrace_insn_history *insn_history;
/* The function call history iterator. */
- struct btrace_func_iterator func_iterator;
+ struct btrace_call_history *call_history;
};
/* Enable branch tracing for a thread. */
@@ -139,4 +213,99 @@ extern void btrace_free_objfile (struct objfile *);
/* Parse a branch trace xml document into a block vector. */
extern VEC (btrace_block_s) *parse_xml_btrace (const char*);
+/* Dereference a branch trace instruction iterator. Return a pointer to the
+ instruction the iterator points to. */
+extern const struct btrace_insn *
+ btrace_insn_get (const struct btrace_insn_iterator *);
+
+/* Return the instruction number for a branch trace iterator.
+ Returns one past the maximum instruction number for the end iterator.
+ Returns zero if the iterator does not point to a valid instruction. */
+extern unsigned int btrace_insn_number (const struct btrace_insn_iterator *);
+
+/* Initialize a branch trace instruction iterator to point to the begin/end of
+ the branch trace. Throws an error if there is no branch trace. */
+extern void btrace_insn_begin (struct btrace_insn_iterator *,
+ const struct btrace_thread_info *);
+extern void btrace_insn_end (struct btrace_insn_iterator *,
+ const struct btrace_thread_info *);
+
+/* Increment/decrement a branch trace instruction iterator by at most STRIDE
+ instructions. Return the number of instructions by which the instruction
+ iterator has been advanced.
+ Returns zero if the operation failed or if STRIDE was zero. */
+extern unsigned int btrace_insn_next (struct btrace_insn_iterator *,
+ unsigned int stride);
+extern unsigned int btrace_insn_prev (struct btrace_insn_iterator *,
+ unsigned int stride);
+
+/* Compare two branch trace instruction iterators.
+ Return a negative number if LHS < RHS.
+ Return zero if LHS == RHS.
+ Return a positive number if LHS > RHS. */
+extern int btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
+ const struct btrace_insn_iterator *rhs);
+
+/* Find an instruction in the function branch trace by its number.
+ If the instruction is found, initialize the branch trace instruction
+ iterator to point to this instruction and return non-zero.
+ Return zero otherwise. */
+extern int btrace_find_insn_by_number (struct btrace_insn_iterator *,
+ const struct btrace_thread_info *,
+ unsigned int number);
+
+/* Dereference a branch trace call iterator. Return a pointer to the
+ function the iterator points to or NULL if the iterator points past
+ the end of the branch trace. */
+extern const struct btrace_function *
+ btrace_call_get (const struct btrace_call_iterator *);
+
+/* Return the function number for a branch trace call iterator.
+ Returns one past the maximum function number for the end iterator.
+ Returns zero if the iterator does not point to a valid function. */
+extern unsigned int btrace_call_number (const struct btrace_call_iterator *);
+
+/* Initialize a branch trace call iterator to point to the begin/end of
+ the branch trace. Throws an error if there is no branch trace. */
+extern void btrace_call_begin (struct btrace_call_iterator *,
+ const struct btrace_thread_info *);
+extern void btrace_call_end (struct btrace_call_iterator *,
+ const struct btrace_thread_info *);
+
+/* Increment/decrement a branch trace call iterator by at most STRIDE function
+ segments. Return the number of function segments by which the call
+ iterator has been advanced.
+ Returns zero if the operation failed or if STRIDE was zero. */
+extern unsigned int btrace_call_next (struct btrace_call_iterator *,
+ unsigned int stride);
+extern unsigned int btrace_call_prev (struct btrace_call_iterator *,
+ unsigned int stride);
+
+/* Compare two branch trace call iterators.
+ Return a negative number if LHS < RHS.
+ Return zero if LHS == RHS.
+ Return a positive number if LHS > RHS. */
+extern int btrace_call_cmp (const struct btrace_call_iterator *lhs,
+ const struct btrace_call_iterator *rhs);
+
+/* Find a function in the function branch trace by its NUMBER.
+ If the function is found, initialize the branch trace call
+ iterator to point to this function and return non-zero.
+ Return zero otherwise. */
+extern int btrace_find_call_by_number (struct btrace_call_iterator *,
+ const struct btrace_thread_info *,
+ unsigned int number);
+
+/* Set the branch trace instruction history from BEGIN (inclusive) to
+ END (exclusive). */
+extern void btrace_set_insn_history (struct btrace_thread_info *,
+ const struct btrace_insn_iterator *begin,
+ const struct btrace_insn_iterator *end);
+
+/* Set the branch trace function call history from BEGIN (inclusive) to
+ END (exclusive). */
+extern void btrace_set_call_history (struct btrace_thread_info *,
+ const struct btrace_call_iterator *begin,
+ const struct btrace_call_iterator *end);
+
#endif /* BTRACE_H */
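The three link kinds in struct btrace_function are easy to confuse: FLOW orders segments chronologically, SEGMENT connects segments of the same function instance across calls, and UP approximates the call stack, with the BFUN_UP_LINKS_TO_RET / BFUN_UP_LINKS_TO_TAILCALL flags recording how each UP link was inferred. A minimal sketch of walking the UP chain, not part of the patch (helper name made up):

static void
dump_btrace_backtrace (const struct btrace_function *bfun)
{
  /* Follow the (possibly reconstructed) call stack outwards.  */
  for (; bfun != NULL; bfun = bfun->up)
    {
      const char *name = "<unknown>";

      if (bfun->sym != NULL)
	name = SYMBOL_PRINT_NAME (bfun->sym);
      else if (bfun->msym != NULL)
	name = SYMBOL_PRINT_NAME (bfun->msym);

      printf_unfiltered ("#%u  %s  (level %d)\n", bfun->number, name,
			 bfun->level);
    }
}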
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index c3330e9..5bdab8d 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -74,7 +74,7 @@ require_btrace (void)
btinfo = &tp->btrace;
- if (VEC_empty (btrace_inst_s, btinfo->itrace))
+ if (btinfo->begin == NULL)
error (_("No trace."));
return btinfo;
@@ -205,7 +205,7 @@ record_btrace_info (void)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
- unsigned int insts, funcs;
+ unsigned int insns, calls;
DEBUG ("info");
@@ -215,12 +215,26 @@ record_btrace_info (void)
btrace_fetch (tp);
+ insns = 0;
+ calls = 0;
+
btinfo = &tp->btrace;
- insts = VEC_length (btrace_inst_s, btinfo->itrace);
- funcs = VEC_length (btrace_func_s, btinfo->ftrace);
+ if (btinfo->begin != NULL)
+ {
+ struct btrace_call_iterator call;
+ struct btrace_insn_iterator insn;
+
+ btrace_call_end (&call, btinfo);
+ btrace_call_prev (&call, 1);
+ calls = btrace_call_number (&call) + 1;
+
+ btrace_insn_end (&insn, btinfo);
+ btrace_insn_prev (&insn, 1);
+ insns = btrace_insn_number (&insn) + 1;
+ }
printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
- "%d (%s).\n"), insts, funcs, tp->num,
+ "%d (%s).\n"), insns, calls, tp->num,
target_pid_to_str (tp->ptid));
}
@@ -235,27 +249,31 @@ ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
/* Disassemble a section of the recorded instruction trace. */
static void
-btrace_insn_history (struct btrace_thread_info *btinfo, struct ui_out *uiout,
- unsigned int begin, unsigned int end, int flags)
+btrace_insn_history (struct ui_out *uiout,
+ const struct btrace_insn_iterator *begin,
+ const struct btrace_insn_iterator *end, int flags)
{
struct gdbarch *gdbarch;
- struct btrace_inst *inst;
- unsigned int idx;
+ struct btrace_insn_iterator it;
- DEBUG ("itrace (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
+ btrace_insn_number (end));
gdbarch = target_gdbarch ();
- for (idx = begin; VEC_iterate (btrace_inst_s, btinfo->itrace, idx, inst)
- && idx < end; ++idx)
+ for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
{
+ const struct btrace_insn *insn;
+
+ insn = btrace_insn_get (&it);
+
/* Print the instruction index. */
- ui_out_field_uint (uiout, "index", idx);
+ ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
ui_out_text (uiout, "\t");
/* Disassembly with '/m' flag may not produce the expected result.
See PR gdb/11833. */
- gdb_disassembly (gdbarch, uiout, NULL, flags, 1, inst->pc, inst->pc + 1);
+ gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
}
}
@@ -265,72 +283,62 @@ static void
record_btrace_insn_history (int size, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_insn_history *history;
+ struct btrace_insn_iterator begin, end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int context, last, begin, end;
+ unsigned int context, covered;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"insn history");
- btinfo = require_btrace ();
- last = VEC_length (btrace_inst_s, btinfo->itrace);
-
context = abs (size);
- begin = btinfo->insn_iterator.begin;
- end = btinfo->insn_iterator.end;
-
- DEBUG ("insn-history (0x%x): %d, prev: [%u; %u[", flags, size, begin, end);
-
if (context == 0)
error (_("Bad record instruction-history-size."));
- /* We start at the end. */
- if (end < begin)
- {
- /* Truncate the context, if necessary. */
- context = min (context, last);
-
- end = last;
- begin = end - context;
- }
- else if (size < 0)
+ btinfo = require_btrace ();
+ history = btinfo->insn_history;
+ if (history == NULL)
{
- if (begin == 0)
- {
- printf_unfiltered (_("At the start of the branch trace record.\n"));
-
- btinfo->insn_iterator.end = 0;
- return;
- }
+ /* No matter the direction, we start with the tail of the trace. */
+ btrace_insn_end (&begin, btinfo);
+ end = begin;
- /* Truncate the context, if necessary. */
- context = min (context, begin);
+ DEBUG ("insn-history (0x%x): %d", flags, size);
- end = begin;
- begin -= context;
+ covered = btrace_insn_prev (&begin, context);
}
else
{
- if (end == last)
- {
- printf_unfiltered (_("At the end of the branch trace record.\n"));
+ begin = history->begin;
+ end = history->end;
- btinfo->insn_iterator.begin = last;
- return;
- }
+ DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
+ btrace_insn_number (&begin), btrace_insn_number (&end));
- /* Truncate the context, if necessary. */
- context = min (context, last - end);
-
- begin = end;
- end += context;
+ if (size < 0)
+ {
+ end = begin;
+ covered = btrace_insn_prev (&begin, context);
+ }
+ else
+ {
+ begin = end;
+ covered = btrace_insn_next (&end, context);
+ }
}
- btrace_insn_history (btinfo, uiout, begin, end, flags);
-
- btinfo->insn_iterator.begin = begin;
- btinfo->insn_iterator.end = end;
+ if (covered > 0)
+ btrace_insn_history (uiout, &begin, &end, flags);
+ else
+ {
+ if (size < 0)
+ printf_unfiltered (_("At the start of the branch trace record.\n"));
+ else
+ printf_unfiltered (_("At the end of the branch trace record.\n"));
+ }
+ btrace_set_insn_history (btinfo, &begin, &end);
do_cleanups (uiout_cleanup);
}
@@ -340,39 +348,41 @@ static void
record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_insn_history *history;
+ struct btrace_insn_iterator begin, end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int last, begin, end;
+ unsigned int low, high;
+ int found;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"insn history");
- btinfo = require_btrace ();
- last = VEC_length (btrace_inst_s, btinfo->itrace);
+ low = from;
+ high = to;
- begin = (unsigned int) from;
- end = (unsigned int) to;
-
- DEBUG ("insn-history (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
/* Check for wrap-arounds. */
- if (begin != from || end != to)
+ if (low != from || high != to)
error (_("Bad range."));
- if (end <= begin)
+ if (high <= low)
error (_("Bad range."));
- if (last <= begin)
- error (_("Range out of bounds."));
+ btinfo = require_btrace ();
- /* Truncate the range, if necessary. */
- if (last < end)
- end = last;
+ found = btrace_find_insn_by_number (&begin, btinfo, low);
+ if (found == 0)
+ error (_("Range out of bounds."));
- btrace_insn_history (btinfo, uiout, begin, end, flags);
+ /* Silently truncate the range, if necessary. */
+ found = btrace_find_insn_by_number (&end, btinfo, high);
+ if (found == 0)
+ btrace_insn_end (&end, btinfo);
- btinfo->insn_iterator.begin = begin;
- btinfo->insn_iterator.end = end;
+ btrace_insn_history (uiout, &begin, &end, flags);
+ btrace_set_insn_history (btinfo, &begin, &end);
do_cleanups (uiout_cleanup);
}
@@ -411,23 +421,27 @@ record_btrace_insn_history_from (ULONGEST from, int size, int flags)
/* Print the instruction number range for a function call history line. */
static void
-btrace_func_history_insn_range (struct ui_out *uiout, struct btrace_func *bfun)
+btrace_call_history_insn_range (struct ui_out *uiout,
+ const struct btrace_function *bfun)
{
- ui_out_field_uint (uiout, "insn begin", bfun->ibegin);
+ unsigned int begin, end;
- if (bfun->ibegin == bfun->iend)
- return;
+ begin = bfun->insn_offset;
+ end = begin + VEC_length (btrace_insn_s, bfun->insn);
+ ui_out_field_uint (uiout, "insn begin", begin);
ui_out_text (uiout, "-");
- ui_out_field_uint (uiout, "insn end", bfun->iend);
+ ui_out_field_uint (uiout, "insn end", end);
}
/* Print the source line information for a function call history line. */
static void
-btrace_func_history_src_line (struct ui_out *uiout, struct btrace_func *bfun)
+btrace_call_history_src_line (struct ui_out *uiout,
+ const struct btrace_function *bfun)
{
struct symbol *sym;
+ int begin, end;
sym = bfun->sym;
if (sym == NULL)
@@ -436,54 +450,66 @@ btrace_func_history_src_line (struct ui_out *uiout, struct btrace_func *bfun)
ui_out_field_string (uiout, "file",
symtab_to_filename_for_display (sym->symtab));
- if (bfun->lend == 0)
+ begin = bfun->lbegin;
+ end = bfun->lend;
+
+ if (end < begin)
return;
ui_out_text (uiout, ":");
- ui_out_field_int (uiout, "min line", bfun->lbegin);
+ ui_out_field_int (uiout, "min line", begin);
- if (bfun->lend == bfun->lbegin)
+ if (end == begin)
return;
ui_out_text (uiout, "-");
- ui_out_field_int (uiout, "max line", bfun->lend);
+ ui_out_field_int (uiout, "max line", end);
}
/* Disassemble a section of the recorded function trace. */
static void
-btrace_func_history (struct btrace_thread_info *btinfo, struct ui_out *uiout,
- unsigned int begin, unsigned int end,
+btrace_call_history (struct ui_out *uiout,
+ const struct btrace_call_iterator *begin,
+ const struct btrace_call_iterator *end,
enum record_print_flag flags)
{
- struct btrace_func *bfun;
- unsigned int idx;
+ struct btrace_call_iterator it;
- DEBUG ("ftrace (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
+ btrace_call_number (end));
- for (idx = begin; VEC_iterate (btrace_func_s, btinfo->ftrace, idx, bfun)
- && idx < end; ++idx)
+ for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
{
+ const struct btrace_function *bfun;
+ struct minimal_symbol *msym;
+ struct symbol *sym;
+
+ bfun = btrace_call_get (&it);
+ msym = bfun->msym;
+ sym = bfun->sym;
+
/* Print the function index. */
- ui_out_field_uint (uiout, "index", idx);
+ ui_out_field_uint (uiout, "index", bfun->number);
ui_out_text (uiout, "\t");
if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
{
- btrace_func_history_insn_range (uiout, bfun);
+ btrace_call_history_insn_range (uiout, bfun);
ui_out_text (uiout, "\t");
}
if ((flags & RECORD_PRINT_SRC_LINE) != 0)
{
- btrace_func_history_src_line (uiout, bfun);
+ btrace_call_history_src_line (uiout, bfun);
ui_out_text (uiout, "\t");
}
- if (bfun->sym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (bfun->sym));
- else if (bfun->msym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (bfun->msym));
+ if (sym != NULL)
+ ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
+ else if (msym != NULL)
+ ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
+
ui_out_text (uiout, "\n");
}
}
@@ -494,72 +520,62 @@ static void
record_btrace_call_history (int size, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_call_history *history;
+ struct btrace_call_iterator begin, end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int context, last, begin, end;
+ unsigned int context, covered;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"insn history");
- btinfo = require_btrace ();
- last = VEC_length (btrace_func_s, btinfo->ftrace);
-
context = abs (size);
- begin = btinfo->func_iterator.begin;
- end = btinfo->func_iterator.end;
-
- DEBUG ("func-history (0x%x): %d, prev: [%u; %u[", flags, size, begin, end);
-
if (context == 0)
error (_("Bad record function-call-history-size."));
- /* We start at the end. */
- if (end < begin)
- {
- /* Truncate the context, if necessary. */
- context = min (context, last);
-
- end = last;
- begin = end - context;
- }
- else if (size < 0)
+ btinfo = require_btrace ();
+ history = btinfo->call_history;
+ if (history == NULL)
{
- if (begin == 0)
- {
- printf_unfiltered (_("At the start of the branch trace record.\n"));
-
- btinfo->func_iterator.end = 0;
- return;
- }
+ /* No matter the direction, we start with the tail of the trace. */
+ btrace_call_end (&begin, btinfo);
+ end = begin;
- /* Truncate the context, if necessary. */
- context = min (context, begin);
+ DEBUG ("call-history (0x%x): %d", flags, size);
- end = begin;
- begin -= context;
+ covered = btrace_call_prev (&begin, context);
}
else
{
- if (end == last)
- {
- printf_unfiltered (_("At the end of the branch trace record.\n"));
+ begin = history->begin;
+ end = history->end;
- btinfo->func_iterator.begin = last;
- return;
- }
+ DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
+ btrace_call_number (&begin), btrace_call_number (&end));
- /* Truncate the context, if necessary. */
- context = min (context, last - end);
-
- begin = end;
- end += context;
+ if (size < 0)
+ {
+ end = begin;
+ covered = btrace_call_prev (&begin, context);
+ }
+ else
+ {
+ begin = end;
+ covered = btrace_call_next (&end, context);
+ }
}
- btrace_func_history (btinfo, uiout, begin, end, flags);
-
- btinfo->func_iterator.begin = begin;
- btinfo->func_iterator.end = end;
+ if (covered > 0)
+ btrace_call_history (uiout, &begin, &end, flags);
+ else
+ {
+ if (size < 0)
+ printf_unfiltered (_("At the start of the branch trace record.\n"));
+ else
+ printf_unfiltered (_("At the end of the branch trace record.\n"));
+ }
+ btrace_set_call_history (btinfo, &begin, &end);
do_cleanups (uiout_cleanup);
}
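
/* Browsing semantics of the command above: with no previous window the
   iteration starts at the tail of the trace; otherwise a negative size
   (e.g. "record function-call-history -") shifts the window backwards from
   its current begin, a positive size shifts it forwards from its current
   end, and the covered count tells whether an edge of the trace has been
   reached.  A condensed sketch of just that window arithmetic, using the
   iterator API this patch adds (the helper name shift_call_window is made
   up):  */

static unsigned int
shift_call_window (struct btrace_thread_info *btinfo, int size,
                   struct btrace_call_iterator *begin,
                   struct btrace_call_iterator *end)
{
  const struct btrace_call_history *history;
  unsigned int context;

  history = btinfo->call_history;
  context = abs (size);

  if (history == NULL)
    {
      /* First request: start at the tail and move back CONTEXT calls.  */
      btrace_call_end (begin, btinfo);
      *end = *begin;
      return btrace_call_prev (begin, context);
    }

  *begin = history->begin;
  *end = history->end;

  if (size < 0)
    {
      /* Browse backwards from the previous window.  */
      *end = *begin;
      return btrace_call_prev (begin, context);
    }

  /* Browse forwards from the previous window.  */
  *begin = *end;
  return btrace_call_next (end, context);
}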
@@ -569,39 +585,41 @@ static void
record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_call_history *history;
+ struct btrace_call_iterator begin, end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int last, begin, end;
+ unsigned int low, high;
+ int found;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"func history");
- btinfo = require_btrace ();
- last = VEC_length (btrace_func_s, btinfo->ftrace);
+ low = from;
+ high = to;
- begin = (unsigned int) from;
- end = (unsigned int) to;
-
- DEBUG ("func-history (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
/* Check for wrap-arounds. */
- if (begin != from || end != to)
+ if (low != from || high != to)
error (_("Bad range."));
- if (end <= begin)
+ if (high <= low)
error (_("Bad range."));
- if (last <= begin)
- error (_("Range out of bounds."));
+ btinfo = require_btrace ();
- /* Truncate the range, if necessary. */
- if (last < end)
- end = last;
+ found = btrace_find_call_by_number (&begin, btinfo, low);
+ if (found == 0)
+ error (_("Range out of bounds."));
- btrace_func_history (btinfo, uiout, begin, end, flags);
+ /* Silently truncate the range, if necessary. */
+ found = btrace_find_call_by_number (&end, btinfo, high);
+ if (found == 0)
+ btrace_call_end (&end, btinfo);
- btinfo->func_iterator.begin = begin;
- btinfo->func_iterator.end = end;
+ btrace_call_history (uiout, &begin, &end, flags);
+ btrace_set_call_history (btinfo, &begin, &end);
do_cleanups (uiout_cleanup);
}
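
/* The "Check for wrap-arounds" test above works by narrowing the ULONGEST
   arguments to unsigned int and rejecting the range if either value changed
   in the process; an out-of-range begin is an error, while an out-of-range
   end is silently clamped to the end of the trace.  Standalone sketch of
   just the validity check (the function name range_is_valid and the main
   driver are made up):  */

#include <stdio.h>

typedef unsigned long long ULONGEST;    /* stand-in for gdb's ULONGEST */

static int
range_is_valid (ULONGEST from, ULONGEST to)
{
  unsigned int low, high;

  low = from;
  high = to;

  /* Reject truncated values as well as empty or reversed ranges.  */
  return low == from && high == to && low < high;
}

int
main (void)
{
  printf ("%d %d %d\n",
          range_is_valid (1, 5),                /* 1: a valid range */
          range_is_valid (5, 5),                /* 0: empty range */
          range_is_valid (1, 1ULL << 32));      /* 0: end wraps to 0 */
  return 0;
}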
diff --git a/gdb/testsuite/ChangeLog b/gdb/testsuite/ChangeLog
index 2749e90..52ed566 100644
--- a/gdb/testsuite/ChangeLog
+++ b/gdb/testsuite/ChangeLog
@@ -1,5 +1,12 @@
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
+ * gdb.btrace/function_call_history.exp: Fix expected function
+ trace.
+ * gdb.btrace/instruction_history.exp: Initialize traced.
+ Remove traced_functions.
+
+2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
+
	* gdb.btrace/function_call_history.exp: Update.
* gdb.btrace/instruction_history.exp: Update.
diff --git a/gdb/testsuite/gdb.btrace/function_call_history.exp b/gdb/testsuite/gdb.btrace/function_call_history.exp
index 7ee4e66..bf2458b 100644
--- a/gdb/testsuite/gdb.btrace/function_call_history.exp
+++ b/gdb/testsuite/gdb.btrace/function_call_history.exp
@@ -187,16 +187,18 @@ set bp_location [gdb_get_line_number "bp.2" $testfile.c]
gdb_breakpoint $bp_location
gdb_continue_to_breakpoint "cont to $bp_location" ".*$testfile.c:$bp_location.*"
-# at this point we expect to have main, fib, ..., fib, main, where fib occurs 8 times,
-# so we limit the output to only show the latest 10 function calls
-gdb_test_no_output "set record function-call-history-size 10"
-set message "recursive"
-gdb_test_multiple "record function-call-history" $message {
- -re "13\tmain\r\n14\tfib\r\n15\tfib\r\n16\tfib\r\n17\tfib\r\n18\tfib\r\n19\tfib\r\n20\tfib\r\n21\tfib\r\n22 main\r\n$gdb_prompt $" {
- pass $message
- }
- -re "13\tinc\r\n14\tmain\r\n15\tinc\r\n16\tmain\r\n17\tinc\r\n18\tmain\r\n19\tinc\r\n20\tmain\r\n21\tfib\r\n22\tmain\r\n$gdb_prompt $" {
- # recursive function calls appear only as 1 call
- kfail "gdb/15240" $message
- }
-}
+# at this point we expect to have main, fib, ..., fib, main, where fib occurs 9 times,
+# so we limit the output to only show the latest 11 function calls
+gdb_test_no_output "set record function-call-history-size 11"
+gdb_test "record function-call-history" [join [list \
+ "20\tmain" \
+ "21\tfib" \
+ "22\tfib" \
+ "23\tfib" \
+ "24\tfib" \
+ "25\tfib" \
+ "26\tfib" \
+ "27\tfib" \
+ "28\tfib" \
+ "29\tfib" \
+ "30\tmain"] "\r\n"] "recursive"
diff --git a/gdb/testsuite/gdb.btrace/instruction_history.exp b/gdb/testsuite/gdb.btrace/instruction_history.exp
index 6048ba1..c6f6500 100644
--- a/gdb/testsuite/gdb.btrace/instruction_history.exp
+++ b/gdb/testsuite/gdb.btrace/instruction_history.exp
@@ -47,18 +47,18 @@ gdb_continue_to_breakpoint "cont to $bp_location" ".*$srcfile2:$bp_location.*"
# it is necessary to count the number of lines that are
# shown by the "record instruction-history" command.
+set traced {}
set testname "determine number of recorded instructions"
gdb_test_multiple "info record" $testname {
-re "Active record target: record-btrace\r\nRecorded \(\[0-9\]*\) instructions in \(\[0-9\]*\) functions for thread 1 .*\\.\r\n$gdb_prompt $" {
set traced $expect_out(1,string)
- set traced_functions $expect_out(2,string)
pass $testname
}
}
-# we have exactly 7 instructions here
-set message "exactly 7 instructions"
-if { $traced != 7 } {
+# we have exactly 6 instructions here
+set message "exactly 6 instructions"
+if { $traced != 6 } {
fail $message
} else {
pass $message
@@ -147,6 +147,8 @@ if { $lines != $history_size } {
pass $message
}
+set history_size 2
+gdb_test_no_output "set record instruction-history-size $history_size"
set message "browse history forward middle"
set lines [test_lines_length "record instruction-history +" $message]
if { $lines != $history_size } {
@@ -168,6 +170,8 @@ gdb_test "record instruction-history" "At the end of the branch trace record\\."
# make sure we cannot move further
gdb_test "record instruction-history" "At the end of the branch trace record\\." "browse history forward beyond 2"
+set history_size 3
+gdb_test_no_output "set record instruction-history-size $history_size"
set message "browse history backward last"
set lines [test_lines_length "record instruction-history -" $message]
if { $lines != $history_size } {
@@ -176,6 +180,8 @@ if { $lines != $history_size } {
pass $message
}
+set history_size 2
+gdb_test_no_output "set record instruction-history-size $history_size"
set message "browse history backward middle"
set lines [test_lines_length "record instruction-history -" $message]
if { $lines != $history_size } {