diff options
author | Markus Metzger <markus.t.metzger@intel.com> | 2013-03-22 14:32:47 +0100 |
---|---|---|
committer | Markus Metzger <markus.t.metzger@intel.com> | 2014-01-16 12:45:11 +0100 |
commit | 23a7fe7580c5625dd19c852faf9a3acb56293207 (patch) | |
tree | b164b90e84669446759917fe21180707a88253ed /gdb/btrace.c | |
parent | 8372a7cb96e9d193cb1f85f3fd2c01e38c33a750 (diff) | |
download | gdb-23a7fe7580c5625dd19c852faf9a3acb56293207.zip gdb-23a7fe7580c5625dd19c852faf9a3acb56293207.tar.gz gdb-23a7fe7580c5625dd19c852faf9a3acb56293207.tar.bz2 |
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- an instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start supporting reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset,
number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information.
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
Diffstat (limited to 'gdb/btrace.c')
-rw-r--r-- | gdb/btrace.c | 1151 |
1 files changed, 966 insertions, 185 deletions
diff --git a/gdb/btrace.c b/gdb/btrace.c index df62da8..5bda127 100644 --- a/gdb/btrace.c +++ b/gdb/btrace.c @@ -45,92 +45,11 @@ #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args) -/* Initialize the instruction iterator. */ - -static void -btrace_init_insn_iterator (struct btrace_thread_info *btinfo) -{ - DEBUG ("init insn iterator"); - - btinfo->insn_iterator.begin = 1; - btinfo->insn_iterator.end = 0; -} - -/* Initialize the function iterator. */ - -static void -btrace_init_func_iterator (struct btrace_thread_info *btinfo) -{ - DEBUG ("init func iterator"); - - btinfo->func_iterator.begin = 1; - btinfo->func_iterator.end = 0; -} - -/* Compute the instruction trace from the block trace. */ - -static VEC (btrace_inst_s) * -compute_itrace (VEC (btrace_block_s) *btrace) -{ - VEC (btrace_inst_s) *itrace; - struct gdbarch *gdbarch; - unsigned int b; - - DEBUG ("compute itrace"); - - itrace = NULL; - gdbarch = target_gdbarch (); - b = VEC_length (btrace_block_s, btrace); - - while (b-- != 0) - { - btrace_block_s *block; - CORE_ADDR pc; - - block = VEC_index (btrace_block_s, btrace, b); - pc = block->begin; - - /* Add instructions for this block. */ - for (;;) - { - btrace_inst_s *inst; - int size; - - /* We should hit the end of the block. Warn if we went too far. */ - if (block->end < pc) - { - warning (_("Recorded trace may be corrupted.")); - break; - } - - inst = VEC_safe_push (btrace_inst_s, itrace, NULL); - inst->pc = pc; - - /* We're done once we pushed the instruction at the end. */ - if (block->end == pc) - break; - - size = gdb_insn_length (gdbarch, pc); - - /* Make sure we terminate if we fail to compute the size. */ - if (size <= 0) - { - warning (_("Recorded trace may be incomplete.")); - break; - } - - pc += size; - } - } - - return itrace; -} - /* Return the function name of a recorded function segment for printing. This function never returns NULL. 
*/ static const char * -ftrace_print_function_name (struct btrace_func *bfun) +ftrace_print_function_name (const struct btrace_function *bfun) { struct minimal_symbol *msym; struct symbol *sym; @@ -151,7 +70,7 @@ ftrace_print_function_name (struct btrace_func *bfun) This function never returns NULL. */ static const char * -ftrace_print_filename (struct btrace_func *bfun) +ftrace_print_filename (const struct btrace_function *bfun) { struct symbol *sym; const char *filename; @@ -166,44 +85,53 @@ ftrace_print_filename (struct btrace_func *bfun) return filename; } -/* Print an ftrace debug status message. */ +/* Return a string representation of the address of an instruction. + This function never returns NULL. */ -static void -ftrace_debug (struct btrace_func *bfun, const char *prefix) +static const char * +ftrace_print_insn_addr (const struct btrace_insn *insn) { - DEBUG_FTRACE ("%s: fun = %s, file = %s, lines = [%d; %d], insn = [%u; %u]", - prefix, ftrace_print_function_name (bfun), - ftrace_print_filename (bfun), bfun->lbegin, bfun->lend, - bfun->ibegin, bfun->iend); + if (insn == NULL) + return "<nil>"; + + return core_addr_to_string_nz (insn->pc); } -/* Initialize a recorded function segment. */ +/* Print an ftrace debug status message. 
*/ static void -ftrace_init_func (struct btrace_func *bfun, struct minimal_symbol *mfun, - struct symbol *fun, unsigned int idx) +ftrace_debug (const struct btrace_function *bfun, const char *prefix) { - bfun->msym = mfun; - bfun->sym = fun; - bfun->lbegin = INT_MAX; - bfun->lend = 0; - bfun->ibegin = idx; - bfun->iend = idx; + const char *fun, *file; + unsigned int ibegin, iend; + int lbegin, lend, level; + + fun = ftrace_print_function_name (bfun); + file = ftrace_print_filename (bfun); + level = bfun->level; + + lbegin = bfun->lbegin; + lend = bfun->lend; + + ibegin = bfun->insn_offset; + iend = ibegin + VEC_length (btrace_insn_s, bfun->insn); + + DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], " + "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend, + ibegin, iend); } -/* Check whether the function has changed. */ +/* Return non-zero if BFUN does not match MFUN and FUN, + return zero otherwise. */ static int -ftrace_function_switched (struct btrace_func *bfun, - struct minimal_symbol *mfun, struct symbol *fun) +ftrace_function_switched (const struct btrace_function *bfun, + const struct minimal_symbol *mfun, + const struct symbol *fun) { struct minimal_symbol *msym; struct symbol *sym; - /* The function changed if we did not have one before. */ - if (bfun == NULL) - return 1; - msym = bfun->msym; sym = bfun->sym; @@ -228,109 +156,505 @@ ftrace_function_switched (struct btrace_func *bfun, return 1; } + /* If we lost symbol information, we switched functions. */ + if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL) + return 1; + + /* If we gained symbol information, we switched functions. */ + if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL)) + return 1; + return 0; } -/* Check if we should skip this file when generating the function call - history. We would want to do that if, say, a macro that is defined - in another file is expanded in this function. 
*/ +/* Return non-zero if we should skip this file when generating the function + call history, zero otherwise. + We would want to do that if, say, a macro that is defined in another file + is expanded in this function. */ static int -ftrace_skip_file (struct btrace_func *bfun, const char *filename) +ftrace_skip_file (const struct btrace_function *bfun, const char *fullname) { struct symbol *sym; const char *bfile; sym = bfun->sym; + if (sym == NULL) + return 1; - if (sym != NULL) - bfile = symtab_to_fullname (sym->symtab); - else - bfile = ""; + bfile = symtab_to_fullname (sym->symtab); + + return (filename_cmp (bfile, fullname) != 0); +} + +/* Allocate and initialize a new branch trace function segment. + PREV is the chronologically preceding function segment. + MFUN and FUN are the symbol information we have for this function. */ + +static struct btrace_function * +ftrace_new_function (struct btrace_function *prev, + struct minimal_symbol *mfun, + struct symbol *fun) +{ + struct btrace_function *bfun; + + bfun = xzalloc (sizeof (*bfun)); + + bfun->msym = mfun; + bfun->sym = fun; + bfun->flow.prev = prev; + + /* We start with the identities of min and max, respectively. */ + bfun->lbegin = INT_MAX; + bfun->lend = INT_MIN; - if (filename == NULL) - filename = ""; + if (prev != NULL) + { + gdb_assert (prev->flow.next == NULL); + prev->flow.next = bfun; - return (filename_cmp (bfile, filename) != 0); + bfun->number = prev->number + 1; + bfun->insn_offset = (prev->insn_offset + + VEC_length (btrace_insn_s, prev->insn)); + } + + return bfun; } -/* Compute the function trace from the instruction trace. */ +/* Update the UP field of a function segment. 
*/ -static VEC (btrace_func_s) * -compute_ftrace (VEC (btrace_inst_s) *itrace) +static void +ftrace_update_caller (struct btrace_function *bfun, + struct btrace_function *caller, + enum btrace_function_flag flags) { - VEC (btrace_func_s) *ftrace; - struct btrace_inst *binst; - struct btrace_func *bfun; - unsigned int idx; + if (bfun->up != NULL) + ftrace_debug (bfun, "updating caller"); - DEBUG ("compute ftrace"); + bfun->up = caller; + bfun->flags = flags; + + ftrace_debug (bfun, "set caller"); +} + +/* Fix up the caller for all segments of a function. */ + +static void +ftrace_fixup_caller (struct btrace_function *bfun, + struct btrace_function *caller, + enum btrace_function_flag flags) +{ + struct btrace_function *prev, *next; + + ftrace_update_caller (bfun, caller, flags); + + /* Update all function segments belonging to the same function. */ + for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev) + ftrace_update_caller (prev, caller, flags); + + for (next = bfun->segment.next; next != NULL; next = next->segment.next) + ftrace_update_caller (next, caller, flags); +} + +/* Add a new function segment for a call. + CALLER is the chronologically preceding function segment. + MFUN and FUN are the symbol information we have for this function. */ + +static struct btrace_function * +ftrace_new_call (struct btrace_function *caller, + struct minimal_symbol *mfun, + struct symbol *fun) +{ + struct btrace_function *bfun; + + bfun = ftrace_new_function (caller, mfun, fun); + bfun->up = caller; + bfun->level = caller->level + 1; + + ftrace_debug (bfun, "new call"); + + return bfun; +} + +/* Add a new function segment for a tail call. + CALLER is the chronologically preceding function segment. + MFUN and FUN are the symbol information we have for this function. 
*/ + +static struct btrace_function * +ftrace_new_tailcall (struct btrace_function *caller, + struct minimal_symbol *mfun, + struct symbol *fun) +{ + struct btrace_function *bfun; - ftrace = NULL; - bfun = NULL; + bfun = ftrace_new_function (caller, mfun, fun); + bfun->up = caller; + bfun->level = caller->level + 1; + bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL; - for (idx = 0; VEC_iterate (btrace_inst_s, itrace, idx, binst); ++idx) + ftrace_debug (bfun, "new tail call"); + + return bfun; +} + +/* Find the innermost caller in the back trace of BFUN with MFUN/FUN + symbol information. */ + +static struct btrace_function * +ftrace_find_caller (struct btrace_function *bfun, + struct minimal_symbol *mfun, + struct symbol *fun) +{ + for (; bfun != NULL; bfun = bfun->up) + { + /* Skip functions with incompatible symbol information. */ + if (ftrace_function_switched (bfun, mfun, fun)) + continue; + + /* This is the function segment we're looking for. */ + break; + } + + return bfun; +} + +/* Find the innermost caller in the back trace of BFUN, skipping all + function segments that do not end with a call instruction (e.g. + tail calls ending with a jump). */ + +static struct btrace_function * +ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun) +{ + for (; bfun != NULL; bfun = bfun->up) { - struct symtab_and_line sal; - struct bound_minimal_symbol mfun; - struct symbol *fun; - const char *filename; + struct btrace_insn *last; CORE_ADDR pc; - pc = binst->pc; + /* We do not allow empty function segments. */ + gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn)); + + last = VEC_last (btrace_insn_s, bfun->insn); + pc = last->pc; - /* Try to determine the function we're in. We use both types of symbols - to avoid surprises when we sometimes get a full symbol and sometimes - only a minimal symbol. 
*/ - fun = find_pc_function (pc); - mfun = lookup_minimal_symbol_by_pc (pc); + if (gdbarch_insn_is_call (gdbarch, pc)) + break; + } + + return bfun; +} + +/* Add a continuation segment for a function into which we return. + PREV is the chronologically preceding function segment. + MFUN and FUN are the symbol information we have for this function. */ + +static struct btrace_function * +ftrace_new_return (struct gdbarch *gdbarch, + struct btrace_function *prev, + struct minimal_symbol *mfun, + struct symbol *fun) +{ + struct btrace_function *bfun, *caller; + + bfun = ftrace_new_function (prev, mfun, fun); + + /* It is important to start at PREV's caller. Otherwise, we might find + PREV itself, if PREV is a recursive function. */ + caller = ftrace_find_caller (prev->up, mfun, fun); + if (caller != NULL) + { + /* The caller of PREV is the preceding btrace function segment in this + function instance. */ + gdb_assert (caller->segment.next == NULL); + + caller->segment.next = bfun; + bfun->segment.prev = caller; + + /* Maintain the function level. */ + bfun->level = caller->level; + + /* Maintain the call stack. */ + bfun->up = caller->up; + bfun->flags = caller->flags; + + ftrace_debug (bfun, "new return"); + } + else + { + /* We did not find a caller. This could mean that something went + wrong or that the call is simply not included in the trace. */ - if (fun == NULL && mfun.minsym == NULL) + /* Let's search for some actual call. */ + caller = ftrace_find_call (gdbarch, prev->up); + if (caller == NULL) { - DEBUG_FTRACE ("no symbol at %u, pc=%s", idx, - core_addr_to_string_nz (pc)); - continue; - } + /* There is no call in PREV's back trace. We assume that the + branch trace did not include it. */ + + /* Let's find the topmost call function - this skips tail calls. */ + while (prev->up != NULL) + prev = prev->up; - /* If we're switching functions, we start over. 
*/ - if (ftrace_function_switched (bfun, mfun.minsym, fun)) + /* We maintain levels for a series of returns for which we have + not seen the calls. + We start at the preceding function's level in case this has + already been a return for which we have not seen the call. + We start at level 0 otherwise, to handle tail calls correctly. */ + bfun->level = min (0, prev->level) - 1; + + /* Fix up the call stack for PREV. */ + ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET); + + ftrace_debug (bfun, "new return - no caller"); + } + else { - bfun = VEC_safe_push (btrace_func_s, ftrace, NULL); + /* There is a call in PREV's back trace to which we should have + returned. Let's remain at this level. */ + bfun->level = prev->level; - ftrace_init_func (bfun, mfun.minsym, fun, idx); - ftrace_debug (bfun, "init"); + ftrace_debug (bfun, "new return - unknown caller"); } + } + + return bfun; +} + +/* Add a new function segment for a function switch. + PREV is the chronologically preceding function segment. + MFUN and FUN are the symbol information we have for this function. */ + +static struct btrace_function * +ftrace_new_switch (struct btrace_function *prev, + struct minimal_symbol *mfun, + struct symbol *fun) +{ + struct btrace_function *bfun; + + /* This is an unexplained function switch. The call stack will likely + be wrong at this point. */ + bfun = ftrace_new_function (prev, mfun, fun); - /* Update the instruction range. */ - bfun->iend = idx; - ftrace_debug (bfun, "update insns"); + /* We keep the function level. */ + bfun->level = prev->level; - /* Let's see if we have source correlation, as well. */ - sal = find_pc_line (pc, 0); - if (sal.symtab == NULL || sal.line == 0) + ftrace_debug (bfun, "new switch"); + + return bfun; +} + +/* Update BFUN with respect to the instruction at PC. This may create new + function segments. + Return the chronologically latest function segment, never NULL. 
*/ + +static struct btrace_function * +ftrace_update_function (struct gdbarch *gdbarch, + struct btrace_function *bfun, CORE_ADDR pc) +{ + struct bound_minimal_symbol bmfun; + struct minimal_symbol *mfun; + struct symbol *fun; + struct btrace_insn *last; + + /* Try to determine the function we're in. We use both types of symbols + to avoid surprises when we sometimes get a full symbol and sometimes + only a minimal symbol. */ + fun = find_pc_function (pc); + bmfun = lookup_minimal_symbol_by_pc (pc); + mfun = bmfun.minsym; + + if (fun == NULL && mfun == NULL) + DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc)); + + /* If we didn't have a function before, we create one. */ + if (bfun == NULL) + return ftrace_new_function (bfun, mfun, fun); + + /* Check the last instruction, if we have one. + We do this check first, since it allows us to fill in the call stack + links in addition to the normal flow links. */ + last = NULL; + if (!VEC_empty (btrace_insn_s, bfun->insn)) + last = VEC_last (btrace_insn_s, bfun->insn); + + if (last != NULL) + { + CORE_ADDR lpc; + + lpc = last->pc; + + /* Check for returns. */ + if (gdbarch_insn_is_ret (gdbarch, lpc)) + return ftrace_new_return (gdbarch, bfun, mfun, fun); + + /* Check for calls. */ + if (gdbarch_insn_is_call (gdbarch, lpc)) { - DEBUG_FTRACE ("no lines at %u, pc=%s", idx, - core_addr_to_string_nz (pc)); - continue; + int size; + + size = gdb_insn_length (gdbarch, lpc); + + /* Ignore calls to the next instruction. They are used for PIC. */ + if (lpc + size != pc) + return ftrace_new_call (bfun, mfun, fun); } + } + + /* Check if we're switching functions for some other reason. */ + if (ftrace_function_switched (bfun, mfun, fun)) + { + DEBUG_FTRACE ("switching from %s in %s at %s", + ftrace_print_insn_addr (last), + ftrace_print_function_name (bfun), + ftrace_print_filename (bfun)); - /* Check if we switched files. This could happen if, say, a macro that - is defined in another file is expanded here. 
*/ - filename = symtab_to_fullname (sal.symtab); - if (ftrace_skip_file (bfun, filename)) + if (last != NULL) { - DEBUG_FTRACE ("ignoring file at %u, pc=%s, file=%s", idx, - core_addr_to_string_nz (pc), filename); - continue; + CORE_ADDR start, lpc; + + start = get_pc_function_start (pc); + + /* If we can't determine the function for PC, we treat a jump at + the end of the block as tail call. */ + if (start == 0) + start = pc; + + lpc = last->pc; + + /* Jumps indicate optimized tail calls. */ + if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc)) + return ftrace_new_tailcall (bfun, mfun, fun); } - /* Update the line range. */ - bfun->lbegin = min (bfun->lbegin, sal.line); - bfun->lend = max (bfun->lend, sal.line); - ftrace_debug (bfun, "update lines"); + return ftrace_new_switch (bfun, mfun, fun); + } + + return bfun; +} + +/* Update BFUN's source range with respect to the instruction at PC. */ + +static void +ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc) +{ + struct symtab_and_line sal; + const char *fullname; + + sal = find_pc_line (pc, 0); + if (sal.symtab == NULL || sal.line == 0) + { + DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc)); + return; + } + + /* Check if we switched files. This could happen if, say, a macro that + is defined in another file is expanded here. */ + fullname = symtab_to_fullname (sal.symtab); + if (ftrace_skip_file (bfun, fullname)) + { + DEBUG_FTRACE ("ignoring file at %s, file=%s", + core_addr_to_string_nz (pc), fullname); + return; + } + + /* Update the line range. */ + bfun->lbegin = min (bfun->lbegin, sal.line); + bfun->lend = max (bfun->lend, sal.line); + + if (record_debug > 1) + ftrace_debug (bfun, "update lines"); +} + +/* Add the instruction at PC to BFUN's instructions. 
*/ + +static void +ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc) +{ + struct btrace_insn *insn; + + insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL); + insn->pc = pc; + + if (record_debug > 1) + ftrace_debug (bfun, "update insn"); +} + +/* Compute the function branch trace from a block branch trace BTRACE for + a thread given by BTINFO. */ + +static void +btrace_compute_ftrace (struct btrace_thread_info *btinfo, + VEC (btrace_block_s) *btrace) +{ + struct btrace_function *begin, *end; + struct gdbarch *gdbarch; + unsigned int blk; + int level; + + DEBUG ("compute ftrace"); + + gdbarch = target_gdbarch (); + begin = NULL; + end = NULL; + level = INT_MAX; + blk = VEC_length (btrace_block_s, btrace); + + while (blk != 0) + { + btrace_block_s *block; + CORE_ADDR pc; + + blk -= 1; + + block = VEC_index (btrace_block_s, btrace, blk); + pc = block->begin; + + for (;;) + { + int size; + + /* We should hit the end of the block. Warn if we went too far. */ + if (block->end < pc) + { + warning (_("Recorded trace may be corrupted around %s."), + core_addr_to_string_nz (pc)); + break; + } + + end = ftrace_update_function (gdbarch, end, pc); + if (begin == NULL) + begin = end; + + /* Maintain the function level offset. */ + level = min (level, end->level); + + ftrace_update_insns (end, pc); + ftrace_update_lines (end, pc); + + /* We're done once we pushed the instruction at the end. */ + if (block->end == pc) + break; + + size = gdb_insn_length (gdbarch, pc); + + /* Make sure we terminate if we fail to compute the size. */ + if (size <= 0) + { + warning (_("Recorded trace may be incomplete around %s."), + core_addr_to_string_nz (pc)); + break; + } + + pc += size; + } } - return ftrace; + btinfo->begin = begin; + btinfo->end = end; + + /* LEVEL is the minimal function level of all btrace function segments. + Define the global level offset to -LEVEL so all function levels are + normalized to start at zero. */ + btinfo->level = -level; } /* See btrace.h. 
*/ @@ -394,6 +718,7 @@ btrace_fetch (struct thread_info *tp) { struct btrace_thread_info *btinfo; VEC (btrace_block_s) *btrace; + struct cleanup *cleanup; DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid)); @@ -402,18 +727,15 @@ btrace_fetch (struct thread_info *tp) return; btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW); - if (VEC_empty (btrace_block_s, btrace)) - return; - - btrace_clear (tp); + cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace); - btinfo->btrace = btrace; - btinfo->itrace = compute_itrace (btinfo->btrace); - btinfo->ftrace = compute_ftrace (btinfo->itrace); + if (!VEC_empty (btrace_block_s, btrace)) + { + btrace_clear (tp); + btrace_compute_ftrace (btinfo, btrace); + } - /* Initialize branch trace iterators. */ - btrace_init_insn_iterator (btinfo); - btrace_init_func_iterator (btinfo); + do_cleanups (cleanup); } /* See btrace.h. */ @@ -422,18 +744,29 @@ void btrace_clear (struct thread_info *tp) { struct btrace_thread_info *btinfo; + struct btrace_function *it, *trash; DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid)); btinfo = &tp->btrace; - VEC_free (btrace_block_s, btinfo->btrace); - VEC_free (btrace_inst_s, btinfo->itrace); - VEC_free (btrace_func_s, btinfo->ftrace); + it = btinfo->begin; + while (it != NULL) + { + trash = it; + it = it->flow.next; - btinfo->btrace = NULL; - btinfo->itrace = NULL; - btinfo->ftrace = NULL; + xfree (trash); + } + + btinfo->begin = NULL; + btinfo->end = NULL; + + xfree (btinfo->insn_history); + xfree (btinfo->call_history); + + btinfo->insn_history = NULL; + btinfo->call_history = NULL; } /* See btrace.h. */ @@ -541,3 +874,451 @@ parse_xml_btrace (const char *buffer) return btrace; } + +/* See btrace.h. 
*/ + +const struct btrace_insn * +btrace_insn_get (const struct btrace_insn_iterator *it) +{ + const struct btrace_function *bfun; + unsigned int index, end; + + index = it->index; + bfun = it->function; + + /* The index is within the bounds of this function's instruction vector. */ + end = VEC_length (btrace_insn_s, bfun->insn); + gdb_assert (0 < end); + gdb_assert (index < end); + + return VEC_index (btrace_insn_s, bfun->insn, index); +} + +/* See btrace.h. */ + +unsigned int +btrace_insn_number (const struct btrace_insn_iterator *it) +{ + const struct btrace_function *bfun; + + bfun = it->function; + return bfun->insn_offset + it->index; +} + +/* See btrace.h. */ + +void +btrace_insn_begin (struct btrace_insn_iterator *it, + const struct btrace_thread_info *btinfo) +{ + const struct btrace_function *bfun; + + bfun = btinfo->begin; + if (bfun == NULL) + error (_("No trace.")); + + it->function = bfun; + it->index = 0; +} + +/* See btrace.h. */ + +void +btrace_insn_end (struct btrace_insn_iterator *it, + const struct btrace_thread_info *btinfo) +{ + const struct btrace_function *bfun; + unsigned int length; + + bfun = btinfo->end; + if (bfun == NULL) + error (_("No trace.")); + + /* The last instruction in the last function is the current instruction. + We point to it - it is one past the end of the execution trace. */ + length = VEC_length (btrace_insn_s, bfun->insn); + + it->function = bfun; + it->index = length - 1; +} + +/* See btrace.h. */ + +unsigned int +btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride) +{ + const struct btrace_function *bfun; + unsigned int index, steps; + + bfun = it->function; + steps = 0; + index = it->index; + + while (stride != 0) + { + unsigned int end, space, adv; + + end = VEC_length (btrace_insn_s, bfun->insn); + + gdb_assert (0 < end); + gdb_assert (index < end); + + /* Compute the number of instructions remaining in this segment. 
*/ + space = end - index; + + /* Advance the iterator as far as possible within this segment. */ + adv = min (space, stride); + stride -= adv; + index += adv; + steps += adv; + + /* Move to the next function if we're at the end of this one. */ + if (index == end) + { + const struct btrace_function *next; + + next = bfun->flow.next; + if (next == NULL) + { + /* We stepped past the last function. + + Let's adjust the index to point to the last instruction in + the previous function. */ + index -= 1; + steps -= 1; + break; + } + + /* We now point to the first instruction in the new function. */ + bfun = next; + index = 0; + } + + /* We did make progress. */ + gdb_assert (adv > 0); + } + + /* Update the iterator. */ + it->function = bfun; + it->index = index; + + return steps; +} + +/* See btrace.h. */ + +unsigned int +btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride) +{ + const struct btrace_function *bfun; + unsigned int index, steps; + + bfun = it->function; + steps = 0; + index = it->index; + + while (stride != 0) + { + unsigned int adv; + + /* Move to the previous function if we're at the start of this one. */ + if (index == 0) + { + const struct btrace_function *prev; + + prev = bfun->flow.prev; + if (prev == NULL) + break; + + /* We point to one after the last instruction in the new function. */ + bfun = prev; + index = VEC_length (btrace_insn_s, bfun->insn); + + /* There is at least one instruction in this function segment. */ + gdb_assert (index > 0); + } + + /* Advance the iterator as far as possible within this segment. */ + adv = min (index, stride); + stride -= adv; + index -= adv; + steps += adv; + + /* We did make progress. */ + gdb_assert (adv > 0); + } + + /* Update the iterator. */ + it->function = bfun; + it->index = index; + + return steps; +} + +/* See btrace.h. 
*/ + +int +btrace_insn_cmp (const struct btrace_insn_iterator *lhs, + const struct btrace_insn_iterator *rhs) +{ + unsigned int lnum, rnum; + + lnum = btrace_insn_number (lhs); + rnum = btrace_insn_number (rhs); + + return (int) (lnum - rnum); +} + +/* See btrace.h. */ + +int +btrace_find_insn_by_number (struct btrace_insn_iterator *it, + const struct btrace_thread_info *btinfo, + unsigned int number) +{ + const struct btrace_function *bfun; + unsigned int end; + + for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev) + if (bfun->insn_offset <= number) + break; + + if (bfun == NULL) + return 0; + + end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn); + if (end <= number) + return 0; + + it->function = bfun; + it->index = number - bfun->insn_offset; + + return 1; +} + +/* See btrace.h. */ + +const struct btrace_function * +btrace_call_get (const struct btrace_call_iterator *it) +{ + return it->function; +} + +/* See btrace.h. */ + +unsigned int +btrace_call_number (const struct btrace_call_iterator *it) +{ + const struct btrace_thread_info *btinfo; + const struct btrace_function *bfun; + unsigned int insns; + + btinfo = it->btinfo; + bfun = it->function; + if (bfun != NULL) + return bfun->number; + + /* For the end iterator, i.e. bfun == NULL, we return one more than the + number of the last function. */ + bfun = btinfo->end; + insns = VEC_length (btrace_insn_s, bfun->insn); + + /* If the function contains only a single instruction (i.e. the current + instruction), it will be skipped and its number is already the number + we seek. */ + if (insns == 1) + return bfun->number; + + /* Otherwise, return one more than the number of the last function. */ + return bfun->number + 1; +} + +/* See btrace.h. 
*/ + +void +btrace_call_begin (struct btrace_call_iterator *it, + const struct btrace_thread_info *btinfo) +{ + const struct btrace_function *bfun; + + bfun = btinfo->begin; + if (bfun == NULL) + error (_("No trace.")); + + it->btinfo = btinfo; + it->function = bfun; +} + +/* See btrace.h. */ + +void +btrace_call_end (struct btrace_call_iterator *it, + const struct btrace_thread_info *btinfo) +{ + const struct btrace_function *bfun; + + bfun = btinfo->end; + if (bfun == NULL) + error (_("No trace.")); + + it->btinfo = btinfo; + it->function = NULL; +} + +/* See btrace.h. */ + +unsigned int +btrace_call_next (struct btrace_call_iterator *it, unsigned int stride) +{ + const struct btrace_function *bfun; + unsigned int steps; + + bfun = it->function; + steps = 0; + while (bfun != NULL) + { + const struct btrace_function *next; + unsigned int insns; + + next = bfun->flow.next; + if (next == NULL) + { + /* Ignore the last function if it only contains a single + (i.e. the current) instruction. */ + insns = VEC_length (btrace_insn_s, bfun->insn); + if (insns == 1) + steps -= 1; + } + + if (stride == steps) + break; + + bfun = next; + steps += 1; + } + + it->function = bfun; + return steps; +} + +/* See btrace.h. */ + +unsigned int +btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride) +{ + const struct btrace_thread_info *btinfo; + const struct btrace_function *bfun; + unsigned int steps; + + bfun = it->function; + steps = 0; + + if (bfun == NULL) + { + unsigned int insns; + + btinfo = it->btinfo; + bfun = btinfo->end; + if (bfun == NULL) + return 0; + + /* Ignore the last function if it only contains a single + (i.e. the current) instruction. 
*/ + insns = VEC_length (btrace_insn_s, bfun->insn); + if (insns == 1) + bfun = bfun->flow.prev; + + if (bfun == NULL) + return 0; + + steps += 1; + } + + while (steps < stride) + { + const struct btrace_function *prev; + + prev = bfun->flow.prev; + if (prev == NULL) + break; + + bfun = prev; + steps += 1; + } + + it->function = bfun; + return steps; +} + +/* See btrace.h. */ + +int +btrace_call_cmp (const struct btrace_call_iterator *lhs, + const struct btrace_call_iterator *rhs) +{ + unsigned int lnum, rnum; + + lnum = btrace_call_number (lhs); + rnum = btrace_call_number (rhs); + + return (int) (lnum - rnum); +} + +/* See btrace.h. */ + +int +btrace_find_call_by_number (struct btrace_call_iterator *it, + const struct btrace_thread_info *btinfo, + unsigned int number) +{ + const struct btrace_function *bfun; + + for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev) + { + unsigned int bnum; + + bnum = bfun->number; + if (number == bnum) + { + it->btinfo = btinfo; + it->function = bfun; + return 1; + } + + /* Functions are ordered and numbered consecutively. We could bail out + earlier. On the other hand, it is very unlikely that we search for + a nonexistent function. */ + } + + return 0; +} + +/* See btrace.h. */ + +void +btrace_set_insn_history (struct btrace_thread_info *btinfo, + const struct btrace_insn_iterator *begin, + const struct btrace_insn_iterator *end) +{ + if (btinfo->insn_history == NULL) + btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history)); + + btinfo->insn_history->begin = *begin; + btinfo->insn_history->end = *end; +} + +/* See btrace.h. */ + +void +btrace_set_call_history (struct btrace_thread_info *btinfo, + const struct btrace_call_iterator *begin, + const struct btrace_call_iterator *end) +{ + gdb_assert (begin->btinfo == end->btinfo); + + if (btinfo->call_history == NULL) + btinfo->call_history = xzalloc (sizeof (*btinfo->call_history)); + + btinfo->call_history->begin = *begin; + btinfo->call_history->end = *end; +} |