author     Markus Metzger <markus.t.metzger@intel.com>   2013-06-03 15:39:35 +0200
committer  Markus Metzger <markus.t.metzger@intel.com>   2014-01-16 13:11:42 +0100
commit     969c39fbcd6a5675c1f4b97cd23d680e4b5b6487
tree       54d7a2c546ecf86fbe37536db86d0916734203d8
parent     0b722aec57e2e54083c1d56657762945ad4604fc
btrace, gdbserver: read branch trace incrementally
Read branch trace data incrementally and extend the current trace rather
than discarding it and reading the entire trace buffer each time.

If the branch trace buffer overflowed, we can't extend the current trace so
we discard it and start anew by reading the entire branch trace buffer.

2014-01-16  Markus Metzger  <markus.t.metzger@intel.com>

        * common/linux-btrace.c (perf_event_read_bts, linux_read_btrace):
        Support delta reads.
        (linux_disable_btrace): Change return type.
        * common/linux-btrace.h (linux_read_btrace): Change parameters
        and return type to allow error reporting.  Update users.
        (linux_disable_btrace): Change return type.  Update users.
        * common/btrace-common.h (btrace_read_type) <BTRACE_READ_DELTA>: New.
        (btrace_error): New.
        (btrace_block) <begin>: Comment on BEGIN == 0.
        * btrace.c (btrace_compute_ftrace): Start from the end of the
        current trace.
        (btrace_stitch_trace, btrace_clear_history): New.
        (btrace_fetch): Read delta trace, return if replaying.
        (btrace_clear): Move clear history code to btrace_clear_history.
        (parse_xml_btrace): Throw an error if parsing failed.
        * target.h (struct target_ops) <to_read_btrace>: Change parameters
        and return type to allow error reporting.
        (target_read_btrace): Change parameters and return type to allow
        error reporting.
        * target.c (target_read_btrace): Update.
        * remote.c (remote_read_btrace): Support delta reads.  Pass errors on.
        * NEWS: Announce it.

gdbserver/
        * target.h (target_ops) <read_btrace>: Change parameters and
        return type to allow error reporting.
        * server.c (handle_qxfer_btrace): Support delta reads.  Pass
        trace reading errors on.
        * linux-low.c (linux_low_read_btrace): Pass trace reading errors on.
        (linux_low_disable_btrace): New.
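For illustration only (this is not code from the patch; the helpers read_trace
and fetch_trace are hypothetical): a minimal, self-contained C sketch of the
fetch strategy the message describes, namely try a delta read first and fall
back to discarding the old trace and doing a full read when the delta cannot
be used.

#include <stdio.h>

enum read_type { READ_ALL, READ_DELTA };
enum trace_err { ERR_NONE, ERR_OVERFLOW };

/* Toy trace source standing in for the real target: pretend the trace
   buffer overflowed, so delta reads fail but full reads succeed.  */

static enum trace_err
read_trace (enum read_type type, int *blocks)
{
  if (type == READ_DELTA)
    return ERR_OVERFLOW;

  *blocks = 42;
  return ERR_NONE;
}

/* The strategy from the commit message: extend the existing trace with a
   delta read if we have one; on failure, discard it and read everything.
   Returns the number of blocks held afterwards.  */

static int
fetch_trace (int have_trace)
{
  int blocks = 0;

  if (have_trace && read_trace (READ_DELTA, &blocks) == ERR_NONE)
    return blocks;  /* The delta would be stitched onto the old trace here.  */

  /* Delta read failed (e.g. buffer overflow): start over with a full read.  */
  if (read_trace (READ_ALL, &blocks) != ERR_NONE)
    blocks = 0;

  return blocks;
}

int
main (void)
{
  printf ("blocks after fetch: %d\n", fetch_trace (1));
  return 0;
}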
-rw-r--r--  gdb/ChangeLog                |  27
-rw-r--r--  gdb/NEWS                     |   6
-rw-r--r--  gdb/amd64-linux-nat.c        |   6
-rw-r--r--  gdb/btrace.c                 | 156
-rw-r--r--  gdb/common/btrace-common.h   |  27
-rw-r--r--  gdb/common/linux-btrace.c    |  97
-rw-r--r--  gdb/common/linux-btrace.h    |  15
-rw-r--r--  gdb/doc/gdb.texinfo          |   8
-rw-r--r--  gdb/gdbserver/ChangeLog      |  10
-rw-r--r--  gdb/gdbserver/linux-low.c    |  36
-rw-r--r--  gdb/gdbserver/server.c       |  11
-rw-r--r--  gdb/gdbserver/target.h       |   9
-rw-r--r--  gdb/i386-linux-nat.c         |   6
-rw-r--r--  gdb/remote.c                 |  23
-rw-r--r--  gdb/target.c                 |   9
-rw-r--r--  gdb/target.h                 |  15
16 files changed, 370 insertions, 91 deletions
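The gdb/common/linux-btrace.c hunk further down decides how much new trace a
delta read may consume from the circular perf_event buffer and reports an
overflow when it cannot.  The following standalone sketch (simplified types,
hypothetical delta_size helper; not the patch's code) illustrates that
bookkeeping: data_head is the kernel's ever-growing write position, data_tail
is where the previous read stopped.

#include <stdio.h>
#include <stddef.h>

enum trace_err { ERR_NONE, ERR_OVERFLOW };

/* Decide how many bytes of new trace to read.  DATA_HEAD only grows;
   DATA_TAIL is the position of the previous read; BUFFER_SIZE is the
   size of the circular buffer.  */

static enum trace_err
delta_size (unsigned long data_head, unsigned long data_tail,
            size_t buffer_size, size_t *size)
{
  /* The head moved backwards (e.g. a counter wrap): give up on the delta.  */
  if (data_head < data_tail)
    return ERR_OVERFLOW;

  /* More new data than the buffer holds: older records were overwritten,
     so the delta would be incomplete.  */
  *size = data_head - data_tail;
  if (*size > buffer_size)
    return ERR_OVERFLOW;

  return ERR_NONE;
}

int
main (void)
{
  size_t size;

  /* 0x300 new bytes in a 4 KiB buffer: a delta read works.  */
  if (delta_size (0x1300, 0x1000, 4096, &size) == ERR_NONE)
    printf ("delta read: %zu bytes\n", size);

  /* 8 KiB of new data in a 4 KiB buffer: overflow, full read needed.  */
  if (delta_size (0x3000, 0x1000, 4096, &size) == ERR_OVERFLOW)
    printf ("overflow: discard old trace and read the whole buffer\n");

  return 0;
}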
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index 5cdf569..fc1f23d 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,5 +1,32 @@
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
+ * common/linux-btrace.c (perf_event_read_bts, linux_read_btrace):
+ Support delta reads.
+ (linux_disable_btrace): Change return type.
+ * common/linux-btrace.h (linux_read_btrace): Change parameters
+ and return type to allow error reporting. Update users.
+ (linux_disable_btrace): Change return type. Update users.
+ * common/btrace-common.h (btrace_read_type) <BTRACE_READ_DELTA>:
+ New.
+ (btrace_error): New.
+ (btrace_block) <begin>: Comment on BEGIN == 0.
+ * btrace.c (btrace_compute_ftrace): Start from the end of
+ the current trace.
+ (btrace_stitch_trace, btrace_clear_history): New.
+ (btrace_fetch): Read delta trace, return if replaying.
+ (btrace_clear): Move clear history code to btrace_clear_history.
+ (parse_xml_btrace): Throw an error if parsing failed.
+ * target.h (struct target_ops) <to_read_btrace>: Change parameters
+ and return type to allow error reporting.
+ (target_read_btrace): Change parameters and return type to allow
+ error reporting.
+ * target.c (target_read_btrace): Update.
+ * remote.c (remote_read_btrace): Support delta reads. Pass
+ errors on.
+ * NEWS: Announce it.
+
+2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
+
* record.h (record_btrace_frame_unwind)
(record_btrace_tailcall_frame_unwind): New declarations.
* dwarf2-frame: Include record.h
diff --git a/gdb/NEWS b/gdb/NEWS
index 6b7f4a4..840d139 100644
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -22,6 +22,12 @@
For locations inside the execution trace, the back trace is computed
based on the information stored in the execution trace.
+* New remote packets
+
+qXfer:btrace:read's annex
+ The qXfer:btrace:read packet supports a new annex 'delta' to read
+ branch trace incrementally.
+
*** Changes in GDB 7.7
* Improved support for process record-replay and reverse debugging on
diff --git a/gdb/amd64-linux-nat.c b/gdb/amd64-linux-nat.c
index 7c967f8..55973a6 100644
--- a/gdb/amd64-linux-nat.c
+++ b/gdb/amd64-linux-nat.c
@@ -1173,10 +1173,10 @@ amd64_linux_enable_btrace (ptid_t ptid)
static void
amd64_linux_disable_btrace (struct btrace_target_info *tinfo)
{
- int errcode = linux_disable_btrace (tinfo);
+ enum btrace_error errcode = linux_disable_btrace (tinfo);
- if (errcode != 0)
- error (_("Could not disable branch tracing: %s."), safe_strerror (errcode));
+ if (errcode != BTRACE_ERR_NONE)
+ error (_("Could not disable branch tracing."));
}
/* Teardown branch tracing. */
diff --git a/gdb/btrace.c b/gdb/btrace.c
index ba87e16..28970c3 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -599,9 +599,9 @@ btrace_compute_ftrace (struct btrace_thread_info *btinfo,
DEBUG ("compute ftrace");
gdbarch = target_gdbarch ();
- begin = NULL;
- end = NULL;
- level = INT_MAX;
+ begin = btinfo->begin;
+ end = btinfo->end;
+ level = begin != NULL ? -btinfo->level : INT_MAX;
blk = VEC_length (btrace_block_s, btrace);
while (blk != 0)
@@ -728,27 +728,158 @@ btrace_teardown (struct thread_info *tp)
btrace_clear (tp);
}
+/* Adjust the block trace in order to stitch old and new trace together.
+ BTRACE is the new delta trace between the last and the current stop.
+ BTINFO is the old branch trace until the last stop.
+ May modify BTRACE as well as the existing trace in BTINFO.
+ Return 0 on success, -1 otherwise. */
+
+static int
+btrace_stitch_trace (VEC (btrace_block_s) **btrace,
+ const struct btrace_thread_info *btinfo)
+{
+ struct btrace_function *last_bfun;
+ struct btrace_insn *last_insn;
+ btrace_block_s *first_new_block;
+
+ /* If we don't have trace, there's nothing to do. */
+ if (VEC_empty (btrace_block_s, *btrace))
+ return 0;
+
+ last_bfun = btinfo->end;
+ gdb_assert (last_bfun != NULL);
+
+ /* Beware that block trace starts with the most recent block, so the
+ chronologically first block in the new trace is the last block in
+ the new trace's block vector. */
+ first_new_block = VEC_last (btrace_block_s, *btrace);
+ last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
+
+ /* If the current PC at the end of the block is the same as in our current
+ trace, there are two explanations:
+ 1. we executed the instruction and some branch brought us back.
+ 2. we have not made any progress.
+ In the first case, the delta trace vector should contain at least two
+ entries.
+ In the second case, the delta trace vector should contain exactly one
+ entry for the partial block containing the current PC. Remove it. */
+ if (first_new_block->end == last_insn->pc
+ && VEC_length (btrace_block_s, *btrace) == 1)
+ {
+ VEC_pop (btrace_block_s, *btrace);
+ return 0;
+ }
+
+ DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
+ core_addr_to_string_nz (first_new_block->end));
+
+ /* Do a simple sanity check to make sure we don't accidentally end up
+ with a bad block. This should not occur in practice. */
+ if (first_new_block->end < last_insn->pc)
+ {
+ warning (_("Error while trying to read delta trace. Falling back to "
+ "a full read."));
+ return -1;
+ }
+
+ /* We adjust the last block to start at the end of our current trace. */
+ gdb_assert (first_new_block->begin == 0);
+ first_new_block->begin = last_insn->pc;
+
+ /* We simply pop the last insn so we can insert it again as part of
+ the normal branch trace computation.
+ Since instruction iterators are based on indices in the instructions
+ vector, we don't leave any pointers dangling. */
+ DEBUG ("pruning insn at %s for stitching",
+ ftrace_print_insn_addr (last_insn));
+
+ VEC_pop (btrace_insn_s, last_bfun->insn);
+
+ /* The instructions vector may become empty temporarily if this has
+ been the only instruction in this function segment.
+ This violates the invariant but will be remedied shortly by
+ btrace_compute_ftrace when we add the new trace. */
+ return 0;
+}
+
+/* Clear the branch trace histories in BTINFO. */
+
+static void
+btrace_clear_history (struct btrace_thread_info *btinfo)
+{
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+ xfree (btinfo->replay);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
+ btinfo->replay = NULL;
+}
+
/* See btrace.h. */
void
btrace_fetch (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
+ struct btrace_target_info *tinfo;
VEC (btrace_block_s) *btrace;
struct cleanup *cleanup;
+ int errcode;
DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ btrace = NULL;
btinfo = &tp->btrace;
- if (btinfo->target == NULL)
+ tinfo = btinfo->target;
+ if (tinfo == NULL)
+ return;
+
+ /* There's no way we could get new trace while replaying.
+ On the other hand, delta trace would return a partial record with the
+ current PC, which is the replay PC, not the last PC, as expected. */
+ if (btinfo->replay != NULL)
return;
- btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
+ /* Let's first try to extend the trace we already have. */
+ if (btinfo->end != NULL)
+ {
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
+ if (errcode == 0)
+ {
+ /* Success. Let's try to stitch the traces together. */
+ errcode = btrace_stitch_trace (&btrace, btinfo);
+ }
+ else
+ {
+ /* We failed to read delta trace. Let's try to read new trace. */
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
+
+ /* If we got any new trace, discard what we have. */
+ if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
+ btrace_clear (tp);
+ }
+
+ /* If we were not able to read the trace, we start over. */
+ if (errcode != 0)
+ {
+ btrace_clear (tp);
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+ }
+ }
+ else
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+
+ /* If we were not able to read the branch trace, signal an error. */
+ if (errcode != 0)
+ error (_("Failed to read branch trace."));
+
+ /* Compute the trace, provided we have any. */
if (!VEC_empty (btrace_block_s, btrace))
{
- btrace_clear (tp);
+ btrace_clear_history (btinfo);
btrace_compute_ftrace (btinfo, btrace);
}
@@ -783,13 +914,7 @@ btrace_clear (struct thread_info *tp)
btinfo->begin = NULL;
btinfo->end = NULL;
- xfree (btinfo->insn_history);
- xfree (btinfo->call_history);
- xfree (btinfo->replay);
-
- btinfo->insn_history = NULL;
- btinfo->call_history = NULL;
- btinfo->replay = NULL;
+ btrace_clear_history (btinfo);
}
/* See btrace.h. */
@@ -881,10 +1006,7 @@ parse_xml_btrace (const char *buffer)
errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
buffer, &btrace);
if (errcode != 0)
- {
- do_cleanups (cleanup);
- return NULL;
- }
+ error (_("Error parsing branch trace."));
/* Keep parse results. */
discard_cleanups (cleanup);
diff --git a/gdb/common/btrace-common.h b/gdb/common/btrace-common.h
index 1d389af..25617bb 100644
--- a/gdb/common/btrace-common.h
+++ b/gdb/common/btrace-common.h
@@ -42,7 +42,9 @@
asynchronous, e.g. interrupts. */
struct btrace_block
{
- /* The address of the first byte of the first instruction in the block. */
+ /* The address of the first byte of the first instruction in the block.
+ The address may be zero if we do not know the beginning of this block,
+ such as for the first block in a delta trace. */
CORE_ADDR begin;
/* The address of the first byte of the last instruction in the block. */
@@ -67,7 +69,28 @@ enum btrace_read_type
BTRACE_READ_ALL,
/* Send all available trace, if it changed. */
- BTRACE_READ_NEW
+ BTRACE_READ_NEW,
+
+ /* Send the trace since the last request. This will fail if the trace
+ buffer overflowed. */
+ BTRACE_READ_DELTA
+};
+
+/* Enumeration of btrace errors. */
+
+enum btrace_error
+{
+ /* No error. Everything is OK. */
+ BTRACE_ERR_NONE,
+
+ /* An unknown error. */
+ BTRACE_ERR_UNKNOWN,
+
+ /* Branch tracing is not supported on this system. */
+ BTRACE_ERR_NOT_SUPPORTED,
+
+ /* The branch trace buffer overflowed; no delta read possible. */
+ BTRACE_ERR_OVERFLOW
};
#endif /* BTRACE_COMMON_H */
diff --git a/gdb/common/linux-btrace.c b/gdb/common/linux-btrace.c
index e469900..218e0ce 100644
--- a/gdb/common/linux-btrace.c
+++ b/gdb/common/linux-btrace.c
@@ -172,11 +172,11 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
- const uint8_t *end, const uint8_t *start)
+ const uint8_t *end, const uint8_t *start, size_t size)
{
VEC (btrace_block_s) *btrace = NULL;
struct perf_event_sample sample;
- size_t read = 0, size = (end - begin);
+ size_t read = 0;
struct btrace_block block = { 0, 0 };
struct regcache *regcache;
@@ -252,6 +252,13 @@ perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
block.end = psample->bts.from;
}
+ /* Push the last block (i.e. the first one of inferior execution), as well.
+ We don't know where it ends, but we know where it starts. If we're
+ reading delta trace, we can fill in the start address later on.
+ Otherwise we will prune it. */
+ block.begin = 0;
+ VEC_safe_push (btrace_block_s, btrace, &block);
+
return btrace;
}
@@ -476,7 +483,7 @@ linux_enable_btrace (ptid_t ptid)
/* See linux-btrace.h. */
-int
+enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
int errcode;
@@ -484,12 +491,12 @@ linux_disable_btrace (struct btrace_target_info *tinfo)
errno = 0;
errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
if (errcode != 0)
- return errno;
+ return BTRACE_ERR_UNKNOWN;
close (tinfo->file);
xfree (tinfo);
- return 0;
+ return BTRACE_ERR_NONE;
}
/* Check whether the branch trace has changed. */
@@ -504,21 +511,24 @@ linux_btrace_has_changed (struct btrace_target_info *tinfo)
/* See linux-btrace.h. */
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+enum btrace_error
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *tinfo,
enum btrace_read_type type)
{
- VEC (btrace_block_s) *btrace = NULL;
volatile struct perf_event_mmap_page *header;
const uint8_t *begin, *end, *start;
- unsigned long data_head, retries = 5;
- size_t buffer_size;
+ unsigned long data_head, data_tail, retries = 5;
+ size_t buffer_size, size;
+ /* For delta reads, we return at least the partial last block containing
+ the current PC. */
if (type == BTRACE_READ_NEW && !linux_btrace_has_changed (tinfo))
- return NULL;
+ return BTRACE_ERR_NONE;
header = perf_event_header (tinfo);
buffer_size = perf_event_buffer_size (tinfo);
+ data_tail = tinfo->data_head;
/* We may need to retry reading the trace. See below. */
while (retries--)
@@ -526,23 +536,45 @@ linux_read_btrace (struct btrace_target_info *tinfo,
data_head = header->data_head;
/* Delete any leftover trace from the previous iteration. */
- VEC_free (btrace_block_s, btrace);
+ VEC_free (btrace_block_s, *btrace);
- /* If there's new trace, let's read it. */
- if (data_head != tinfo->data_head)
+ if (type == BTRACE_READ_DELTA)
{
- /* Data_head keeps growing; the buffer itself is circular. */
- begin = perf_event_buffer_begin (tinfo);
- start = begin + data_head % buffer_size;
-
- if (data_head <= buffer_size)
- end = start;
- else
- end = perf_event_buffer_end (tinfo);
+ /* Determine the number of bytes to read and check for buffer
+ overflows. */
+
+ /* Check for data head overflows. We might be able to recover from
+ those but they are very unlikely and it's not really worth the
+ effort, I think. */
+ if (data_head < data_tail)
+ return BTRACE_ERR_OVERFLOW;
+
+ /* If the buffer is smaller than the trace delta, we overflowed. */
+ size = data_head - data_tail;
+ if (buffer_size < size)
+ return BTRACE_ERR_OVERFLOW;
+ }
+ else
+ {
+ /* Read the entire buffer. */
+ size = buffer_size;
- btrace = perf_event_read_bts (tinfo, begin, end, start);
+ /* Adjust the size if the buffer has not overflowed, yet. */
+ if (data_head < size)
+ size = data_head;
}
+ /* Data_head keeps growing; the buffer itself is circular. */
+ begin = perf_event_buffer_begin (tinfo);
+ start = begin + data_head % buffer_size;
+
+ if (data_head <= buffer_size)
+ end = start;
+ else
+ end = perf_event_buffer_end (tinfo);
+
+ *btrace = perf_event_read_bts (tinfo, begin, end, start, size);
+
/* The stopping thread notifies its ptracer before it is scheduled out.
On multi-core systems, the debugger might therefore run while the
kernel might be writing the last branch trace records.
@@ -554,7 +586,13 @@ linux_read_btrace (struct btrace_target_info *tinfo,
tinfo->data_head = data_head;
- return btrace;
+ /* Prune the incomplete last block (i.e. the first one of inferior execution)
+ if we're not doing a delta read. There is no way of filling in its zeroed
+ BEGIN element. */
+ if (!VEC_empty (btrace_block_s, *btrace) && type != BTRACE_READ_DELTA)
+ VEC_pop (btrace_block_s, *btrace);
+
+ return BTRACE_ERR_NONE;
}
#else /* !HAVE_LINUX_PERF_EVENT_H */
@@ -577,19 +615,20 @@ linux_enable_btrace (ptid_t ptid)
/* See linux-btrace.h. */
-int
+enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
- return ENOSYS;
+ return BTRACE_ERR_NOT_SUPPORTED;
}
/* See linux-btrace.h. */
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+enum btrace_error
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *tinfo,
enum btrace_read_type type)
{
- return NULL;
+ return BTRACE_ERR_NOT_SUPPORTED;
}
#endif /* !HAVE_LINUX_PERF_EVENT_H */
diff --git a/gdb/common/linux-btrace.h b/gdb/common/linux-btrace.h
index 32a0403..a97b697 100644
--- a/gdb/common/linux-btrace.h
+++ b/gdb/common/linux-btrace.h
@@ -61,17 +61,18 @@ struct btrace_target_info
int ptr_bits;
};
-/* Check whether branch tracing is supported. */
+/* See to_supports_btrace in target.h. */
extern int linux_supports_btrace (void);
-/* Enable branch tracing for @ptid. */
+/* See to_enable_btrace in target.h. */
extern struct btrace_target_info *linux_enable_btrace (ptid_t ptid);
-/* Disable branch tracing and deallocate @tinfo. */
-extern int linux_disable_btrace (struct btrace_target_info *tinfo);
+/* See to_disable_btrace in target.h. */
+extern enum btrace_error linux_disable_btrace (struct btrace_target_info *ti);
-/* Read branch trace data. */
-extern VEC (btrace_block_s) *linux_read_btrace (struct btrace_target_info *,
- enum btrace_read_type);
+/* See to_read_btrace in target.h. */
+extern enum btrace_error linux_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *btinfo,
+ enum btrace_read_type type);
#endif /* LINUX_BTRACE_H */
diff --git a/gdb/doc/gdb.texinfo b/gdb/doc/gdb.texinfo
index d0f2f71..57071d1 100644
--- a/gdb/doc/gdb.texinfo
+++ b/gdb/doc/gdb.texinfo
@@ -39978,6 +39978,14 @@ Returns all available branch trace.
@item new
Returns all available branch trace if the branch trace changed since
the last read request.
+
+@item delta
+Returns the new branch trace since the last read request. Adds a new
+block to the end of the trace that begins at zero and ends at the source
+location of the first branch in the trace buffer. This extra block is
+used to stitch traces together.
+
+If the trace buffer overflowed, returns an error indicating the overflow.
@end table
This packet is not probed by default; the remote stub must request it
diff --git a/gdb/gdbserver/ChangeLog b/gdb/gdbserver/ChangeLog
index 7347f08..5884639 100644
--- a/gdb/gdbserver/ChangeLog
+++ b/gdb/gdbserver/ChangeLog
@@ -1,3 +1,13 @@
+2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
+
+ * target.h (target_ops) <read_btrace>: Change parameters and
+ return type to allow error reporting.
+ * server.c (handle_qxfer_btrace): Support delta reads. Pass
+ trace reading errors on.
+ * linux-low.c (linux_low_read_btrace): Pass trace reading
+ errors on.
+ (linux_low_disable_btrace): New.
+
2014-01-15 Doug Evans <dje@google.com>
* inferiors.c (thread_id_to_gdb_id): Delete.
diff --git a/gdb/gdbserver/linux-low.c b/gdb/gdbserver/linux-low.c
index 2bc619a..01d0e84 100644
--- a/gdb/gdbserver/linux-low.c
+++ b/gdb/gdbserver/linux-low.c
@@ -5705,7 +5705,7 @@ linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
#ifdef HAVE_LINUX_BTRACE
-/* Enable branch tracing. */
+/* See to_enable_btrace target method. */
static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
@@ -5725,17 +5725,39 @@ linux_low_enable_btrace (ptid_t ptid)
return tinfo;
}
-/* Read branch trace data as btrace xml document. */
+/* See to_disable_btrace target method. */
-static void
+static int
+linux_low_disable_btrace (struct btrace_target_info *tinfo)
+{
+ enum btrace_error err;
+
+ err = linux_disable_btrace (tinfo);
+ return (err == BTRACE_ERR_NONE ? 0 : -1);
+}
+
+/* See to_read_btrace target method. */
+
+static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
int type)
{
VEC (btrace_block_s) *btrace;
struct btrace_block *block;
+ enum btrace_error err;
int i;
- btrace = linux_read_btrace (tinfo, type);
+ btrace = NULL;
+ err = linux_read_btrace (&btrace, tinfo, type);
+ if (err != BTRACE_ERR_NONE)
+ {
+ if (err == BTRACE_ERR_OVERFLOW)
+ buffer_grow_str0 (buffer, "E.Overflow.");
+ else
+ buffer_grow_str0 (buffer, "E.Generic Error.");
+
+ return -1;
+ }
buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
@@ -5744,9 +5766,11 @@ linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
paddress (block->begin), paddress (block->end));
- buffer_grow_str (buffer, "</btrace>\n");
+ buffer_grow_str0 (buffer, "</btrace>\n");
VEC_free (btrace_block_s, btrace);
+
+ return 0;
}
#endif /* HAVE_LINUX_BTRACE */
@@ -5819,7 +5843,7 @@ static struct target_ops linux_target_ops = {
#ifdef HAVE_LINUX_BTRACE
linux_supports_btrace,
linux_low_enable_btrace,
- linux_disable_btrace,
+ linux_low_disable_btrace,
linux_low_read_btrace,
#else
NULL,
diff --git a/gdb/gdbserver/server.c b/gdb/gdbserver/server.c
index 9ae28f8..28ea048 100644
--- a/gdb/gdbserver/server.c
+++ b/gdb/gdbserver/server.c
@@ -1348,7 +1348,7 @@ handle_qxfer_btrace (const char *annex,
{
static struct buffer cache;
struct thread_info *thread;
- int type;
+ int type, result;
if (the_target->read_btrace == NULL || writebuf != NULL)
return -2;
@@ -1380,6 +1380,8 @@ handle_qxfer_btrace (const char *annex,
type = BTRACE_READ_ALL;
else if (strcmp (annex, "new") == 0)
type = BTRACE_READ_NEW;
+ else if (strcmp (annex, "delta") == 0)
+ type = BTRACE_READ_DELTA;
else
{
strcpy (own_buf, "E.Bad annex.");
@@ -1390,7 +1392,12 @@ handle_qxfer_btrace (const char *annex,
{
buffer_free (&cache);
- target_read_btrace (thread->btrace, &cache, type);
+ result = target_read_btrace (thread->btrace, &cache, type);
+ if (result != 0)
+ {
+ memcpy (own_buf, cache.buffer, cache.used_size);
+ return -3;
+ }
}
else if (offset > cache.used_size)
{
diff --git a/gdb/gdbserver/target.h b/gdb/gdbserver/target.h
index d090a30..ae48cd7 100644
--- a/gdb/gdbserver/target.h
+++ b/gdb/gdbserver/target.h
@@ -356,12 +356,15 @@ struct target_ops
information struct for reading and for disabling branch trace. */
struct btrace_target_info *(*enable_btrace) (ptid_t ptid);
- /* Disable branch tracing. */
+ /* Disable branch tracing.
+ Returns zero on success, non-zero otherwise. */
int (*disable_btrace) (struct btrace_target_info *tinfo);
/* Read branch trace data into buffer. We use an int to specify the type
- to break a cyclic dependency. */
- void (*read_btrace) (struct btrace_target_info *, struct buffer *, int type);
+ to break a cyclic dependency.
+ Return 0 on success; print an error message into BUFFER and return -1,
+ otherwise. */
+ int (*read_btrace) (struct btrace_target_info *, struct buffer *, int type);
/* Return true if target supports range stepping. */
int (*supports_range_stepping) (void);
diff --git a/gdb/i386-linux-nat.c b/gdb/i386-linux-nat.c
index 75d3fa0..fdf5dee 100644
--- a/gdb/i386-linux-nat.c
+++ b/gdb/i386-linux-nat.c
@@ -1084,10 +1084,10 @@ i386_linux_enable_btrace (ptid_t ptid)
static void
i386_linux_disable_btrace (struct btrace_target_info *tinfo)
{
- int errcode = linux_disable_btrace (tinfo);
+ enum btrace_error errcode = linux_disable_btrace (tinfo);
- if (errcode != 0)
- error (_("Could not disable branch tracing: %s."), safe_strerror (errcode));
+ if (errcode != BTRACE_ERR_NONE)
+ error (_("Could not disable branch tracing."));
}
/* Teardown branch tracing. */
diff --git a/gdb/remote.c b/gdb/remote.c
index 10aab66..d40485a 100644
--- a/gdb/remote.c
+++ b/gdb/remote.c
@@ -11489,13 +11489,14 @@ remote_teardown_btrace (struct btrace_target_info *tinfo)
/* Read the branch trace. */
-static VEC (btrace_block_s) *
-remote_read_btrace (struct btrace_target_info *tinfo,
+static enum btrace_error
+remote_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *tinfo,
enum btrace_read_type type)
{
struct packet_config *packet = &remote_protocol_packets[PACKET_qXfer_btrace];
struct remote_state *rs = get_remote_state ();
- VEC (btrace_block_s) *btrace = NULL;
+ struct cleanup *cleanup;
const char *annex;
char *xml;
@@ -11514,6 +11515,9 @@ remote_read_btrace (struct btrace_target_info *tinfo,
case BTRACE_READ_NEW:
annex = "new";
break;
+ case BTRACE_READ_DELTA:
+ annex = "delta";
+ break;
default:
internal_error (__FILE__, __LINE__,
_("Bad branch tracing read type: %u."),
@@ -11522,15 +11526,14 @@ remote_read_btrace (struct btrace_target_info *tinfo,
xml = target_read_stralloc (&current_target,
TARGET_OBJECT_BTRACE, annex);
- if (xml != NULL)
- {
- struct cleanup *cleanup = make_cleanup (xfree, xml);
+ if (xml == NULL)
+ return BTRACE_ERR_UNKNOWN;
- btrace = parse_xml_btrace (xml);
- do_cleanups (cleanup);
- }
+ cleanup = make_cleanup (xfree, xml);
+ *btrace = parse_xml_btrace (xml);
+ do_cleanups (cleanup);
- return btrace;
+ return BTRACE_ERR_NONE;
}
static int
diff --git a/gdb/target.c b/gdb/target.c
index d9c27b8..a771893 100644
--- a/gdb/target.c
+++ b/gdb/target.c
@@ -4232,18 +4232,19 @@ target_teardown_btrace (struct btrace_target_info *btinfo)
/* See target.h. */
-VEC (btrace_block_s) *
-target_read_btrace (struct btrace_target_info *btinfo,
+enum btrace_error
+target_read_btrace (VEC (btrace_block_s) **btrace,
+ struct btrace_target_info *btinfo,
enum btrace_read_type type)
{
struct target_ops *t;
for (t = current_target.beneath; t != NULL; t = t->beneath)
if (t->to_read_btrace != NULL)
- return t->to_read_btrace (btinfo, type);
+ return t->to_read_btrace (btrace, btinfo, type);
tcomplain ();
- return NULL;
+ return BTRACE_ERR_NOT_SUPPORTED;
}
/* See target.h. */
diff --git a/gdb/target.h b/gdb/target.h
index 79b9886..d6de52a 100644
--- a/gdb/target.h
+++ b/gdb/target.h
@@ -839,9 +839,13 @@ struct target_ops
be attempting to talk to a remote target. */
void (*to_teardown_btrace) (struct btrace_target_info *tinfo);
- /* Read branch trace data. */
- VEC (btrace_block_s) *(*to_read_btrace) (struct btrace_target_info *,
- enum btrace_read_type);
+ /* Read branch trace data for the thread indicated by BTINFO into DATA.
+ DATA is cleared before new trace is added.
+ The branch trace will start with the most recent block and continue
+ towards older blocks. */
+ enum btrace_error (*to_read_btrace) (VEC (btrace_block_s) **data,
+ struct btrace_target_info *btinfo,
+ enum btrace_read_type type);
/* Stop trace recording. */
void (*to_stop_recording) (void);
@@ -1998,8 +2002,9 @@ extern void target_disable_btrace (struct btrace_target_info *btinfo);
extern void target_teardown_btrace (struct btrace_target_info *btinfo);
/* See to_read_btrace in struct target_ops. */
-extern VEC (btrace_block_s) *target_read_btrace (struct btrace_target_info *,
- enum btrace_read_type);
+extern enum btrace_error target_read_btrace (VEC (btrace_block_s) **,
+ struct btrace_target_info *,
+ enum btrace_read_type);
/* See to_stop_recording in struct target_ops. */
extern void target_stop_recording (void);