Diffstat (limited to 'gdb/common')
-rw-r--r--  gdb/common/btrace-common.h | 27
-rw-r--r--  gdb/common/linux-btrace.c  | 97
-rw-r--r--  gdb/common/linux-btrace.h  | 15
3 files changed, 101 insertions, 38 deletions
diff --git a/gdb/common/btrace-common.h b/gdb/common/btrace-common.h
index 1d389af..25617bb 100644
--- a/gdb/common/btrace-common.h
+++ b/gdb/common/btrace-common.h
@@ -42,7 +42,9 @@
    asynchronous, e.g. interrupts.  */
 struct btrace_block
 {
-  /* The address of the first byte of the first instruction in the block.  */
+  /* The address of the first byte of the first instruction in the block.
+     The address may be zero if we do not know the beginning of this block,
+     such as for the first block in a delta trace.  */
   CORE_ADDR begin;
 
   /* The address of the first byte of the last instruction in the block.  */
@@ -67,7 +69,28 @@ enum btrace_read_type
   BTRACE_READ_ALL,
 
   /* Send all available trace, if it changed.  */
-  BTRACE_READ_NEW
+  BTRACE_READ_NEW,
+
+  /* Send the trace since the last request.  This will fail if the trace
+     buffer overflowed.  */
+  BTRACE_READ_DELTA
+};
+
+/* Enumeration of btrace errors.  */
+
+enum btrace_error
+{
+  /* No error.  Everything is OK.  */
+  BTRACE_ERR_NONE,
+
+  /* An unknown error.  */
+  BTRACE_ERR_UNKNOWN,
+
+  /* Branch tracing is not supported on this system.  */
+  BTRACE_ERR_NOT_SUPPORTED,
+
+  /* The branch trace buffer overflowed; no delta read possible.  */
+  BTRACE_ERR_OVERFLOW
 };
 
 #endif /* BTRACE_COMMON_H */
diff --git a/gdb/common/linux-btrace.c b/gdb/common/linux-btrace.c
index e469900..218e0ce 100644
--- a/gdb/common/linux-btrace.c
+++ b/gdb/common/linux-btrace.c
@@ -172,11 +172,11 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
 static VEC (btrace_block_s) *
 perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
-                     const uint8_t *end, const uint8_t *start)
+                     const uint8_t *end, const uint8_t *start, size_t size)
 {
   VEC (btrace_block_s) *btrace = NULL;
   struct perf_event_sample sample;
-  size_t read = 0, size = (end - begin);
+  size_t read = 0;
   struct btrace_block block = { 0, 0 };
   struct regcache *regcache;
@@ -252,6 +252,13 @@ perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
           block.end = psample->bts.from;
         }
 
+  /* Push the last block (i.e. the first one of inferior execution), as well.
+     We don't know where it ends, but we know where it starts.  If we're
+     reading delta trace, we can fill in the start address later on.
+     Otherwise we will prune it.  */
+  block.begin = 0;
+  VEC_safe_push (btrace_block_s, btrace, &block);
+
   return btrace;
 }
@@ -476,7 +483,7 @@ linux_enable_btrace (ptid_t ptid)
 
 /* See linux-btrace.h.  */
 
-int
+enum btrace_error
 linux_disable_btrace (struct btrace_target_info *tinfo)
 {
   int errcode;
@@ -484,12 +491,12 @@ linux_disable_btrace (struct btrace_target_info *tinfo)
   errno = 0;
   errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
   if (errcode != 0)
-    return errno;
+    return BTRACE_ERR_UNKNOWN;
 
   close (tinfo->file);
   xfree (tinfo);
 
-  return 0;
+  return BTRACE_ERR_NONE;
 }
 
 /* Check whether the branch trace has changed.  */
@@ -504,21 +511,24 @@ linux_btrace_has_changed (struct btrace_target_info *tinfo)
 
 /* See linux-btrace.h.  */
 
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+enum btrace_error
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+                   struct btrace_target_info *tinfo,
                    enum btrace_read_type type)
 {
-  VEC (btrace_block_s) *btrace = NULL;
   volatile struct perf_event_mmap_page *header;
   const uint8_t *begin, *end, *start;
-  unsigned long data_head, retries = 5;
-  size_t buffer_size;
+  unsigned long data_head, data_tail, retries = 5;
+  size_t buffer_size, size;
 
+  /* For delta reads, we return at least the partial last block containing
+     the current PC.  */
   if (type == BTRACE_READ_NEW && !linux_btrace_has_changed (tinfo))
-    return NULL;
+    return BTRACE_ERR_NONE;
 
   header = perf_event_header (tinfo);
   buffer_size = perf_event_buffer_size (tinfo);
+  data_tail = tinfo->data_head;
 
   /* We may need to retry reading the trace.  See below.  */
   while (retries--)
@@ -526,23 +536,45 @@ linux_read_btrace (struct btrace_target_info *tinfo,
       data_head = header->data_head;
 
       /* Delete any leftover trace from the previous iteration.  */
-      VEC_free (btrace_block_s, btrace);
+      VEC_free (btrace_block_s, *btrace);
 
-      /* If there's new trace, let's read it.  */
-      if (data_head != tinfo->data_head)
+      if (type == BTRACE_READ_DELTA)
         {
-          /* Data_head keeps growing; the buffer itself is circular.  */
-          begin = perf_event_buffer_begin (tinfo);
-          start = begin + data_head % buffer_size;
-
-          if (data_head <= buffer_size)
-            end = start;
-          else
-            end = perf_event_buffer_end (tinfo);
+          /* Determine the number of bytes to read and check for buffer
+             overflows.  */
+
+          /* Check for data head overflows.  We might be able to recover from
+             those but they are very unlikely and it's not really worth the
+             effort, I think.  */
+          if (data_head < data_tail)
+            return BTRACE_ERR_OVERFLOW;
+
+          /* If the buffer is smaller than the trace delta, we overflowed.  */
+          size = data_head - data_tail;
+          if (buffer_size < size)
+            return BTRACE_ERR_OVERFLOW;
+        }
+      else
+        {
+          /* Read the entire buffer.  */
+          size = buffer_size;
 
-          btrace = perf_event_read_bts (tinfo, begin, end, start);
+          /* Adjust the size if the buffer has not overflowed, yet.  */
+          if (data_head < size)
+            size = data_head;
         }
 
+      /* Data_head keeps growing; the buffer itself is circular.  */
+      begin = perf_event_buffer_begin (tinfo);
+      start = begin + data_head % buffer_size;
+
+      if (data_head <= buffer_size)
+        end = start;
+      else
+        end = perf_event_buffer_end (tinfo);
+
+      *btrace = perf_event_read_bts (tinfo, begin, end, start, size);
+
      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
        kernel might be writing the last branch trace records.
@@ -554,7 +586,13 @@ linux_read_btrace (struct btrace_target_info *tinfo,
 
   tinfo->data_head = data_head;
 
-  return btrace;
+  /* Prune the incomplete last block (i.e. the first one of inferior execution)
+     if we're not doing a delta read.  There is no way of filling in its zeroed
+     BEGIN element.  */
+  if (!VEC_empty (btrace_block_s, *btrace) && type != BTRACE_READ_DELTA)
+    VEC_pop (btrace_block_s, *btrace);
+
+  return BTRACE_ERR_NONE;
 }
 
 #else /* !HAVE_LINUX_PERF_EVENT_H */
@@ -577,19 +615,20 @@ linux_enable_btrace (ptid_t ptid)
 
 /* See linux-btrace.h.  */
 
-int
+enum btrace_error
 linux_disable_btrace (struct btrace_target_info *tinfo)
 {
-  return ENOSYS;
+  return BTRACE_ERR_NOT_SUPPORTED;
 }
 
 /* See linux-btrace.h.  */
 
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+enum btrace_error
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+                   struct btrace_target_info *tinfo,
                    enum btrace_read_type type)
 {
-  return NULL;
+  return BTRACE_ERR_NOT_SUPPORTED;
 }
 
 #endif /* !HAVE_LINUX_PERF_EVENT_H */
diff --git a/gdb/common/linux-btrace.h b/gdb/common/linux-btrace.h
index 32a0403..a97b697 100644
--- a/gdb/common/linux-btrace.h
+++ b/gdb/common/linux-btrace.h
@@ -61,17 +61,18 @@ struct btrace_target_info
   int ptr_bits;
 };
 
-/* Check whether branch tracing is supported.  */
+/* See to_supports_btrace in target.h.  */
 extern int linux_supports_btrace (void);
 
-/* Enable branch tracing for @ptid.  */
+/* See to_enable_btrace in target.h.  */
 extern struct btrace_target_info *linux_enable_btrace (ptid_t ptid);
 
-/* Disable branch tracing and deallocate @tinfo.  */
-extern int linux_disable_btrace (struct btrace_target_info *tinfo);
+/* See to_disable_btrace in target.h.  */
+extern enum btrace_error linux_disable_btrace (struct btrace_target_info *ti);
 
-/* Read branch trace data.  */
-extern VEC (btrace_block_s) *linux_read_btrace (struct btrace_target_info *,
-                                                enum btrace_read_type);
+/* See to_read_btrace in target.h.  */
+extern enum btrace_error linux_read_btrace (VEC (btrace_block_s) **btrace,
+                                            struct btrace_target_info *btinfo,
+                                            enum btrace_read_type type);
 
 #endif /* LINUX_BTRACE_H */
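The delta-read bookkeeping in linux_read_btrace is compact, so the standalone sketch below spells out the same arithmetic on data_head, the remembered data_tail, and the buffer size, including both overflow checks.  It is not part of the patch; the helper name delta_read_size and the enum delta_status are made up for illustration only.

#include <stddef.h>

/* Illustrative helper (not from the patch): the BTRACE_READ_DELTA size
   computation.  DATA_TAIL is the data_head value remembered from the
   previous read, DATA_HEAD the current value from the perf event mmap
   page, BUFFER_SIZE the size of the circular trace buffer.  */

enum delta_status { DELTA_OK, DELTA_OVERFLOW };

static enum delta_status
delta_read_size (unsigned long data_head, unsigned long data_tail,
                 size_t buffer_size, size_t *size)
{
  /* The kernel only ever increments data_head.  If it appears to have
     moved backwards, the counter itself wrapped around and the delta
     cannot be trusted.  */
  if (data_head < data_tail)
    return DELTA_OVERFLOW;

  /* If more bytes were produced since the last read than the buffer can
     hold, the oldest part of the delta has already been overwritten.  */
  *size = data_head - data_tail;
  if (buffer_size < *size)
    return DELTA_OVERFLOW;

  return DELTA_OK;
}

Once the number of bytes is known, the read position inside the circular buffer is derived as begin + data_head % buffer_size, exactly as in the patched loop, and perf_event_read_bts then walks backwards from that position over at most SIZE bytes.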
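With linux_read_btrace now returning an enum btrace_error and handing back the trace through an out parameter, a caller can distinguish an overflowed delta from other failures.  The fragment below is a hypothetical caller, not code from this patch or from gdb: fetch_trace is an invented name, and it assumes gdb's vec.h, btrace-common.h, and linux-btrace.h are in scope.  It only illustrates the intended calling convention of falling back from a delta read to a full read on BTRACE_ERR_OVERFLOW.

/* Hypothetical caller: try a delta read first, re-read everything still
   in the buffer if the delta was lost, and give up on any other error.  */

static VEC (btrace_block_s) *
fetch_trace (struct btrace_target_info *tinfo)
{
  VEC (btrace_block_s) *btrace = NULL;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
  if (err == BTRACE_ERR_OVERFLOW)
    /* The delta is gone; fall back to the full trace buffer.  */
    err = linux_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  if (err != BTRACE_ERR_NONE)
    {
      VEC_free (btrace_block_s, btrace);
      return NULL;
    }

  return btrace;
}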