author     Markus Metzger <markus.t.metzger@intel.com>    2015-07-08 09:38:16 +0200
committer  Markus Metzger <markus.t.metzger@intel.com>    2015-07-15 08:40:57 +0200
commit     e7b01ce03de85cfbbddb6ed755486db528abf1d1 (patch)
tree       befceca0e4eb95a1a8fa297c536e4665105d35a8 /gdb
parent     210187848791f50ee7dd8eaae8fc4bd371b3550b (diff)
ari, btrace: avoid unsigned long long
Fix the ARI warning about the use of unsigned long long.

We can't use ULONGEST, as it is defined as unsigned long on 64-bit
systems.  This will result in a compile error when storing a pointer to
an unsigned long long structure field (declared in perf_event.h as
__u64) in a ULONGEST * variable.

Use size_t to hold the buffer size inside GDB and __u64 when
interfacing with the Linux kernel.

gdb/
	* nat/linux-btrace.c (perf_event_read): Change the type of DATA_HEAD.
	(perf_event_read_all): Change the type of SIZE and DATA_HEAD.
	(perf_event_read_bts): Change the type of SIZE and READ.
	(linux_enable_bts): Change the type of SIZE, PAGES, DATA_SIZE,
	and DATA_OFFSET.  Move DATA_SIZE declaration.  Restrict the buffer
	size to UINT_MAX.  Check for overflows when using DATA_HEAD from
	the perf mmap page.
	(linux_enable_pt): Change the type of PAGES and SIZE.  Restrict the
	buffer size to UINT_MAX.
	(linux_read_bts): Change the type of BUFFER_SIZE, SIZE, DATA_HEAD,
	and DATA_TAIL.
	* nat/linux-btrace.h (struct perf_event_buffer)<size, data_head>
	<last_head>: Change type.
	* common/btrace-common.h (struct btrace_data_pt) <size>: Change type.
	* common/btrace-common.c (btrace_data_append): Change the type of
	SIZE.
	* btrace.c (parse_xml_raw): Change the type of SIZE.  Change oddness
	check.
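The type split above reduces to one recurring pattern in the patch: keep the
kernel-reported value in a 64-bit type, refuse anything above UINT_MAX, and
verify the narrowing cast before using the value as a size_t inside GDB.  The
following is a minimal, self-contained sketch of that pattern, not GDB code:
uint64_t stands in for the kernel's __u64, and narrow_kernel_size is an
illustrative helper name.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's __u64 from <linux/types.h>.  */
typedef uint64_t u64_t;

/* Narrow a 64-bit size reported by the kernel (e.g. data_size from the
   perf mmap page) to a size_t for use inside the debugger.  Returns 0 on
   success, -1 if the value cannot be represented.  */
static int
narrow_kernel_size (u64_t data_size, size_t *psize)
{
  size_t size;

  /* Don't accept more than we can represent in the configuration.  */
  if (data_size > (u64_t) UINT_MAX)
    return -1;

  size = (size_t) data_size;

  /* Belt-and-braces check that the narrowing cast did not truncate.  */
  if ((u64_t) size != data_size)
    return -1;

  *psize = size;
  return 0;
}

int
main (void)
{
  size_t size;

  if (narrow_kernel_size ((u64_t) 1 << 20, &size) == 0)
    printf ("ok: %zu bytes\n", size);

  if (narrow_kernel_size ((u64_t) 1 << 40, &size) != 0)
    printf ("rejected: larger than UINT_MAX\n");

  return EXIT_SUCCESS;
}

The UINT_MAX cap matters because the user-visible buffer sizes
(tinfo->conf.bts.size and tinfo->conf.pt.size) are unsigned int, being
registered as control variables for "set record btrace ... buffer-size", so a
larger buffer could not be reported back to the user correctly.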
Diffstat (limited to 'gdb')
-rw-r--r--  gdb/ChangeLog               |  22
-rw-r--r--  gdb/btrace.c                |  11
-rw-r--r--  gdb/common/btrace-common.c  |   2
-rw-r--r--  gdb/common/btrace-common.h  |  12
-rw-r--r--  gdb/nat/linux-btrace.c      | 102
-rw-r--r--  gdb/nat/linux-btrace.h      |   6
6 files changed, 108 insertions, 47 deletions
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index cd9139f..407dfb4 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,3 +1,25 @@
+2015-07-15 Markus Metzger <markus.t.metzger@intel.com>
+ Pedro Alves <palves@redhat.com>
+
+ * nat/linux-btrace.c (perf_event_read): Change the type of DATA_HEAD.
+ (perf_event_read_all): Change the type of SIZE and DATA_HEAD.
+ (perf_event_read_bts): Change the type of SIZE and READ.
+ (linux_enable_bts): Change the type of SIZE, PAGES, DATA_SIZE,
+ and DATA_OFFSET. Move DATA_SIZE declaration. Restrict the buffer size
+ to UINT_MAX. Check for overflows when using DATA_HEAD from the perf
+ mmap page.
+ (linux_enable_pt): Change the type of PAGES and SIZE. Restrict the
+ buffer size to UINT_MAX.
+ (linux_read_bts): Change the type of BUFFER_SIZE, SIZE, DATA_HEAD, and
+ DATA_TAIL.
+ * nat/linux-btrace.h (struct perf_event_buffer)<size, data_head>
+ <last_head>: Change type.
+ * common/btrace-common.h (struct btrace_data_pt) <size>: Change type.
+ * common/btrace-common.c (btrace_data_append): Change the type of
+ SIZE.
+ * btrace.c (parse_xml_raw): Change the type of SIZE. Change oddness
+ check.
+
2015-07-14 Simon Marchi <simon.marchi@ericsson.com>
* gdbtypes.h (CHECK_TYPEDEF): Remove.
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 731d237..94942f4 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -1414,19 +1414,18 @@ parse_xml_btrace_block (struct gdb_xml_parser *parser,
static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
- gdb_byte **pdata, unsigned long *psize)
+ gdb_byte **pdata, size_t *psize)
{
struct cleanup *cleanup;
gdb_byte *data, *bin;
- unsigned long size;
- size_t len;
+ size_t len, size;
len = strlen (body_text);
- size = len / 2;
-
- if ((size_t) size * 2 != len)
+ if (len % 2 != 0)
gdb_xml_error (parser, _("Bad raw data size."));
+ size = len / 2;
+
bin = data = xmalloc (size);
cleanup = make_cleanup (xfree, data);
diff --git a/gdb/common/btrace-common.c b/gdb/common/btrace-common.c
index 95193eb..3595636 100644
--- a/gdb/common/btrace-common.c
+++ b/gdb/common/btrace-common.c
@@ -158,7 +158,7 @@ btrace_data_append (struct btrace_data *dst,
case BTRACE_FORMAT_BTS:
{
gdb_byte *data;
- unsigned long size;
+ size_t size;
size = src->variant.pt.size + dst->variant.pt.size;
data = xmalloc (size);
diff --git a/gdb/common/btrace-common.h b/gdb/common/btrace-common.h
index f22efc5..c90a331 100644
--- a/gdb/common/btrace-common.h
+++ b/gdb/common/btrace-common.h
@@ -96,7 +96,10 @@ struct btrace_cpu
struct btrace_config_bts
{
- /* The size of the branch trace buffer in bytes. */
+ /* The size of the branch trace buffer in bytes.
+
+ This is unsigned int and not size_t since it is registered as
+ control variable for "set record btrace bts buffer-size". */
unsigned int size;
};
@@ -104,7 +107,10 @@ struct btrace_config_bts
struct btrace_config_pt
{
- /* The size of the branch trace buffer in bytes. */
+ /* The size of the branch trace buffer in bytes.
+
+ This is unsigned int and not size_t since it is registered as
+ control variable for "set record btrace pt buffer-size". */
unsigned int size;
};
@@ -152,7 +158,7 @@ struct btrace_data_pt
gdb_byte *data;
/* The size of DATA in bytes. */
- unsigned long size;
+ size_t size;
};
/* The branch trace data. */
diff --git a/gdb/nat/linux-btrace.c b/gdb/nat/linux-btrace.c
index b6e13d3..63fcdc8 100644
--- a/gdb/nat/linux-btrace.c
+++ b/gdb/nat/linux-btrace.c
@@ -111,12 +111,13 @@ perf_event_new_data (const struct perf_event_buffer *pev)
The caller is responsible for freeing the memory. */
static gdb_byte *
-perf_event_read (const struct perf_event_buffer *pev, unsigned long data_head,
- unsigned long size)
+perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
+ size_t size)
{
const gdb_byte *begin, *end, *start, *stop;
gdb_byte *buffer;
- unsigned long data_tail, buffer_size;
+ size_t buffer_size;
+ __u64 data_tail;
if (size == 0)
return NULL;
@@ -149,15 +150,16 @@ perf_event_read (const struct perf_event_buffer *pev, unsigned long data_head,
static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
- unsigned long *psize)
+ size_t *psize)
{
- unsigned long data_head, size;
+ size_t size;
+ __u64 data_head;
data_head = *pev->data_head;
size = pev->size;
if (data_head < size)
- size = data_head;
+ size = (size_t) data_head;
*data = perf_event_read (pev, data_head, size);
*psize = size;
@@ -269,12 +271,11 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
- const uint8_t *end, const uint8_t *start,
- unsigned long long size)
+ const uint8_t *end, const uint8_t *start, size_t size)
{
VEC (btrace_block_s) *btrace = NULL;
struct perf_event_sample sample;
- unsigned long long read = 0;
+ size_t read = 0;
struct btrace_block block = { 0, 0 };
struct regcache *regcache;
@@ -642,7 +643,8 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
struct perf_event_mmap_page *header;
struct btrace_target_info *tinfo;
struct btrace_tinfo_bts *bts;
- unsigned long long size, pages, data_offset, data_size;
+ size_t size, pages;
+ __u64 data_offset;
int pid, pg;
tinfo = xzalloc (sizeof (*tinfo));
@@ -674,28 +676,36 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
goto err_out;
/* Convert the requested size in bytes to pages (rounding up). */
- pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+ pages = ((size_t) conf->size / PAGE_SIZE
+ + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
/* We need at least one page. */
if (pages == 0)
pages = 1;
/* The buffer size can be requested in powers of two pages. Adjust PAGES
to the next power of two. */
- for (pg = 0; pages != (1u << pg); ++pg)
- if ((pages & (1u << pg)) != 0)
- pages += (1u << pg);
+ for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
+ if ((pages & ((size_t) 1 << pg)) != 0)
+ pages += ((size_t) 1 << pg);
/* We try to allocate the requested size.
If that fails, try to get as much as we can. */
for (; pages > 0; pages >>= 1)
{
size_t length;
+ __u64 data_size;
- size = pages * PAGE_SIZE;
+ data_size = (__u64) pages * PAGE_SIZE;
+
+ /* Don't ask for more than we can represent in the configuration. */
+ if ((__u64) UINT_MAX < data_size)
+ continue;
+
+ size = (size_t) data_size;
length = size + PAGE_SIZE;
/* Check for overflows. */
- if ((unsigned long long) length < size)
+ if ((__u64) length != data_size + PAGE_SIZE)
continue;
/* The number of pages we request needs to be a power of two. */
@@ -708,23 +718,33 @@ linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
goto err_file;
data_offset = PAGE_SIZE;
- data_size = size;
#if defined (PERF_ATTR_SIZE_VER5)
if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
{
+ __u64 data_size;
+
data_offset = header->data_offset;
data_size = header->data_size;
+
+ size = (unsigned int) data_size;
+
+ /* Check for overflows. */
+ if ((__u64) size != data_size)
+ {
+ munmap ((void *) header, size + PAGE_SIZE);
+ goto err_file;
+ }
}
#endif /* defined (PERF_ATTR_SIZE_VER5) */
bts->header = header;
bts->bts.mem = ((const uint8_t *) header) + data_offset;
- bts->bts.size = data_size;
+ bts->bts.size = size;
bts->bts.data_head = &header->data_head;
- bts->bts.last_head = 0;
+ bts->bts.last_head = 0ull;
- tinfo->conf.bts.size = data_size;
+ tinfo->conf.bts.size = (unsigned int) size;
return tinfo;
err_file:
@@ -746,7 +766,7 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
struct perf_event_mmap_page *header;
struct btrace_target_info *tinfo;
struct btrace_tinfo_pt *pt;
- unsigned long long pages, size;
+ size_t pages, size;
int pid, pg, errcode, type;
if (conf->size == 0)
@@ -788,31 +808,39 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
header->aux_offset = header->data_offset + header->data_size;
/* Convert the requested size in bytes to pages (rounding up). */
- pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
+ pages = ((size_t) conf->size / PAGE_SIZE
+ + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
/* We need at least one page. */
if (pages == 0)
pages = 1;
/* The buffer size can be requested in powers of two pages. Adjust PAGES
to the next power of two. */
- for (pg = 0; pages != (1u << pg); ++pg)
- if ((pages & (1u << pg)) != 0)
- pages += (1u << pg);
+ for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
+ if ((pages & ((size_t) 1 << pg)) != 0)
+ pages += ((size_t) 1 << pg);
/* We try to allocate the requested size.
If that fails, try to get as much as we can. */
for (; pages > 0; pages >>= 1)
{
size_t length;
+ __u64 data_size;
- size = pages * PAGE_SIZE;
- length = size;
+ data_size = (__u64) pages * PAGE_SIZE;
+
+ /* Don't ask for more than we can represent in the configuration. */
+ if ((__u64) UINT_MAX < data_size)
+ continue;
+
+ size = (size_t) data_size;
/* Check for overflows. */
- if ((unsigned long long) length < size)
+ if ((__u64) size != data_size)
continue;
- header->aux_size = size;
+ header->aux_size = data_size;
+ length = size;
pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
header->aux_offset);
@@ -827,7 +855,7 @@ linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
pt->pt.size = size;
pt->pt.data_head = &header->aux_head;
- tinfo->conf.pt.size = size;
+ tinfo->conf.pt.size = (unsigned int) size;
return tinfo;
err_conf:
@@ -938,7 +966,8 @@ linux_read_bts (struct btrace_data_bts *btrace,
{
struct perf_event_buffer *pevent;
const uint8_t *begin, *end, *start;
- unsigned long long data_head, data_tail, buffer_size, size;
+ size_t buffer_size, size;
+ __u64 data_head, data_tail;
unsigned int retries = 5;
pevent = &tinfo->variant.bts.bts;
@@ -961,6 +990,8 @@ linux_read_bts (struct btrace_data_bts *btrace,
if (type == BTRACE_READ_DELTA)
{
+ __u64 data_size;
+
/* Determine the number of bytes to read and check for buffer
overflows. */
@@ -971,9 +1002,12 @@ linux_read_bts (struct btrace_data_bts *btrace,
return BTRACE_ERR_OVERFLOW;
/* If the buffer is smaller than the trace delta, we overflowed. */
- size = data_head - data_tail;
- if (buffer_size < size)
+ data_size = data_head - data_tail;
+ if (buffer_size < data_size)
return BTRACE_ERR_OVERFLOW;
+
+ /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
+ size = (size_t) data_size;
}
else
{
@@ -982,7 +1016,7 @@ linux_read_bts (struct btrace_data_bts *btrace,
/* Adjust the size if the buffer has not overflowed, yet. */
if (data_head < size)
- size = data_head;
+ size = (size_t) data_head;
}
/* Data_head keeps growing; the buffer itself is circular. */
diff --git a/gdb/nat/linux-btrace.h b/gdb/nat/linux-btrace.h
index b680bf5..5ea87a8 100644
--- a/gdb/nat/linux-btrace.h
+++ b/gdb/nat/linux-btrace.h
@@ -38,13 +38,13 @@ struct perf_event_buffer
const uint8_t *mem;
/* The size of the mapped memory in bytes. */
- unsigned long long size;
+ size_t size;
/* A pointer to the data_head field for this buffer. */
- volatile unsigned long long *data_head;
+ volatile __u64 *data_head;
/* The data_head value from the last read. */
- unsigned long long last_head;
+ __u64 last_head;
};
/* Branch trace target information for BTS tracing. */
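The page-size arithmetic that appears twice above, in linux_enable_bts and
linux_enable_pt, can be exercised on its own.  The sketch below mirrors that
rounding loop under the assumption of a 4 KiB page; the PAGE_SIZE constant and
the requested_pages helper are illustrative stand-ins rather than GDB code.

#include <stddef.h>
#include <stdio.h>

/* Illustrative page size; the patch uses the system's PAGE_SIZE.  */
#define PAGE_SIZE 4096

/* Convert a requested buffer size in bytes to a page count that is at
   least one and a power of two, mirroring the adjustment loop in
   linux_enable_bts/linux_enable_pt.  */
static size_t
requested_pages (unsigned int bytes)
{
  size_t pages;
  int pg;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) bytes / PAGE_SIZE
	   + ((bytes % PAGE_SIZE) == 0 ? 0 : 1));

  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* Adjust PAGES to the next power of two: whenever a lower bit is set,
     adding that bit carries upwards until only one bit remains.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  return pages;
}

int
main (void)
{
  printf ("%zu\n", requested_pages (0));        /* 1 page  */
  printf ("%zu\n", requested_pages (4096));     /* 1 page  */
  printf ("%zu\n", requested_pages (4097));     /* 2 pages */
  printf ("%zu\n", requested_pages (3 * 4096)); /* 4 pages */
  return 0;
}

Because the result is rounded up to a power of two, asking for three pages
allocates four; if the kernel then refuses the mapping, the enable functions
fall back by halving PAGES (pages >>= 1) until an allocation succeeds or PAGES
reaches zero.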