 -rw-r--r--  gdb/ChangeLog                |  31
 -rw-r--r--  gdb/python/py-prettyprint.c  |   5
 -rw-r--r--  gdb/valprint.c               |  22
 -rw-r--r--  gdb/valprint.h               |   2
 -rw-r--r--  gdb/value.c                  | 295
 -rw-r--r--  gdb/value.h                  |  14
 6 files changed, 364 insertions(+), 5 deletions(-)
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index 37423ce..aa54988 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,3 +1,34 @@
+2011-02-14 Pedro Alves <pedro@codesourcery.com>
+
+ Base support for <unavailable> value contents.
+
+ gdb/
+ * value.h (value_bytes_available): Declare.
+ (mark_value_bytes_unavailable): Declare.
+ * value.c (struct range): New struct.
+ (range_s): New typedef.
+ (ranges_overlap): New function.
+ (range_lessthan): New function.
+ (ranges_contain): New function.
+ (struct value) <unavailable>: New field.
+ (value_bytes_available): New function.
+ (mark_value_bytes_unavailable): New function.
+ (require_not_optimized_out): Constify parameter.
+ (require_available): New function.
+ (value_contents_all, value_contents): Require all bytes be
+ available.
+ (value_free): Free `unavailable'.
+ (value_copy): Copy `unavailable'.
+ * valprint.h (val_print_unavailable): Declare.
+ * valprint.c (valprint_check_validity): Rename `offset' parameter
+ to `embedded_offset'. If printing a scalar, check whether the
+ value chunk is available.
+ (val_print_unavailable): New.
+ (val_print_scalar_formatted): Check whether the value is
+ available.
+ * python/py-prettyprint.c (apply_val_pretty_printer): Refuse
+ pretty-printing unavailable values.
+
2011-02-13 Jan Kratochvil <jan.kratochvil@redhat.com>
Fix const/volatile qualifiers of C++ types, PR c++/12328.
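
The patch summarized in the ChangeLog entry above adds two entry points, mark_value_bytes_unavailable and value_bytes_available, plus an <unavailable> print path. As a rough sketch of how a caller might exercise them (the value VAL, the ui_file STREAM and the byte offsets are hypothetical, chosen only to illustrate the API introduced by this commit):

    /* Sketch only: VAL is a non-lazy value whose bytes [8, 16) could
       not be read from the target.  */
    mark_value_bytes_unavailable (val, 8, 8);

    if (!value_bytes_available (val, 8, 8))
      /* The printing code added below emits "<unavailable>" for such
         chunks.  */
      val_print_unavailable (stream);
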
diff --git a/gdb/python/py-prettyprint.c b/gdb/python/py-prettyprint.c
index 916492e..261411a 100644
--- a/gdb/python/py-prettyprint.c
+++ b/gdb/python/py-prettyprint.c
@@ -690,6 +690,11 @@ apply_val_pretty_printer (struct type *type, const gdb_byte *valaddr,
struct cleanup *cleanups;
int result = 0;
enum string_repr_result print_result;
+
+ /* No pretty-printer support for unavailable values. */
+ if (!value_bytes_available (val, embedded_offset, TYPE_LENGTH (type)))
+ return 0;
+
cleanups = ensure_python_env (gdbarch, language);
/* Instantiate the printer. */
diff --git a/gdb/valprint.c b/gdb/valprint.c
index b32d6fc..64a4b05 100644
--- a/gdb/valprint.c
+++ b/gdb/valprint.c
@@ -260,7 +260,7 @@ scalar_type_p (struct type *type)
static int
valprint_check_validity (struct ui_file *stream,
struct type *type,
- int offset,
+ int embedded_offset,
const struct value *val)
{
CHECK_TYPEDEF (type);
@@ -269,19 +269,25 @@ valprint_check_validity (struct ui_file *stream,
&& TYPE_CODE (type) != TYPE_CODE_STRUCT
&& TYPE_CODE (type) != TYPE_CODE_ARRAY)
{
- if (! value_bits_valid (val, TARGET_CHAR_BIT * offset,
- TARGET_CHAR_BIT * TYPE_LENGTH (type)))
+ if (!value_bits_valid (val, TARGET_CHAR_BIT * embedded_offset,
+ TARGET_CHAR_BIT * TYPE_LENGTH (type)))
{
val_print_optimized_out (stream);
return 0;
}
- if (value_bits_synthetic_pointer (val, TARGET_CHAR_BIT * offset,
+ if (value_bits_synthetic_pointer (val, TARGET_CHAR_BIT * embedded_offset,
TARGET_CHAR_BIT * TYPE_LENGTH (type)))
{
fputs_filtered (_("<synthetic pointer>"), stream);
return 0;
}
+
+ if (!value_bytes_available (val, embedded_offset, TYPE_LENGTH (type)))
+ {
+ val_print_unavailable (stream);
+ return 0;
+ }
}
return 1;
@@ -293,6 +299,12 @@ val_print_optimized_out (struct ui_file *stream)
fprintf_filtered (stream, _("<optimized out>"));
}
+void
+val_print_unavailable (struct ui_file *stream)
+{
+ fprintf_filtered (stream, _("<unavailable>"));
+}
+
/* Print using the given LANGUAGE the data of type TYPE located at
VALADDR + EMBEDDED_OFFSET (within GDB), which came from the
inferior at address ADDRESS + EMBEDDED_OFFSET, onto stdio stream
@@ -560,6 +572,8 @@ val_print_scalar_formatted (struct type *type,
if (!value_bits_valid (val, TARGET_CHAR_BIT * embedded_offset,
TARGET_CHAR_BIT * TYPE_LENGTH (type)))
val_print_optimized_out (stream);
+ else if (!value_bytes_available (val, embedded_offset, TYPE_LENGTH (type)))
+ val_print_unavailable (stream);
else
print_scalar_formatted (valaddr + embedded_offset, type,
options, size, stream);
diff --git a/gdb/valprint.h b/gdb/valprint.h
index 3ad3545..542a13a 100644
--- a/gdb/valprint.h
+++ b/gdb/valprint.h
@@ -154,4 +154,6 @@ int read_string (CORE_ADDR addr, int len, int width, unsigned int fetchlimit,
extern void val_print_optimized_out (struct ui_file *stream);
+extern void val_print_unavailable (struct ui_file *stream);
+
#endif
diff --git a/gdb/value.c b/gdb/value.c
index e26270c..58190ea 100644
--- a/gdb/value.c
+++ b/gdb/value.c
@@ -63,6 +63,110 @@ struct internal_function
void *cookie;
};
+/* Defines an [OFFSET, OFFSET + LENGTH) range. */
+
+struct range
+{
+ /* Lowest offset in the range. */
+ int offset;
+
+ /* Length of the range. */
+ int length;
+};
+
+typedef struct range range_s;
+
+DEF_VEC_O(range_s);
+
+/* Returns true if the ranges defined by [offset1, offset1+len1) and
+ [offset2, offset2+len2) overlap. */
+
+static int
+ranges_overlap (int offset1, int len1,
+ int offset2, int len2)
+{
+ ULONGEST h, l;
+
+ l = max (offset1, offset2);
+ h = min (offset1 + len1, offset2 + len2);
+ return (l < h);
+}
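
ranges_overlap treats each range as a half-open interval [OFFSET, OFFSET+LENGTH): two intervals overlap exactly when the larger of the two start offsets is strictly below the smaller of the two end offsets, so ranges that merely touch do not count as overlapping (contiguity is handled separately when marking bytes unavailable). A small standalone illustration of the same test, not GDB code, with made-up sample ranges:

    #include <assert.h>

    /* Same half-open interval test as ranges_overlap above.  */
    static int
    overlaps (int offset1, int len1, int offset2, int len2)
    {
      int l = offset1 > offset2 ? offset1 : offset2;     /* max of starts */
      int h = offset1 + len1 < offset2 + len2
              ? offset1 + len1 : offset2 + len2;         /* min of ends */
      return l < h;
    }

    int
    main (void)
    {
      assert (overlaps (0, 4, 2, 4));     /* [0,4) and [2,6) share [2,4).  */
      assert (!overlaps (0, 4, 4, 4));    /* [0,4) and [4,8) only touch.   */
      assert (!overlaps (0, 4, 8, 2));    /* Disjoint.                     */
      return 0;
    }
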
+
+/* Returns true if the first argument is strictly less than the
+ second, useful for VEC_lower_bound. We keep ranges sorted by
+ offset and coalesce overlapping and contiguous ranges, so this just
+ compares the starting offset. */
+
+static int
+range_lessthan (const range_s *r1, const range_s *r2)
+{
+ return r1->offset < r2->offset;
+}
+
+/* Returns true if RANGES contains any range that overlaps [OFFSET,
+ OFFSET+LENGTH). */
+
+static int
+ranges_contain (VEC(range_s) *ranges, int offset, int length)
+{
+ range_s what;
+ int i;
+
+ what.offset = offset;
+ what.length = length;
+
+  /* We keep ranges sorted by offset and coalesce overlapping and
+     contiguous ranges, so to check if a range list contains a given
+     range, we can do a binary search for the position the given range
+     would be inserted if we only considered the starting OFFSET of
+     ranges.  We call that position I.  Since we also have LENGTH to
+     care for (this is a range after all), we need to check if the
+     _previous_ range overlaps the I range.  E.g.,
+
+          R
+          |---|
+       |---|  |---|  |------| ... |--|
+       0      1      2            N
+
+       I=1
+
+     In the case above, the binary search would return `I=1', meaning
+     this OFFSET would be inserted at position 1, pushing the current
+     position 1 further (to become 2).  But note that `0' overlaps
+     with R.
+
+     We also need to check whether the range at position I itself
+     overlaps R.  E.g.,
+
+              R
+              |---|
+       |---|  |---|  |-------| ... |--|
+       0      1      2             N
+
+       I=1
+  */
+
+ i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
+
+ if (i > 0)
+ {
+ struct range *bef = VEC_index (range_s, ranges, i - 1);
+
+ if (ranges_overlap (bef->offset, bef->length, offset, length))
+ return 1;
+ }
+
+ if (i < VEC_length (range_s, ranges))
+ {
+ struct range *r = VEC_index (range_s, ranges, i);
+
+ if (ranges_overlap (r->offset, r->length, offset, length))
+ return 1;
+ }
+
+ return 0;
+}
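
The comment above describes the lookup: binary-search the sorted, coalesced list by start offset, then test both the predecessor and the element at the insertion point against the queried range. A standalone sketch of the same idea over a plain C array (not GDB code; the sample ranges are made up):

    #include <assert.h>

    struct rng { int offset, length; };

    static int
    overlaps (int o1, int l1, int o2, int l2)
    {
      int lo = o1 > o2 ? o1 : o2;
      int hi = (o1 + l1 < o2 + l2) ? o1 + l1 : o2 + l2;
      return lo < hi;
    }

    /* Return nonzero if [OFFSET, OFFSET+LENGTH) overlaps any element
       of RANGES[0 .. N).  RANGES is sorted by offset and coalesced.  */
    static int
    contains (const struct rng *ranges, int n, int offset, int length)
    {
      int lo = 0, hi = n;

      /* lower_bound: first index whose offset is >= OFFSET.  */
      while (lo < hi)
        {
          int mid = (lo + hi) / 2;
          if (ranges[mid].offset < offset)
            lo = mid + 1;
          else
            hi = mid;
        }

      if (lo > 0
          && overlaps (ranges[lo - 1].offset, ranges[lo - 1].length,
                       offset, length))
        return 1;
      if (lo < n
          && overlaps (ranges[lo].offset, ranges[lo].length,
                       offset, length))
        return 1;
      return 0;
    }

    int
    main (void)
    {
      /* Unavailable chunks at [4,8) and [16,20).  */
      struct rng unavail[] = { { 4, 4 }, { 16, 4 } };

      assert (contains (unavail, 2, 6, 2));    /* Inside [4,8).       */
      assert (!contains (unavail, 2, 8, 8));   /* The gap [8,16).     */
      assert (contains (unavail, 2, 0, 32));   /* Spans everything.   */
      return 0;
    }
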
+
static struct cmd_list_element *functionlist;
struct value
@@ -206,6 +310,11 @@ struct value
valid if lazy is nonzero. */
gdb_byte *contents;
+ /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
+ rather than available, since the common and default case is for a
+ value to be available. This is filled in at value read time. */
+ VEC(range_s) *unavailable;
+
/* The number of references to this value. When a value is created,
the value chain holds a reference, so REFERENCE_COUNT is 1. If
release_value is called, this value is removed from the chain but
@@ -214,6 +323,179 @@ struct value
int reference_count;
};
+int
+value_bytes_available (const struct value *value, int offset, int length)
+{
+ gdb_assert (!value->lazy);
+
+ return !ranges_contain (value->unavailable, offset, length);
+}
+
+void
+mark_value_bytes_unavailable (struct value *value, int offset, int length)
+{
+ range_s newr;
+ int i;
+
+ /* Insert the range sorted. If there's overlap or the new range
+ would be contiguous with an existing range, merge. */
+
+ newr.offset = offset;
+ newr.length = length;
+
+  /* Do a binary search for the position the given range would be
+     inserted if we only considered the starting OFFSET of ranges.
+     Call that position I.  Since we also have LENGTH to care for
+     (this is a range after all), we need to check if the _previous_
+     range overlaps the I range.  E.g., calling R the new range:
+
+     #1 - overlaps with previous
+
+          R
+          |-...-|
+       |---|        |---|  |------| ... |--|
+       0            1      2            N
+
+       I=1
+
+     In the case #1 above, the binary search would return `I=1',
+     meaning this OFFSET would be inserted at position 1, pushing the
+     current position 1 further (to become 2).  But note that `0'
+     overlaps with R, so we want to merge them.
+
+     A similar consideration applies if the new range would be
+     contiguous with the previous range:
+
+     #2 - contiguous with previous
+
+           R
+           |-...-|
+       |--|         |---|  |------| ... |--|
+       0            1      2            N
+
+       I=1
+
+     If there's no overlap with the previous range, as in:
+
+     #3 - not overlapping and not contiguous
+
+            R
+            |-...-|
+       |--|         |---|  |------| ... |--|
+       0            1      2            N
+
+       I=1
+
+     or if I is 0:
+
+     #4 - R is the range with lowest offset
+
+       R
+       |-...-|
+         |--|  |---|  |------| ... |--|
+         0     1      2            N
+
+       I=0
+
+     ... we just push the new range to I.
+
+     All four cases above also need to consider that the new range
+     may overlap several of the ranges that follow, or that R may be
+     contiguous with the following range, in which case they are
+     merged as well.  E.g.,
+
+     #5 - overlapping following ranges
+
+       R
+       |------------------------|
+         |--|  |---|  |------| ... |--|
+         0     1      2            N
+
+       I=0
+
+     or:
+
+           R
+           |-------|
+       |--|  |---|  |------| ... |--|
+       0     1      2            N
+
+       I=1
+  */
+
+ i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
+ if (i > 0)
+ {
+ struct range *bef = VEC_index (range_s, value->unavailable, i - 1);
+
+ if (ranges_overlap (bef->offset, bef->length, offset, length))
+ {
+ /* #1 */
+ ULONGEST l = min (bef->offset, offset);
+ ULONGEST h = max (bef->offset + bef->length, offset + length);
+
+ bef->offset = l;
+ bef->length = h - l;
+ i--;
+ }
+ else if (offset == bef->offset + bef->length)
+ {
+ /* #2 */
+ bef->length += length;
+ i--;
+ }
+ else
+ {
+ /* #3 */
+ VEC_safe_insert (range_s, value->unavailable, i, &newr);
+ }
+ }
+ else
+ {
+ /* #4 */
+ VEC_safe_insert (range_s, value->unavailable, i, &newr);
+ }
+
+ /* Check whether the ranges following the one we've just added or
+ touched can be folded in (#5 above). */
+ if (i + 1 < VEC_length (range_s, value->unavailable))
+ {
+ struct range *t;
+ struct range *r;
+ int removed = 0;
+ int next = i + 1;
+
+ /* Get the range we just touched. */
+ t = VEC_index (range_s, value->unavailable, i);
+ removed = 0;
+
+ i = next;
+ for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
+ if (r->offset <= t->offset + t->length)
+ {
+ ULONGEST l, h;
+
+ l = min (t->offset, r->offset);
+ h = max (t->offset + t->length, r->offset + r->length);
+
+ t->offset = l;
+ t->length = h - l;
+
+ removed++;
+ }
+ else
+ {
+ /* If we couldn't merge this one, we won't be able to
+ merge following ones either, since the ranges are
+ always sorted by OFFSET. */
+ break;
+ }
+
+ if (removed != 0)
+ VEC_block_remove (range_s, value->unavailable, next, removed);
+ }
+}
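
Putting the five cases above together, successive calls coalesce into the minimal set of sorted, non-contiguous ranges. A hypothetical trace, assuming VAL is a non-lazy value that starts with no unavailable ranges and that the offsets are made up; the comments show the contents of VAL->unavailable after each call:

    mark_value_bytes_unavailable (val, 4, 8);   /* [4,12)  -> { [4,12) }            (#4) */
    mark_value_bytes_unavailable (val, 12, 4);  /* [12,16) -> { [4,16) }            (#2) */
    mark_value_bytes_unavailable (val, 14, 6);  /* [14,20) -> { [4,20) }            (#1) */
    mark_value_bytes_unavailable (val, 30, 2);  /* [30,32) -> { [4,20), [30,32) }   (#3) */
    mark_value_bytes_unavailable (val, 0, 40);  /* [0,40)  -> { [0,40) }       (#4 + #5) */

    /* Any query that touches an unavailable chunk now reports it.  */
    gdb_assert (!value_bytes_available (val, 10, 2));
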
+
/* Prototypes for local functions. */
static void show_values (char *, int);
@@ -420,12 +702,19 @@ value_enclosing_type (struct value *value)
}
static void
-require_not_optimized_out (struct value *value)
+require_not_optimized_out (const struct value *value)
{
if (value->optimized_out)
error (_("value has been optimized out"));
}
+static void
+require_available (const struct value *value)
+{
+ if (!VEC_empty (range_s, value->unavailable))
+ error (_("value is not available"));
+}
+
const gdb_byte *
value_contents_for_printing (struct value *value)
{
@@ -446,6 +735,7 @@ value_contents_all (struct value *value)
{
const gdb_byte *result = value_contents_for_printing (value);
require_not_optimized_out (value);
+ require_available (value);
return result;
}
@@ -478,6 +768,7 @@ value_contents (struct value *value)
{
const gdb_byte *result = value_contents_writeable (value);
require_not_optimized_out (value);
+ require_available (value);
return result;
}
@@ -703,6 +994,7 @@ value_free (struct value *val)
}
xfree (val->contents);
+ VEC_free (range_s, val->unavailable);
}
xfree (val);
}
@@ -833,6 +1125,7 @@ value_copy (struct value *arg)
TYPE_LENGTH (value_enclosing_type (arg)));
}
+ val->unavailable = VEC_copy (range_s, arg->unavailable);
val->parent = arg->parent;
if (val->parent)
value_incref (val->parent);
diff --git a/gdb/value.h b/gdb/value.h
index aa8ff3c..d51567e 100644
--- a/gdb/value.h
+++ b/gdb/value.h
@@ -360,6 +360,20 @@ extern int value_bits_valid (const struct value *value,
extern int value_bits_synthetic_pointer (const struct value *value,
int offset, int length);
+/* Given a value, determine whether the contents bytes starting at
+ OFFSET and extending for LENGTH bytes are available. This returns
+ nonzero if all bytes in the given range are available, zero if any
+ byte is unavailable. */
+
+extern int value_bytes_available (const struct value *value,
+ int offset, int length);
+
+/* Mark VALUE's content bytes starting at OFFSET and extending for
+ LENGTH bytes as unavailable. */
+
+extern void mark_value_bytes_unavailable (struct value *value,
+ int offset, int length);
+
#include "symtab.h"
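
With this patch, value_contents and value_contents_all refuse partially available values outright (require_available throws "value is not available"), while the printing paths above go through value_contents_for_printing and probe availability chunk by chunk. A hypothetical sketch of the two consumer styles, assuming the TRY_CATCH idiom from this era's exceptions.h; use_buffer, VAL, OFFSET, TYPE and STREAM are made up for illustration:

    volatile struct gdb_exception except;

    /* Strict consumer: let the new require_available check throw.  */
    TRY_CATCH (except, RETURN_MASK_ERROR)
      {
        use_buffer (value_contents_all (val));   /* hypothetical consumer */
      }
    if (except.reason < 0)
      printf_filtered (_("<unavailable>\n"));

    /* Tolerant consumer: probe first, as the valprint.c changes do.  */
    if (value_bytes_available (val, offset, TYPE_LENGTH (type)))
      use_buffer (value_contents_for_printing (val) + offset);
    else
      val_print_unavailable (stream);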