author     Yao Qi <yao@codesourcery.com>    2013-11-05 09:50:21 +0800
committer  Yao Qi <yao@codesourcery.com>    2014-02-23 11:44:27 +0800
commit     5a2eb0ef7f9b879c2bbe7b18183353e1d59c93c7 (patch)
tree       0d1ecdf0a87408a9e42171c5812daf4ecacae60f /gdb/valops.c
parent     1ee79381dd9cc2438a61fe4a96294682744d2458 (diff)
Adjust read_value_memory to use to_xfer_partial
Now that the new to_xfer_partial implementations are done in the ctf and tfile
targets, read_value_memory can be simplified a lot.  Call
target_xfer_partial in a loop, check the return value, and mark bytes
unavailable when necessary.

gdb:

2014-02-23  Yao Qi  <yao@codesourcery.com>

	* valops.c (read_value_memory): Rewrite it.  Call
	target_xfer_partial in a loop.
	* exec.h (section_table_available_memory): Remove declaration.
	Move comments to ...
	* exec.c (section_table_available_memory): ... here.  Make it
	static.
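To make the pattern concrete outside of GDB, here is a minimal, self-contained
C sketch of what the rewrite does: call a partial-transfer primitive in a loop,
advance by however many bytes it reports, and record the stretches it flags as
unavailable.  The enum, the xfer_partial stand-in, and the fake unavailable
range [16, 32) are hypothetical stand-ins for illustration, not GDB's actual
target_xfer_partial interface.

/* Sketch only: simplified stand-ins, not GDB code.  */
#include <stddef.h>
#include <stdio.h>

enum xfer_status { XFER_OK, XFER_UNAVAILABLE, XFER_EOF, XFER_ERROR };

/* Stand-in for a target's partial-transfer method: bytes [16, 32) are
   "unavailable", everything below offset 64 reads as zero.  */
static enum xfer_status
xfer_partial (unsigned char *buf, size_t offset, size_t len,
              size_t *xfered_len)
{
  size_t avail_end = offset < 16 ? 16 : (offset < 32 ? 32 : 64);

  if (offset >= 64)
    return XFER_EOF;

  *xfered_len = offset + len < avail_end ? len : avail_end - offset;
  if (offset >= 16 && offset < 32)
    return XFER_UNAVAILABLE;

  for (size_t i = 0; i < *xfered_len; i++)
    buf[i] = 0;
  return XFER_OK;
}

int
main (void)
{
  unsigned char buffer[48];
  size_t length = sizeof buffer, xfered = 0;

  while (xfered < length)
    {
      size_t xfered_len = 0;
      enum xfer_status status = xfer_partial (buffer + xfered, xfered,
                                              length - xfered, &xfered_len);

      if (status == XFER_OK)
        /* Bytes landed in the buffer; nothing else to do.  */;
      else if (status == XFER_UNAVAILABLE)
        printf ("bytes [%zu, %zu) unavailable\n", xfered, xfered + xfered_len);
      else
        {
          printf ("transfer stopped at offset %zu\n", xfered);
          break;
        }

      /* Both OK and UNAVAILABLE advance by the reported length.  */
      xfered += xfered_len;
    }
  return 0;
}

This mirrors the shape of the new read_value_memory: the caller no longer
computes available-memory ranges itself, it simply reacts to the per-chunk
status that each target reports through to_xfer_partial.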
Diffstat (limited to 'gdb/valops.c')
-rw-r--r--  gdb/valops.c  |  96
1 file changed, 23 insertions(+), 73 deletions(-)
diff --git a/gdb/valops.c b/gdb/valops.c
index 898401d..0d726d0 100644
--- a/gdb/valops.c
+++ b/gdb/valops.c
@@ -949,81 +949,31 @@ read_value_memory (struct value *val, int embedded_offset,
int stack, CORE_ADDR memaddr,
gdb_byte *buffer, size_t length)
{
- if (length)
- {
- VEC(mem_range_s) *available_memory;
-
- if (!traceframe_available_memory (&available_memory, memaddr, length))
- {
- if (stack)
- read_stack (memaddr, buffer, length);
- else
- read_memory (memaddr, buffer, length);
- }
+ ULONGEST xfered = 0;
+
+ while (xfered < length)
+ {
+ enum target_xfer_status status;
+ ULONGEST xfered_len;
+
+ status = target_xfer_partial (current_target.beneath,
+ TARGET_OBJECT_MEMORY, NULL,
+ buffer + xfered, NULL,
+ memaddr + xfered, length - xfered,
+ &xfered_len);
+
+ if (status == TARGET_XFER_OK)
+ /* nothing */;
+ else if (status == TARGET_XFER_E_UNAVAILABLE)
+ mark_value_bytes_unavailable (val, embedded_offset + xfered,
+ xfered_len);
+ else if (status == TARGET_XFER_EOF)
+ memory_error (TARGET_XFER_E_IO, memaddr + xfered);
else
- {
- struct target_section_table *table;
- struct cleanup *old_chain;
- CORE_ADDR unavail;
- mem_range_s *r;
- int i;
-
- /* Fallback to reading from read-only sections. */
- table = target_get_section_table (&exec_ops);
- available_memory =
- section_table_available_memory (available_memory,
- memaddr, length,
- table->sections,
- table->sections_end);
-
- old_chain = make_cleanup (VEC_cleanup(mem_range_s),
- &available_memory);
-
- normalize_mem_ranges (available_memory);
+ memory_error (status, memaddr + xfered);
- /* Mark which bytes are unavailable, and read those which
- are available. */
-
- unavail = memaddr;
-
- for (i = 0;
- VEC_iterate (mem_range_s, available_memory, i, r);
- i++)
- {
- if (mem_ranges_overlap (r->start, r->length,
- memaddr, length))
- {
- CORE_ADDR lo1, hi1, lo2, hi2;
- CORE_ADDR start, end;
-
- /* Get the intersection window. */
- lo1 = memaddr;
- hi1 = memaddr + length;
- lo2 = r->start;
- hi2 = r->start + r->length;
- start = max (lo1, lo2);
- end = min (hi1, hi2);
-
- gdb_assert (end - memaddr <= length);
-
- if (start > unavail)
- mark_value_bytes_unavailable (val,
- (embedded_offset
- + unavail - memaddr),
- start - unavail);
- unavail = end;
-
- read_memory (start, buffer + start - memaddr, end - start);
- }
- }
-
- if (unavail != memaddr + length)
- mark_value_bytes_unavailable (val,
- embedded_offset + unavail - memaddr,
- (memaddr + length) - unavail);
-
- do_cleanups (old_chain);
- }
+ xfered += xfered_len;
+ QUIT;
}
}