| author | Don Breazeal <donb@codesourcery.com> | 2016-07-01 11:13:48 -0700 |
|---|---|---|
| committer | Don Breazeal <donb@codesourcery.com> | 2016-07-01 11:13:48 -0700 |
| commit | 09c98b448f3d89cb9576e4e73991c2312939e0af (patch) | |
| tree | a435ceb7448f6bd9df9c621586e806a588b1f3d3 /gdb | |
| parent | 93d8990cba700abdf9d2be06a5022e588d097fc8 (diff) | |
Optimize memory_xfer_partial for remote
Analysis we did showed that increasing the cap on the transfer size
in target.c:memory_xfer_partial could give a 20% or greater
improvement in remote load over JTAG.  Transfer sizes were capped at
4 KB because of performance problems encountered with the restore
command, documented here:
https://sourceware.org/ml/gdb-patches/2013-07/msg00611.html
and in commit 67c059c29e1f ("Improve performance of large restore
commands").
The 4 KB cap was introduced because, in a case where the restore
command requested a 100 MB transfer, memory_xfer_partial would
repeatedly allocate and copy an entire 100 MB buffer in order to
handle breakpoint shadow contents properly, even though it would
actually only write a small portion of the buffer contents.
A couple of alternative solutions were suggested:
* change the algorithm for handling the breakpoint shadow instructions
* throttle the transfer size up or down based on the previous actual
transfer size
I tried implementing the throttling approach, but my implementation
reduced performance in some cases.
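For reference, the throttling idea amounts to growing the cap after chunks that transfer completely and shrinking it after chunks that are mostly refused.  A minimal sketch of such a heuristic follows; the names, thresholds, and growth factors are hypothetical, not the implementation that was actually measured:

```c
/* Hypothetical adaptive cap: double after a chunk transfers completely,
   halve after a chunk is largely refused.  Illustrative only.  */
static ULONGEST xfer_cap = 4096;

static ULONGEST
throttled_len (ULONGEST requested, ULONGEST last_len, ULONGEST last_xfered)
{
  if (last_len > 0 && last_xfered == last_len && xfer_cap < 1048576)
    xfer_cap *= 2;                    /* previous chunk went through whole */
  else if (last_len > 0 && last_xfered < last_len / 2 && xfer_cap > 4096)
    xfer_cap /= 2;                    /* previous chunk was mostly refused */
  return xfer_cap < requested ? xfer_cap : requested;
}
```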
This patch implements a new target function that returns the
target's limit on memory transfer size.  It defaults to ULONGEST_MAX
bytes, because for native targets there is no marshaling and thus no
limit is needed.  For remote targets it uses
get_memory_write_packet_size.
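Any other backend can supply its own bound the same way the remote target does in the diff below.  A minimal sketch for a hypothetical backend (the names and the 64 KiB figure are illustrative only, not part of this patch):

```c
/* Hypothetical backend: cap a single memory transfer at the transport's
   payload size; 64 KiB here is purely illustrative.  */
static struct target_ops my_target_ops;

static ULONGEST
my_target_get_memory_xfer_limit (struct target_ops *ops)
{
  return 64 * 1024;
}

static void
init_my_target_ops (void)
{
  /* Wired up alongside the backend's other to_* methods.  */
  my_target_ops.to_get_memory_xfer_limit = my_target_get_memory_xfer_limit;
}
```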
gdb/ChangeLog:
* remote.c (remote_get_memory_xfer_limit): New function.
* target-delegates.c: Regenerate.
* target.c (memory_xfer_partial): Call
target_ops.to_get_memory_xfer_limit.
* target.h (struct target_ops)
<to_get_memory_xfer_limit>: New member.
Diffstat (limited to 'gdb')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | gdb/ChangeLog | 9 |
| -rw-r--r-- | gdb/remote.c | 9 |
| -rw-r--r-- | gdb/target-delegates.c | 31 |
| -rw-r--r-- | gdb/target.c | 5 |
| -rw-r--r-- | gdb/target.h | 6 |
5 files changed, 58 insertions, 2 deletions
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index 58c9c78..85a5444 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,3 +1,12 @@
+2016-07-01  Don Breazeal  <donb@codesourcery.com>
+
+	* remote.c (remote_get_memory_xfer_limit): New function.
+	* target-delegates.c: Regenerate.
+	* target.c (memory_xfer_partial): Call
+	target_ops.to_get_memory_xfer_limit.
+	* target.h (struct target_ops) <to_get_memory_xfer_limit>: New
+	member.
+
 2016-07-01  John Baldwin  <jhb@FreeBSD.org>
 
 	* fbsd-nat.c (struct fbsd_fork_child_info): Rename to ...
diff --git a/gdb/remote.c b/gdb/remote.c
index e4b2095..5568346 100644
--- a/gdb/remote.c
+++ b/gdb/remote.c
@@ -10152,6 +10152,14 @@ remote_xfer_partial (struct target_ops *ops, enum target_object object,
   return TARGET_XFER_OK;
 }
 
+/* Implementation of to_get_memory_xfer_limit.  */
+
+static ULONGEST
+remote_get_memory_xfer_limit (struct target_ops *ops)
+{
+  return get_memory_write_packet_size ();
+}
+
 static int
 remote_search_memory (struct target_ops* ops,
 		      CORE_ADDR start_addr, ULONGEST search_space_len,
@@ -13065,6 +13073,7 @@ Specify the serial device it is connected to\n\
   remote_ops.to_interrupt = remote_interrupt;
   remote_ops.to_pass_ctrlc = remote_pass_ctrlc;
   remote_ops.to_xfer_partial = remote_xfer_partial;
+  remote_ops.to_get_memory_xfer_limit = remote_get_memory_xfer_limit;
   remote_ops.to_rcmd = remote_rcmd;
   remote_ops.to_pid_to_exec_file = remote_pid_to_exec_file;
   remote_ops.to_log_command = serial_log_command;
diff --git a/gdb/target-delegates.c b/gdb/target-delegates.c
index 03aa2cc..2887033 100644
--- a/gdb/target-delegates.c
+++ b/gdb/target-delegates.c
@@ -2064,6 +2064,33 @@ debug_xfer_partial (struct target_ops *self, enum target_object arg1, const char
   return result;
 }
 
+static ULONGEST
+delegate_get_memory_xfer_limit (struct target_ops *self)
+{
+  self = self->beneath;
+  return self->to_get_memory_xfer_limit (self);
+}
+
+static ULONGEST
+tdefault_get_memory_xfer_limit (struct target_ops *self)
+{
+  return ULONGEST_MAX;
+}
+
+static ULONGEST
+debug_get_memory_xfer_limit (struct target_ops *self)
+{
+  ULONGEST result;
+  fprintf_unfiltered (gdb_stdlog, "-> %s->to_get_memory_xfer_limit (...)\n", debug_target.to_shortname);
+  result = debug_target.to_get_memory_xfer_limit (&debug_target);
+  fprintf_unfiltered (gdb_stdlog, "<- %s->to_get_memory_xfer_limit (", debug_target.to_shortname);
+  target_debug_print_struct_target_ops_p (&debug_target);
+  fputs_unfiltered (") = ", gdb_stdlog);
+  target_debug_print_ULONGEST (result);
+  fputs_unfiltered ("\n", gdb_stdlog);
+  return result;
+}
+
 static VEC(mem_region_s) *
 delegate_memory_map (struct target_ops *self)
 {
@@ -4223,6 +4250,8 @@ install_delegators (struct target_ops *ops)
     ops->to_get_thread_local_address = delegate_get_thread_local_address;
   if (ops->to_xfer_partial == NULL)
     ops->to_xfer_partial = delegate_xfer_partial;
+  if (ops->to_get_memory_xfer_limit == NULL)
+    ops->to_get_memory_xfer_limit = delegate_get_memory_xfer_limit;
   if (ops->to_memory_map == NULL)
     ops->to_memory_map = delegate_memory_map;
   if (ops->to_flash_erase == NULL)
@@ -4454,6 +4483,7 @@ install_dummy_methods (struct target_ops *ops)
   ops->to_goto_bookmark = tdefault_goto_bookmark;
   ops->to_get_thread_local_address = tdefault_get_thread_local_address;
   ops->to_xfer_partial = tdefault_xfer_partial;
+  ops->to_get_memory_xfer_limit = tdefault_get_memory_xfer_limit;
   ops->to_memory_map = tdefault_memory_map;
   ops->to_flash_erase = tdefault_flash_erase;
   ops->to_flash_done = tdefault_flash_done;
@@ -4610,6 +4640,7 @@ init_debug_target (struct target_ops *ops)
   ops->to_goto_bookmark = debug_goto_bookmark;
   ops->to_get_thread_local_address = debug_get_thread_local_address;
   ops->to_xfer_partial = debug_xfer_partial;
+  ops->to_get_memory_xfer_limit = debug_get_memory_xfer_limit;
   ops->to_memory_map = debug_memory_map;
   ops->to_flash_erase = debug_flash_erase;
   ops->to_flash_done = debug_flash_done;
diff --git a/gdb/target.c b/gdb/target.c
index d3fc35b..6228361 100644
--- a/gdb/target.c
+++ b/gdb/target.c
@@ -1301,8 +1301,9 @@ memory_xfer_partial (struct target_ops *ops, enum target_object object,
 	 by memory_xfer_partial_1.  We will continually malloc and
 	 free a copy of the entire write request for breakpoint
 	 shadow handling even though we only end up writing a small
-	 subset of it.  Cap writes to 4KB to mitigate this.  */
-      len = min (4096, len);
+	 subset of it.  Cap writes to a limit specified by the target
+	 to mitigate this.  */
+      len = min (ops->to_get_memory_xfer_limit (ops), len);
 
       buf = (gdb_byte *) xmalloc (len);
       old_chain = make_cleanup (xfree, buf);
diff --git a/gdb/target.h b/gdb/target.h
index fc317e3..15fd806 100644
--- a/gdb/target.h
+++ b/gdb/target.h
@@ -745,6 +745,12 @@ struct target_ops
 		    ULONGEST *xfered_len)
     TARGET_DEFAULT_RETURN (TARGET_XFER_E_IO);
 
+  /* Return the limit on the size of any single memory transfer
+     for the target.  */
+
+  ULONGEST (*to_get_memory_xfer_limit) (struct target_ops *)
+    TARGET_DEFAULT_RETURN (ULONGEST_MAX);
+
   /* Returns the memory map for the target.  A return value of NULL
      means that no memory map is available.  If a memory address
      does not fall within any returned regions, it's assumed to be