aboutsummaryrefslogtreecommitdiff
path: root/gdb/gdbserver/linux-aarch64-ipa.c
diff options
context:
space:
mode:
authorMarcin Kościelnicki <koriakin@0x04.net>2016-03-13 00:30:11 +0100
committerMarcin Kościelnicki <koriakin@0x04.net>2016-03-31 15:36:38 +0200
commita13c46966d308297a1273e35ccc807a3912d573d (patch)
tree40653f506631fffc52ddbdec037db56a3ced307b /gdb/gdbserver/linux-aarch64-ipa.c
parent0a191de98469e84783025cbee88c86e51f072d78 (diff)
downloadgdb-a13c46966d308297a1273e35ccc807a3912d573d.zip
gdb-a13c46966d308297a1273e35ccc807a3912d573d.tar.gz
gdb-a13c46966d308297a1273e35ccc807a3912d573d.tar.bz2
IPA: Add alloc_jump_pad_buffer target hook.
Targets may have various requirements on the required location of the jump pad area. Currently IPA allocates it at the lowest possible address, so that it is reachable by branches from the executable. However, this fails on powerpc, which has executable link address (0x10000000) much larger than branch reach (+/- 32MiB). This makes jump pad buffer allocation a target hook instead. The current implementations are as follows: - i386 and s390: Branches can reach anywhere, so just mmap it. This avoids the linear search dance. - x86_64: Branches have +/-2GiB of reach, and executable is loaded low, so just call mmap with MAP_32BIT. Likewise avoids the linear search. - aarch64: Branches have +-128MiB of reach, executable loaded at 4MiB. Do a linear search from 4MiB-size downwards to page_size. - s390x: Branches have +-4GiB of reach, executable loaded at 2GiB. Do like on aarch64. gdb/gdbserver/ChangeLog: * linux-aarch64-ipa.c: Add <sys/mman.h> and <sys/auxv.h> includes. (alloc_jump_pad_buffer): New function. * linux-amd64-ipa.c: Add <sys/mman.h> include. (alloc_jump_pad_buffer): New function. * linux-i386-ipa.c (alloc_jump_pad_buffer): New function. * linux-s390-ipa.c: Add <sys/mman.h> and <sys/auxv.h> includes. (alloc_jump_pad_buffer): New function. * tracepoint.c (getauxval) [!HAVE_GETAUXVAL]: New function. (initialize_tracepoint): Delegate to alloc_jump_pad_buffer. * tracepoint.h (alloc_jump_pad_buffer): New prototype. (getauxval) [!HAVE_GETAUXVAL]: New prototype.
Diffstat (limited to 'gdb/gdbserver/linux-aarch64-ipa.c')
-rw-r--r--gdb/gdbserver/linux-aarch64-ipa.c50
1 files changed, 50 insertions, 0 deletions
diff --git a/gdb/gdbserver/linux-aarch64-ipa.c b/gdb/gdbserver/linux-aarch64-ipa.c
index 00cbf3e..50caeae 100644
--- a/gdb/gdbserver/linux-aarch64-ipa.c
+++ b/gdb/gdbserver/linux-aarch64-ipa.c
@@ -19,7 +19,11 @@
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "server.h"
+#include <sys/mman.h>
#include "tracepoint.h"
+#ifdef HAVE_GETAUXVAL
+#include <sys/auxv.h>
+#endif
/* Defined in auto-generated file aarch64.c. */
void init_registers_aarch64 (void);
@@ -153,6 +157,52 @@ get_ipa_tdesc (int idx)
return tdesc_aarch64;
}
+/* Allocate an executable buffer for the tracepoint jump pads.  AArch64
+   B/BL branch instructions reach only +/- 128MiB, and the executable is
+   normally loaded at 0x400000 (4MiB).  To maximize the portion of the
+   executable that the jump pads can branch to and from, try mapping at
+   exec_base - SIZE first, then walk downward a page at a time.  */
+
+void *
+alloc_jump_pad_buffer (size_t size)
+{
+ uintptr_t addr;
+ uintptr_t exec_base = getauxval (AT_PHDR);
+ int pagesize;
+ void *res;
+
+ if (exec_base == 0)
+ exec_base = 0x400000;
+
+ pagesize = sysconf (_SC_PAGE_SIZE);
+ if (pagesize == -1)
+ perror_with_name ("sysconf");
+
+ addr = exec_base - size;
+
+ /* SIZE should already be page-aligned; rounding ADDR down again is harmless.  */
+ addr &= ~(pagesize - 1);
+
+ /* Probe downward one page at a time; if ADDR reaches 0, no free area exists.  */
+ for (; addr; addr -= pagesize)
+ {
+ /* Deliberately no MAP_FIXED: it would silently replace an existing mapping at ADDR.  */
+ res = mmap ((void *) addr, size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ /* The kernel honored our address hint exactly: success.  */
+ if ((uintptr_t) res == addr)
+ return res;
+
+ /* The kernel placed the mapping elsewhere; release it and keep searching.  */
+ if (res != MAP_FAILED)
+ munmap (res, size);
+ }
+
+ return NULL;
+}
+
void
initialize_low_tracepoint (void)
{