From 028a46039a22842e41030a94848d086d5db05617 Mon Sep 17 00:00:00 2001
From: Pedro Alves
Date: Wed, 22 Jul 2020 12:32:53 +0100
Subject: gdbserver: handle running threads in qXfer:threads:read

On some systems, the gdb.multi/multi-target.exp testcase occasionally
fails like so:

 Running src/gdb/testsuite/gdb.multi/multi-target.exp ...
 FAIL: gdb.multi/multi-target.exp: info-inferiors: multi_process=on: inferior 1: info connections
 FAIL: gdb.multi/multi-target.exp: info-inferiors: multi_process=on: inferior 1: info inferiors
 FAIL: gdb.multi/multi-target.exp: info-inferiors: multi_process=on: inferior 2: info connections
 FAIL: gdb.multi/multi-target.exp: info-inferiors: multi_process=on: inferior 2: info inferiors
 FAIL: gdb.multi/multi-target.exp: info-inferiors: multi_process=on: inferior 3: inferior 3
 ... many more cascading fails.

The problem starts when the testcase runs an inferior against
GDBserver:

 (gdb) run
 Starting program: build/gdb/testsuite/outputs/gdb.multi/multi-target/multi-target
 Reading /lib64/ld-linux-x86-64.so.2 from remote target...
 warning: File transfers from remote targets can be slow. Use "set sysroot" to access files locally instead.
 Reading /lib64/ld-linux-x86-64.so.2 from remote target...
 Reading /lib64/ld-2.31.so from remote target...
 Reading /lib64/.debug/ld-2.31.so from remote target...
 Reading /usr/lib/debug//lib64/ld-2.31.so from remote target...
 Reading /usr/lib/debug/lib64//ld-2.31.so from remote target...
 Reading target:/usr/lib/debug/lib64//ld-2.31.so from remote target...
 Reading /lib/x86_64-linux-gnu/libpthread.so.0 from remote target...
 Reading /lib/x86_64-linux-gnu/libc.so.6 from remote target...
 Reading /lib/x86_64-linux-gnu/libc-2.31.so from remote target...
 Reading /lib/x86_64-linux-gnu/.debug/libc-2.31.so from remote target...
 Reading /usr/lib/debug//lib/x86_64-linux-gnu/libc-2.31.so from remote target...
 Reading /usr/lib/debug//lib/x86_64-linux-gnu/libc-2.31.so from remote target...
 Remote connection closed
 ...

Note the "Remote connection closed" message.  That means GDBserver
exited abruptly.  I traced it down to the fact that GDB fetches the
thread list from GDBserver while the main thread of the process is
still running.  On my main system, where I wrote the testcase, I have
not observed the failure: the machine is slow enough that the thread
stops before GDBserver fetches the thread list in the problem
scenario which I'll describe below.

With some --remote-debug logging from the GDBserver side, we see the
last packets before the connection closes:

 ...
 getpkt ("vCont;c");  [no ack sent]
 putpkt ("$OK#9a"); [noack mode]
 getpkt ("Tp10f9a.10f9a");  [no ack sent]
 putpkt ("$OK#9a"); [noack mode]
 getpkt ("Hgp0.0");  [no ack sent]
 putpkt ("$OK#9a"); [noack mode]
 getpkt ("qXfer:threads:read::0,1000");  [no ack sent]

Note the "vCont;c", which sets the program running, and then a
qXfer:threads:read packet at the end.

The problem happens when the thread list refresh (qXfer:threads:read)
is sent just while the main thread is running and it still hasn't
initialized its libpthread id internally.  In that state, the main
thread's lwp will remain with the thread_known flag clear.  See in
find_one_thread:

  /* If the new thread ID is zero, a final thread ID will be
     available later.  Do not enable thread debugging yet.  */
  if (ti.ti_tid == 0)
    return 0;

Now, back in server.cc, to handle the qXfer:threads:read, we reach
handle_qxfer_threads -> handle_qxfer_threads_proper, and the latter
then calls handle_qxfer_threads_worker for each known thread.
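For orientation, the failing path can be summarized like this (a rough
sketch of the call chain walked through above and below, not a verbatim
backtrace):

  handle_qxfer_threads                 (server.cc, qXfer:threads:read)
    -> handle_qxfer_threads_proper
      -> handle_qxfer_threads_worker   (called once per known thread)
        -> target_thread_handle
          -> thread_db_thread_handle   (thread-db.cc)
            -> find_one_thread         (lwp->thread_known still clear)
              -> td_ta_map_lwp2thr     (reads inferior memory; fails
                                        while the thread is running)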
In handle_qxfer_threads_worker, we call target_thread_handle.  This
ends up in thread_db_thread_handle, here:

  if (!lwp->thread_known && !find_one_thread (thread->id))
    return false;

Since the thread ID isn't known yet, we call find_one_thread.  This
calls into libthread_db.so, which accesses memory.  Because the
current thread is running, that fails and we throw an error, here:

  /* Get information about this thread.  */
  err = thread_db->td_ta_map_lwp2thr_p (thread_db->thread_agent, lwpid, &th);
  if (err != TD_OK)
    error ("Cannot get thread handle for LWP %d: %s",
           lwpid, thread_db_err_str (err));

The current design is that whenever GDB-facing packets/requests need
to access memory, server.cc is supposed to prepare the target for the
access.  See gdb_read_memory / gdb_write_memory.  This preparation
means pausing threads if in non-stop mode (someday we could lift this
requirement, but we will still need to pause to access registers or
do other related ptrace accesses like PTRACE_GET_THREAD_AREA).

Note that the multi-target.exp testcase forces "maint set
target-non-stop on".

So the fix here is to prepare the target to access memory when
handling qXfer:threads:read too.

gdbserver/ChangeLog:

	* inferiors.cc (switch_to_process): New, moved here from
	thread-db.cc, and made extern.
	* inferiors.h (switch_to_process): Declare.
	* server.cc: Include "gdbsupport/scoped_restore.h".
	(handle_qxfer_threads_proper): Now returns bool.  Prepare to
	access memory around target calls.
	(handle_qxfer_threads): Handle errors.
	* thread-db.cc (switch_to_process): Moved to inferiors.cc.
---
 gdbserver/server.cc | 49 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 5 deletions(-)

(limited to 'gdbserver/server.cc')

diff --git a/gdbserver/server.cc b/gdbserver/server.cc
index ab5363e..aadcb9b 100644
--- a/gdbserver/server.cc
+++ b/gdbserver/server.cc
@@ -48,6 +48,7 @@
 #include "gdbsupport/selftest.h"
 #include "gdbsupport/scope-exit.h"
 #include "gdbsupport/gdb_select.h"
+#include "gdbsupport/scoped_restore.h"
 
 #define require_running_or_return(BUF)		\
   if (!target_running ())			\
@@ -1678,19 +1679,54 @@ handle_qxfer_threads_worker (thread_info *thread, struct buffer *buffer)
   buffer_xml_printf (buffer, "/>\n");
 }
 
-/* Helper for handle_qxfer_threads.  */
+/* Helper for handle_qxfer_threads.  Return true on success, false
+   otherwise.  */
 
-static void
+static bool
 handle_qxfer_threads_proper (struct buffer *buffer)
 {
+  client_state &cs = get_client_state ();
+
+  scoped_restore save_current_thread
+    = make_scoped_restore (&current_thread);
+  scoped_restore save_current_general_thread
+    = make_scoped_restore (&cs.general_thread);
+
   buffer_grow_str (buffer, "<threads>\n");
 
-  for_each_thread ([&] (thread_info *thread)
+  process_info *error_proc = find_process ([&] (process_info *process)
     {
-      handle_qxfer_threads_worker (thread, buffer);
+      /* The target may need to access memory and registers (e.g. via
+	 libthread_db) to fetch thread properties.  Prepare for memory
+	 access here, so that we potentially pause threads just once
+	 for all accesses.  Note that even if someday we stop needing
+	 to pause threads to access memory, we will need to be able to
+	 access registers, or other ptrace accesses like
+	 PTRACE_GET_THREAD_AREA.  */
+
+      /* Need to switch to each process in turn, because
+	 prepare_to_access_memory prepares for an access in the
+	 current process pointed to by general_thread.  */
+      switch_to_process (process);
+      cs.general_thread = current_thread->id;
+
+      int res = prepare_to_access_memory ();
+      if (res == 0)
+	{
+	  for_each_thread (process->pid, [&] (thread_info *thread)
+	    {
+	      handle_qxfer_threads_worker (thread, buffer);
+	    });
+
+	  done_accessing_memory ();
+	  return false;
+	}
+      else
+	return true;
     });
 
   buffer_grow_str0 (buffer, "</threads>\n");
+
+  return error_proc == nullptr;
 }
 
 /* Handle qXfer:threads:read.  */
@@ -1719,11 +1755,14 @@ handle_qxfer_threads (const char *annex,
 
       buffer_init (&buffer);
 
-      handle_qxfer_threads_proper (&buffer);
+      bool res = handle_qxfer_threads_proper (&buffer);
 
       result = buffer_finish (&buffer);
       result_length = strlen (result);
       buffer_free (&buffer);
+
+      if (!res)
+	return -1;
     }
 
   if (offset >= result_length)
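For readers unfamiliar with the server.cc convention, a minimal sketch of
the prepare/access/done bracketing that the patch applies per process
looks roughly like this.  It uses only functions that appear in the patch
above; the helper name report_threads_of_process is made up for
illustration, and the scoped_restore of current_thread / general_thread
done by the real handle_qxfer_threads_proper is omitted, so this is a
simplified illustration rather than the committed code:

  static bool
  report_threads_of_process (process_info *process, struct buffer *buffer)
  {
    client_state &cs = get_client_state ();

    /* prepare_to_access_memory works on the process that general_thread
       points to, so switch both the current thread and general_thread to
       this process first.  */
    switch_to_process (process);
    cs.general_thread = current_thread->id;

    /* Producing the per-thread XML may read inferior memory (e.g. via
       libthread_db); in non-stop mode this pauses the process's threads.  */
    if (prepare_to_access_memory () != 0)
      return false;	/* Let the caller report the error.  */

    for_each_thread (process->pid, [&] (thread_info *thread)
      {
	handle_qxfer_threads_worker (thread, buffer);
      });

    /* Undo whatever prepare_to_access_memory did (resume threads).  */
    done_accessing_memory ();
    return true;
  }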