author		Pedro Alves <palves@redhat.com>		2016-05-24 14:47:57 +0100
committer	Pedro Alves <palves@redhat.com>		2016-05-24 14:50:37 +0100
commit		774113b02f41ded4d9ba4d18571ee5024312ad1b
tree		7f1f6ca58e0a637b30f1c8f89605991a58666417 /gdb/linux-nat.c
parent		1ad3de988d2f41c72de66613c68ed78507a3abbd
[Linux] Optimize PID -> struct lwp_info lookup
Hacking the gdb.threads/attach-many-short-lived-threads.exp test to
spawn thousands of threads instead of dozens, and running gdb under
perf, I saw that GDB was spending most of the time in find_lwp_pid:
   - captured_main
      - 93.61% catch_command_errors
         - 87.41% attach_command
            - 87.40% linux_nat_attach
               - 87.40% linux_proc_attach_tgid_threads
                  - 82.38% attach_proc_task_lwp_callback
                     - 81.01% find_lwp_pid
                          5.30% ptid_get_lwp
                        + 0.10% ptid_lwp_p
                     + 0.64% add_thread
                     + 0.26% set_running
                     + 0.24% set_executing
                       0.12% ptid_get_lwp
                     + 0.01% ptrace
                     + 0.01% add_lwp
attach_proc_task_lwp_callback is called once for each LWP that we
attach to, found by listing the /proc/PID/task/ directory. In turn,
attach_proc_task_lwp_callback calls find_lwp_pid to check whether the
LWP we're about to try to attach to is already known. Since
find_lwp_pid does a linear walk over the whole LWP list, this becomes
quadratic. We repeat the /proc/PID/task/ listing until we get two
iterations in a row in which we find no new threads. So on the second
and subsequent walks of the /proc/PID/task/ directory, the
find_lwp_pid hit is even worse.
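
To make the cost concrete, here is a minimal, self-contained C sketch
of that quadratic pattern; the names and types are simplified
stand-ins, not GDB's actual code. Each LWP listed under
/proc/PID/task/ triggers a linear walk over the LWPs already known,
so attaching to N threads costs O(N^2) lookups, and every extra pass
over the directory repeats that cost.

#include <stdlib.h>

struct lwp                       /* simplified stand-in for struct lwp_info  */
{
  int lwpid;
  struct lwp *next;
};

static struct lwp *lwp_list;     /* singly-linked list of known LWPs  */

/* Linear lookup, analogous to the old find_lwp_pid: O(number of LWPs).  */

static struct lwp *
find_lwp_linear (int lwpid)
{
  struct lwp *lp;

  for (lp = lwp_list; lp != NULL; lp = lp->next)
    if (lp->lwpid == lwpid)
      return lp;
  return NULL;
}

/* Called once per /proc/PID/task/ entry: with N threads, N calls of an
   O(N) lookup make a single attach pass O(N^2).  */

static void
attach_lwp_if_new (int lwpid)
{
  if (find_lwp_linear (lwpid) == NULL)
    {
      struct lwp *lp = (struct lwp *) calloc (1, sizeof *lp);

      lp->lwpid = lwpid;
      lp->next = lwp_list;
      lwp_list = lp;
      /* ... the real callback would attach with ptrace here ...  */
    }
}

int
main (void)
{
  int tids[] = { 101, 102, 103, 101 };  /* pretend /proc/PID/task/ entries  */
  size_t i;

  for (i = 0; i < sizeof tids / sizeof tids[0]; i++)
    attach_lwp_if_new (tids[i]);
  return 0;
}
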
Fix this by adding a hash table keyed by LWP PID, for fast lookup.
The linked list embedded in the LWP structure itself is kept, and
made a doubly-linked list, so that removals from that list are O(1). An
earlier version of this patch got rid of this list altogether, but
that revealed hidden dependencies / assumptions on how the list is
sorted. For example, killing a process and then waiting for the
status of all its LWPs using iterate_over_lwps only works as-is
because the leader LWP is always last in the list. So I thought it
better to take
an incremental approach and make this patch concern itself _only_ with
the PID lookup optimization.
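
For illustration, here is a rough, self-contained C sketch of the
resulting shape, assuming a fixed-size open-chaining table and
simplified types rather than libiberty's htab and GDB's
lwp_info/ptid types used in the real patch below: the PID lookup
becomes O(1) on average, and the prev pointer makes unlinking from
the all-LWPs list O(1) as well.

#include <stdlib.h>

#define NBUCKETS 1021                   /* illustrative fixed bucket count  */

struct lwp_rec                          /* simplified stand-in for struct lwp_info  */
{
  int lwpid;
  struct lwp_rec *hash_next;            /* collision chain inside one bucket  */
  struct lwp_rec *prev, *next;          /* doubly-linked list of all LWPs  */
};

static struct lwp_rec *lwp_table[NBUCKETS];
static struct lwp_rec *all_lwps;        /* newest LWP first  */

static struct lwp_rec **
bucket_for (int lwpid)
{
  return &lwp_table[(unsigned) lwpid % NBUCKETS];
}

/* O(1) expected lookup by LWP PID, replacing the linear list walk.  */

static struct lwp_rec *
lwp_find (int lwpid)
{
  struct lwp_rec *lp;

  for (lp = *bucket_for (lwpid); lp != NULL; lp = lp->hash_next)
    if (lp->lwpid == lwpid)
      return lp;
  return NULL;
}

/* Register a new LWP in both structures.  */

static struct lwp_rec *
lwp_add (int lwpid)
{
  struct lwp_rec *lp = (struct lwp_rec *) calloc (1, sizeof *lp);
  struct lwp_rec **slot = bucket_for (lwpid);

  lp->lwpid = lwpid;

  /* Hash table insertion.  */
  lp->hash_next = *slot;
  *slot = lp;

  /* Insert at the head of the all-LWPs list (reverse creation order).  */
  lp->next = all_lwps;
  if (all_lwps != NULL)
    all_lwps->prev = lp;
  all_lwps = lp;

  return lp;
}

/* Remove LP from both structures; the list unlink is O(1) thanks to
   the prev pointer.  LP must have been added with lwp_add.  */

static void
lwp_remove (struct lwp_rec *lp)
{
  struct lwp_rec **slot;

  for (slot = bucket_for (lp->lwpid); *slot != lp; slot = &(*slot)->hash_next)
    ;
  *slot = lp->hash_next;

  if (lp->next != NULL)
    lp->next->prev = lp->prev;
  if (lp->prev != NULL)
    lp->prev->next = lp->next;
  if (lp == all_lwps)
    all_lwps = lp->next;

  free (lp);
}

int
main (void)
{
  struct lwp_rec *lp = lwp_add (4242);

  if (lwp_find (4242) == lp)            /* constant-time lookup  */
    lwp_remove (lp);
  return 0;
}

The actual patch keeps both structures in sync on every add and
delete, preserving the reverse-creation-order list that some callers
still rely on (e.g. reaping the leader LWP last), while find_lwp_pid
only touches the hash table.
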
gdb/ChangeLog:
2016-05-24  Pedro Alves  <palves@redhat.com>

	PR gdb/19828
	* linux-nat.c (lwp_lwpid_htab): New htab.
	(lwp_info_hash, lwp_lwpid_htab_eq, lwp_lwpid_htab_create)
	(lwp_lwpid_htab_add_lwp): New functions.
	(lwp_list): Tweak comment.
	(lwp_list_add, lwp_list_remove, lwp_lwpid_htab_remove_pid): New
	functions.
	(purge_lwp_list): Rewrite, using htab_traverse_noresize.
	(add_initial_lwp): Add lwp to htab too.  Use lwp_list_add.
	(delete_lwp): Use lwp_list_remove.  Remove htab too.
	(find_lwp_pid): Search in htab.
	(_initialize_linux_nat): Call lwp_lwpid_htab_create.
	* linux-nat.h (struct lwp_info) <prev>: New field.
Diffstat (limited to 'gdb/linux-nat.c')
-rw-r--r--	gdb/linux-nat.c	159
1 file changed, 121 insertions, 38 deletions
diff --git a/gdb/linux-nat.c b/gdb/linux-nat.c
index dbadf29..6cd9fc6 100644
--- a/gdb/linux-nat.c
+++ b/gdb/linux-nat.c
@@ -677,8 +677,86 @@ linux_child_set_syscall_catchpoint (struct target_ops *self,
   return 0;
 }
 
-/* List of known LWPs.  */
+/* List of known LWPs, keyed by LWP PID.  This speeds up the common
+   case of mapping a PID returned from the kernel to our corresponding
+   lwp_info data structure.  */
+static htab_t lwp_lwpid_htab;
+
+/* Calculate a hash from a lwp_info's LWP PID.  */
+
+static hashval_t
+lwp_info_hash (const void *ap)
+{
+  const struct lwp_info *lp = (struct lwp_info *) ap;
+  pid_t pid = ptid_get_lwp (lp->ptid);
+
+  return iterative_hash_object (pid, 0);
+}
+
+/* Equality function for the lwp_info hash table.  Compares the LWP's
+   PID.  */
+
+static int
+lwp_lwpid_htab_eq (const void *a, const void *b)
+{
+  const struct lwp_info *entry = (const struct lwp_info *) a;
+  const struct lwp_info *element = (const struct lwp_info *) b;
+
+  return ptid_get_lwp (entry->ptid) == ptid_get_lwp (element->ptid);
+}
+
+/* Create the lwp_lwpid_htab hash table.  */
+
+static void
+lwp_lwpid_htab_create (void)
+{
+  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
+}
+
+/* Add LP to the hash table.  */
+
+static void
+lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
+{
+  void **slot;
+
+  slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
+  gdb_assert (slot != NULL && *slot == NULL);
+  *slot = lp;
+}
+
+/* Head of doubly-linked list of known LWPs.  Sorted by reverse
+   creation order.  This order is assumed in some cases.  E.g.,
+   reaping status after killing alls lwps of a process: the leader LWP
+   must be reaped last.  */
 struct lwp_info *lwp_list;
+
+/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */
+
+static void
+lwp_list_add (struct lwp_info *lp)
+{
+  lp->next = lwp_list;
+  if (lwp_list != NULL)
+    lwp_list->prev = lp;
+  lwp_list = lp;
+}
+
+/* Remove LP from sorted-by-reverse-creation-order doubly-linked
+   list.  */
+
+static void
+lwp_list_remove (struct lwp_info *lp)
+{
+  /* Remove from sorted-by-creation-order list.  */
+  if (lp->next != NULL)
+    lp->next->prev = lp->prev;
+  if (lp->prev != NULL)
+    lp->prev->next = lp->next;
+  if (lp == lwp_list)
+    lwp_list = lp->next;
+}
+
 
 /* Original signal mask.  */
 
@@ -754,31 +832,30 @@ lwp_free (struct lwp_info *lp)
   xfree (lp);
 }
 
-/* Remove all LWPs belong to PID from the lwp list.  */
+/* Traversal function for purge_lwp_list.  */
 
-static void
-purge_lwp_list (int pid)
+static int
+lwp_lwpid_htab_remove_pid (void **slot, void *info)
 {
-  struct lwp_info *lp, *lpprev, *lpnext;
-
-  lpprev = NULL;
+  struct lwp_info *lp = (struct lwp_info *) *slot;
+  int pid = *(int *) info;
 
-  for (lp = lwp_list; lp; lp = lpnext)
+  if (ptid_get_pid (lp->ptid) == pid)
     {
-      lpnext = lp->next;
+      htab_clear_slot (lwp_lwpid_htab, slot);
+      lwp_list_remove (lp);
+      lwp_free (lp);
+    }
 
-      if (ptid_get_pid (lp->ptid) == pid)
-        {
-          if (lp == lwp_list)
-            lwp_list = lp->next;
-          else
-            lpprev->next = lp->next;
+  return 1;
+}
 
-          lwp_free (lp);
-        }
-      else
-        lpprev = lp;
-    }
+/* Remove all LWPs belong to PID from the lwp list.  */
+
+static void
+purge_lwp_list (int pid)
+{
+  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
 }
 
 /* Add the LWP specified by PTID to the list.  PTID is the first LWP
@@ -812,8 +889,11 @@ add_initial_lwp (ptid_t ptid)
   lp->ptid = ptid;
   lp->core = -1;
 
-  lp->next = lwp_list;
-  lwp_list = lp;
+  /* Add to sorted-by-reverse-creation-order list.  */
+  lwp_list_add (lp);
+
+  /* Add to keyed-by-pid htab.  */
+  lwp_lwpid_htab_add_lwp (lp);
 
   return lp;
 }
@@ -844,22 +924,24 @@ add_lwp (ptid_t ptid)
 static void
 delete_lwp (ptid_t ptid)
 {
-  struct lwp_info *lp, *lpprev;
+  struct lwp_info *lp;
+  void **slot;
+  struct lwp_info dummy;
 
-  lpprev = NULL;
+  dummy.ptid = ptid;
+  slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
+  if (slot == NULL)
+    return;
 
-  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
-    if (ptid_equal (lp->ptid, ptid))
-      break;
+  lp = *(struct lwp_info **) slot;
+  gdb_assert (lp != NULL);
 
-  if (!lp)
-    return;
+  htab_clear_slot (lwp_lwpid_htab, slot);
 
-  if (lpprev)
-    lpprev->next = lp->next;
-  else
-    lwp_list = lp->next;
+  /* Remove from sorted-by-creation-order list.  */
+  lwp_list_remove (lp);
 
+  /* Release.  */
   lwp_free (lp);
 }
 
@@ -871,17 +953,16 @@ find_lwp_pid (ptid_t ptid)
 {
   struct lwp_info *lp;
   int lwp;
+  struct lwp_info dummy;
 
   if (ptid_lwp_p (ptid))
     lwp = ptid_get_lwp (ptid);
   else
     lwp = ptid_get_pid (ptid);
 
-  for (lp = lwp_list; lp; lp = lp->next)
-    if (lwp == ptid_get_lwp (lp->ptid))
-      return lp;
-
-  return NULL;
+  dummy.ptid = ptid_build (0, lwp, 0);
+  lp = (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
+  return lp;
 }
 
 /* See nat/linux-nat.h.  */
@@ -4861,6 +4942,8 @@ Enables printf debugging output."),
   sigdelset (&suspend_mask, SIGCHLD);
 
   sigemptyset (&blocked_mask);
+
+  lwp_lwpid_htab_create ();
 }