author     Ross Lagerwall <ross.lagerwall@citrix.com>    2024-04-04 15:08:33 +0100
committer  Anthony PERARD <anthony@xenproject.org>       2024-07-01 14:57:18 +0200
commit     410b4d560dfa3b38a11ad19cf00180238651d9b7 (patch)
tree       5b908629f3aec605d90d1924ec9e910e00e382cb /hw
parent     196fb962baeff16342279111cc927a153415f85f (diff)
xen-hvm: Avoid livelock while handling buffered ioreqs
A malicious or buggy guest may generate buffered ioreqs faster than QEMU can process them in handle_buffered_iopage(). The result is a livelock: QEMU continuously processes ioreqs on the main thread without iterating through the main loop, which prevents it from handling other events, processing timers, etc. Without QEMU handling other events, the guest often becomes unstable and it is difficult to stop the source of buffered ioreqs.

To avoid this, if we process a full page of buffered ioreqs, stop and reschedule an immediate timer to continue processing them. This lets QEMU go back to the main loop and catch up.

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-Id: <20240404140833.1557953-1-ross.lagerwall@citrix.com>
Signed-off-by: Anthony PERARD <anthony@xenproject.org>
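As an illustration of the pattern only (not QEMU code), the following is a minimal standalone C sketch of the bounded-batch-plus-reschedule approach described above: drain at most one page worth of buffered requests per callback, and if the batch came back full, re-arm an immediate timer instead of looping so the main loop can run. All names here (fake_ring, drain_buffered, reschedule_now, buffered_io_cb, SLOTS_PER_PAGE) are hypothetical stand-ins; SLOTS_PER_PAGE plays the role of IOREQ_BUFFER_SLOT_NUM.

#include <stdbool.h>
#include <stdio.h>

#define SLOTS_PER_PAGE 8   /* stand-in for IOREQ_BUFFER_SLOT_NUM */

struct fake_ring {
    unsigned int pending;   /* requests the "guest" has queued */
};

/* Process at most SLOTS_PER_PAGE requests; return how many were handled. */
static unsigned int drain_buffered(struct fake_ring *ring)
{
    unsigned int handled = 0;

    while (ring->pending > 0 && handled < SLOTS_PER_PAGE) {
        ring->pending--;            /* "handle" one buffered request */
        handled++;
    }
    return handled;
}

/* Stand-ins for timer_mod()/timer_del() in the real code. */
static bool timer_armed;
static void reschedule_now(void) { timer_armed = true; }
static void cancel_timer(void)   { timer_armed = false; }

/* One timer callback, mirroring the control flow of the patched handler. */
static void buffered_io_cb(struct fake_ring *ring)
{
    unsigned int handled = drain_buffered(ring);

    if (handled >= SLOTS_PER_PAGE) {
        reschedule_now();   /* full page: come back immediately */
    } else {
        cancel_timer();     /* drained: wait for the next event */
    }
}

int main(void)
{
    struct fake_ring ring = { .pending = 20 };
    unsigned int callbacks = 0;

    /* The "main loop": each pass would service other events, then timers. */
    do {
        buffered_io_cb(&ring);
        callbacks++;
    } while (timer_armed);

    printf("drained in %u callbacks, %u requests left\n",
           callbacks, ring.pending);
    return 0;
}

Note that the real handler in the diff below has a third branch the sketch collapses: a partially drained ring re-arms the timer with BUFFER_IO_MAX_DELAY, and only an empty ring deletes the timer and unmasks the event channel.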
Diffstat (limited to 'hw')
-rw-r--r--  hw/xen/xen-hvm-common.c  26
1 file changed, 17 insertions, 9 deletions
diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
index b8ace1c..3a9d6f9 100644
--- a/hw/xen/xen-hvm-common.c
+++ b/hw/xen/xen-hvm-common.c
@@ -475,11 +475,11 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
     }
 }
 
-static bool handle_buffered_iopage(XenIOState *state)
+static unsigned int handle_buffered_iopage(XenIOState *state)
 {
     buffered_iopage_t *buf_page = state->buffered_io_page;
     buf_ioreq_t *buf_req = NULL;
-    bool handled_ioreq = false;
+    unsigned int handled = 0;
     ioreq_t req;
     int qw;
 
@@ -492,7 +492,7 @@ static bool handle_buffered_iopage(XenIOState *state)
     req.count = 1;
     req.dir = IOREQ_WRITE;
 
-    for (;;) {
+    do {
         uint32_t rdptr = buf_page->read_pointer, wrptr;
 
         xen_rmb();
@@ -533,22 +533,30 @@ static bool handle_buffered_iopage(XenIOState *state)
         assert(!req.data_is_ptr);
 
         qatomic_add(&buf_page->read_pointer, qw + 1);
-        handled_ioreq = true;
-    }
+        handled += qw + 1;
+    } while (handled < IOREQ_BUFFER_SLOT_NUM);
 
-    return handled_ioreq;
+    return handled;
 }
 
 static void handle_buffered_io(void *opaque)
 {
+    unsigned int handled;
     XenIOState *state = opaque;
 
-    if (handle_buffered_iopage(state)) {
+    handled = handle_buffered_iopage(state);
+    if (handled >= IOREQ_BUFFER_SLOT_NUM) {
+        /* We handled a full page of ioreqs. Schedule a timer to continue
+         * processing while giving other stuff a chance to run.
+         */
         timer_mod(state->buffered_io_timer,
-                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
-    } else {
+                qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
+    } else if (handled == 0) {
         timer_del(state->buffered_io_timer);
         qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+    } else {
+        timer_mod(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
     }
 }