Diffstat (limited to 'hw/xen/xen-hvm-common.c')
-rw-r--r--  hw/xen/xen-hvm-common.c  140
1 file changed, 90 insertions(+), 50 deletions(-)
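
This patch makes the buffered-ioreq page optional: xen_register_ioreq() now takes a handle_bufioreq argument, and state->has_bufioreq gates the mapping of the buffered-ioreq page, the binding of its event channel, and the buffered-ioreq processing loop. It also changes handle_buffered_iopage() to report how many slots it drained, so handle_buffered_io() can re-arm its timer immediately when a full page was consumed. The sketch below is only an illustration of the new caller contract and is not part of this file: example_arch_init and want_buffered_ioreqs are hypothetical names, and the use of HVM_IOREQSRV_BUFIOREQ_ATOMIC for the "buffered ioreqs wanted" case is an assumption based on the HVM_IOREQSRV_BUFIOREQ_* values in the Xen public headers (only _OFF appears in the diff itself).

/*
 * Illustrative sketch (not from this patch): how an arch init path might
 * pick the handle_bufioreq value passed to xen_register_ioreq().
 */
#include "qemu/osdep.h"
#include "hw/xen/xen-hvm-common.h"   /* declares XenIOState and xen_register_ioreq() */

static void example_arch_init(XenIOState *state, unsigned int max_cpus,
                              const MemoryListener *listener,
                              bool want_buffered_ioreqs)
{
    /*
     * HVM_IOREQSRV_BUFIOREQ_ATOMIC / _OFF come from the Xen public headers
     * (assumed here to be pulled in via xen-hvm-common.h).  With _OFF, Xen
     * allocates no buffered-ioreq page and state->has_bufioreq stays false,
     * so the buffered-ioreq event channel and timer are never set up.
     */
    uint8_t handle_bufioreq = want_buffered_ioreqs ? HVM_IOREQSRV_BUFIOREQ_ATOMIC
                                                   : HVM_IOREQSRV_BUFIOREQ_OFF;

    xen_register_ioreq(state, max_cpus, handle_bufioreq, listener);
}
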
diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
index b8ace1c..78e0bc8 100644
--- a/hw/xen/xen-hvm-common.c
+++ b/hw/xen/xen-hvm-common.c
@@ -1,14 +1,21 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
+#include "exec/target_long.h"
#include "exec/target_page.h"
#include "trace.h"
+#include "hw/hw.h"
#include "hw/pci/pci_host.h"
#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen-bus.h"
#include "hw/boards.h"
#include "hw/xen/arch_hvm.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/xen.h"
+#include "system/xen-mapcache.h"
MemoryRegion xen_memory, xen_grants;
@@ -475,11 +482,11 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
}
}
-static bool handle_buffered_iopage(XenIOState *state)
+static unsigned int handle_buffered_iopage(XenIOState *state)
{
buffered_iopage_t *buf_page = state->buffered_io_page;
buf_ioreq_t *buf_req = NULL;
- bool handled_ioreq = false;
+ unsigned int handled = 0;
ioreq_t req;
int qw;
@@ -492,7 +499,7 @@ static bool handle_buffered_iopage(XenIOState *state)
req.count = 1;
req.dir = IOREQ_WRITE;
- for (;;) {
+ do {
uint32_t rdptr = buf_page->read_pointer, wrptr;
xen_rmb();
@@ -533,22 +540,30 @@ static bool handle_buffered_iopage(XenIOState *state)
assert(!req.data_is_ptr);
qatomic_add(&buf_page->read_pointer, qw + 1);
- handled_ioreq = true;
- }
+ handled += qw + 1;
+ } while (handled < IOREQ_BUFFER_SLOT_NUM);
- return handled_ioreq;
+ return handled;
}
static void handle_buffered_io(void *opaque)
{
+ unsigned int handled;
XenIOState *state = opaque;
- if (handle_buffered_iopage(state)) {
+ handled = handle_buffered_iopage(state);
+ if (handled >= IOREQ_BUFFER_SLOT_NUM) {
+ /* We handled a full page of ioreqs. Schedule a timer to continue
+ * processing while giving other stuff a chance to run.
+ */
timer_mod(state->buffered_io_timer,
- BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
- } else {
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
+ } else if (handled == 0) {
timer_del(state->buffered_io_timer);
qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+ } else {
+ timer_mod(state->buffered_io_timer,
+ BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
}
@@ -659,6 +674,8 @@ static int xen_map_ioreq_server(XenIOState *state)
xen_pfn_t ioreq_pfn;
xen_pfn_t bufioreq_pfn;
evtchn_port_t bufioreq_evtchn;
+ unsigned long num_frames = 1;
+ unsigned long frame = 1;
int rc;
/*
@@ -667,65 +684,85 @@ static int xen_map_ioreq_server(XenIOState *state)
*/
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
+
+ if (state->has_bufioreq) {
+ frame = 0;
+ num_frames = 2;
+ }
state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
XENMEM_resource_ioreq_server,
- state->ioservid, 0, 2,
+ state->ioservid,
+ frame, num_frames,
&addr,
PROT_READ | PROT_WRITE, 0);
if (state->fres != NULL) {
trace_xen_map_resource_ioreq(state->ioservid, addr);
- state->buffered_io_page = addr;
- state->shared_page = addr + XC_PAGE_SIZE;
+ state->shared_page = addr;
+ if (state->has_bufioreq) {
+ state->buffered_io_page = addr;
+ state->shared_page = addr + XC_PAGE_SIZE;
+ }
} else if (errno != EOPNOTSUPP) {
error_report("failed to map ioreq server resources: error %d handle=%p",
errno, xen_xc);
return -1;
}
- rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
- (state->shared_page == NULL) ?
- &ioreq_pfn : NULL,
- (state->buffered_io_page == NULL) ?
- &bufioreq_pfn : NULL,
- &bufioreq_evtchn);
- if (rc < 0) {
- error_report("failed to get ioreq server info: error %d handle=%p",
- errno, xen_xc);
- return rc;
- }
+ /*
+ * If we fail to map the shared page with xenforeignmemory_map_resource()
+ * or if we're using buffered ioreqs, we need xen_get_ioreq_server_info()
+ * to provide the addresses to map the shared page and/or to get the
+ * event-channel port for buffered ioreqs.
+ */
+ if (state->shared_page == NULL || state->has_bufioreq) {
+ rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
+ (state->shared_page == NULL) ?
+ &ioreq_pfn : NULL,
+ (state->has_bufioreq &&
+ state->buffered_io_page == NULL) ?
+ &bufioreq_pfn : NULL,
+ &bufioreq_evtchn);
+ if (rc < 0) {
+ error_report("failed to get ioreq server info: error %d handle=%p",
+ errno, xen_xc);
+ return rc;
+ }
- if (state->shared_page == NULL) {
- trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
+ if (state->shared_page == NULL) {
+ trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &ioreq_pfn, NULL);
+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &ioreq_pfn, NULL);
+ }
if (state->shared_page == NULL) {
error_report("map shared IO page returned error %d handle=%p",
errno, xen_xc);
}
- }
- if (state->buffered_io_page == NULL) {
- trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
+ if (state->has_bufioreq && state->buffered_io_page == NULL) {
+ trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &bufioreq_pfn,
- NULL);
- if (state->buffered_io_page == NULL) {
- error_report("map buffered IO page returned error %d", errno);
- return -1;
+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &bufioreq_pfn,
+ NULL);
+ if (state->buffered_io_page == NULL) {
+ error_report("map buffered IO page returned error %d", errno);
+ return -1;
+ }
}
}
- if (state->shared_page == NULL || state->buffered_io_page == NULL) {
+ if (state->shared_page == NULL ||
+ (state->has_bufioreq && state->buffered_io_page == NULL)) {
return -1;
}
- trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn);
-
- state->bufioreq_remote_port = bufioreq_evtchn;
+ if (state->has_bufioreq) {
+ trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn);
+ state->bufioreq_remote_port = bufioreq_evtchn;
+ }
return 0;
}
@@ -822,14 +859,15 @@ static void xen_do_ioreq_register(XenIOState *state,
state->ioreq_local_port[i] = rc;
}
- rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
- state->bufioreq_remote_port);
- if (rc == -1) {
- error_report("buffered evtchn bind error %d", errno);
- goto err;
+ if (state->has_bufioreq) {
+ rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ state->bufioreq_remote_port);
+ if (rc == -1) {
+ error_report("buffered evtchn bind error %d", errno);
+ goto err;
+ }
+ state->bufioreq_local_port = rc;
}
- state->bufioreq_local_port = rc;
-
/* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
xen_map_cache_init(xen_phys_offset_to_gaddr, state);
@@ -857,6 +895,7 @@ err:
}
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+ uint8_t handle_bufioreq,
const MemoryListener *xen_memory_listener)
{
int rc;
@@ -875,7 +914,8 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
goto err;
}
- rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
+ state->has_bufioreq = handle_bufioreq != HVM_IOREQSRV_BUFIOREQ_OFF;
+ rc = xen_create_ioreq_server(xen_domid, handle_bufioreq, &state->ioservid);
if (!rc) {
xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
} else {