author    Alex Williamson <alex.williamson@redhat.com>  2016-10-31 09:53:03 -0600
committer Alex Williamson <alex.williamson@redhat.com>  2016-10-31 09:53:03 -0600
commit    24acf72b9a291cebfd05f2ecdf3a982ac01e6291 (patch)
tree      5ea76fd6bcf3315aeaa58a4879158da32ffbad86 /hw/vfio
parent    4a2e242bbb306ef5c16ce9e7bb2da3bd8a4eb098 (diff)
vfio: Handle zero-length sparse mmap ranges
As reported in the link below, a user has a PCI device with a 4KB BAR which
contains the MSI-X table. This seems to hit a corner case in the kernel where
the region reports being mmap capable, but the sparse mmap information
reports a zero-sized range. It's not entirely clear that the kernel is
incorrect in doing this, but regardless, we need to handle it. To do this,
fill our mmap array only with non-zero sized sparse mmap entries and add an
error return from the function so we can tell the difference between
nr_mmaps being zero based on sparse mmap info vs. lack of sparse mmap info.

NB, this doesn't actually change the behavior of the device; it only removes
the scary "Failed to mmap ... Performance may be slow" error message. We
cannot currently create an mmap over the MSI-X table.

Link: http://lists.nongnu.org/archive/html/qemu-discuss/2016-10/msg00009.html
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
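For reference, the kernel describes sparse mmap areas through a capability chain appended to the VFIO_DEVICE_GET_REGION_INFO result, and that is the structure the patch below parses. The following is a minimal, self-contained sketch in plain C against the linux/vfio.h UAPI, independent of QEMU's VFIORegion plumbing: the find_sparse_mmap_cap() helper, the buffer layout, and the exact checks are illustrative assumptions, not code from this patch. It walks the capability chain and skips a zero-length area like the one from the report.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/vfio.h>

/*
 * Walk the capability chain appended to a vfio_region_info buffer and
 * return the sparse mmap capability, or NULL if the kernel did not
 * report one.  cap_offset and header->next are byte offsets from the
 * start of the region info structure; a next of 0 ends the chain.
 */
static struct vfio_region_info_cap_sparse_mmap *
find_sparse_mmap_cap(struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    uint32_t off;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (off = info->cap_offset; off; off = hdr->next) {
        hdr = (struct vfio_info_cap_header *)((char *)info + off);
        if (hdr->id == VFIO_REGION_INFO_CAP_SPARSE_MMAP) {
            return (struct vfio_region_info_cap_sparse_mmap *)hdr;
        }
    }

    return NULL;
}

int main(void)
{
    /*
     * Hypothetical buffer standing in for what VFIO_DEVICE_GET_REGION_INFO
     * might return for the reported 4KB MSI-X BAR: the region advertises
     * mmap support, but its single sparse area is zero bytes long.
     */
    union {
        char raw[sizeof(struct vfio_region_info) +
                 sizeof(struct vfio_region_info_cap_sparse_mmap) +
                 sizeof(struct vfio_region_sparse_mmap_area)];
        uint64_t align;                 /* keep the __u64 fields aligned */
    } buf;
    struct vfio_region_info *info = (struct vfio_region_info *)buf.raw;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    uint32_t i, usable = 0;

    memset(buf.raw, 0, sizeof(buf.raw));
    info->flags = VFIO_REGION_INFO_FLAG_MMAP | VFIO_REGION_INFO_FLAG_CAPS;
    info->cap_offset = sizeof(*info);
    info->size = 4096;

    sparse = (struct vfio_region_info_cap_sparse_mmap *)
                 (buf.raw + info->cap_offset);
    sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
    sparse->nr_areas = 1;
    sparse->areas[0].offset = 0;
    sparse->areas[0].size = 0;          /* the zero-length range */

    sparse = find_sparse_mmap_cap(info);
    if (!sparse) {
        /* Corresponds to the patch's new -ENODEV path: no sparse info. */
        fprintf(stderr, "no sparse mmap capability\n");
        return ENODEV;
    }

    /* Keep only non-zero sized areas, as the patch now does. */
    for (i = 0; i < sparse->nr_areas; i++) {
        if (sparse->areas[i].size) {
            usable++;
        }
    }

    printf("sparse areas: %u, usable for mmap: %u\n",
           sparse->nr_areas, usable);
    return 0;
}

With the hypothetical one-area, zero-size layout above, zero areas are usable, which is exactly the case the patch distinguishes from "no sparse mmap capability at all" via the new -ENODEV return.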
Diffstat (limited to 'hw/vfio')
-rw-r--r--  hw/vfio/common.c | 36
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index c764cb3..f528309 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -610,16 +610,16 @@ vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
     return NULL;
 }
 
-static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
-                                           struct vfio_region_info *info)
+static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
+                                          struct vfio_region_info *info)
 {
     struct vfio_info_cap_header *hdr;
     struct vfio_region_info_cap_sparse_mmap *sparse;
-    int i;
+    int i, j;
 
     hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
     if (!hdr) {
-        return;
+        return -ENODEV;
     }
 
     sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
@@ -627,16 +627,24 @@ static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
     trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                          region->nr, sparse->nr_areas);
 
-    region->nr_mmaps = sparse->nr_areas;
-    region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
+    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
 
-    for (i = 0; i < region->nr_mmaps; i++) {
-        region->mmaps[i].offset = sparse->areas[i].offset;
-        region->mmaps[i].size = sparse->areas[i].size;
-        trace_vfio_region_sparse_mmap_entry(i, region->mmaps[i].offset,
-                                            region->mmaps[i].offset +
-                                            region->mmaps[i].size);
+    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
+        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
+                                            sparse->areas[i].offset +
+                                            sparse->areas[i].size);
+
+        if (sparse->areas[i].size) {
+            region->mmaps[j].offset = sparse->areas[i].offset;
+            region->mmaps[j].size = sparse->areas[i].size;
+            j++;
+        }
     }
+
+    region->nr_mmaps = j;
+    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
+
+    return 0;
 }
 
 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
@@ -665,9 +673,9 @@ int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
         region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
         !(region->size & ~qemu_real_host_page_mask)) {
 
-        vfio_setup_region_sparse_mmaps(region, info);
+        ret = vfio_setup_region_sparse_mmaps(region, info);
 
-        if (!region->nr_mmaps) {
+        if (ret) {
             region->nr_mmaps = 1;
             region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
             region->mmaps[0].offset = 0;