author    Jean-Philippe Brucker <jean-philippe@linaro.org>    2022-07-18 14:56:37 +0100
committer Michael S. Tsirkin <mst@redhat.com>                 2022-07-26 15:33:29 -0400
commit    0522be9a0c0094088ccef7aab352c57f483ca250
tree      8bb87176f0ae8282f22ca1b639be8b062265af88 /hw
parent    b3e6982b4154c1c0ab8b25f2e1ac7838a1809824
hw/virtio/virtio-iommu: Enforce power-of-two notify for both MAP and UNMAP
Currently we only enforce power-of-two mappings (required by the QEMU
notifier) for UNMAP requests. A MAP request not aligned on a power of
two may be successfully handled by VFIO, and then the corresponding
UNMAP notify will fail because it attempts to split that mapping.
Ensure MAP and UNMAP notifications are consistent.

Fixes: dde3f08b5cab ("virtio-iommu: Handle non power of 2 range invalidations")
Reported-by: Tina Zhang <tina.zhang@intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Message-Id: <20220718135636.338264-1-jean-philippe@linaro.org>
Tested-by: Tina Zhang <tina.zhang@intel.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
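For context, the standalone sketch below (not QEMU code; pow2_chunk_mask() and
notify_range() are hypothetical stand-ins for dma_aligned_pow2_mask() and the
notifier loop added by this patch) illustrates the splitting strategy: an
arbitrary inclusive range [virt_start, virt_end] is decomposed into naturally
aligned, power-of-two sized chunks, which is what the patch now applies to MAP
notifications as well as UNMAP.

/*
 * Standalone illustration of power-of-two range splitting.
 * pow2_chunk_mask() and notify_range() are made-up helpers written only to
 * demonstrate the idea; they are not the QEMU implementation.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Largest mask (2^k - 1) such that [start, start + mask] is size-aligned
 * and still fits inside the inclusive range [start, end]. */
static uint64_t pow2_chunk_mask(uint64_t start, uint64_t end)
{
    /* Alignment limit: lowest set bit of start (start == 0 is fully aligned). */
    uint64_t align_mask = start ? (start & -start) - 1 : UINT64_MAX;
    uint64_t size_mask = end - start;   /* inclusive size, minus one */

    if (align_mask <= size_mask) {
        return align_mask;
    }

    /* Size limit: round the remaining size down to a power of two. */
    uint64_t size = size_mask + 1;      /* no overflow: size_mask < UINT64_MAX here */
    uint64_t chunk = 1;
    while (chunk <= size / 2) {
        chunk <<= 1;
    }
    return chunk - 1;
}

/* Mirror of the patch's loop: one notification per aligned chunk.
 * (The real code special-cases a full 64-bit range before looping.) */
static void notify_range(uint64_t virt_start, uint64_t virt_end)
{
    while (virt_start != virt_end + 1) {
        uint64_t mask = pow2_chunk_mask(virt_start, virt_end);

        printf("notify iova=0x%" PRIx64 " addr_mask=0x%" PRIx64 "\n",
               virt_start, mask);
        virt_start += mask + 1;
    }
}

int main(void)
{
    /* A 12KiB mapping (three 4KiB pages) at 0x10000 is not a power of two,
     * so it is emitted as an 8KiB chunk followed by a 4KiB chunk. */
    notify_range(0x10000, 0x12fff);
    return 0;
}

Run on the example range, this prints two entries, (iova=0x10000,
addr_mask=0x1fff) and (iova=0x12000, addr_mask=0xfff): power-of-two sized,
naturally aligned pieces of the kind the QEMU notifier requires.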
Diffstat (limited to 'hw')
-rw-r--r--  hw/virtio/virtio-iommu.c  47
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 281152d..62e07ec 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -197,6 +197,32 @@ static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
     }
 }
 
+static void virtio_iommu_notify_map_unmap(IOMMUMemoryRegion *mr,
+                                          IOMMUTLBEvent *event,
+                                          hwaddr virt_start, hwaddr virt_end)
+{
+    uint64_t delta = virt_end - virt_start;
+
+    event->entry.iova = virt_start;
+    event->entry.addr_mask = delta;
+
+    if (delta == UINT64_MAX) {
+        memory_region_notify_iommu(mr, 0, *event);
+    }
+
+    while (virt_start != virt_end + 1) {
+        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);
+
+        event->entry.addr_mask = mask;
+        event->entry.iova = virt_start;
+        memory_region_notify_iommu(mr, 0, *event);
+        virt_start += mask + 1;
+        if (event->entry.perm != IOMMU_NONE) {
+            event->entry.translated_addr += mask + 1;
+        }
+    }
+}
+
 static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                     hwaddr virt_end, hwaddr paddr,
                                     uint32_t flags)
@@ -215,19 +241,16 @@ static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
     event.type = IOMMU_NOTIFIER_MAP;
     event.entry.target_as = &address_space_memory;
-    event.entry.addr_mask = virt_end - virt_start;
-    event.entry.iova = virt_start;
     event.entry.perm = perm;
     event.entry.translated_addr = paddr;
-    memory_region_notify_iommu(mr, 0, event);
+    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
 }
 
 static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                       hwaddr virt_end)
 {
     IOMMUTLBEvent event;
-    uint64_t delta = virt_end - virt_start;
 
     if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
         return;
@@ -239,22 +262,8 @@ static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
     event.entry.target_as = &address_space_memory;
     event.entry.perm = IOMMU_NONE;
     event.entry.translated_addr = 0;
-    event.entry.addr_mask = delta;
-    event.entry.iova = virt_start;
-
-    if (delta == UINT64_MAX) {
-        memory_region_notify_iommu(mr, 0, event);
-    }
-
-    while (virt_start != virt_end + 1) {
-        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);
-
-        event.entry.addr_mask = mask;
-        event.entry.iova = virt_start;
-        memory_region_notify_iommu(mr, 0, event);
-        virt_start += mask + 1;
-    }
+    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
 }
 
 static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,