author     Eric Auger <eric.auger@redhat.com>          2021-03-09 11:27:37 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2021-03-12 12:40:10 +0000
commit     f14fb6c2db961c3665a61b342ab329b7bd20d1e7 (patch)
tree       57f311ed8f4d445ad5c1474751e76206e25bf972 /hw
parent     41ce9a912641cd7f820bcfccea15e30efc32104e (diff)
dma: Introduce dma_aligned_pow2_mask()
Currently get_naturally_aligned_size() is used by the Intel IOMMU to compute the maximum invalidation range based on @size, which is a power of 2, aligned with the @start address and less than the maximum range defined by @gaw.

This helper is also useful for other IOMMU devices (virtio-iommu, SMMUv3) to make sure IOMMU UNMAP notifiers are only called with power-of-2 range sizes.

Let's move the helper into dma-helpers.c and rename it to dma_aligned_pow2_mask(). Also rewrite it so that it accommodates UINT64_MAX values for the size mask and max mask. It now returns a mask instead of a size. Change the caller accordingly.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Message-id: 20210309102742.30442-3-eric.auger@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
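For readers who only see the hw/ side of this change, here is a minimal sketch of what the moved helper could look like, reconstructed from the removed get_naturally_aligned_size() below and the description above. The real definition lands in dma-helpers.c (outside this diffstat) and may differ in detail; the name aligned_pow2_mask_sketch() is only used here for illustration.

    #include "qemu/osdep.h"       /* MIN() */
    #include "qemu/host-utils.h"  /* clz64() */

    /*
     * Sketch only: return the largest mask M such that [start, start + M]
     * is naturally aligned (M + 1 is a power of 2), stays within
     * [start, end] and within max_addr_bits of address space. Returning a
     * mask rather than a size lets UINT64_MAX describe the whole 64-bit
     * space without overflowing.
     */
    static uint64_t aligned_pow2_mask_sketch(uint64_t start, uint64_t end,
                                             int max_addr_bits)
    {
        uint64_t max_mask = UINT64_MAX;
        uint64_t size_mask = end - start;            /* inclusive range mask */
        uint64_t alignment_mask;

        if (max_addr_bits != 64) {
            max_mask = (1ULL << max_addr_bits) - 1;
        }

        /* largest power-of-2 alignment of start, expressed as a mask */
        alignment_mask = start ? (start & -start) - 1 : max_mask;
        alignment_mask = MIN(alignment_mask, max_mask);
        size_mask = MIN(size_mask, max_mask);

        if (alignment_mask <= size_mask) {
            /* the alignment of start is the limiting factor */
            return alignment_mask;
        }
        if (size_mask == UINT64_MAX) {
            return UINT64_MAX;                       /* avoid the +1 overflow */
        }
        /* otherwise take the largest power-of-2 chunk that fits the range */
        return (1ULL << (63 - clz64(size_mask + 1))) - 1;
    }

The key behavioural change for callers is that the function now returns an inclusive address mask (size - 1), which is what IOMMUTLBEntry.addr_mask expects, so the chunk size is derived as mask + 1.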
Diffstat (limited to 'hw')
-rw-r--r--   hw/i386/intel_iommu.c   30
1 file changed, 7 insertions, 23 deletions
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 3206f37..6be8f32 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -35,6 +35,7 @@
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
+#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/i386/apic_internal.h"
#include "kvm/kvm_i386.h"
@@ -3455,24 +3456,6 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
return vtd_dev_as;
}
-static uint64_t get_naturally_aligned_size(uint64_t start,
- uint64_t size, int gaw)
-{
- uint64_t max_mask = 1ULL << gaw;
- uint64_t alignment = start ? start & -start : max_mask;
-
- alignment = MIN(alignment, max_mask);
- size = MIN(size, max_mask);
-
- if (alignment <= size) {
- /* Increase the alignment of start */
- return alignment;
- } else {
- /* Find the largest page mask from size */
- return 1ULL << (63 - clz64(size));
- }
-}
-
/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
@@ -3501,13 +3484,14 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
while (remain >= VTD_PAGE_SIZE) {
IOMMUTLBEvent event;
- uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
+ uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits);
+ uint64_t size = mask + 1;
- assert(mask);
+ assert(size);
event.type = IOMMU_NOTIFIER_UNMAP;
event.entry.iova = start;
- event.entry.addr_mask = mask - 1;
+ event.entry.addr_mask = mask;
event.entry.target_as = &address_space_memory;
event.entry.perm = IOMMU_NONE;
/* This field is meaningless for unmap */
@@ -3515,8 +3499,8 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
memory_region_notify_iommu_one(n, &event);
- start += mask;
- remain -= mask;
+ start += size;
+ remain -= size;
}
assert(!remain);
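Putting the three hunks together, the unmap loop in vtd_address_space_unmap() after this patch follows roughly this shape. This is a paraphrase of the hunks above, not a literal excerpt; the surrounding declarations of start, end, remain, s and n are omitted.

    while (remain >= VTD_PAGE_SIZE) {
        IOMMUTLBEvent event;
        /* mask covers the largest naturally aligned power-of-2 chunk at start */
        uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits);
        uint64_t size = mask + 1;

        assert(size);

        event.type = IOMMU_NOTIFIER_UNMAP;
        event.entry.iova = start;
        event.entry.addr_mask = mask;      /* addr_mask takes a mask, not a size */
        event.entry.target_as = &address_space_memory;
        event.entry.perm = IOMMU_NONE;
        event.entry.translated_addr = 0;   /* meaningless for unmap */

        memory_region_notify_iommu_one(n, &event);

        /* advance by the chunk size derived from the mask */
        start += size;
        remain -= size;
    }
    assert(!remain);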