author    Peter Maydell <peter.maydell@linaro.org>  2020-11-05 15:16:43 +0000
committer Peter Maydell <peter.maydell@linaro.org>  2020-11-05 15:16:43 +0000
commit    85c3ed44171d757e399bcbb3db3608c1848c0984
tree      777c2c5df9154caa0f0787b15b76deda2f2c2399
parent    747c6b3811ef5f06278ab364261e3723bcbb4031
parent    9f6df01d0e128c2df179789b37140d6aeddfcb92
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
pc,pci,vhost,virtio: fixes

Lots of fixes all over the place. virtio-mem and virtio-iommu patches
are kind of fixes but it seems better to just make them behave sanely
than try to educate users about the limitations ...

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Wed 04 Nov 2020 18:40:03 GMT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (31 commits)
  contrib/vhost-user-blk: fix get_config() information leak
  block/export: fix vhost-user-blk get_config() information leak
  block/export: make vhost-user-blk config space little-endian
  configure: introduce --enable-vhost-user-blk-server
  libvhost-user: follow QEMU comment style
  vhost-blk: set features before setting inflight feature
  Revert "vhost-blk: set features before setting inflight feature"
  net: Add vhost-vdpa in show_netdevs()
  vhost-vdpa: Add qemu_close in vhost_vdpa_cleanup
  vfio: Don't issue full 2^64 unmap
  virtio-iommu: Set supported page size mask
  vfio: Set IOMMU page size as per host supported page size
  memory: Add interface to set iommu page size mask
  virtio-iommu: Add notify_flag_changed() memory region callback
  virtio-iommu: Add replay() memory region callback
  virtio-iommu: Call memory notifiers in attach/detach
  virtio-iommu: Add memory notifiers for map/unmap
  virtio-iommu: Store memory region in endpoint struct
  virtio-iommu: Fix virtio_iommu_mr()
  hw/smbios: Fix leaked fd in save_opt_one() error path
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/memory.h            38
-rw-r--r--  include/hw/mem/memory-device.h   10
-rw-r--r--  include/hw/virtio/vhost.h         2
3 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index aff6ef7..0f3e6bc 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -397,6 +397,32 @@ struct IOMMUMemoryRegionClass {
      * @iommu: the IOMMUMemoryRegion
      */
     int (*num_indexes)(IOMMUMemoryRegion *iommu);
+
+    /**
+     * @iommu_set_page_size_mask:
+     *
+     * Restrict the page size mask that can be supported with a given IOMMU
+     * memory region. Used for example to propagate host physical IOMMU page
+     * size mask limitations to the virtual IOMMU.
+     *
+     * Optional method: if this method is not provided, then the default global
+     * page mask is used.
+     *
+     * @iommu: the IOMMUMemoryRegion
+     *
+     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
+     * representing the smallest page size, must be set. Additional set bits
+     * represent supported block sizes. For example a host physical IOMMU that
+     * uses page tables with a page size of 4kB, and supports 2MB and 1GB
+     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
+     * block sizes is specified with mask 0xfffffffffffff000.
+     *
+     * Returns 0 on success, or a negative error. In case of failure, the error
+     * object must be created.
+     */
+    int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
+                                    uint64_t page_size_mask,
+                                    Error **errp);
 };
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
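
[Not part of the patch] A minimal standalone C sketch of the mask encoding
described in the comment above: each set bit N means 2^N bytes is a usable
page or block size, so the two example values from the comment decompose as
shown below.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 4kB granule plus 2MB and 1GB block sizes, one bit per size. */
    uint64_t mask = (UINT64_C(1) << 12)   /* 4kB  */
                  | (UINT64_C(1) << 21)   /* 2MB  */
                  | (UINT64_C(1) << 30);  /* 1GB  */
    assert(mask == UINT64_C(0x40201000));

    /* 4kB granule with indiscriminate block sizes: every bit >= 12 set. */
    uint64_t any_4k = ~(UINT64_C(0x1000) - 1);
    assert(any_4k == UINT64_C(0xfffffffffffff000));

    printf("mask=0x%" PRIx64 " any=0x%" PRIx64 "\n", mask, any_4k);
    return 0;
}
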
@@ -1410,6 +1436,18 @@ int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
 
 /**
+ * memory_region_iommu_set_page_size_mask: set the supported page
+ * sizes for a given IOMMU memory region
+ *
+ * @iommu_mr: IOMMU memory region
+ * @page_size_mask: supported page size mask
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
+                                           uint64_t page_size_mask,
+                                           Error **errp);
+
+/**
  * memory_region_name: get a memory region's name
  *
  * Returns the string that was used to initialize the memory region.
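
[Not part of the patch] A hedged sketch of how a caller such as the VFIO
code mentioned in the commit list might use the new helper to propagate the
host IOMMU page sizes to a virtual IOMMU region. The wrapper name
example_propagate_page_sizes() is illustrative only; on failure the callback
is expected to have filled in the Error object, which is simply reported
here.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"

static void example_propagate_page_sizes(IOMMUMemoryRegion *iommu_mr,
                                         uint64_t host_page_sizes)
{
    Error *err = NULL;

    /* Restrict the region to the page/block sizes the host can map. */
    if (memory_region_iommu_set_page_size_mask(iommu_mr, host_page_sizes,
                                               &err)) {
        error_report_err(err);
    }
}
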
diff --git a/include/hw/mem/memory-device.h b/include/hw/mem/memory-device.h
index 30d7e99..48d2611 100644
--- a/include/hw/mem/memory-device.h
+++ b/include/hw/mem/memory-device.h
@@ -89,6 +89,16 @@ struct MemoryDeviceClass {
     MemoryRegion *(*get_memory_region)(MemoryDeviceState *md, Error **errp);
 
     /*
+     * Optional: Return the desired minimum alignment of the device in guest
+     * physical address space. The final alignment is computed based on this
+     * alignment and the alignment requirements of the memory region.
+     *
+     * Called when plugging the memory device to detect the required alignment
+     * during address assignment.
+     */
+    uint64_t (*get_min_alignment)(const MemoryDeviceState *md);
+
+    /*
      * Translate the memory device into #MemoryDeviceInfo.
      */
     void (*fill_device_info)(const MemoryDeviceState *md,
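
[Not part of the patch] A hedged sketch of what an implementation of the new
hook could look like for a device whose blocks must be aligned beyond the
page size. MyMemDevState, MY_MEM_DEV() and the block_size field are
illustrative names, not QEMU code; only MemoryDeviceClass and
MEMORY_DEVICE_CLASS() come from the header above.

/* Report the device's block size as its minimum guest-physical alignment;
 * address assignment combines this with the memory region's own alignment. */
static uint64_t my_mem_dev_get_min_alignment(const MemoryDeviceState *md)
{
    const MyMemDevState *dev = MY_MEM_DEV(md);

    return dev->block_size;
}

static void my_mem_dev_class_init(ObjectClass *klass, void *data)
{
    MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass);

    mdc->get_min_alignment = my_mem_dev_get_min_alignment;
}
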
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 839bfb1..4a8bc75 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -141,7 +141,7 @@ void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
 void vhost_dev_free_inflight(struct vhost_inflight *inflight);
 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
-int vhost_dev_prepare_inflight(struct vhost_dev *hdev);
+int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
 int vhost_dev_set_inflight(struct vhost_dev *dev,
                            struct vhost_inflight *inflight);
 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,