Diffstat (limited to 'hw/virtio/vhost-vdpa.c')
 hw/virtio/vhost-vdpa.c | 140 ++++++++++++++++++++-----------------------
 1 file changed, 64 insertions(+), 76 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index bc1c79b..fd0c33b 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -72,22 +72,28 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
return false;
}
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
- void *vaddr, bool readonly)
+/*
+ * The caller must set asid = 0 if the device does not support asid.
+ * This is not an ABI break since it is set to 0 by the initializer anyway.
+ */
+int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size, void *vaddr, bool readonly)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
int ret = 0;
msg.type = v->msg_type;
+ msg.asid = asid;
msg.iotlb.iova = iova;
msg.iotlb.size = size;
msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
msg.iotlb.type = VHOST_IOTLB_UPDATE;
- trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
- msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
+ trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
+ msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
+ msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
error_report("failed to write, fd=%d, errno=%d (%s)",
@@ -98,18 +104,24 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
return ret;
}
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
+/*
+ * The caller must set asid = 0 if the device does not support asid.
+ * This is not an ABI break since it is set to 0 by the initializer anyway.
+ */
+int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+ hwaddr size)
{
struct vhost_msg_v2 msg = {};
int fd = v->device_fd;
int ret = 0;
msg.type = v->msg_type;
+ msg.asid = asid;
msg.iotlb.iova = iova;
msg.iotlb.size = size;
msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
- trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
+ trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
msg.iotlb.size, msg.iotlb.type);
if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
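With the widened signatures above, every caller now names the target address
space explicitly, and devices without VHOST_BACKEND_F_IOTLB_ASID must pass
asid 0, as both comments state. A minimal caller sketch under that rule; the
wrapper name is hypothetical, not part of this patch:

static int dma_map_legacy_sketch(struct vhost_vdpa *v, hwaddr iova,
                                 hwaddr size, void *vaddr, bool readonly)
{
    /* asid 0 preserves the pre-ASID behaviour for devices that do not
     * support address space ids. */
    return vhost_vdpa_dma_map(v, 0, iova, size, vaddr, readonly);
}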
@@ -212,7 +224,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
vaddr, section->readonly);
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_vqs_enabled) {
+ if (v->shadow_data) {
int r;
mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
@@ -229,8 +241,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
}
vhost_vdpa_iotlb_batch_begin_once(v);
- ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
- vaddr, section->readonly);
+ ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ int128_get64(llsize), vaddr, section->readonly);
if (ret) {
error_report("vhost vdpa map fail!");
goto fail_map;
@@ -239,7 +251,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
return;
fail_map:
- if (v->shadow_vqs_enabled) {
+ if (v->shadow_data) {
vhost_iova_tree_remove(v->iova_tree, mem_region);
}
@@ -284,7 +296,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
- if (v->shadow_vqs_enabled) {
+ if (v->shadow_data) {
const DMAMap *result;
const void *vaddr = memory_region_get_ram_ptr(section->mr) +
section->offset_within_region +
@@ -303,7 +315,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
vhost_iova_tree_remove(v->iova_tree, *result);
}
vhost_vdpa_iotlb_batch_begin_once(v);
- ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
+ ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+ int128_get64(llsize));
if (ret) {
error_report("vhost_vdpa dma unmap error!");
}
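Note the split: both listener hunks pin guest-memory mappings to
VHOST_VDPA_GUEST_PA_ASID, while the SVQ vring hunks further down use
v->address_space_id instead. A sketch of the resulting routing; the
constant's value of 0 is inferred from the "not an ABI break" comments
above, and the helper is hypothetical:

#define VHOST_VDPA_GUEST_PA_ASID 0   /* assumed value, per the comments above */

/* Hypothetical helper: guest physical memory always stays in ASID 0;
 * only SVQ vrings may live in a separate address space when the device
 * offers VHOST_BACKEND_F_IOTLB_ASID. */
static inline uint32_t vdpa_pick_asid_sketch(struct vhost_vdpa *v,
                                             bool is_svq_ring)
{
    return is_svq_ring ? v->address_space_id : VHOST_VDPA_GUEST_PA_ASID;
}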
@@ -365,19 +378,6 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
return 0;
}
-static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
-{
- int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
- &v->iova_range);
- if (ret != 0) {
- v->iova_range.first = 0;
- v->iova_range.last = UINT64_MAX;
- }
-
- trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
- v->iova_range.last);
-}
-
/*
* The use of this function is for requests that only need to be
* applied once. Typically such request occurs at the beginning
@@ -402,45 +402,19 @@ static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
return ret;
}
-static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
- Error **errp)
+static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
{
g_autoptr(GPtrArray) shadow_vqs = NULL;
- uint64_t dev_features, svq_features;
- int r;
- bool ok;
-
- if (!v->shadow_vqs_enabled) {
- return 0;
- }
-
- r = vhost_vdpa_get_dev_features(hdev, &dev_features);
- if (r != 0) {
- error_setg_errno(errp, -r, "Can't get vdpa device features");
- return r;
- }
-
- svq_features = dev_features;
- ok = vhost_svq_valid_features(svq_features, errp);
- if (unlikely(!ok)) {
- return -1;
- }
shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
for (unsigned n = 0; n < hdev->nvqs; ++n) {
- g_autoptr(VhostShadowVirtqueue) svq;
+ VhostShadowVirtqueue *svq;
- svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
- v->shadow_vq_ops_opaque);
- if (unlikely(!svq)) {
- error_setg(errp, "Cannot create svq %u", n);
- return -1;
- }
- g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
+ svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
+ g_ptr_array_add(shadow_vqs, svq);
}
v->shadow_vqs = g_steal_pointer(&shadow_vqs);
- return 0;
}
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
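The refactor above makes SVQ creation infallible: vhost_svq_new() no longer
takes the iova tree (it binds later, at vhost_svq_start() in the svqs_start
hunk below) and can no longer return NULL, so the per-element g_autoptr guard
and every error return drop out. The surviving ownership pattern, as a
generic GLib sketch with hypothetical names:

/* Build an array that owns its elements; since element construction
 * cannot fail, no per-element cleanup guard is needed, and the array
 * is stolen into the owner only once fully built. */
static GPtrArray *build_owned_array_sketch(unsigned n,
                                           gpointer (*make)(void),
                                           GDestroyNotify free_fn)
{
    g_autoptr(GPtrArray) arr = g_ptr_array_new_full(n, free_fn);

    for (unsigned i = 0; i < n; ++i) {
        g_ptr_array_add(arr, make());   /* infallible by contract */
    }
    return g_steal_pointer(&arr);       /* transfer ownership to caller */
}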
@@ -465,12 +439,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
dev->opaque = opaque ;
v->listener = vhost_vdpa_memory_listener;
v->msg_type = VHOST_IOTLB_MSG_V2;
- ret = vhost_vdpa_init_svq(dev, v, errp);
- if (ret) {
- goto err;
- }
-
- vhost_vdpa_get_iova_range(v);
+ vhost_vdpa_init_svq(dev, v);
if (!vhost_vdpa_first_dev(dev)) {
return 0;
@@ -480,10 +449,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
VIRTIO_CONFIG_S_DRIVER);
return 0;
-
-err:
- ram_block_discard_disable(false);
- return ret;
}
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
@@ -580,10 +545,6 @@ static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
struct vhost_vdpa *v = dev->opaque;
size_t idx;
- if (!v->shadow_vqs) {
- return;
- }
-
for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
}
@@ -677,7 +638,8 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
uint64_t features;
uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
- 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
+ 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
+ 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
int r;
if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
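The hunk only extends the offered feature mask; the rest of
vhost_vdpa_set_backend_cap() is cut off by the hunk boundary. A sketch of the
negotiation shape this implies, which is an assumption about the truncated
body: fetch the kernel's backend features, keep the intersection with what
QEMU offers, and ack it back:

static int backend_cap_sketch(struct vhost_dev *dev)
{
    uint64_t features;
    const uint64_t wanted = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
                            0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
                            0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }
    features &= wanted;   /* keep only mutually supported bits */
    if (vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }
    return 0;
}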
@@ -864,11 +826,23 @@ static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
const EventNotifier *event_notifier = &svq->hdev_kick;
int r;
+ r = event_notifier_init(&svq->hdev_kick, 0);
+ if (r != 0) {
+ error_setg_errno(errp, -r, "Couldn't create kick event notifier");
+ goto err_init_hdev_kick;
+ }
+
+ r = event_notifier_init(&svq->hdev_call, 0);
+ if (r != 0) {
+ error_setg_errno(errp, -r, "Couldn't create call event notifier");
+ goto err_init_hdev_call;
+ }
+
file.fd = event_notifier_get_fd(event_notifier);
r = vhost_vdpa_set_vring_dev_kick(dev, &file);
if (unlikely(r != 0)) {
error_setg_errno(errp, -r, "Can't set device kick fd");
- return r;
+ goto err_init_set_dev_fd;
}
event_notifier = &svq->hdev_call;
@@ -876,8 +850,18 @@ static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
r = vhost_vdpa_set_vring_dev_call(dev, &file);
if (unlikely(r != 0)) {
error_setg_errno(errp, -r, "Can't set device call fd");
+ goto err_init_set_dev_fd;
}
+ return 0;
+
+err_init_set_dev_fd:
+ event_notifier_set_handler(&svq->hdev_call, NULL);
+
+err_init_hdev_call:
+ event_notifier_cleanup(&svq->hdev_kick);
+
+err_init_hdev_kick:
return r;
}
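The notifiers created here are released either on this function's own error
ladder (labels above) or, on success, at svqs_stop() in the last hunk, so
their lifetime now matches the data plane rather than the device. The ladder
follows the usual acquire-in-order, release-in-reverse shape; a self-contained
sketch with a hypothetical name:

static int notifier_pair_init_sketch(EventNotifier *kick, EventNotifier *call)
{
    int r = event_notifier_init(kick, 0);
    if (r != 0) {
        return r;   /* nothing acquired yet */
    }
    r = event_notifier_init(call, 0);
    if (r != 0) {
        goto err_call;
    }
    return 0;

err_call:
    event_notifier_cleanup(kick);   /* release in reverse order */
    return r;
}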
@@ -899,7 +883,7 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
}
size = ROUND_UP(result->size, qemu_real_host_page_size());
- r = vhost_vdpa_dma_unmap(v, result->iova, size);
+ r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
if (unlikely(r < 0)) {
error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
return;
@@ -939,7 +923,8 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
return false;
}
- r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
+ r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
+ needle->size + 1,
(void *)(uintptr_t)needle->translated_addr,
needle->perm == IOMMU_RO);
if (unlikely(r != 0)) {
@@ -1029,7 +1014,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
Error *err = NULL;
unsigned i;
- if (!v->shadow_vqs) {
+ if (!v->shadow_vqs_enabled) {
return true;
}
@@ -1045,7 +1030,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
goto err;
}
- vhost_svq_start(svq, dev->vdev, vq);
+ vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
if (unlikely(!ok)) {
goto err_map;
@@ -1082,13 +1067,16 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
struct vhost_vdpa *v = dev->opaque;
- if (!v->shadow_vqs) {
+ if (!v->shadow_vqs_enabled) {
return;
}
for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
vhost_vdpa_svq_unmap_rings(dev, svq);
+
+ event_notifier_cleanup(&svq->hdev_kick);
+ event_notifier_cleanup(&svq->hdev_call);
}
}