Diffstat (limited to 'net/vhost-vdpa.c')
-rw-r--r-- | net/vhost-vdpa.c | 72
1 file changed, 28 insertions, 44 deletions
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index daa3842..58d7389 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -62,6 +62,7 @@ const int vdpa_feature_bits[] = {
     VIRTIO_F_RING_PACKED,
     VIRTIO_F_RING_RESET,
     VIRTIO_F_VERSION_1,
+    VIRTIO_F_IN_ORDER,
     VIRTIO_F_NOTIFICATION_DATA,
     VIRTIO_NET_F_CSUM,
     VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
@@ -87,6 +88,7 @@ const int vdpa_feature_bits[] = {
     VIRTIO_NET_F_MQ,
     VIRTIO_NET_F_MRG_RXBUF,
     VIRTIO_NET_F_MTU,
+    VIRTIO_NET_F_RSC_EXT,
     VIRTIO_NET_F_RSS,
     VIRTIO_NET_F_STATUS,
     VIRTIO_RING_F_EVENT_IDX,
@@ -222,14 +224,6 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 
-    /*
-     * If a peer NIC is attached, do not cleanup anything.
-     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
-     * when the guest is shutting down.
-     */
-    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
-        return;
-    }
     munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
     munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
     if (s->vhost_net) {
@@ -241,6 +235,7 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
         return;
     }
     qemu_close(s->vhost_vdpa.shared->device_fd);
+    g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete);
     g_free(s->vhost_vdpa.shared);
 }
 
@@ -268,6 +263,18 @@ static bool vhost_vdpa_has_ufo(NetClientState *nc)
 }
 
+/*
+ * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's
+ * reasonable to assume that h/w is LE by default, because LE is what
+ * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is
+ * LE". Otherwise, on a BE machine, higher-level code would mistakenly think
+ * the h/w is BE and can't support VDPA for a virtio 1.0 client.
+ */
+static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable)
+{
+    return 0;
+}
+
 static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                        Error **errp)
 {
@@ -356,14 +363,8 @@ static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
 
 static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
 {
-    struct vhost_vdpa *v = &s->vhost_vdpa;
-
     migration_add_notifier(&s->migration_state,
                            vdpa_net_migration_state_notifier);
-    if (v->shadow_vqs_enabled) {
-        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
-                                                   v->shared->iova_range.last);
-    }
 }
 
 static int vhost_vdpa_net_data_start(NetClientState *nc)
@@ -373,8 +374,7 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
-    if (s->always_svq ||
-        migration_is_setup_or_active()) {
+    if (s->always_svq || migration_is_running()) {
         v->shadow_vqs_enabled = true;
     } else {
         v->shadow_vqs_enabled = false;
@@ -411,19 +411,12 @@ static int vhost_vdpa_net_data_load(NetClientState *nc)
 static void vhost_vdpa_net_client_stop(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
-    struct vhost_dev *dev;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
     if (s->vhost_vdpa.index == 0) {
         migration_remove_notifier(&s->migration_state);
     }
-
-    dev = s->vhost_vdpa.dev;
-    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
-        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
-                        vhost_iova_tree_delete);
-    }
 }
 
 static NetClientInfo net_vhost_vdpa_info = {
@@ -436,6 +429,7 @@ static NetClientInfo net_vhost_vdpa_info = {
         .cleanup = vhost_vdpa_cleanup,
         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
         .has_ufo = vhost_vdpa_has_ufo,
+        .set_vnet_le = vhost_vdpa_set_vnet_le,
         .check_peer_type = vhost_vdpa_check_peer_type,
         .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
 };
@@ -509,14 +503,20 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                   bool write)
 {
     DMAMap map = {};
+    hwaddr taddr = (hwaddr)(uintptr_t)buf;
     int r;
 
-    map.translated_addr = (hwaddr)(uintptr_t)buf;
     map.size = size - 1;
     map.perm = write ? IOMMU_RW : IOMMU_RO,
-    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
+    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr);
     if (unlikely(r != IOVA_OK)) {
         error_report("Cannot map injected element");
+
+        if (map.translated_addr == taddr) {
+            error_report("Insertion to IOVA->HVA tree failed");
+            /* Remove the mapping from the IOVA-only tree */
+            goto dma_map_err;
+        }
         return r;
     }
 
@@ -588,24 +588,6 @@ out:
         return 0;
     }
 
-    /*
-     * If other vhost_vdpa already have an iova_tree, reuse it for simplicity,
-     * whether CVQ shares ASID with guest or not, because:
-     * - Memory listener need access to guest's memory addresses allocated in
-     *   the IOVA tree.
-     * - There should be plenty of IOVA address space for both ASID not to
-     *   worry about collisions between them. Guest's translations are still
-     *   validated with virtio virtqueue_pop so there is no risk for the guest
-     *   to access memory that it shouldn't.
-     *
-     * To allocate a iova tree per ASID is doable but it complicates the code
-     * and it is not worth it for the moment.
-     */
-    if (!v->shared->iova_tree) {
-        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
-                                                   v->shared->iova_range.last);
-    }
-
     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                                vhost_vdpa_net_cvq_cmd_page_len(), false);
     if (unlikely(r < 0)) {
@@ -642,7 +624,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
     VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
     int r;
 
-    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
+    r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL);
     if (unlikely(r != 0)) {
         if (unlikely(r == -ENOSPC)) {
             qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
@@ -1714,6 +1696,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
         s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
         s->vhost_vdpa.shared->iova_range = iova_range;
         s->vhost_vdpa.shared->shadow_data = svq;
+        s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first,
+                                                              iova_range.last);
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                      PROT_READ | PROT_WRITE,