author     Peter Maydell <peter.maydell@linaro.org>    2015-07-20 13:25:28 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2015-07-20 13:25:28 +0100
commit     f73ca7363440240b7ee5ee7f7ddb1c64751efb54 (patch)
tree       3a5a63dc82049cff15f105d68617be7d3b2571c1
parent     71358470eec668f5dc53def25e585ce250cea9bf (diff)
parent     f9d6dbf0bf6e91b8ed896369ab1b7e91e5a1a4df (diff)
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
virtio, vhost, pc fixes for 2.4
The only notable thing here is the vhost-user multiqueue
revert. We'll work on making it stable in 2.5;
reverting now means we won't have to maintain
bug-for-bug compatibility forever.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
# gpg: Signature made Mon Jul 20 12:24:00 2015 BST using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg: aka "Michael S. Tsirkin <mst@redhat.com>"
* remotes/mst/tags/for_upstream:
virtio-net: remove virtio queues if the guest doesn't support multiqueue
virtio-net: Flush incoming queues when DRIVER_OK is being set
pci_add_capability: remove duplicate comments
virtio-net: unbreak any layout
Revert "vhost-user: add multi queue support"
ich9: fix skipped vmstate_memhp_state subsection
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
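
For background on the "virtio-net: unbreak any layout" fix in the diff below: with VIRTIO_F_ANY_LAYOUT the guest may split the vnet header across scatter-gather elements, so the patch gathers the header into a bounce buffer (via QEMU's iov_to_buf()) before byte-swapping it, rather than assuming it sits whole in the first element. Here is a minimal standalone sketch of that gathering step; iov_gather() is a hypothetical illustration, not a QEMU API:

    #include <stdint.h>
    #include <string.h>
    #include <sys/uio.h>

    /*
     * Gather the first 'len' bytes of a scatter-gather list into 'buf',
     * even when those bytes span several iovec elements. Returns the
     * number of bytes actually copied (short if the list is too small).
     */
    static size_t iov_gather(const struct iovec *iov, unsigned int iov_cnt,
                             void *buf, size_t len)
    {
        size_t done = 0;
        unsigned int i;

        for (i = 0; i < iov_cnt && done < len; i++) {
            size_t n = iov[i].iov_len;
            if (n > len - done) {
                n = len - done;
            }
            memcpy((uint8_t *)buf + done, iov[i].iov_base, n);
            done += n;
        }
        return done;
    }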
 docs/specs/vhost-user.txt         |   5
 hw/acpi/ich9.c                    |   3
 hw/net/vhost_net.c                |   3
 hw/net/virtio-net.c               | 143
 hw/pci/pci.c                      |   6
 hw/virtio/vhost-user.c            |  11
 include/hw/virtio/virtio-access.h |   9
 net/vhost-user.c                  |  37
 qapi-schema.json                  |   6
 qemu-options.hx                   |   5
 10 files changed, 138 insertions(+), 90 deletions(-)
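
The virtio-net queue helpers in the diff below rely on a fixed virtqueue numbering: queue pair i owns rx at index 2*i and tx at 2*i+1, with the control queue always last, so N pairs expose 2*N + 1 virtqueues. A small sketch of that arithmetic (function names are illustrative, not QEMU's):

    /* Virtqueue numbering assumed by virtio_net_change_num_queues():
     * data queues come in rx/tx pairs, the control queue sits at the
     * end, so a device with N pairs exposes 2*N + 1 virtqueues. */
    static inline int vq_rx_index(int pair)        { return pair * 2; }
    static inline int vq_tx_index(int pair)        { return pair * 2 + 1; }
    static inline int vq_ctrl_index(int max_pairs) { return max_pairs * 2; }
    static inline int vq_count(int max_pairs)      { return max_pairs * 2 + 1; }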
diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
index 2c8e934..650bb18 100644
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -127,11 +127,6 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
-Multi queue support
--------------------
-The protocol supports multiple queues by setting all index fields in the sent
-messages to a properly calculated value.
-
 Message types
 -------------
 
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 5fb7a87..f04f6dc 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -206,9 +206,6 @@ const VMStateDescription vmstate_ich9_pm = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_memhp_state,
-        NULL
-    },
-    .subsections = (const VMStateDescription*[]) {
         &vmstate_tco_io_state,
         NULL
     }
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 9bd360b..5c1d11f 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -160,7 +160,6 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
 
     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
-    net->dev.vq_index = net->nc->queue_index;
 
     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type);
@@ -287,7 +286,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
             const VhostOps *vhost_ops = net->dev.vhost_ops;
             int r = vhost_ops->vhost_call(&net->dev, VHOST_RESET_OWNER,
-                                          &file);
+                                          NULL);
             assert(r >= 0);
         }
     }
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index e3c2db3..304d3dd 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -162,6 +162,8 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     virtio_net_vhost_status(n, status);
 
     for (i = 0; i < n->max_queues; i++) {
+        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
+        bool queue_started;
         q = &n->vqs[i];
 
         if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
@@ -169,12 +171,18 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
         } else {
             queue_status = status;
         }
+        queue_started =
+            virtio_net_started(n, queue_status) && !n->vhost_started;
+
+        if (queue_started) {
+            qemu_flush_queued_packets(ncs);
+        }
 
         if (!q->tx_waiting) {
             continue;
         }
 
-        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+        if (queue_started) {
             if (q->tx_timer) {
                 timer_mod(q->tx_timer,
                           qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@@ -1142,7 +1150,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         ssize_t ret, len;
         unsigned int out_num = elem.out_num;
         struct iovec *out_sg = &elem.out_sg[0];
-        struct iovec sg[VIRTQUEUE_MAX_SIZE];
+        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
+        struct virtio_net_hdr_mrg_rxbuf mhdr;
 
         if (out_num < 1) {
             error_report("virtio-net header not in first element");
@@ -1150,13 +1159,25 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         if (n->has_vnet_hdr) {
-            if (out_sg[0].iov_len < n->guest_hdr_len) {
+            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
+                n->guest_hdr_len) {
                 error_report("virtio-net header incorrect");
                 exit(1);
             }
-            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
+            if (virtio_needs_swap(vdev)) {
+                virtio_net_hdr_swap(vdev, (void *) &mhdr);
+                sg2[0].iov_base = &mhdr;
+                sg2[0].iov_len = n->guest_hdr_len;
+                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+                                   out_sg, out_num,
+                                   n->guest_hdr_len, -1);
+                if (out_num == VIRTQUEUE_MAX_SIZE) {
+                    goto drop;
+                }
+                out_num += 1;
+                out_sg = sg2;
+            }
         }
-
         /*
          * If host wants to see the guest header as is, we can
          * pass it on unchanged. Otherwise, copy just the parts
@@ -1186,7 +1207,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         len += ret;
-
+drop:
         virtqueue_push(q->tx_vq, &elem, 0);
         virtio_notify(vdev, q->tx_vq);
 
@@ -1306,9 +1327,86 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
+static void virtio_net_add_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+
+    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                              virtio_net_tx_timer,
+                                              &n->vqs[index]);
+    } else {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
+    }
+
+    n->vqs[index].tx_waiting = 0;
+    n->vqs[index].n = n;
+}
+
+static void virtio_net_del_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    VirtIONetQueue *q = &n->vqs[index];
+    NetClientState *nc = qemu_get_subqueue(n->nic, index);
+
+    qemu_purge_queued_packets(nc);
+
+    virtio_del_queue(vdev, index * 2);
+    if (q->tx_timer) {
+        timer_del(q->tx_timer);
+        timer_free(q->tx_timer);
+    } else {
+        qemu_bh_delete(q->tx_bh);
+    }
+    virtio_del_queue(vdev, index * 2 + 1);
+}
+
+static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int old_num_queues = virtio_get_num_queues(vdev);
+    int new_num_queues = new_max_queues * 2 + 1;
+    int i;
+
+    assert(old_num_queues >= 3);
+    assert(old_num_queues % 2 == 1);
+
+    if (old_num_queues == new_num_queues) {
+        return;
+    }
+
+    /*
+     * We always need to remove and add ctrl vq if
+     * old_num_queues != new_num_queues. Remove ctrl_vq first,
+     * and then we only enter one of the following two loops.
+     */
+    virtio_del_queue(vdev, old_num_queues - 1);
+
+    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
+        /* new_num_queues < old_num_queues */
+        virtio_net_del_queue(n, i / 2);
+    }
+
+    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
+        /* new_num_queues > old_num_queues */
+        virtio_net_add_queue(n, i / 2);
+    }
+
+    /* add ctrl_vq last */
+    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+}
+
 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
 {
+    int max = multiqueue ? n->max_queues : 1;
+
     n->multiqueue = multiqueue;
+    virtio_net_change_num_queues(n, max);
 
     virtio_net_set_queues(n);
 }
@@ -1583,21 +1681,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     }
 
     for (i = 0; i < n->max_queues; i++) {
-        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
-        if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
-            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                              virtio_net_tx_timer,
-                                              &n->vqs[i]);
-        } else {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
-            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
-        }
-
-        n->vqs[i].tx_waiting = 0;
-        n->vqs[i].n = n;
+        virtio_net_add_queue(n, i);
     }
 
     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
@@ -1651,7 +1735,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIONet *n = VIRTIO_NET(dev);
-    int i;
+    int i, max_queues;
 
     /* This will stop vhost backend if appropriate. */
     virtio_net_set_status(vdev, 0);
@@ -1666,18 +1750,9 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
     g_free(n->mac_table.macs);
     g_free(n->vlans);
 
-    for (i = 0; i < n->max_queues; i++) {
-        VirtIONetQueue *q = &n->vqs[i];
-        NetClientState *nc = qemu_get_subqueue(n->nic, i);
-
-        qemu_purge_queued_packets(nc);
-
-        if (q->tx_timer) {
-            timer_del(q->tx_timer);
-            timer_free(q->tx_timer);
-        } else if (q->tx_bh) {
-            qemu_bh_delete(q->tx_bh);
-        }
+    max_queues = n->multiqueue ? n->max_queues : 1;
+    for (i = 0; i < max_queues; i++) {
+        virtio_net_del_queue(n, i);
     }
 
     timer_del(n->announce_timer);
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 442f822..a017614 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -2101,12 +2101,10 @@ static void pci_del_option_rom(PCIDevice *pdev)
 }
 
 /*
- * if !offset
- * Reserve space and add capability to the linked list in pci config space
- *
  * if offset = 0,
  * Find and reserve space and add capability to the linked list
- * in pci config space */
+ * in pci config space
+ */
 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                        uint8_t offset, uint8_t size)
 {
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index d6f2163..e7ab829 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -210,12 +210,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
         break;
 
     case VHOST_SET_OWNER:
-        break;
-
     case VHOST_RESET_OWNER:
-        memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
-        msg.size = sizeof(m.state);
         break;
 
     case VHOST_SET_MEM_TABLE:
@@ -258,20 +253,17 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
    case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;
 
     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;
 
     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
-        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;
 
@@ -279,7 +271,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
@@ -321,7 +313,6 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
             error_report("Received bad msg size.");
             return -1;
         }
-        msg.state.index -= dev->vq_index;
         memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
         break;
     default:
diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h
index cee5dd7..1ec1dfd 100644
--- a/include/hw/virtio/virtio-access.h
+++ b/include/hw/virtio/virtio-access.h
@@ -143,6 +143,15 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr)
     }
 }
 
+static inline bool virtio_needs_swap(VirtIODevice *vdev)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+    return virtio_access_is_big_endian(vdev) ? false : true;
+#else
+    return virtio_access_is_big_endian(vdev) ? true : false;
+#endif
+}
+
 static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
 {
 #ifdef HOST_WORDS_BIGENDIAN
diff --git a/net/vhost-user.c b/net/vhost-user.c
index b51bc04..93dcecd 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -120,39 +120,35 @@ static void net_vhost_user_event(void *opaque, int event)
     case CHR_EVENT_OPENED:
         vhost_user_start(s);
         net_vhost_link_down(s, false);
-        error_report("chardev \"%s\" went up", s->nc.info_str);
+        error_report("chardev \"%s\" went up", s->chr->label);
         break;
     case CHR_EVENT_CLOSED:
         net_vhost_link_down(s, true);
         vhost_user_stop(s);
-        error_report("chardev \"%s\" went down", s->nc.info_str);
+        error_report("chardev \"%s\" went down", s->chr->label);
         break;
     }
 }
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                               const char *name, CharDriverState *chr,
-                               uint32_t queues)
+                               const char *name, CharDriverState *chr)
 {
     NetClientState *nc;
     VhostUserState *s;
-    int i;
 
-    for (i = 0; i < queues; i++) {
-        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
 
-        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
-                 i, chr->label);
+    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
+             chr->label);
 
-        s = DO_UPCAST(VhostUserState, nc, nc);
+    s = DO_UPCAST(VhostUserState, nc, nc);
 
-        /* We don't provide a receive callback */
-        s->nc.receive_disabled = 1;
-        s->chr = chr;
-        s->nc.queue_index = i;
+    /* We don't provide a receive callback */
+    s->nc.receive_disabled = 1;
+    s->chr = chr;
+
+    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
 
-        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
-    }
     return 0;
 }
 
@@ -230,7 +226,6 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
 int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer, Error **errp)
 {
-    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;
 
@@ -248,12 +243,6 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         return -1;
     }
 
-    /* number of queues for multiqueue */
-    if (vhost_user_opts->has_queues) {
-        queues = vhost_user_opts->queues;
-    } else {
-        queues = 1;
-    }
-
-    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
+    return net_vhost_user_init(peer, "vhost_user", name, chr);
 }
diff --git a/qapi-schema.json b/qapi-schema.json
index 1285b8c..a0a45f7 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2466,16 +2466,12 @@
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
-# @queues: #optional number of queues to be created for multiqueue vhost-user
-#          (default: 1) (Since 2.4)
-#
 # Since 2.1
 ##
 { 'struct': 'NetdevVhostUserOptions',
   'data': {
     'chardev':     'str',
-    '*vhostforce': 'bool',
-    '*queues':     'uint32' } }
+    '*vhostforce': 'bool' } }
 
 ##
 # @NetClientOptions
diff --git a/qemu-options.hx b/qemu-options.hx
index 7b8efbf..8c9add9 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1963,14 +1963,13 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
 
 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
-be created for multiqueue vhost-user.
+@var{vhostforce}.
 
 Example:
 @example
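
The virtio_needs_swap() helper added to virtio-access.h above reduces to an endianness mismatch test: a swap is needed exactly when host and device byte order disagree. A self-contained restatement of that logic; host_is_big_endian stands in for QEMU's compile-time HOST_WORDS_BIGENDIAN check:

    #include <stdbool.h>

    /* A header needs byte-swapping exactly when the host's byte order
     * and the device's (guest-visible) byte order disagree. */
    static bool needs_swap(bool host_is_big_endian, bool device_is_big_endian)
    {
        return host_is_big_endian != device_is_big_endian;
    }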