author    Alex Williamson <alex.williamson@redhat.com>    2010-09-02 09:00:57 -0600
committer Michael S. Tsirkin <mst@redhat.com>    2010-09-07 20:29:26 +0300
commit    e3f30488e5f802547b3a60e40cebaef3b4ec16a3 (patch)
tree      daaf476a94f131569113cf3bd7f2204c1d62e33e /hw/virtio-net.c
parent    f0c07c7c7b4fe4f9b63c88341fd32707def5a058 (diff)
virtio-net: Limit number of packets sent per TX flush
If virtio_net_flush_tx() is called with notification disabled, we can race with the guest, processing packets at the same rate as they get produced. The trouble is that this means we have no guaranteed exit condition from the function and can spend minutes in there. Currently flush_tx is only called with notification on, which seems to limit us to one pass through the queue per call. An upcoming patch changes this.

Also add an option to set this value on the command line, as different workloads may wish to use different values. We can't necessarily support any random value, so this is a developer option: x-txburst=

Usage:
 -device virtio-net-pci,x-txburst=64 # 64 packets per tx flush

One pass through the queue (256) seems to be a good default value, balancing latency with throughput. We use a signed int for x-txburst because 2^31 packets in a burst would take many, many minutes to process, and it allows us to easily return a negative value from virtio_net_flush_tx() to indicate a back-off or error condition.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
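The new int32_t return value gives a caller three signals: a negative value (-EBUSY) when a packet send went asynchronous and the flush backed off, a count equal to tx_burst when the burst cap was hit and packets may remain queued, and a smaller count when the queue drained. A minimal sketch of a consumer, assuming a hypothetical virtio_net_tx_reschedule() helper (the "upcoming patch" mentioned above is not part of this commit):

/* Hypothetical caller of the reworked flush; virtio_net_tx_reschedule()
 * is an assumed helper for illustration, not an existing QEMU function. */
static void virtio_net_try_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    int32_t ret = virtio_net_flush_tx(n, vq);

    if (ret == -EBUSY) {
        return; /* async send pending; virtio_net_tx_complete() resumes */
    }
    if (ret >= n->tx_burst) {
        /* Burst cap reached with work possibly left in the queue:
         * schedule another pass instead of looping here, so each
         * invocation stays bounded. */
        virtio_net_tx_reschedule(n); /* assumed helper */
    }
}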
Diffstat (limited to 'hw/virtio-net.c')
-rw-r--r--  hw/virtio-net.c | 21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index d5b03ab..55f3d94 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -37,6 +37,7 @@ typedef struct VirtIONet
     NICState *nic;
     QEMUTimer *tx_timer;
     uint32_t tx_timeout;
+    int32_t tx_burst;
     int tx_timer_active;
     uint32_t has_vnet_hdr;
     uint8_t has_ufo;
@@ -620,7 +621,7 @@ static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_
     return size;
 }
 
-static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);
+static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);
 
 static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
 {
@@ -636,16 +637,18 @@ static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
 }
 
 /* TX */
-static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
+static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 {
     VirtQueueElement elem;
+    int32_t num_packets = 0;
 
-    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
-        return;
+    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+        return num_packets;
+    }
 
     if (n->async_tx.elem.out_num) {
         virtio_queue_set_notification(n->tx_vq, 0);
-        return;
+        return num_packets;
     }
 
     while (virtqueue_pop(vq, &elem)) {
@@ -682,14 +685,19 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
             virtio_queue_set_notification(n->tx_vq, 0);
             n->async_tx.elem = elem;
             n->async_tx.len = len;
-            return;
+            return -EBUSY;
         }
 
         len += ret;
 
         virtqueue_push(vq, &elem, len);
         virtio_notify(&n->vdev, vq);
+
+        if (++num_packets >= n->tx_burst) {
+            break;
+        }
     }
+    return num_packets;
 }
 
 static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
@@ -934,6 +942,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
     n->tx_timer_active = 0;
     n->tx_timeout = net->txtimer;
+    n->tx_burst = net->txburst;
     n->mergeable_rx_bufs = 0;
     n->promisc = 1; /* for compatibility */
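The x-txburst property registration lives outside this file (the diffstat above is limited to hw/virtio-net.c). A minimal sketch of what that hookup could look like, assuming qdev's DEFINE_PROP_INT32 macro; the TX_BURST name and property placement are illustrative:

/* Assumed companion change in the virtio-net PCI device's qdev
 * property list (not part of this hunk). */
#define TX_BURST 256 /* one full pass through a 256-entry TX queue */

DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),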