Diffstat (limited to 'hw/virtio/virtio.c')
 -rw-r--r--  hw/virtio/virtio.c  342
 1 file changed, 275 insertions, 67 deletions
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 583a224..82a285a 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -30,8 +30,8 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
-#include "sysemu/dma.h"
-#include "sysemu/runstate.h"
+#include "system/dma.h"
+#include "system/runstate.h"
#include "virtio-qmp.h"
#include "standard-headers/linux/virtio_ids.h"
@@ -205,6 +205,15 @@ static const char *virtio_id_to_name(uint16_t device_id)
return name;
}
+static void virtio_check_indirect_feature(VirtIODevice *vdev)
+{
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Device %s: indirect_desc was not negotiated!\n",
+ vdev->name);
+ }
+}
+
/* Called within call_rcu(). */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
@@ -744,6 +753,60 @@ int virtio_queue_empty(VirtQueue *vq)
}
}
+static bool virtio_queue_split_poll(VirtQueue *vq, unsigned shadow_idx)
+{
+ if (unlikely(!vq->vring.avail)) {
+ return false;
+ }
+
+ return (uint16_t)shadow_idx != vring_avail_idx(vq);
+}
+
+static bool virtio_queue_packed_poll(VirtQueue *vq, unsigned shadow_idx)
+{
+ VRingPackedDesc desc;
+ VRingMemoryRegionCaches *caches;
+
+ if (unlikely(!vq->vring.desc)) {
+ return false;
+ }
+
+ caches = vring_get_region_caches(vq);
+ if (!caches) {
+ return false;
+ }
+
+ vring_packed_desc_read(vq->vdev, &desc, &caches->desc,
+ shadow_idx, true);
+
+ return is_desc_avail(desc.flags, vq->shadow_avail_wrap_counter);
+}
+
+static bool virtio_queue_poll(VirtQueue *vq, unsigned shadow_idx)
+{
+ if (virtio_device_disabled(vq->vdev)) {
+ return false;
+ }
+
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+ return virtio_queue_packed_poll(vq, shadow_idx);
+ } else {
+ return virtio_queue_split_poll(vq, shadow_idx);
+ }
+}
+
+bool virtio_queue_enable_notification_and_check(VirtQueue *vq,
+ int opaque)
+{
+ virtio_queue_set_notification(vq, 1);
+
+ if (opaque >= 0) {
+ return virtio_queue_poll(vq, (unsigned)opaque);
+ } else {
+ return false;
+ }
+}
+
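Taken together with the virtqueue_get_avail_bytes() change further down (it now returns the shadow avail index as an opaque snapshot, or -1 on error), this helper enables a race-free "process, re-enable notifications, re-check" pattern. A minimal sketch of a hypothetical caller — my_dev_handle_output() and my_dev_process_queue() are illustrative, not part of this patch:

    static void my_dev_handle_output(VirtQueue *vq)
    {
        unsigned int in_bytes, out_bytes;
        int opaque;

        do {
            /* Poll mode: suppress guest notifications while we work */
            virtio_queue_set_notification(vq, 0);

            /* Snapshot of the avail ring as of this scan (-1 on error) */
            opaque = virtqueue_get_avail_bytes(vq, &in_bytes, &out_bytes,
                                               UINT_MAX, UINT_MAX);

            my_dev_process_queue(vq);    /* hypothetical work function */

            /*
             * Re-enable notifications, then check against the snapshot:
             * true means the guest queued more buffers while notifications
             * were off, so loop instead of waiting for a kick that may
             * never come.
             */
        } while (virtio_queue_enable_notification_and_check(vq, opaque));
    }
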
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
@@ -872,6 +935,46 @@ static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
vq->used_elems[idx].ndescs = elem->ndescs;
}
+static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
+{
+ unsigned int i, steps, max_steps;
+
+ i = vq->used_idx % vq->vring.num;
+ steps = 0;
+ /*
+ * We shouldn't need to increase 'i' by more than the distance
+ * between used_idx and last_avail_idx.
+ */
+ max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num;
+
+ /* Search for element in vq->used_elems */
+ while (steps <= max_steps) {
+ /* Found element, set length and mark as filled */
+ if (vq->used_elems[i].index == elem->index) {
+ vq->used_elems[i].len = len;
+ vq->used_elems[i].in_order_filled = true;
+ break;
+ }
+
+ i += vq->used_elems[i].ndescs;
+ steps += vq->used_elems[i].ndescs;
+
+ if (i >= vq->vring.num) {
+ i -= vq->vring.num;
+ }
+ }
+
+ /*
+ * We should be able to find a matching VirtQueueElement in
+ * used_elems. If we don't, this is an error.
+ */
+ if (steps >= max_steps) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: %s cannot fill buffer id %u\n",
+ __func__, vq->vdev->name, elem->index);
+ }
+}
+
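As an illustrative walk-through of the search above (numbers are hypothetical): with vring.num = 256, used_idx = 10, last_avail_idx = 14 and four in-flight one-descriptor elements, the scan starts at slot i = 10 with max_steps = 4; each miss advances i and steps by the stepped-over descriptor count (1 here), so a matching element is found in slots 10..13, and only a guest-visible inconsistency drives steps past max_steps into the LOG_GUEST_ERROR path.
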
static void virtqueue_packed_fill_desc(VirtQueue *vq,
const VirtQueueElement *elem,
unsigned int idx,
@@ -922,7 +1025,9 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
return;
}
- if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
+ virtqueue_ordered_fill(vq, elem, len);
+ } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_fill(vq, elem, len, idx);
} else {
virtqueue_split_fill(vq, elem, len, idx);
@@ -981,6 +1086,73 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
}
}
+static void virtqueue_ordered_flush(VirtQueue *vq)
+{
+ unsigned int i = vq->used_idx % vq->vring.num;
+ unsigned int ndescs = 0;
+ uint16_t old = vq->used_idx;
+ uint16_t new;
+ bool packed;
+ VRingUsedElem uelem;
+
+ packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED);
+
+ if (packed) {
+ if (unlikely(!vq->vring.desc)) {
+ return;
+ }
+ } else if (unlikely(!vq->vring.used)) {
+ return;
+ }
+
+ /* First expected in-order element isn't ready, nothing to do */
+ if (!vq->used_elems[i].in_order_filled) {
+ return;
+ }
+
+ /* Search for filled elements in-order */
+ while (vq->used_elems[i].in_order_filled) {
+ /*
+ * First entry for packed VQs is written last so the guest
+ * doesn't see invalid descriptors.
+ */
+ if (packed && i != vq->used_idx) {
+ virtqueue_packed_fill_desc(vq, &vq->used_elems[i], ndescs, false);
+ } else if (!packed) {
+ uelem.id = vq->used_elems[i].index;
+ uelem.len = vq->used_elems[i].len;
+ vring_used_write(vq, &uelem, i);
+ }
+
+ vq->used_elems[i].in_order_filled = false;
+ ndescs += vq->used_elems[i].ndescs;
+ i += vq->used_elems[i].ndescs;
+ if (i >= vq->vring.num) {
+ i -= vq->vring.num;
+ }
+ }
+
+ if (packed) {
+ virtqueue_packed_fill_desc(vq, &vq->used_elems[vq->used_idx], 0, true);
+ vq->used_idx += ndescs;
+ if (vq->used_idx >= vq->vring.num) {
+ vq->used_idx -= vq->vring.num;
+ vq->used_wrap_counter ^= 1;
+ vq->signalled_used_valid = false;
+ }
+ } else {
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+ new = old + ndescs;
+ vring_used_idx_set(vq, new);
+ if (unlikely((int16_t)(new - vq->signalled_used) <
+ (uint16_t)(new - old))) {
+ vq->signalled_used_valid = false;
+ }
+ }
+ vq->inuse -= ndescs;
+}
+
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
if (virtio_device_disabled(vq->vdev)) {
@@ -988,7 +1160,9 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
return;
}
- if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_IN_ORDER)) {
+ virtqueue_ordered_flush(vq);
+ } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
virtqueue_packed_flush(vq, count);
} else {
virtqueue_split_flush(vq, count);
@@ -1331,9 +1505,9 @@ err:
goto done;
}
-void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
- unsigned int *out_bytes,
- unsigned max_in_bytes, unsigned max_out_bytes)
+int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes, unsigned max_in_bytes,
+ unsigned max_out_bytes)
{
uint16_t desc_size;
VRingMemoryRegionCaches *caches;
@@ -1366,7 +1540,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
caches);
}
- return;
+ return (int)vq->shadow_avail_idx;
err:
if (in_bytes) {
*in_bytes = 0;
@@ -1374,6 +1548,8 @@ err:
if (out_bytes) {
*out_bytes = 0;
}
+
+ return -1;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
@@ -1505,7 +1681,7 @@ static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_nu
static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
{
- unsigned int i, head, max;
+ unsigned int i, head, max, idx;
VRingMemoryRegionCaches *caches;
MemoryRegionCache indirect_desc_cache;
MemoryRegionCache *desc_cache;
@@ -1513,8 +1689,8 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
unsigned out_num, in_num, elem_entries;
- hwaddr addr[VIRTQUEUE_MAX_SIZE];
- struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ hwaddr QEMU_UNINITIALIZED addr[VIRTQUEUE_MAX_SIZE];
+ struct iovec QEMU_UNINITIALIZED iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
int rc;
@@ -1566,6 +1742,7 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
virtio_error(vdev, "Invalid size for indirect buffer table");
goto done;
}
+ virtio_check_indirect_feature(vdev);
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
@@ -1629,6 +1806,13 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
elem->in_sg[i] = iov[out_num + i];
}
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
+ idx = (vq->last_avail_idx - 1) % vq->vring.num;
+ vq->used_elems[idx].index = elem->index;
+ vq->used_elems[idx].len = elem->len;
+ vq->used_elems[idx].ndescs = elem->ndescs;
+ }
+
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
@@ -1652,8 +1836,8 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
unsigned out_num, in_num, elem_entries;
- hwaddr addr[VIRTQUEUE_MAX_SIZE];
- struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ hwaddr QEMU_UNINITIALIZED addr[VIRTQUEUE_MAX_SIZE];
+ struct iovec QEMU_UNINITIALIZED iov[VIRTQUEUE_MAX_SIZE];
VRingPackedDesc desc;
uint16_t id;
int rc;
@@ -1696,6 +1880,7 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
virtio_error(vdev, "Invalid size for indirect buffer table");
goto done;
}
+ virtio_check_indirect_feature(vdev);
/* loop over the indirect descriptor table */
len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
@@ -1762,6 +1947,13 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
elem->index = id;
elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
+
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_IN_ORDER)) {
+ vq->used_elems[vq->last_avail_idx].index = elem->index;
+ vq->used_elems[vq->last_avail_idx].len = elem->len;
+ vq->used_elems[vq->last_avail_idx].ndescs = elem->ndescs;
+ }
+
vq->last_avail_idx += elem->ndescs;
vq->inuse += elem->ndescs;
@@ -2040,12 +2232,12 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
trace_virtio_set_status(vdev, val);
+ int ret = 0;
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
val & VIRTIO_CONFIG_S_FEATURES_OK) {
- int ret = virtio_validate_features(vdev);
-
+ ret = virtio_validate_features(vdev);
if (ret) {
return ret;
}
@@ -2058,16 +2250,20 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
}
if (k->set_status) {
- k->set_status(vdev, val);
+ ret = k->set_status(vdev, val);
+ if (ret) {
+ qemu_log("set %s status to %d failed, old status: %d\n",
+ vdev->name, val, vdev->status);
+ }
}
vdev->status = val;
- return 0;
+ return ret;
}
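Since k->set_status can now fail, device classes may propagate backend errors instead of swallowing them. A hypothetical device-side sketch (MY_DEVICE() and my_device_start_backend() are illustrative, not from this patch):

    static int my_device_set_status(VirtIODevice *vdev, uint8_t status)
    {
        if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
            /* Report a backend start failure up through virtio_set_status() */
            return my_device_start_backend(MY_DEVICE(vdev));
        }
        return 0;
    }
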
static enum virtio_device_endian virtio_default_endian(void)
{
- if (target_words_bigendian()) {
+ if (target_big_endian()) {
return VIRTIO_DEVICE_ENDIAN_BIG;
} else {
return VIRTIO_DEVICE_ENDIAN_LITTLE;
@@ -2135,45 +2331,6 @@ void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
}
}
-void virtio_reset(void *opaque)
-{
- VirtIODevice *vdev = opaque;
- VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
- int i;
-
- virtio_set_status(vdev, 0);
- if (current_cpu) {
- /* Guest initiated reset */
- vdev->device_endian = virtio_current_cpu_endian();
- } else {
- /* System reset */
- vdev->device_endian = virtio_default_endian();
- }
-
- if (vdev->vhost_started && k->get_vhost) {
- vhost_reset_device(k->get_vhost(vdev));
- }
-
- if (k->reset) {
- k->reset(vdev);
- }
-
- vdev->start_on_kick = false;
- vdev->started = false;
- vdev->broken = false;
- vdev->guest_features = 0;
- vdev->queue_sel = 0;
- vdev->status = 0;
- vdev->disabled = false;
- qatomic_set(&vdev->isr, 0);
- vdev->config_vector = VIRTIO_NO_VECTOR;
- virtio_notify_vector(vdev, vdev->config_vector);
-
- for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
- __virtio_queue_reset(vdev, i);
- }
-}
-
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
if (!vdev->vq[n].vring.num) {
@@ -2984,6 +3141,49 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
return ret;
}
+void virtio_reset(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int i;
+
+ virtio_set_status(vdev, 0);
+ if (current_cpu) {
+ /* Guest initiated reset */
+ vdev->device_endian = virtio_current_cpu_endian();
+ } else {
+ /* System reset */
+ vdev->device_endian = virtio_default_endian();
+ }
+
+ if (k->get_vhost) {
+ struct vhost_dev *hdev = k->get_vhost(vdev);
+ /* Only reset when vhost back-end is connected */
+ if (hdev && hdev->vhost_ops) {
+ vhost_reset_device(hdev);
+ }
+ }
+
+ if (k->reset) {
+ k->reset(vdev);
+ }
+
+ vdev->start_on_kick = false;
+ vdev->started = false;
+ vdev->broken = false;
+ virtio_set_features_nocheck(vdev, 0);
+ vdev->queue_sel = 0;
+ vdev->status = 0;
+ vdev->disabled = false;
+ qatomic_set(&vdev->isr, 0);
+ vdev->config_vector = VIRTIO_NO_VECTOR;
+ virtio_notify_vector(vdev, vdev->config_vector);
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ __virtio_queue_reset(vdev, i);
+ }
+}
+
static void virtio_device_check_notification_compatibility(VirtIODevice *vdev,
Error **errp)
{
@@ -3070,6 +3270,13 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
config_len--;
}
+ if (vdc->pre_load_queues) {
+ ret = vdc->pre_load_queues(vdev);
+ if (ret) {
+ return ret;
+ }
+ }
+
num = qemu_get_be32(f);
if (num > VIRTIO_QUEUE_MAX) {
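The new vdc->pre_load_queues hook runs before the queue count is read back, letting a device ensure its queues exist before their state is restored. A hypothetical implementation sketch (the my_device_* names are illustrative, not from this patch):

    static int my_device_pre_load_queues(VirtIODevice *vdev)
    {
        /* Ensure queues are allocated before virtio_load() restores them */
        return my_device_realloc_queues(MY_DEVICE(vdev));
    }

    static void my_device_class_init(ObjectClass *klass, const void *data)
    {
        VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

        vdc->pre_load_queues = my_device_pre_load_queues;
    }
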
@@ -3227,7 +3434,7 @@ void virtio_cleanup(VirtIODevice *vdev)
qemu_del_vm_change_state_handler(vdev->vmstate);
}
-static void virtio_vmstate_change(void *opaque, bool running, RunState state)
+static int virtio_vmstate_change(void *opaque, bool running, RunState state)
{
VirtIODevice *vdev = opaque;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
@@ -3244,8 +3451,12 @@ static void virtio_vmstate_change(void *opaque, bool running, RunState state)
}
if (!backend_run) {
- virtio_set_status(vdev, vdev->status);
+ int ret = virtio_set_status(vdev, vdev->status);
+ if (ret) {
+ return ret;
+ }
}
+ return 0;
}
void virtio_instance_init_common(Object *proxy_obj, void *data,
@@ -3297,7 +3508,7 @@ void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
vdev->config = NULL;
}
vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
- virtio_vmstate_change, vdev);
+ NULL, virtio_vmstate_change, vdev);
vdev->device_endian = virtio_default_endian();
vdev->use_guest_notifier_mask = true;
}
@@ -3456,7 +3667,6 @@ static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
int n)
{
/* We don't have a reference like avail idx in shared memory */
- return;
}
static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
@@ -3481,10 +3691,9 @@ void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
{
/* used idx was updated through set_last_avail_idx() */
- return;
}
-static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
+static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n)
{
RCU_READ_LOCK_GUARD();
if (vdev->vq[n].vring.desc) {
@@ -3497,7 +3706,7 @@ void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
return virtio_queue_packed_update_used_idx(vdev, n);
} else {
- return virtio_split_packed_update_used_idx(vdev, n);
+ return virtio_queue_split_update_used_idx(vdev, n);
}
}
@@ -3820,13 +4029,12 @@ static void virtio_device_instance_finalize(Object *obj)
g_free(vdev->vector_queues);
}
-static Property virtio_properties[] = {
+static const Property virtio_properties[] = {
DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
disable_legacy_check, false),
- DEFINE_PROP_END_OF_LIST(),
};
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
@@ -3949,7 +4157,7 @@ void virtio_device_release_ioeventfd(VirtIODevice *vdev)
virtio_bus_release_ioeventfd(vbus);
}
-static void virtio_device_class_init(ObjectClass *klass, void *data)
+static void virtio_device_class_init(ObjectClass *klass, const void *data)
{
/* Set the default value here. */
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);