author     Paolo Bonzini <pbonzini@redhat.com>    2016-02-04 16:26:51 +0200
committer  Michael S. Tsirkin <mst@redhat.com>    2016-02-06 20:39:07 +0200
commit     51b19ebe4320f3dcd93cea71235c1219318ddfd2 (patch)
tree       a242566284e7efb7a45e151481b0433a3ffe39b5 /hw/display
parent     6aa46d8ff1ee7e9ca0c4a54d75c74108bee22124 (diff)
virtio: move allocation to virtqueue_pop/vring_pop
The return code of virtqueue_pop/vring_pop is unused except to check for
errors or 0.  We can thus easily move allocation inside the functions and
just return a pointer to the VirtQueueElement.

The advantage is that we will be able to allocate only the space that is
needed for the actual size of the s/g list instead of the full
VIRTQUEUE_MAX_SIZE items.  Currently VirtQueueElement takes about 48K of
memory, and this kind of allocation puts a lot of stress on malloc.  By
cutting the size by two or three orders of magnitude, malloc can use much
more efficient algorithms.

The patch is pretty large, but changes to each device are testable more or
less independently.  Splitting it would mostly add churn.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
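For orientation, a minimal sketch of the calling convention this patch
introduces, mirroring the cursor-queue hunk in the diff below; the function
name handle_queue_example is illustrative and not part of the patch.
virtqueue_pop() now allocates the element itself, returns NULL once the
queue is drained, and the caller frees the element after pushing it back:

/*
 * Sketch only, not part of this patch: the new pop/process/push/free
 * pattern used by device queue handlers after this change.
 */
static void handle_queue_example(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem;

    for (;;) {
        /* Allocation now happens inside virtqueue_pop(). */
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;              /* queue drained */
        }
        /* ... read the request from elem->out_sg, write any reply to elem->in_sg ... */
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);           /* caller owns and frees the element */
    }
}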
Diffstat (limited to 'hw/display')
-rw-r--r--  hw/display/virtio-gpu.c | 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 1cb4002..ddf3bfb 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -804,16 +804,15 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
     }
 #endif
 
-    cmd = g_new(struct virtio_gpu_ctrl_command, 1);
-    while (virtqueue_pop(vq, &cmd->elem)) {
+    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
+    while (cmd) {
         cmd->vq = vq;
         cmd->error = 0;
         cmd->finished = false;
         cmd->waiting = false;
         QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
-        cmd = g_new(struct virtio_gpu_ctrl_command, 1);
+        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
     }
-    g_free(cmd);
 
     virtio_gpu_process_cmdq(g);
 }
@@ -833,15 +832,20 @@ static void virtio_gpu_ctrl_bh(void *opaque)
 static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOGPU *g = VIRTIO_GPU(vdev);
-    VirtQueueElement elem;
+    VirtQueueElement *elem;
     size_t s;
     struct virtio_gpu_update_cursor cursor_info;
 
     if (!virtio_queue_ready(vq)) {
         return;
     }
-    while (virtqueue_pop(vq, &elem)) {
-        s = iov_to_buf(elem.out_sg, elem.out_num, 0,
+    for (;;) {
+        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+        if (!elem) {
+            break;
+        }
+
+        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                        &cursor_info, sizeof(cursor_info));
         if (s != sizeof(cursor_info)) {
             qemu_log_mask(LOG_GUEST_ERROR,
@@ -850,8 +854,9 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
         } else {
             update_cursor(g, &cursor_info);
         }
-        virtqueue_push(vq, &elem, 0);
+        virtqueue_push(vq, elem, 0);
         virtio_notify(vdev, vq);
+        g_free(elem);
     }
 }
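Where a device needs per-request state, as the ctrl-queue hunk above does
with struct virtio_gpu_ctrl_command, the caller embeds the VirtQueueElement
at the start of its own command struct and passes that struct's size to
virtqueue_pop(), so a single allocation carries both. A sketch with
hypothetical names, not copied from virtio-gpu:

/* Illustrative only: wrapping the element in a larger per-request struct. */
struct example_ctrl_command {
    VirtQueueElement elem;   /* placed first so the popped pointer is the struct */
    VirtQueue *vq;
    bool finished;
};

struct example_ctrl_command *cmd =
    virtqueue_pop(vq, sizeof(struct example_ctrl_command));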