author     Peter Maydell <peter.maydell@linaro.org>    2019-05-30 13:10:00 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2019-05-30 13:10:00 +0100
commit     95172e24051ce48237f371849498bfc1027df578 (patch)
tree       a081b5ffc86495a129e31bcdd04d92191695686b /hw
parent     62f6849e7a74fe34f5e376fe41389298100a2d1e (diff)
parent     267f664658fee4138f80013fa25354191e8091cb (diff)
Merge remote-tracking branch 'remotes/kraxel/tags/vga-20190529-pull-request' into staging
vga: add vhost-user-gpu.

# gpg: Signature made Wed 29 May 2019 05:40:02 BST
# gpg:                using RSA key 4CB6D8EED3E87138
# gpg: Good signature from "Gerd Hoffmann (work) <kraxel@redhat.com>" [full]
# gpg:                 aka "Gerd Hoffmann <gerd@kraxel.org>" [full]
# gpg:                 aka "Gerd Hoffmann (private) <kraxel@gmail.com>" [full]
# Primary key fingerprint: A032 8CFF B93A 17A7 9901 FE7D 4CB6 D8EE D3E8 7138

* remotes/kraxel/tags/vga-20190529-pull-request:
  hw/display: add vhost-user-vga & gpu-pci
  virtio-gpu: split virtio-gpu-pci & virtio-vga
  virtio-gpu: split virtio-gpu, introduce virtio-gpu-base
  spice-app: fix running when !CONFIG_OPENGL
  contrib: add vhost-user-gpu
  util: compile drm.o on posix
  virtio-gpu: add a pixman helper header
  virtio-gpu: add bswap helpers header
  vhost-user: add vhost_user_gpu_set_socket()
  virtio-gpu: add sanity check

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
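The new device moves virtio-gpu rendering into a separate backend process that talks to QEMU over a vhost-user socket plus an extra display channel. As a rough illustration of that display protocol, the following minimal, hypothetical C sketch shows how a backend might frame a VHOST_USER_GPU_SCANOUT message on the channel handed over via vhost_user_gpu_set_socket(). The struct layout, request value and 12-byte header come from the VhostUserGpuMsg definitions added in hw/display/vhost-user-gpu.c below; the helper name, the standalone structs and the simplified error handling are illustrative only and not part of this commit.

/*
 * Hypothetical backend-side sketch (not part of this commit).
 * Layout mirrors VhostUserGpuMsg in hw/display/vhost-user-gpu.c:
 * a 12-byte header (request, flags, size) followed by 'size' payload bytes.
 * Partial writes are ignored for brevity.
 */
#include <stdint.h>
#include <unistd.h>

struct scanout_payload {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} __attribute__((packed));

struct gpu_msg {
    uint32_t request;               /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size;                  /* payload size following the header */
    struct scanout_payload payload;
} __attribute__((packed));

static int send_scanout(int display_fd, uint32_t id, uint32_t w, uint32_t h)
{
    struct gpu_msg msg = {
        .request = 7,               /* VHOST_USER_GPU_SCANOUT in the enum below */
        .flags   = 0,
        .size    = sizeof(struct scanout_payload),
        .payload = { .scanout_id = id, .width = w, .height = h },
    };
    size_t len = 3 * sizeof(uint32_t) + msg.size;

    /* QEMU reads the three header words first, then 'size' payload bytes. */
    return write(display_fd, &msg, len) == (ssize_t)len ? 0 : -1;
}

On the QEMU side, vhost_user_gpu_chr_read() in the new file reads the same three header words and then the payload, dispatching cursor requests and display requests to separate handlers.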
Diffstat (limited to 'hw')
-rw-r--r--  hw/display/Kconfig                10
-rw-r--r--  hw/display/Makefile.objs           5
-rw-r--r--  hw/display/vhost-user-gpu-pci.c   51
-rw-r--r--  hw/display/vhost-user-gpu.c      607
-rw-r--r--  hw/display/vhost-user-vga.c       52
-rw-r--r--  hw/display/virtio-gpu-3d.c        49
-rw-r--r--  hw/display/virtio-gpu-base.c     268
-rw-r--r--  hw/display/virtio-gpu-pci.c       55
-rw-r--r--  hw/display/virtio-gpu.c          450
-rw-r--r--  hw/display/virtio-vga.c          138
-rw-r--r--  hw/display/virtio-vga.h           32
-rw-r--r--  hw/virtio/vhost-user.c            11
12 files changed, 1269 insertions, 459 deletions
diff --git a/hw/display/Kconfig b/hw/display/Kconfig
index dc1f113..910dccb 100644
--- a/hw/display/Kconfig
+++ b/hw/display/Kconfig
@@ -111,6 +111,16 @@ config VIRTIO_VGA
depends on VIRTIO_PCI
select VGA
+config VHOST_USER_GPU
+ bool
+ default y
+ depends on VIRTIO_GPU && VHOST_USER
+
+config VHOST_USER_VGA
+ bool
+ default y
+ depends on VIRTIO_VGA && VHOST_USER_GPU
+
config DPCD
bool
select AUX
diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs
index 650031f..a64998f 100644
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -43,9 +43,12 @@ obj-$(CONFIG_VGA) += vga.o
common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o
-obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu.o virtio-gpu-3d.o
+obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o
+obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o
obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o
+obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o
obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
+obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o
virtio-gpu.o-cflags := $(VIRGL_CFLAGS)
virtio-gpu.o-libs += $(VIRGL_LIBS)
virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS)
diff --git a/hw/display/vhost-user-gpu-pci.c b/hw/display/vhost-user-gpu-pci.c
new file mode 100644
index 0000000..7d9b1f5
--- /dev/null
+++ b/hw/display/vhost-user-gpu-pci.c
@@ -0,0 +1,51 @@
+/*
+ * vhost-user GPU PCI device
+ *
+ * Copyright Red Hat, Inc. 2018
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/virtio/virtio-gpu-pci.h"
+
+#define TYPE_VHOST_USER_GPU_PCI "vhost-user-gpu-pci"
+#define VHOST_USER_GPU_PCI(obj) \
+ OBJECT_CHECK(VhostUserGPUPCI, (obj), TYPE_VHOST_USER_GPU_PCI)
+
+typedef struct VhostUserGPUPCI {
+ VirtIOGPUPCIBase parent_obj;
+
+ VhostUserGPU vdev;
+} VhostUserGPUPCI;
+
+static void vhost_user_gpu_pci_initfn(Object *obj)
+{
+ VhostUserGPUPCI *dev = VHOST_USER_GPU_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_GPU);
+
+ VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
+
+ object_property_add_alias(obj, "chardev",
+ OBJECT(&dev->vdev), "chardev",
+ &error_abort);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_gpu_pci_info = {
+ .generic_name = TYPE_VHOST_USER_GPU_PCI,
+ .parent = TYPE_VIRTIO_GPU_PCI_BASE,
+ .instance_size = sizeof(VhostUserGPUPCI),
+ .instance_init = vhost_user_gpu_pci_initfn,
+};
+
+static void vhost_user_gpu_pci_register_types(void)
+{
+ virtio_pci_types_register(&vhost_user_gpu_pci_info);
+}
+
+type_init(vhost_user_gpu_pci_register_types)
diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
new file mode 100644
index 0000000..7181d9c
--- /dev/null
+++ b/hw/display/vhost-user-gpu.c
@@ -0,0 +1,607 @@
+/*
+ * vhost-user GPU Device
+ *
+ * Copyright Red Hat, Inc. 2018
+ *
+ * Authors:
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/virtio/virtio-gpu.h"
+#include "chardev/char-fe.h"
+#include "qapi/error.h"
+#include "migration/blocker.h"
+
+#define VHOST_USER_GPU(obj) \
+ OBJECT_CHECK(VhostUserGPU, (obj), TYPE_VHOST_USER_GPU)
+
+typedef enum VhostUserGpuRequest {
+ VHOST_USER_GPU_NONE = 0,
+ VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
+ VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
+ VHOST_USER_GPU_GET_DISPLAY_INFO,
+ VHOST_USER_GPU_CURSOR_POS,
+ VHOST_USER_GPU_CURSOR_POS_HIDE,
+ VHOST_USER_GPU_CURSOR_UPDATE,
+ VHOST_USER_GPU_SCANOUT,
+ VHOST_USER_GPU_UPDATE,
+ VHOST_USER_GPU_DMABUF_SCANOUT,
+ VHOST_USER_GPU_DMABUF_UPDATE,
+} VhostUserGpuRequest;
+
+typedef struct VhostUserGpuDisplayInfoReply {
+ struct virtio_gpu_resp_display_info info;
+} VhostUserGpuDisplayInfoReply;
+
+typedef struct VhostUserGpuCursorPos {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+} QEMU_PACKED VhostUserGpuCursorPos;
+
+typedef struct VhostUserGpuCursorUpdate {
+ VhostUserGpuCursorPos pos;
+ uint32_t hot_x;
+ uint32_t hot_y;
+ uint32_t data[64 * 64];
+} QEMU_PACKED VhostUserGpuCursorUpdate;
+
+typedef struct VhostUserGpuScanout {
+ uint32_t scanout_id;
+ uint32_t width;
+ uint32_t height;
+} QEMU_PACKED VhostUserGpuScanout;
+
+typedef struct VhostUserGpuUpdate {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint8_t data[];
+} QEMU_PACKED VhostUserGpuUpdate;
+
+typedef struct VhostUserGpuDMABUFScanout {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint32_t fd_width;
+ uint32_t fd_height;
+ uint32_t fd_stride;
+ uint32_t fd_flags;
+ int fd_drm_fourcc;
+} QEMU_PACKED VhostUserGpuDMABUFScanout;
+
+typedef struct VhostUserGpuMsg {
+ uint32_t request; /* VhostUserGpuRequest */
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+ VhostUserGpuCursorPos cursor_pos;
+ VhostUserGpuCursorUpdate cursor_update;
+ VhostUserGpuScanout scanout;
+ VhostUserGpuUpdate update;
+ VhostUserGpuDMABUFScanout dmabuf_scanout;
+ struct virtio_gpu_resp_display_info display_info;
+ uint64_t u64;
+ } payload;
+} QEMU_PACKED VhostUserGpuMsg;
+
+static VhostUserGpuMsg m __attribute__ ((unused));
+#define VHOST_USER_GPU_HDR_SIZE \
+ (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))
+
+#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
+
+static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
+
+static void
+vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
+{
+ VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
+ struct virtio_gpu_scanout *s;
+
+ if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
+ return;
+ }
+ s = &g->parent_obj.scanout[pos->scanout_id];
+
+ if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
+ VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
+ if (!s->current_cursor) {
+ s->current_cursor = cursor_alloc(64, 64);
+ }
+
+ s->current_cursor->hot_x = up->hot_x;
+ s->current_cursor->hot_y = up->hot_y;
+
+ memcpy(s->current_cursor->data, up->data,
+ 64 * 64 * sizeof(uint32_t));
+
+ dpy_cursor_define(s->con, s->current_cursor);
+ }
+
+ dpy_mouse_set(s->con, pos->x, pos->y,
+ msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
+}
+
+static void
+vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
+{
+ qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
+ VHOST_USER_GPU_HDR_SIZE + msg->size);
+}
+
+static void
+vhost_user_gpu_unblock(VhostUserGPU *g)
+{
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_DMABUF_UPDATE,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ };
+
+ vhost_user_gpu_send_msg(g, &msg);
+}
+
+static void
+vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
+{
+ QemuConsole *con = NULL;
+ struct virtio_gpu_scanout *s;
+
+ switch (msg->request) {
+ case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
+ VhostUserGpuMsg reply = {
+ .request = msg->request,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ .size = sizeof(uint64_t),
+ };
+
+ vhost_user_gpu_send_msg(g, &reply);
+ break;
+ }
+ case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
+ break;
+ }
+ case VHOST_USER_GPU_GET_DISPLAY_INFO: {
+ struct virtio_gpu_resp_display_info display_info = { {} };
+ VhostUserGpuMsg reply = {
+ .request = msg->request,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ .size = sizeof(struct virtio_gpu_resp_display_info),
+ };
+
+ display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
+ virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
+ memcpy(&reply.payload.display_info, &display_info,
+ sizeof(display_info));
+ vhost_user_gpu_send_msg(g, &reply);
+ break;
+ }
+ case VHOST_USER_GPU_SCANOUT: {
+ VhostUserGpuScanout *m = &msg->payload.scanout;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ return;
+ }
+
+ g->parent_obj.enable = 1;
+ s = &g->parent_obj.scanout[m->scanout_id];
+ con = s->con;
+
+ if (m->scanout_id == 0 && m->width == 0) {
+ s->ds = qemu_create_message_surface(640, 480,
+ "Guest disabled display.");
+ dpy_gfx_replace_surface(con, s->ds);
+ } else {
+ s->ds = qemu_create_displaysurface(m->width, m->height);
+ /* replace surface on next update */
+ }
+
+ break;
+ }
+ case VHOST_USER_GPU_DMABUF_SCANOUT: {
+ VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
+ int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
+ QemuDmaBuf *dmabuf;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ error_report("invalid scanout: %d", m->scanout_id);
+ if (fd >= 0) {
+ close(fd);
+ }
+ break;
+ }
+
+ g->parent_obj.enable = 1;
+ con = g->parent_obj.scanout[m->scanout_id].con;
+ dmabuf = &g->dmabuf[m->scanout_id];
+ if (dmabuf->fd >= 0) {
+ close(dmabuf->fd);
+ dmabuf->fd = -1;
+ }
+ if (!console_has_gl_dmabuf(con)) {
+ /* it would be nice to report that error earlier */
+ error_report("console doesn't support dmabuf!");
+ break;
+ }
+ dpy_gl_release_dmabuf(con, dmabuf);
+ if (fd == -1) {
+ dpy_gl_scanout_disable(con);
+ break;
+ }
+ *dmabuf = (QemuDmaBuf) {
+ .fd = fd,
+ .width = m->fd_width,
+ .height = m->fd_height,
+ .stride = m->fd_stride,
+ .fourcc = m->fd_drm_fourcc,
+ .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
+ };
+ dpy_gl_scanout_dmabuf(con, dmabuf);
+ break;
+ }
+ case VHOST_USER_GPU_DMABUF_UPDATE: {
+ VhostUserGpuUpdate *m = &msg->payload.update;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
+ !g->parent_obj.scanout[m->scanout_id].con) {
+ error_report("invalid scanout update: %d", m->scanout_id);
+ vhost_user_gpu_unblock(g);
+ break;
+ }
+
+ con = g->parent_obj.scanout[m->scanout_id].con;
+ if (!console_has_gl(con)) {
+ error_report("console doesn't support GL!");
+ vhost_user_gpu_unblock(g);
+ break;
+ }
+ dpy_gl_update(con, m->x, m->y, m->width, m->height);
+ g->backend_blocked = true;
+ break;
+ }
+ case VHOST_USER_GPU_UPDATE: {
+ VhostUserGpuUpdate *m = &msg->payload.update;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ break;
+ }
+ s = &g->parent_obj.scanout[m->scanout_id];
+ con = s->con;
+ pixman_image_t *image =
+ pixman_image_create_bits(PIXMAN_x8r8g8b8,
+ m->width,
+ m->height,
+ (uint32_t *)m->data,
+ m->width * 4);
+
+ pixman_image_composite(PIXMAN_OP_SRC,
+ image, NULL, s->ds->image,
+ 0, 0, 0, 0, m->x, m->y, m->width, m->height);
+
+ pixman_image_unref(image);
+ if (qemu_console_surface(con) != s->ds) {
+ dpy_gfx_replace_surface(con, s->ds);
+ } else {
+ dpy_gfx_update(con, m->x, m->y, m->width, m->height);
+ }
+ break;
+ }
+ default:
+ g_warning("unhandled message %d %d", msg->request, msg->size);
+ }
+
+ if (con && qemu_console_is_gl_blocked(con)) {
+ vhost_user_gpu_update_blocked(g, true);
+ }
+}
+
+static void
+vhost_user_gpu_chr_read(void *opaque)
+{
+ VhostUserGPU *g = opaque;
+ VhostUserGpuMsg *msg = NULL;
+ VhostUserGpuRequest request;
+ uint32_t size, flags;
+ int r;
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&request, sizeof(uint32_t));
+ if (r != sizeof(uint32_t)) {
+ error_report("failed to read msg header: %d, %d", r, errno);
+ goto end;
+ }
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&flags, sizeof(uint32_t));
+ if (r != sizeof(uint32_t)) {
+ error_report("failed to read msg flags");
+ goto end;
+ }
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&size, sizeof(uint32_t));
+ if (r != sizeof(uint32_t)) {
+ error_report("failed to read msg size");
+ goto end;
+ }
+
+ msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);
+ g_return_if_fail(msg != NULL);
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&msg->payload, size);
+ if (r != size) {
+ error_report("failed to read msg payload %d != %d", r, size);
+ goto end;
+ }
+
+ msg->request = request;
+    msg->flags = flags;
+ msg->size = size;
+
+ if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
+ request == VHOST_USER_GPU_CURSOR_POS ||
+ request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
+ vhost_user_gpu_handle_cursor(g, msg);
+ } else {
+ vhost_user_gpu_handle_display(g, msg);
+ }
+
+end:
+ g_free(msg);
+}
+
+static void
+vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
+{
+ qemu_set_fd_handler(g->vhost_gpu_fd,
+ blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
+}
+
+static void
+vhost_user_gpu_gl_unblock(VirtIOGPUBase *b)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(b);
+
+ if (g->backend_blocked) {
+ vhost_user_gpu_unblock(VHOST_USER_GPU(g));
+ g->backend_blocked = false;
+ }
+
+ vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
+}
+
+static bool
+vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
+{
+ Chardev *chr;
+ int sv[2];
+
+ if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+ error_setg_errno(errp, errno, "socketpair() failed");
+ return false;
+ }
+
+ chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
+ if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
+ error_setg(errp, "Failed to make socket chardev");
+ goto err;
+ }
+ if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
+ goto err;
+ }
+ if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
+ error_setg(errp, "Failed to set vhost-user-gpu socket");
+ qemu_chr_fe_deinit(&g->vhost_chr, false);
+ goto err;
+ }
+
+ g->vhost_gpu_fd = sv[0];
+ vhost_user_gpu_update_blocked(g, false);
+ close(sv[1]);
+ return true;
+
+err:
+ close(sv[0]);
+ close(sv[1]);
+ if (chr) {
+ object_unref(OBJECT(chr));
+ }
+ return false;
+}
+
+static void
+vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
+ struct virtio_gpu_config *vgconfig =
+ (struct virtio_gpu_config *)config_data;
+ int ret;
+
+ memset(config_data, 0, sizeof(struct virtio_gpu_config));
+
+ ret = vhost_dev_get_config(&g->vhost->dev,
+ config_data, sizeof(struct virtio_gpu_config));
+ if (ret) {
+ error_report("vhost-user-gpu: get device config space failed");
+ return;
+ }
+
+ /* those fields are managed by qemu */
+ vgconfig->num_scanouts = b->virtio_config.num_scanouts;
+ vgconfig->events_read = b->virtio_config.events_read;
+ vgconfig->events_clear = b->virtio_config.events_clear;
+}
+
+static void
+vhost_user_gpu_set_config(VirtIODevice *vdev,
+ const uint8_t *config_data)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
+ const struct virtio_gpu_config *vgconfig =
+ (const struct virtio_gpu_config *)config_data;
+ int ret;
+
+ if (vgconfig->events_clear) {
+ b->virtio_config.events_read &= ~vgconfig->events_clear;
+ }
+
+ ret = vhost_dev_set_config(&g->vhost->dev, config_data,
+ 0, sizeof(struct virtio_gpu_config),
+ VHOST_SET_CONFIG_TYPE_MASTER);
+ if (ret) {
+ error_report("vhost-user-gpu: set device config space failed");
+ return;
+ }
+}
+
+static void
+vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+ Error *err = NULL;
+
+ if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
+ if (!vhost_user_gpu_do_set_socket(g, &err)) {
+ error_report_err(err);
+ return;
+ }
+ vhost_user_backend_start(g->vhost);
+ } else {
+ /* unblock any wait and stop processing */
+ if (g->vhost_gpu_fd != -1) {
+ vhost_user_gpu_update_blocked(g, true);
+ qemu_chr_fe_deinit(&g->vhost_chr, true);
+ g->vhost_gpu_fd = -1;
+ }
+ vhost_user_backend_stop(g->vhost);
+ }
+}
+
+static bool
+vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+
+ return vhost_virtqueue_pending(&g->vhost->dev, idx);
+}
+
+static void
+vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+
+ vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
+}
+
+static void
+vhost_user_gpu_instance_init(Object *obj)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(obj);
+
+ g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
+ object_property_add_alias(obj, "chardev",
+ OBJECT(g->vhost), "chardev", &error_abort);
+}
+
+static void
+vhost_user_gpu_instance_finalize(Object *obj)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(obj);
+
+ object_unref(OBJECT(g->vhost));
+}
+
+static void
+vhost_user_gpu_reset(VirtIODevice *vdev)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+
+ virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
+
+ vhost_user_backend_stop(g->vhost);
+}
+
+static int
+vhost_user_gpu_config_change(struct vhost_dev *dev)
+{
+ error_report("vhost-user-gpu: unhandled backend config change");
+ return -1;
+}
+
+static const VhostDevConfigOps config_ops = {
+ .vhost_dev_config_notifier = vhost_user_gpu_config_change,
+};
+
+static void
+vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(qdev);
+ VirtIODevice *vdev = VIRTIO_DEVICE(g);
+
+ vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
+ if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
+ return;
+ }
+
+ if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
+ g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
+ }
+
+ if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
+ return;
+ }
+
+ g->vhost_gpu_fd = -1;
+}
+
+static Property vhost_user_gpu_properties[] = {
+ VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void
+vhost_user_gpu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);
+
+ vgc->gl_unblock = vhost_user_gpu_gl_unblock;
+
+ vdc->realize = vhost_user_gpu_device_realize;
+ vdc->reset = vhost_user_gpu_reset;
+ vdc->set_status = vhost_user_gpu_set_status;
+ vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
+ vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
+ vdc->get_config = vhost_user_gpu_get_config;
+ vdc->set_config = vhost_user_gpu_set_config;
+
+ dc->props = vhost_user_gpu_properties;
+}
+
+static const TypeInfo vhost_user_gpu_info = {
+ .name = TYPE_VHOST_USER_GPU,
+ .parent = TYPE_VIRTIO_GPU_BASE,
+ .instance_size = sizeof(VhostUserGPU),
+ .instance_init = vhost_user_gpu_instance_init,
+ .instance_finalize = vhost_user_gpu_instance_finalize,
+ .class_init = vhost_user_gpu_class_init,
+};
+
+static void vhost_user_gpu_register_types(void)
+{
+ type_register_static(&vhost_user_gpu_info);
+}
+
+type_init(vhost_user_gpu_register_types)
diff --git a/hw/display/vhost-user-vga.c b/hw/display/vhost-user-vga.c
new file mode 100644
index 0000000..a719527
--- /dev/null
+++ b/hw/display/vhost-user-vga.c
@@ -0,0 +1,52 @@
+/*
+ * vhost-user VGA device
+ *
+ * Copyright Red Hat, Inc. 2018
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "virtio-vga.h"
+
+#define TYPE_VHOST_USER_VGA "vhost-user-vga"
+
+#define VHOST_USER_VGA(obj) \
+ OBJECT_CHECK(VhostUserVGA, (obj), TYPE_VHOST_USER_VGA)
+
+typedef struct VhostUserVGA {
+ VirtIOVGABase parent_obj;
+
+ VhostUserGPU vdev;
+} VhostUserVGA;
+
+static void vhost_user_vga_inst_initfn(Object *obj)
+{
+ VhostUserVGA *dev = VHOST_USER_VGA(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_GPU);
+
+ VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
+
+ object_property_add_alias(obj, "chardev",
+ OBJECT(&dev->vdev), "chardev",
+ &error_abort);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_vga_info = {
+ .generic_name = TYPE_VHOST_USER_VGA,
+ .parent = TYPE_VIRTIO_VGA_BASE,
+ .instance_size = sizeof(struct VhostUserVGA),
+ .instance_init = vhost_user_vga_inst_initfn,
+};
+
+static void vhost_user_vga_register_types(void)
+{
+ virtio_pci_types_register(&vhost_user_vga_info);
+}
+
+type_init(vhost_user_vga_register_types)
diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c
index 2d30252..b918167 100644
--- a/hw/display/virtio-gpu-3d.c
+++ b/hw/display/virtio-gpu-3d.c
@@ -118,11 +118,11 @@ static void virgl_cmd_context_destroy(VirtIOGPU *g,
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
int width, int height)
{
- if (!g->scanout[idx].con) {
+ if (!g->parent_obj.scanout[idx].con) {
return;
}
- dpy_gl_update(g->scanout[idx].con, x, y, width, height);
+ dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}
static void virgl_cmd_resource_flush(VirtIOGPU *g,
@@ -135,8 +135,8 @@ static void virgl_cmd_resource_flush(VirtIOGPU *g,
trace_virtio_gpu_cmd_res_flush(rf.resource_id,
rf.r.width, rf.r.height, rf.r.x, rf.r.y);
- for (i = 0; i < g->conf.max_outputs; i++) {
- if (g->scanout[i].resource_id != rf.resource_id) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
+ if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
continue;
}
virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
@@ -154,13 +154,13 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
- if (ss.scanout_id >= g->conf.max_outputs) {
+ if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
- g->enable = 1;
+ g->parent_obj.enable = 1;
memset(&info, 0, sizeof(info));
@@ -173,20 +173,22 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
- qemu_console_resize(g->scanout[ss.scanout_id].con,
+ qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
ss.r.width, ss.r.height);
virgl_renderer_force_ctx_0();
- dpy_gl_scanout_texture(g->scanout[ss.scanout_id].con, info.tex_id,
- info.flags & 1 /* FIXME: Y_0_TOP */,
- info.width, info.height,
- ss.r.x, ss.r.y, ss.r.width, ss.r.height);
+ dpy_gl_scanout_texture(
+ g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
+ info.flags & 1 /* FIXME: Y_0_TOP */,
+ info.width, info.height,
+ ss.r.x, ss.r.y, ss.r.width, ss.r.height);
} else {
if (ss.scanout_id != 0) {
- dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
+ dpy_gfx_replace_surface(
+ g->parent_obj.scanout[ss.scanout_id].con, NULL);
}
- dpy_gl_scanout_disable(g->scanout[ss.scanout_id].con);
+ dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
}
- g->scanout[ss.scanout_id].resource_id = ss.resource_id;
+ g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}
static void virgl_cmd_submit_3d(VirtIOGPU *g,
@@ -209,7 +211,7 @@ static void virgl_cmd_submit_3d(VirtIOGPU *g,
goto out;
}
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
g->stats.req_3d++;
g->stats.bytes_3d += cs.size;
}
@@ -507,7 +509,7 @@ static void virgl_write_fence(void *opaque, uint32_t fence)
QTAILQ_REMOVE(&g->fenceq, cmd, next);
g_free(cmd);
g->inflight--;
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
}
}
@@ -524,7 +526,7 @@ virgl_create_context(void *opaque, int scanout_idx,
qparams.major_ver = params->major_ver;
qparams.minor_ver = params->minor_ver;
- ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
+ ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
return (virgl_renderer_gl_context)ctx;
}
@@ -533,7 +535,7 @@ static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
VirtIOGPU *g = opaque;
QEMUGLContext qctx = (QEMUGLContext)ctx;
- dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
+ dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}
static int virgl_make_context_current(void *opaque, int scanout_idx,
@@ -542,7 +544,8 @@ static int virgl_make_context_current(void *opaque, int scanout_idx,
VirtIOGPU *g = opaque;
QEMUGLContext qctx = (QEMUGLContext)ctx;
- return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
+ return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
+ qctx);
}
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
@@ -594,11 +597,11 @@ void virtio_gpu_virgl_reset(VirtIOGPU *g)
int i;
/* virgl_renderer_reset() ??? */
- for (i = 0; i < g->conf.max_outputs; i++) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
if (i != 0) {
- dpy_gfx_replace_surface(g->scanout[i].con, NULL);
+ dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
}
- dpy_gl_scanout_disable(g->scanout[i].con);
+ dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
}
}
@@ -614,7 +617,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
virtio_gpu_fence_poll, g);
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
virtio_gpu_print_stats, g);
timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c
new file mode 100644
index 0000000..55e0799
--- /dev/null
+++ b/hw/display/virtio-gpu-base.c
@@ -0,0 +1,268 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/virtio/virtio-gpu.h"
+#include "migration/blocker.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+
+void
+virtio_gpu_base_reset(VirtIOGPUBase *g)
+{
+ int i;
+
+ g->enable = 0;
+ g->use_virgl_renderer = false;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ g->scanout[i].resource_id = 0;
+ g->scanout[i].width = 0;
+ g->scanout[i].height = 0;
+ g->scanout[i].x = 0;
+ g->scanout[i].y = 0;
+ g->scanout[i].ds = NULL;
+ }
+}
+
+void
+virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
+ struct virtio_gpu_resp_display_info *dpy_info)
+{
+ int i;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ if (g->enabled_output_bitmask & (1 << i)) {
+ dpy_info->pmodes[i].enabled = 1;
+ dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
+ dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
+ }
+ }
+}
+
+static void virtio_gpu_invalidate_display(void *opaque)
+{
+}
+
+static void virtio_gpu_update_display(void *opaque)
+{
+}
+
+static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
+{
+}
+
+static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
+{
+ g->virtio_config.events_read |= event_type;
+ virtio_notify_config(&g->parent_obj);
+}
+
+static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+{
+ VirtIOGPUBase *g = opaque;
+
+ if (idx >= g->conf.max_outputs) {
+ return -1;
+ }
+
+ g->req_state[idx].x = info->xoff;
+ g->req_state[idx].y = info->yoff;
+ g->req_state[idx].width = info->width;
+ g->req_state[idx].height = info->height;
+
+ if (info->width && info->height) {
+ g->enabled_output_bitmask |= (1 << idx);
+ } else {
+ g->enabled_output_bitmask &= ~(1 << idx);
+ }
+
+ /* send event to guest */
+ virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
+ return 0;
+}
+
+static void
+virtio_gpu_gl_block(void *opaque, bool block)
+{
+ VirtIOGPUBase *g = opaque;
+ VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);
+
+ if (block) {
+ g->renderer_blocked++;
+ } else {
+ g->renderer_blocked--;
+ }
+ assert(g->renderer_blocked >= 0);
+
+ if (g->renderer_blocked == 0) {
+ vgc->gl_unblock(g);
+ }
+}
+
+const GraphicHwOps virtio_gpu_ops = {
+ .invalidate = virtio_gpu_invalidate_display,
+ .gfx_update = virtio_gpu_update_display,
+ .text_update = virtio_gpu_text_update,
+ .ui_info = virtio_gpu_ui_info,
+ .gl_block = virtio_gpu_gl_block,
+};
+
+bool
+virtio_gpu_base_device_realize(DeviceState *qdev,
+ VirtIOHandleOutput ctrl_cb,
+ VirtIOHandleOutput cursor_cb,
+ Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
+ Error *local_err = NULL;
+ int i;
+
+ if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
+ error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
+ return false;
+ }
+
+ g->use_virgl_renderer = false;
+ if (virtio_gpu_virgl_enabled(g->conf)) {
+ error_setg(&g->migration_blocker, "virgl is not yet migratable");
+ migrate_add_blocker(g->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(g->migration_blocker);
+ return false;
+ }
+ }
+
+ g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
+ virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
+ sizeof(struct virtio_gpu_config));
+
+ if (virtio_gpu_virgl_enabled(g->conf)) {
+ /* use larger control queue in 3d mode */
+ virtio_add_queue(vdev, 256, ctrl_cb);
+ virtio_add_queue(vdev, 16, cursor_cb);
+ } else {
+ virtio_add_queue(vdev, 64, ctrl_cb);
+ virtio_add_queue(vdev, 16, cursor_cb);
+ }
+
+ g->enabled_output_bitmask = 1;
+
+ g->req_state[0].width = g->conf.xres;
+ g->req_state[0].height = g->conf.yres;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ g->scanout[i].con =
+ graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
+ if (i > 0) {
+ dpy_gfx_replace_surface(g->scanout[i].con, NULL);
+ }
+ }
+
+ return true;
+}
+
+static uint64_t
+virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+
+ if (virtio_gpu_virgl_enabled(g->conf)) {
+ features |= (1 << VIRTIO_GPU_F_VIRGL);
+ }
+ if (virtio_gpu_edid_enabled(g->conf)) {
+ features |= (1 << VIRTIO_GPU_F_EDID);
+ }
+
+ return features;
+}
+
+static void
+virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
+{
+ static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+
+ g->use_virgl_renderer = ((features & virgl) == virgl);
+ trace_virtio_gpu_features(g->use_virgl_renderer);
+}
+
+static void
+virtio_gpu_base_device_unrealize(DeviceState *qdev, Error **errp)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
+
+ if (g->migration_blocker) {
+ migrate_del_blocker(g->migration_blocker);
+ error_free(g->migration_blocker);
+ }
+}
+
+static void
+virtio_gpu_base_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ vdc->unrealize = virtio_gpu_base_device_unrealize;
+ vdc->get_features = virtio_gpu_base_get_features;
+ vdc->set_features = virtio_gpu_base_set_features;
+
+ set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo virtio_gpu_base_info = {
+ .name = TYPE_VIRTIO_GPU_BASE,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIOGPUBase),
+ .class_size = sizeof(VirtIOGPUBaseClass),
+ .class_init = virtio_gpu_base_class_init,
+ .abstract = true
+};
+
+static void
+virtio_register_types(void)
+{
+ type_register_static(&virtio_gpu_base_info);
+}
+
+type_init(virtio_register_types)
+
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);
+
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);
diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c
index 0bc4d9d..206870c 100644
--- a/hw/display/virtio-gpu-pci.c
+++ b/hw/display/virtio-gpu-pci.c
@@ -16,33 +16,18 @@
#include "hw/pci/pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-pci.h"
-#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-pci.h"
-typedef struct VirtIOGPUPCI VirtIOGPUPCI;
-
-/*
- * virtio-gpu-pci: This extends VirtioPCIProxy.
- */
-#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
-#define VIRTIO_GPU_PCI(obj) \
- OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
-
-struct VirtIOGPUPCI {
- VirtIOPCIProxy parent_obj;
- VirtIOGPU vdev;
-};
-
-static Property virtio_gpu_pci_properties[] = {
+static Property virtio_gpu_pci_base_properties[] = {
DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
- VirtIOGPUPCI *vgpu = VIRTIO_GPU_PCI(vpci_dev);
- VirtIOGPU *g = &vgpu->vdev;
- DeviceState *vdev = DEVICE(&vgpu->vdev);
+ VirtIOGPUPCIBase *vgpu = VIRTIO_GPU_PCI_BASE(vpci_dev);
+ VirtIOGPUBase *g = vgpu->vgpu;
+ DeviceState *vdev = DEVICE(g);
int i;
Error *local_error = NULL;
@@ -64,36 +49,56 @@ static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
}
}
-static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_gpu_pci_base_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
- dc->props = virtio_gpu_pci_properties;
+ dc->props = virtio_gpu_pci_base_properties;
dc->hotpluggable = false;
- k->realize = virtio_gpu_pci_realize;
+ k->realize = virtio_gpu_pci_base_realize;
pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER;
}
+static const TypeInfo virtio_gpu_pci_base_info = {
+ .name = TYPE_VIRTIO_GPU_PCI_BASE,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOGPUPCIBase),
+ .class_init = virtio_gpu_pci_base_class_init,
+ .abstract = true
+};
+
+#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
+#define VIRTIO_GPU_PCI(obj) \
+ OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
+
+typedef struct VirtIOGPUPCI {
+ VirtIOGPUPCIBase parent_obj;
+ VirtIOGPU vdev;
+} VirtIOGPUPCI;
+
static void virtio_gpu_initfn(Object *obj)
{
VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj);
virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
TYPE_VIRTIO_GPU);
+ VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}
static const VirtioPCIDeviceTypeInfo virtio_gpu_pci_info = {
.generic_name = TYPE_VIRTIO_GPU_PCI,
+ .parent = TYPE_VIRTIO_GPU_PCI_BASE,
.instance_size = sizeof(VirtIOGPUPCI),
.instance_init = virtio_gpu_initfn,
- .class_init = virtio_gpu_pci_class_init,
};
static void virtio_gpu_pci_register_types(void)
{
+ type_register_static(&virtio_gpu_pci_base_info);
virtio_pci_types_register(&virtio_gpu_pci_info);
}
+
type_init(virtio_gpu_pci_register_types)
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 9e37e0a..4a49da5 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -20,11 +20,13 @@
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-bswap.h"
+#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
-#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#define VIRTIO_GPU_VM_VERSION 1
@@ -34,53 +36,11 @@ virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
struct virtio_gpu_simple_resource *res);
-static void
-virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
-{
- le32_to_cpus(&hdr->type);
- le32_to_cpus(&hdr->flags);
- le64_to_cpus(&hdr->fence_id);
- le32_to_cpus(&hdr->ctx_id);
- le32_to_cpus(&hdr->padding);
-}
-
-static void virtio_gpu_bswap_32(void *ptr,
- size_t size)
-{
-#ifdef HOST_WORDS_BIGENDIAN
-
- size_t i;
- struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;
-
- virtio_gpu_ctrl_hdr_bswap(hdr);
-
- i = sizeof(struct virtio_gpu_ctrl_hdr);
- while (i < size) {
- le32_to_cpus((uint32_t *)(ptr + i));
- i = i + sizeof(uint32_t);
- }
-
-#endif
-}
-
-static void
-virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
-{
- virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
- le32_to_cpus(&t2d->r.x);
- le32_to_cpus(&t2d->r.y);
- le32_to_cpus(&t2d->r.width);
- le32_to_cpus(&t2d->r.height);
- le64_to_cpus(&t2d->offset);
- le32_to_cpus(&t2d->resource_id);
- le32_to_cpus(&t2d->padding);
-}
-
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...) \
do { \
- if (_g->use_virgl_renderer) { \
+ if (_g->parent_obj.use_virgl_renderer) { \
_virgl(__VA_ARGS__); \
} else { \
_simple(__VA_ARGS__); \
@@ -148,10 +108,10 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
struct virtio_gpu_scanout *s;
bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
- if (cursor->pos.scanout_id >= g->conf.max_outputs) {
+ if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
return;
}
- s = &g->scanout[cursor->pos.scanout_id];
+ s = &g->parent_obj.scanout[cursor->pos.scanout_id];
trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
cursor->pos.x,
@@ -182,53 +142,6 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
cursor->resource_id ? 1 : 0);
}
-static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
-{
- VirtIOGPU *g = VIRTIO_GPU(vdev);
- memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
-}
-
-static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
-{
- VirtIOGPU *g = VIRTIO_GPU(vdev);
- struct virtio_gpu_config vgconfig;
-
- memcpy(&vgconfig, config, sizeof(g->virtio_config));
-
- if (vgconfig.events_clear) {
- g->virtio_config.events_read &= ~vgconfig.events_clear;
- }
-}
-
-static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
- Error **errp)
-{
- VirtIOGPU *g = VIRTIO_GPU(vdev);
-
- if (virtio_gpu_virgl_enabled(g->conf)) {
- features |= (1 << VIRTIO_GPU_F_VIRGL);
- }
- if (virtio_gpu_edid_enabled(g->conf)) {
- features |= (1 << VIRTIO_GPU_F_EDID);
- }
- return features;
-}
-
-static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
-{
- static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
- VirtIOGPU *g = VIRTIO_GPU(vdev);
-
- g->use_virgl_renderer = ((features & virgl) == virgl);
- trace_virtio_gpu_features(g->use_virgl_renderer);
-}
-
-static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
-{
- g->virtio_config.events_read |= event_type;
- virtio_notify_config(&g->parent_obj);
-}
-
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
@@ -277,21 +190,6 @@ void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
-static void
-virtio_gpu_fill_display_info(VirtIOGPU *g,
- struct virtio_gpu_resp_display_info *dpy_info)
-{
- int i;
-
- for (i = 0; i < g->conf.max_outputs; i++) {
- if (g->enabled_output_bitmask & (1 << i)) {
- dpy_info->pmodes[i].enabled = 1;
- dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
- dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
- }
- }
-}
-
void virtio_gpu_get_display_info(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -300,7 +198,7 @@ void virtio_gpu_get_display_info(VirtIOGPU *g,
trace_virtio_gpu_cmd_get_display_info();
memset(&display_info, 0, sizeof(display_info));
display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
- virtio_gpu_fill_display_info(g, &display_info);
+ virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
sizeof(display_info));
}
@@ -309,9 +207,10 @@ static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
struct virtio_gpu_resp_edid *edid)
{
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
qemu_edid_info info = {
- .prefx = g->req_state[scanout].width,
- .prefy = g->req_state[scanout].height,
+ .prefx = b->req_state[scanout].width,
+ .prefy = b->req_state[scanout].height,
};
edid->size = cpu_to_le32(sizeof(edid->edid));
@@ -323,11 +222,12 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
{
struct virtio_gpu_resp_edid edid;
struct virtio_gpu_cmd_get_edid get_edid;
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
VIRTIO_GPU_FILL_CMD(get_edid);
virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
- if (get_edid.scanout >= g->conf.max_outputs) {
+ if (get_edid.scanout >= b->conf.max_outputs) {
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
@@ -339,30 +239,6 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
-static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
-{
- switch (virtio_gpu_format) {
- case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
- return PIXMAN_BE_b8g8r8x8;
- case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
- return PIXMAN_BE_b8g8r8a8;
- case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
- return PIXMAN_BE_x8r8g8b8;
- case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
- return PIXMAN_BE_a8r8g8b8;
- case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
- return PIXMAN_BE_r8g8b8x8;
- case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
- return PIXMAN_BE_r8g8b8a8;
- case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
- return PIXMAN_BE_x8b8g8r8;
- case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
- return PIXMAN_BE_a8b8g8r8;
- default:
- return 0;
- }
-}
-
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
uint32_t width, uint32_t height)
{
@@ -409,7 +285,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
res->format = c2d.format;
res->resource_id = c2d.resource_id;
- pformat = get_pixman_format(c2d.format);
+ pformat = virtio_gpu_get_pixman_format(c2d.format);
if (!pformat) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: host couldn't handle guest format %d\n",
@@ -420,7 +296,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
}
res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
- if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
+ if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
res->image = pixman_image_create_bits(pformat,
c2d.width,
c2d.height,
@@ -442,7 +318,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
- struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
+ struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
struct virtio_gpu_simple_resource *res;
DisplaySurface *ds = NULL;
@@ -474,7 +350,7 @@ static void virtio_gpu_resource_destroy(VirtIOGPU *g,
int i;
if (res->scanout_bitmask) {
- for (i = 0; i < g->conf.max_outputs; i++) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
if (res->scanout_bitmask & (1 << i)) {
virtio_gpu_disable_scanout(g, i);
}
@@ -604,7 +480,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
pixman_region_init_rect(&flush_region,
rf.r.x, rf.r.y, rf.r.width, rf.r.height);
- for (i = 0; i < g->conf.max_outputs; i++) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
struct virtio_gpu_scanout *scanout;
pixman_region16_t region, finalregion;
pixman_box16_t *extents;
@@ -612,7 +488,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
if (!(res->scanout_bitmask & (1 << i))) {
continue;
}
- scanout = &g->scanout[i];
+ scanout = &g->parent_obj.scanout[i];
pixman_region_init(&finalregion);
pixman_region_init_rect(&region, scanout->x, scanout->y,
@@ -622,7 +498,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
extents = pixman_region_extents(&finalregion);
/* work out the area we need to update for each console */
- dpy_gfx_update(g->scanout[i].con,
+ dpy_gfx_update(g->parent_obj.scanout[i].con,
extents->x1, extents->y1,
extents->x2 - extents->x1,
extents->y2 - extents->y1);
@@ -653,14 +529,14 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
- if (ss.scanout_id >= g->conf.max_outputs) {
+ if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
- g->enable = 1;
+ g->parent_obj.enable = 1;
if (ss.resource_id == 0) {
virtio_gpu_disable_scanout(g, ss.scanout_id);
return;
@@ -677,6 +553,8 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
if (ss.r.x > res->width ||
ss.r.y > res->height ||
+ ss.r.width < 16 ||
+ ss.r.height < 16 ||
ss.r.width > res->width ||
ss.r.height > res->height ||
ss.r.x + ss.r.width > res->width ||
@@ -689,7 +567,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
return;
}
- scanout = &g->scanout[ss.scanout_id];
+ scanout = &g->parent_obj.scanout[ss.scanout_id];
format = pixman_image_get_format(res->image);
bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
@@ -712,7 +590,8 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
return;
}
pixman_image_unref(rect);
- dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
+ dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
+ scanout->ds);
}
ores = virtio_gpu_find_resource(g, scanout->resource_id);
@@ -930,7 +809,7 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
while (!QTAILQ_EMPTY(&g->cmdq)) {
cmd = QTAILQ_FIRST(&g->cmdq);
- if (g->renderer_blocked) {
+ if (g->parent_obj.renderer_blocked) {
break;
}
@@ -939,14 +818,14 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
g, cmd);
QTAILQ_REMOVE(&g->cmdq, cmd, next);
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
g->stats.requests++;
}
if (!cmd->finished) {
QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
g->inflight++;
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
if (g->stats.max_inflight < g->inflight) {
g->stats.max_inflight = g->inflight;
}
@@ -958,6 +837,19 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
}
}
+static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
+{
+ VirtIOGPU *g = VIRTIO_GPU(b);
+
+#ifdef CONFIG_VIRGL
+ if (g->renderer_reset) {
+ g->renderer_reset = false;
+ virtio_gpu_virgl_reset(g);
+ }
+#endif
+ virtio_gpu_process_cmdq(g);
+}
+
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOGPU *g = VIRTIO_GPU(vdev);
@@ -968,7 +860,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
#ifdef CONFIG_VIRGL
- if (!g->renderer_inited && g->use_virgl_renderer) {
+ if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
virtio_gpu_virgl_init(g);
g->renderer_inited = true;
}
@@ -986,7 +878,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
virtio_gpu_process_cmdq(g);
#ifdef CONFIG_VIRGL
- if (g->use_virgl_renderer) {
+ if (g->parent_obj.use_virgl_renderer) {
virtio_gpu_virgl_fence_poll(g);
}
#endif
@@ -995,7 +887,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
static void virtio_gpu_ctrl_bh(void *opaque)
{
VirtIOGPU *g = opaque;
- virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
+ virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
@@ -1033,75 +925,9 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
static void virtio_gpu_cursor_bh(void *opaque)
{
VirtIOGPU *g = opaque;
- virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
+ virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
-static void virtio_gpu_invalidate_display(void *opaque)
-{
-}
-
-static void virtio_gpu_update_display(void *opaque)
-{
-}
-
-static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
-{
-}
-
-static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
-{
- VirtIOGPU *g = opaque;
-
- if (idx >= g->conf.max_outputs) {
- return -1;
- }
-
- g->req_state[idx].x = info->xoff;
- g->req_state[idx].y = info->yoff;
- g->req_state[idx].width = info->width;
- g->req_state[idx].height = info->height;
-
- if (info->width && info->height) {
- g->enabled_output_bitmask |= (1 << idx);
- } else {
- g->enabled_output_bitmask &= ~(1 << idx);
- }
-
- /* send event to guest */
- virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
- return 0;
-}
-
-static void virtio_gpu_gl_block(void *opaque, bool block)
-{
- VirtIOGPU *g = opaque;
-
- if (block) {
- g->renderer_blocked++;
- } else {
- g->renderer_blocked--;
- }
- assert(g->renderer_blocked >= 0);
-
- if (g->renderer_blocked == 0) {
-#ifdef CONFIG_VIRGL
- if (g->renderer_reset) {
- g->renderer_reset = false;
- virtio_gpu_virgl_reset(g);
- }
-#endif
- virtio_gpu_process_cmdq(g);
- }
-}
-
-const GraphicHwOps virtio_gpu_ops = {
- .invalidate = virtio_gpu_invalidate_display,
- .gfx_update = virtio_gpu_update_display,
- .text_update = virtio_gpu_text_update,
- .ui_info = virtio_gpu_ui_info,
- .gl_block = virtio_gpu_gl_block,
-};
-
static const VMStateDescription vmstate_virtio_gpu_scanout = {
.name = "virtio-gpu-one-scanout",
.version_id = 1,
@@ -1124,10 +950,11 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = {
.name = "virtio-gpu-scanouts",
.version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_INT32(enable, struct VirtIOGPU),
- VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
- VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
- conf.max_outputs, 1,
+ VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
+ VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
+ struct VirtIOGPU, NULL),
+ VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
+ parent_obj.conf.max_outputs, 1,
vmstate_virtio_gpu_scanout,
struct virtio_gpu_scanout),
VMSTATE_END_OF_LIST()
@@ -1183,7 +1010,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
res->iov_cnt = qemu_get_be32(f);
/* allocate */
- pformat = get_pixman_format(res->format);
+ pformat = virtio_gpu_get_pixman_format(res->format);
if (!pformat) {
g_free(res);
return -EINVAL;
@@ -1242,8 +1069,8 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
/* load & apply scanout state */
vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
- for (i = 0; i < g->conf.max_outputs; i++) {
- scanout = &g->scanout[i];
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
+ scanout = &g->parent_obj.scanout[i];
if (!scanout->resource_id) {
continue;
}
@@ -1272,84 +1099,35 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
VirtIOGPU *g = VIRTIO_GPU(qdev);
bool have_virgl;
- Error *local_err = NULL;
- int i;
-
- if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
- error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
- return;
- }
- g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
have_virgl = false;
#else
have_virgl = display_opengl;
#endif
if (!have_virgl) {
- g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
- }
-
- if (virtio_gpu_virgl_enabled(g->conf)) {
- error_setg(&g->migration_blocker, "virgl is not yet migratable");
- migrate_add_blocker(g->migration_blocker, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- error_free(g->migration_blocker);
- return;
- }
- }
-
- g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
- virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
- sizeof(struct virtio_gpu_config));
-
- g->req_state[0].width = g->conf.xres;
- g->req_state[0].height = g->conf.yres;
-
- if (virtio_gpu_virgl_enabled(g->conf)) {
- /* use larger control queue in 3d mode */
- g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
- g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
-
+ g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
+ } else {
#if defined(CONFIG_VIRGL)
- g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
-#else
- g->virtio_config.num_capsets = 0;
+ VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
+ virtio_gpu_virgl_get_num_capsets(g);
#endif
- } else {
- g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
- g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
}
+ if (!virtio_gpu_base_device_realize(qdev,
+ virtio_gpu_handle_ctrl_cb,
+ virtio_gpu_handle_cursor_cb,
+ errp)) {
+ return;
+ }
+
+ g->ctrl_vq = virtio_get_queue(vdev, 0);
+ g->cursor_vq = virtio_get_queue(vdev, 1);
g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
QTAILQ_INIT(&g->reslist);
QTAILQ_INIT(&g->cmdq);
QTAILQ_INIT(&g->fenceq);
-
- g->enabled_output_bitmask = 1;
-
- for (i = 0; i < g->conf.max_outputs; i++) {
- g->scanout[i].con =
- graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
- if (i > 0) {
- dpy_gfx_replace_surface(g->scanout[i].con, NULL);
- }
- }
-}
-
-static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
-{
- VirtIOGPU *g = VIRTIO_GPU(qdev);
- if (g->migration_blocker) {
- migrate_del_blocker(g->migration_blocker);
- error_free(g->migration_blocker);
- }
-}
-
-static void virtio_gpu_instance_init(Object *obj)
-{
}
static void virtio_gpu_reset(VirtIODevice *vdev)
@@ -1357,21 +1135,16 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
VirtIOGPU *g = VIRTIO_GPU(vdev);
struct virtio_gpu_simple_resource *res, *tmp;
struct virtio_gpu_ctrl_command *cmd;
- int i;
- g->enable = 0;
+#ifdef CONFIG_VIRGL
+ if (g->parent_obj.use_virgl_renderer) {
+ virtio_gpu_virgl_reset(g);
+ }
+#endif
QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
virtio_gpu_resource_destroy(g, res);
}
- for (i = 0; i < g->conf.max_outputs; i++) {
- g->scanout[i].resource_id = 0;
- g->scanout[i].width = 0;
- g->scanout[i].height = 0;
- g->scanout[i].x = 0;
- g->scanout[i].y = 0;
- g->scanout[i].ds = NULL;
- }
while (!QTAILQ_EMPTY(&g->cmdq)) {
cmd = QTAILQ_FIRST(&g->cmdq);
@@ -1387,15 +1160,37 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
}
#ifdef CONFIG_VIRGL
- if (g->use_virgl_renderer) {
- if (g->renderer_blocked) {
+ if (g->parent_obj.use_virgl_renderer) {
+ if (g->parent_obj.renderer_blocked) {
g->renderer_reset = true;
} else {
virtio_gpu_virgl_reset(g);
}
- g->use_virgl_renderer = 0;
+ g->parent_obj.use_virgl_renderer = false;
}
#endif
+
+ virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
+}
+
+static void
+virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+
+ memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
+}
+
+static void
+virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+ const struct virtio_gpu_config *vgconfig =
+ (const struct virtio_gpu_config *)config;
+
+ if (vgconfig->events_clear) {
+ g->virtio_config.events_read &= ~vgconfig->events_clear;
+ }
}
/*
@@ -1426,18 +1221,15 @@ static const VMStateDescription vmstate_virtio_gpu = {
};
static Property virtio_gpu_properties[] = {
- DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
- DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
+ VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
+ DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
+ 256 * MiB),
#ifdef CONFIG_VIRGL
- DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
+ DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
- DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
+ DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
- DEFINE_PROP_BIT("edid", VirtIOGPU, conf.flags,
- VIRTIO_GPU_FLAG_EDID_ENABLED, false),
- DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
- DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1445,27 +1237,22 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);
+ vgc->gl_unblock = virtio_gpu_gl_unblock;
vdc->realize = virtio_gpu_device_realize;
- vdc->unrealize = virtio_gpu_device_unrealize;
+ vdc->reset = virtio_gpu_reset;
vdc->get_config = virtio_gpu_get_config;
vdc->set_config = virtio_gpu_set_config;
- vdc->get_features = virtio_gpu_get_features;
- vdc->set_features = virtio_gpu_set_features;
- vdc->reset = virtio_gpu_reset;
-
- set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
- dc->props = virtio_gpu_properties;
dc->vmsd = &vmstate_virtio_gpu;
- dc->hotpluggable = false;
+ dc->props = virtio_gpu_properties;
}
static const TypeInfo virtio_gpu_info = {
.name = TYPE_VIRTIO_GPU,
- .parent = TYPE_VIRTIO_DEVICE,
+ .parent = TYPE_VIRTIO_GPU_BASE,
.instance_size = sizeof(VirtIOGPU),
- .instance_init = virtio_gpu_instance_init,
.class_init = virtio_gpu_class_init,
};
@@ -1475,26 +1262,3 @@ static void virtio_register_types(void)
}
type_init(virtio_register_types)
-
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);
-
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);
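
The realize logic removed above (the max_outputs sanity check, the virgl migration blocker, virtio_init(), the virgl-dependent queue sizes and the per-output console setup) now lives in virtio_gpu_base_device_realize() in the new virtio-gpu-base.c, which this part of the diff does not show. A rough sketch of that helper, reconstructed from the deleted lines here rather than taken from the new file (the exact signature and field placement are assumptions based on the call site above):

bool virtio_gpu_base_device_realize(DeviceState *qdev,
                                    VirtIOHandleOutput ctrl_cb,
                                    VirtIOHandleOutput cursor_cb,
                                    Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return false;
    }

    g->use_virgl_renderer = false;
    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* same blocker as in the code removed above */
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return false;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    /* larger control queue in 3d (virgl) mode, as before; the caller
     * picks the queues back up with virtio_get_queue() */
    virtio_add_queue(vdev, virtio_gpu_virgl_enabled(g->conf) ? 256 : 64, ctrl_cb);
    virtio_add_queue(vdev, 16, cursor_cb);

    g->enabled_output_bitmask = 1;
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    return true;
}
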
diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c
index 5d57bf5..67e3493 100644
--- a/hw/display/virtio-vga.c
+++ b/hw/display/virtio-vga.c
@@ -1,63 +1,42 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
-#include "vga_int.h"
-#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-gpu.h"
#include "qapi/error.h"
+#include "virtio-vga.h"
-/*
- * virtio-vga: This extends VirtioPCIProxy.
- */
-#define TYPE_VIRTIO_VGA "virtio-vga"
-#define VIRTIO_VGA(obj) \
- OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)
-#define VIRTIO_VGA_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOVGAClass, obj, TYPE_VIRTIO_VGA)
-#define VIRTIO_VGA_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtIOVGAClass, klass, TYPE_VIRTIO_VGA)
-
-typedef struct VirtIOVGA {
- VirtIOPCIProxy parent_obj;
- VirtIOGPU vdev;
- VGACommonState vga;
- MemoryRegion vga_mrs[3];
-} VirtIOVGA;
-
-typedef struct VirtIOVGAClass {
- VirtioPCIClass parent_class;
- DeviceReset parent_reset;
-} VirtIOVGAClass;
-
-static void virtio_vga_invalidate_display(void *opaque)
+static void virtio_vga_base_invalidate_display(void *opaque)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
- if (vvga->vdev.enable) {
- virtio_gpu_ops.invalidate(&vvga->vdev);
+ if (g->enable) {
+ virtio_gpu_ops.invalidate(g);
} else {
vvga->vga.hw_ops->invalidate(&vvga->vga);
}
}
-static void virtio_vga_update_display(void *opaque)
+static void virtio_vga_base_update_display(void *opaque)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
- if (vvga->vdev.enable) {
- virtio_gpu_ops.gfx_update(&vvga->vdev);
+ if (g->enable) {
+ virtio_gpu_ops.gfx_update(g);
} else {
vvga->vga.hw_ops->gfx_update(&vvga->vga);
}
}
-static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
+static void virtio_vga_base_text_update(void *opaque, console_ch_t *chardata)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
- if (vvga->vdev.enable) {
+ if (g->enable) {
if (virtio_gpu_ops.text_update) {
- virtio_gpu_ops.text_update(&vvga->vdev, chardata);
+ virtio_gpu_ops.text_update(g, chardata);
}
} else {
if (vvga->vga.hw_ops->text_update) {
@@ -66,49 +45,52 @@ static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
}
}
-static int virtio_vga_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+static int virtio_vga_base_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
if (virtio_gpu_ops.ui_info) {
- return virtio_gpu_ops.ui_info(&vvga->vdev, idx, info);
+ return virtio_gpu_ops.ui_info(g, idx, info);
}
return -1;
}
-static void virtio_vga_gl_block(void *opaque, bool block)
+static void virtio_vga_base_gl_block(void *opaque, bool block)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
if (virtio_gpu_ops.gl_block) {
- virtio_gpu_ops.gl_block(&vvga->vdev, block);
+ virtio_gpu_ops.gl_block(g, block);
}
}
-static const GraphicHwOps virtio_vga_ops = {
- .invalidate = virtio_vga_invalidate_display,
- .gfx_update = virtio_vga_update_display,
- .text_update = virtio_vga_text_update,
- .ui_info = virtio_vga_ui_info,
- .gl_block = virtio_vga_gl_block,
+static const GraphicHwOps virtio_vga_base_ops = {
+ .invalidate = virtio_vga_base_invalidate_display,
+ .gfx_update = virtio_vga_base_update_display,
+ .text_update = virtio_vga_base_text_update,
+ .ui_info = virtio_vga_base_ui_info,
+ .gl_block = virtio_vga_base_gl_block,
};
-static const VMStateDescription vmstate_virtio_vga = {
+static const VMStateDescription vmstate_virtio_vga_base = {
.name = "virtio-vga",
.version_id = 2,
.minimum_version_id = 2,
.fields = (VMStateField[]) {
/* no pci stuff here, saving the virtio device will handle that */
- VMSTATE_STRUCT(vga, VirtIOVGA, 0, vmstate_vga_common, VGACommonState),
+ VMSTATE_STRUCT(vga, VirtIOVGABase, 0,
+ vmstate_vga_common, VGACommonState),
VMSTATE_END_OF_LIST()
}
};
/* VGA device wrapper around PCI device around virtio GPU */
-static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+static void virtio_vga_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
- VirtIOVGA *vvga = VIRTIO_VGA(vpci_dev);
- VirtIOGPU *g = &vvga->vdev;
+ VirtIOVGABase *vvga = VIRTIO_VGA_BASE(vpci_dev);
+ VirtIOGPUBase *g = vvga->vgpu;
VGACommonState *vga = &vvga->vga;
Error *err = NULL;
uint32_t offset;
@@ -168,7 +150,7 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
vvga->vga_mrs, true, false);
vga->con = g->scanout[0].con;
- graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga);
+ graphic_console_set_hwops(vga->con, &virtio_vga_base_ops, vvga);
for (i = 0; i < g->conf.max_outputs; i++) {
object_property_set_link(OBJECT(g->scanout[i].con),
@@ -177,10 +159,10 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
}
}
-static void virtio_vga_reset(DeviceState *dev)
+static void virtio_vga_base_reset(DeviceState *dev)
{
- VirtIOVGAClass *klass = VIRTIO_VGA_GET_CLASS(dev);
- VirtIOVGA *vvga = VIRTIO_VGA(dev);
+ VirtIOVGABaseClass *klass = VIRTIO_VGA_BASE_GET_CLASS(dev);
+ VirtIOVGABase *vvga = VIRTIO_VGA_BASE(dev);
/* reset virtio-gpu */
klass->parent_reset(dev);
@@ -190,48 +172,70 @@ static void virtio_vga_reset(DeviceState *dev)
vga_dirty_log_start(&vvga->vga);
}
-static Property virtio_vga_properties[] = {
+static Property virtio_vga_base_properties[] = {
DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_vga_class_init(ObjectClass *klass, void *data)
+static void virtio_vga_base_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
- VirtIOVGAClass *v = VIRTIO_VGA_CLASS(klass);
+ VirtIOVGABaseClass *v = VIRTIO_VGA_BASE_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
- dc->props = virtio_vga_properties;
- dc->vmsd = &vmstate_virtio_vga;
+ dc->props = virtio_vga_base_properties;
+ dc->vmsd = &vmstate_virtio_vga_base;
dc->hotpluggable = false;
- device_class_set_parent_reset(dc, virtio_vga_reset,
+ device_class_set_parent_reset(dc, virtio_vga_base_reset,
&v->parent_reset);
- k->realize = virtio_vga_realize;
+ k->realize = virtio_vga_base_realize;
pcidev_k->romfile = "vgabios-virtio.bin";
pcidev_k->class_id = PCI_CLASS_DISPLAY_VGA;
}
+static TypeInfo virtio_vga_base_info = {
+ .name = TYPE_VIRTIO_VGA_BASE,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(struct VirtIOVGABase),
+ .class_size = sizeof(struct VirtIOVGABaseClass),
+ .class_init = virtio_vga_base_class_init,
+ .abstract = true,
+};
+
+#define TYPE_VIRTIO_VGA "virtio-vga"
+
+#define VIRTIO_VGA(obj) \
+ OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)
+
+typedef struct VirtIOVGA {
+ VirtIOVGABase parent_obj;
+
+ VirtIOGPU vdev;
+} VirtIOVGA;
+
static void virtio_vga_inst_initfn(Object *obj)
{
VirtIOVGA *dev = VIRTIO_VGA(obj);
virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
TYPE_VIRTIO_GPU);
+ VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}
+
static VirtioPCIDeviceTypeInfo virtio_vga_info = {
.generic_name = TYPE_VIRTIO_VGA,
+ .parent = TYPE_VIRTIO_VGA_BASE,
.instance_size = sizeof(struct VirtIOVGA),
.instance_init = virtio_vga_inst_initfn,
- .class_size = sizeof(struct VirtIOVGAClass),
- .class_init = virtio_vga_class_init,
};
static void virtio_vga_register_types(void)
{
+ type_register_static(&virtio_vga_base_info);
virtio_pci_types_register(&virtio_vga_info);
}
diff --git a/hw/display/virtio-vga.h b/hw/display/virtio-vga.h
new file mode 100644
index 0000000..c10bf39
--- /dev/null
+++ b/hw/display/virtio-vga.h
@@ -0,0 +1,32 @@
+#ifndef VIRTIO_VGA_H_
+#define VIRTIO_VGA_H_
+
+#include "hw/virtio/virtio-gpu-pci.h"
+#include "vga_int.h"
+
+/*
+ * virtio-vga-base: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_VGA_BASE "virtio-vga-base"
+#define VIRTIO_VGA_BASE(obj) \
+ OBJECT_CHECK(VirtIOVGABase, (obj), TYPE_VIRTIO_VGA_BASE)
+#define VIRTIO_VGA_BASE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtIOVGABaseClass, obj, TYPE_VIRTIO_VGA_BASE)
+#define VIRTIO_VGA_BASE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtIOVGABaseClass, klass, TYPE_VIRTIO_VGA_BASE)
+
+typedef struct VirtIOVGABase {
+ VirtIOPCIProxy parent_obj;
+
+ VirtIOGPUBase *vgpu;
+ VGACommonState vga;
+ MemoryRegion vga_mrs[3];
+} VirtIOVGABase;
+
+typedef struct VirtIOVGABaseClass {
+ VirtioPCIClass parent_class;
+
+ DeviceReset parent_reset;
+} VirtIOVGABaseClass;
+
+#endif /* VIRTIO_VGA_H_ */
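
Nothing in the new header ties VirtIOVGABase to the plain virtio-gpu device: the base only carries a VirtIOGPUBase pointer, the VGA state and the memory-region aliases, so any other VirtIOGPUBase-derived front-end can be wrapped the same way virtio-vga is built above. A minimal sketch of such a wrapper (type and struct names here are illustrative, not taken from this series):

#define TYPE_MY_GPU_VGA "x-my-gpu-vga"   /* hypothetical type name */
#define MY_GPU_VGA(obj) \
    OBJECT_CHECK(MyGPUVGA, (obj), TYPE_MY_GPU_VGA)

typedef struct MyGPUVGA {
    VirtIOVGABase parent_obj;

    VirtIOGPU vdev;            /* any VirtIOGPUBase-derived device works */
} MyGPUVGA;

static void my_gpu_vga_inst_initfn(Object *obj)
{
    MyGPUVGA *dev = MY_GPU_VGA(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_GPU);
    /* hand the embedded GPU to the shared VGA wrapper */
    VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}

static VirtioPCIDeviceTypeInfo my_gpu_vga_info = {
    .generic_name  = TYPE_MY_GPU_VGA,
    .parent        = TYPE_VIRTIO_VGA_BASE,
    .instance_size = sizeof(MyGPUVGA),
    .instance_init = my_gpu_vga_inst_initfn,
};

The concrete type only embeds its GPU device, points vgpu at it in instance_init and sets .parent to TYPE_VIRTIO_VGA_BASE; the VGA registers, console hw ops and reset ordering are inherited from the base class, and registration goes through virtio_pci_types_register() as for virtio_vga_info above.
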
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 553319c..4ca5b25 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -96,6 +96,7 @@ typedef enum VhostUserRequest {
VHOST_USER_POSTCOPY_END = 30,
VHOST_USER_GET_INFLIGHT_FD = 31,
VHOST_USER_SET_INFLIGHT_FD = 32,
+ VHOST_USER_GPU_SET_SOCKET = 33,
VHOST_USER_MAX
} VhostUserRequest;
@@ -353,6 +354,16 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
return 0;
}
+int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
+{
+ VhostUserMsg msg = {
+ .hdr.request = VHOST_USER_GPU_SET_SOCKET,
+ .hdr.flags = VHOST_USER_VERSION,
+ };
+
+ return vhost_user_write(dev, &msg, &fd, 1);
+}
+
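
The new message carries a file descriptor only, so a device would typically create a socketpair, hand one end to the backend with vhost_user_gpu_set_socket() and keep the other end for the GPU-specific protocol. A hedged caller-side sketch (illustrative only, not taken from any file in this series; assumes <sys/socket.h>, <unistd.h>, <errno.h> and qapi/error.h):

/* returns the locally kept end of the GPU channel, or -1 on error */
static int example_gpu_connect(struct vhost_dev *vhost_dev, Error **errp)
{
    int sv[2];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return -1;
    }

    if (vhost_user_gpu_set_socket(vhost_dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set GPU socket");
        close(sv[0]);
        close(sv[1]);
        return -1;
    }
    close(sv[1]);      /* the backend now owns its copy of this end */

    return sv[0];
}
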
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
struct vhost_log *log)
{