From: Gerd Hoffmann
Subject: [Qemu-devel] [PULL 07/11] virtio-gpu: add 3d mode and virgl rendering support.
Date: Fri, 9 Oct 2015 10:18:30 +0200

Add virglrenderer library detection to configure.  Add a 3d mode to
virtio-gpu and wire up the virglrenderer library.  When in 3d mode,
render using the new context management and texture scanout callbacks.

Signed-off-by: Gerd Hoffmann <address@hidden>
Reviewed-by: Max Reitz <address@hidden>
---
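A quick way to try this out, as a sketch (the -display spelling and the
pci device name below are assumptions, not part of this patch; they
depend on which display backends are built in, and the backend must
support OpenGL):

  # build with virgl support; the probe also succeeds automatically
  # when virglrenderer is found via pkg-config
  ./configure --enable-virglrenderer
  make

  # run a guest with a 3d-capable gpu; the "virgl" property defaults
  # to on when QEMU is built with CONFIG_VIRGL
  qemu-system-x86_64 -display sdl,gl=on -device virtio-gpu-pci,virgl=on
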
 configure                      |  32 +++
 hw/display/Makefile.objs       |   6 +-
 hw/display/virtio-gpu-3d.c     | 598 +++++++++++++++++++++++++++++++++++++++++
 hw/display/virtio-gpu.c        | 137 +++++++++-
 include/hw/virtio/virtio-gpu.h |  22 +-
 trace-events                   |   8 +
 6 files changed, 792 insertions(+), 11 deletions(-)
 create mode 100644 hw/display/virtio-gpu-3d.c

diff --git a/configure b/configure
index f14454e..561b8fa 100755
--- a/configure
+++ b/configure
@@ -331,6 +331,7 @@ gtkabi=""
 gnutls=""
 gnutls_hash=""
 vte=""
+virglrenderer=""
 tpm="yes"
 libssh2=""
 vhdx=""
@@ -1122,6 +1123,10 @@ for opt do
   ;;
   --enable-vte) vte="yes"
   ;;
+  --disable-virglrenderer) virglrenderer="no"
+  ;;
+  --enable-virglrenderer) virglrenderer="yes"
+  ;;
   --disable-tpm) tpm="no"
   ;;
   --enable-tpm) tpm="yes"
@@ -3967,6 +3972,27 @@ EOF
 fi
 
 ##########################################
+# virgl renderer probe
+
+if test "$virglrenderer" != "no" ; then
+  cat > $TMPC << EOF
+#include <virglrenderer.h>
+int main(void) { virgl_renderer_poll(); return 0; }
+EOF
+  virgl_cflags=$($pkg_config --cflags virglrenderer 2>/dev/null)
+  virgl_libs=$($pkg_config --libs virglrenderer 2>/dev/null)
+  if $pkg_config virglrenderer >/dev/null 2>&1 && \
+     compile_prog "$virgl_cflags" "$virgl_libs" ; then
+    virglrenderer="yes"
+  else
+    if test "$virglrenderer" = "yes" ; then
+      feature_not_found "virglrenderer"
+    fi
+    virglrenderer="no"
+  fi
+fi
+
+##########################################
 # check if we have fdatasync
 
 fdatasync=no
@@ -4583,6 +4609,7 @@ echo "GNUTLS nettle     $gnutls_nettle ${gnutls_nettle+($nettle_version)}"
 echo "libtasn1          $tasn1"
 echo "VTE support       $vte"
 echo "curses support    $curses"
+echo "virgl support     $virglrenderer"
 echo "curl support      $curl"
 echo "mingw32 support   $mingw32"
 echo "Audio drivers     $audio_drv_list"
@@ -4955,6 +4982,11 @@ if test "$vte" = "yes" ; then
   echo "CONFIG_VTE=y" >> $config_host_mak
   echo "VTE_CFLAGS=$vte_cflags" >> $config_host_mak
 fi
+if test "$virglrenderer" = "yes" ; then
+  echo "CONFIG_VIRGL=y" >> $config_host_mak
+  echo "VIRGL_CFLAGS=$virgl_cflags" >> $config_host_mak
+  echo "VIRGL_LIBS=$virgl_libs" >> $config_host_mak
+fi
 if test "$xen" = "yes" ; then
   echo "CONFIG_XEN_BACKEND=y" >> $config_host_mak
   echo "CONFIG_XEN_CTRL_INTERFACE_VERSION=$xen_ctrl_version" >> 
$config_host_mak
diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs
index dd8ea76..f0cf431 100644
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -35,6 +35,10 @@ obj-$(CONFIG_VGA) += vga.o
 
 common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o
 
-obj-$(CONFIG_VIRTIO) += virtio-gpu.o
+obj-$(CONFIG_VIRTIO) += virtio-gpu.o virtio-gpu-3d.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio-gpu-pci.o
 obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
+virtio-gpu.o-cflags := $(VIRGL_CFLAGS)
+virtio-gpu.o-libs += $(VIRGL_LIBS)
+virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS)
+virtio-gpu-3d.o-libs += $(VIRGL_LIBS)
diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c
new file mode 100644
index 0000000..28dccfd
--- /dev/null
+++ b/hw/display/virtio-gpu-3d.c
@@ -0,0 +1,598 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ *     Dave Airlie <address@hidden>
+ *     Gerd Hoffmann <address@hidden>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu-common.h"
+#include "qemu/iov.h"
+#include "trace.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-gpu.h"
+
+#ifdef CONFIG_VIRGL
+
+#include "virglrenderer.h"
+
+static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;
+
+static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
+                                         struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_create_2d c2d;
+    struct virgl_renderer_resource_create_args args;
+
+    VIRTIO_GPU_FILL_CMD(c2d);
+    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
+                                       c2d.width, c2d.height);
+
+    args.handle = c2d.resource_id;
+    args.target = 2;
+    args.format = c2d.format;
+    args.bind = (1 << 1);
+    args.width = c2d.width;
+    args.height = c2d.height;
+    args.depth = 1;
+    args.array_size = 1;
+    args.last_level = 0;
+    args.nr_samples = 0;
+    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
+    virgl_renderer_resource_create(&args, NULL, 0);
+}
+
+static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
+                                         struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_create_3d c3d;
+    struct virgl_renderer_resource_create_args args;
+
+    VIRTIO_GPU_FILL_CMD(c3d);
+    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
+                                       c3d.width, c3d.height, c3d.depth);
+
+    args.handle = c3d.resource_id;
+    args.target = c3d.target;
+    args.format = c3d.format;
+    args.bind = c3d.bind;
+    args.width = c3d.width;
+    args.height = c3d.height;
+    args.depth = c3d.depth;
+    args.array_size = c3d.array_size;
+    args.last_level = c3d.last_level;
+    args.nr_samples = c3d.nr_samples;
+    args.flags = c3d.flags;
+    virgl_renderer_resource_create(&args, NULL, 0);
+}
+
+static void virgl_cmd_resource_unref(VirtIOGPU *g,
+                                     struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_unref unref;
+
+    VIRTIO_GPU_FILL_CMD(unref);
+    trace_virtio_gpu_cmd_res_unref(unref.resource_id);
+
+    virgl_renderer_resource_unref(unref.resource_id);
+}
+
+static void virgl_cmd_context_create(VirtIOGPU *g,
+                                     struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_ctx_create cc;
+
+    VIRTIO_GPU_FILL_CMD(cc);
+    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
+                                    cc.debug_name);
+
+    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
+                                  cc.debug_name);
+}
+
+static void virgl_cmd_context_destroy(VirtIOGPU *g,
+                                      struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_ctx_destroy cd;
+
+    VIRTIO_GPU_FILL_CMD(cd);
+    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);
+
+    virgl_renderer_context_destroy(cd.hdr.ctx_id);
+}
+
+static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
+                                int width, int height)
+{
+    if (!g->scanout[idx].con) {
+        return;
+    }
+
+    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
+}
+
+static void virgl_cmd_resource_flush(VirtIOGPU *g,
+                                     struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_flush rf;
+    int i;
+
+    VIRTIO_GPU_FILL_CMD(rf);
+    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
+                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);
+
+    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
+        if (g->scanout[i].resource_id != rf.resource_id) {
+            continue;
+        }
+        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
+    }
+}
+
+static void virgl_cmd_set_scanout(VirtIOGPU *g,
+                                  struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_set_scanout ss;
+    struct virgl_renderer_resource_info info;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(ss);
+    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
+                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);
+
+    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
+                      __func__, ss.scanout_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+        return;
+    }
+    g->enable = 1;
+
+    memset(&info, 0, sizeof(info));
+
+    if (ss.resource_id && ss.r.width && ss.r.height) {
+        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
+        if (ret == -1) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: illegal resource specified %d\n",
+                          __func__, ss.resource_id);
+            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+            return;
+        }
+        qemu_console_resize(g->scanout[ss.scanout_id].con,
+                            ss.r.width, ss.r.height);
+        virgl_renderer_force_ctx_0();
+        dpy_gl_scanout(g->scanout[ss.scanout_id].con, info.tex_id,
+                       info.flags & 1 /* FIXME: Y_0_TOP */,
+                       ss.r.x, ss.r.y, ss.r.width, ss.r.height);
+    } else {
+        if (ss.scanout_id != 0) {
+            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
+        }
+        dpy_gl_scanout(g->scanout[ss.scanout_id].con, 0, false,
+                       0, 0, 0, 0);
+    }
+    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
+}
+
+static void virgl_cmd_submit_3d(VirtIOGPU *g,
+                                struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_cmd_submit cs;
+    void *buf;
+    size_t s;
+
+    VIRTIO_GPU_FILL_CMD(cs);
+    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);
+
+    buf = g_malloc(cs.size);
+    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
+                   sizeof(cs), buf, cs.size);
+    if (s != cs.size) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
+                      __func__, s, cs.size);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    if (virtio_gpu_stats_enabled(g->conf)) {
+        g->stats.req_3d++;
+        g->stats.bytes_3d += cs.size;
+    }
+
+    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);
+
+    g_free(buf);
+}
+
+static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_transfer_to_host_2d t2d;
+    struct virtio_gpu_box box;
+
+    VIRTIO_GPU_FILL_CMD(t2d);
+    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
+
+    box.x = t2d.r.x;
+    box.y = t2d.r.y;
+    box.z = 0;
+    box.w = t2d.r.width;
+    box.h = t2d.r.height;
+    box.d = 1;
+
+    virgl_renderer_transfer_write_iov(t2d.resource_id,
+                                      0,
+                                      0,
+                                      0,
+                                      0,
+                                      (struct virgl_box *)&box,
+                                      t2d.offset, NULL, 0);
+}
+
+static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_transfer_host_3d t3d;
+
+    VIRTIO_GPU_FILL_CMD(t3d);
+    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);
+
+    virgl_renderer_transfer_write_iov(t3d.resource_id,
+                                      t3d.hdr.ctx_id,
+                                      t3d.level,
+                                      t3d.stride,
+                                      t3d.layer_stride,
+                                      (struct virgl_box *)&t3d.box,
+                                      t3d.offset, NULL, 0);
+}
+
+static void
+virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
+                                struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_transfer_host_3d tf3d;
+
+    VIRTIO_GPU_FILL_CMD(tf3d);
+    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);
+
+    virgl_renderer_transfer_read_iov(tf3d.resource_id,
+                                     tf3d.hdr.ctx_id,
+                                     tf3d.level,
+                                     tf3d.stride,
+                                     tf3d.layer_stride,
+                                     (struct virgl_box *)&tf3d.box,
+                                     tf3d.offset, NULL, 0);
+}
+
+
+static void virgl_resource_attach_backing(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_attach_backing att_rb;
+    struct iovec *res_iovs;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(att_rb);
+    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
+
+    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, &res_iovs);
+    if (ret != 0) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    virgl_renderer_resource_attach_iov(att_rb.resource_id,
+                                       res_iovs, att_rb.nr_entries);
+}
+
+static void virgl_resource_detach_backing(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_detach_backing detach_rb;
+    struct iovec *res_iovs = NULL;
+    int num_iovs = 0;
+
+    VIRTIO_GPU_FILL_CMD(detach_rb);
+    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);
+
+    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
+                                       &res_iovs,
+                                       &num_iovs);
+    if (res_iovs == NULL || num_iovs == 0) {
+        return;
+    }
+    virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
+}
+
+
+static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_ctx_resource att_res;
+
+    VIRTIO_GPU_FILL_CMD(att_res);
+    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
+                                        att_res.resource_id);
+
+    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
+}
+
+static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_ctx_resource det_res;
+
+    VIRTIO_GPU_FILL_CMD(det_res);
+    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
+                                        det_res.resource_id);
+
+    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
+}
+
+static void virgl_cmd_get_capset_info(VirtIOGPU *g,
+                                      struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_get_capset_info info;
+    struct virtio_gpu_resp_capset_info resp;
+
+    VIRTIO_GPU_FILL_CMD(info);
+
+    if (info.capset_index == 0) {
+        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
+        virgl_renderer_get_cap_set(resp.capset_id,
+                                   &resp.capset_max_version,
+                                   &resp.capset_max_size);
+    } else {
+        resp.capset_max_version = 0;
+        resp.capset_max_size = 0;
+    }
+    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
+    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_get_capset(VirtIOGPU *g,
+                                 struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_get_capset gc;
+    struct virtio_gpu_resp_capset *resp;
+    uint32_t max_ver, max_size;
+    VIRTIO_GPU_FILL_CMD(gc);
+
+    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
+                               &max_size);
+    resp = g_malloc(sizeof(*resp) + max_size);
+
+    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
+    virgl_renderer_fill_caps(gc.capset_id,
+                             gc.capset_version,
+                             (void *)resp->capset_data);
+    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
+    g_free(resp);
+}
+
+void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
+                                      struct virtio_gpu_ctrl_command *cmd)
+{
+    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
+
+    virgl_renderer_force_ctx_0();
+    switch (cmd->cmd_hdr.type) {
+    case VIRTIO_GPU_CMD_CTX_CREATE:
+        virgl_cmd_context_create(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_CTX_DESTROY:
+        virgl_cmd_context_destroy(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
+        virgl_cmd_create_resource_2d(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
+        virgl_cmd_create_resource_3d(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_SUBMIT_3D:
+        virgl_cmd_submit_3d(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
+        virgl_cmd_transfer_to_host_2d(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
+        virgl_cmd_transfer_to_host_3d(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
+        virgl_cmd_transfer_from_host_3d(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
+        virgl_resource_attach_backing(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
+        virgl_resource_detach_backing(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_SET_SCANOUT:
+        virgl_cmd_set_scanout(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
+        virgl_cmd_resource_flush(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
+        virgl_cmd_resource_unref(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
+        /* TODO add security */
+        virgl_cmd_ctx_attach_resource(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
+        /* TODO add security */
+        virgl_cmd_ctx_detach_resource(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
+        virgl_cmd_get_capset_info(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_GET_CAPSET:
+        virgl_cmd_get_capset(g, cmd);
+        break;
+
+    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
+        virtio_gpu_get_display_info(g, cmd);
+        break;
+    default:
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        break;
+    }
+
+    if (cmd->finished) {
+        return;
+    }
+    if (cmd->error) {
+        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
+                cmd->cmd_hdr.type, cmd->error);
+        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
+        return;
+    }
+    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
+        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
+        return;
+    }
+
+    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
+    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
+}
+
+static void virgl_write_fence(void *opaque, uint32_t fence)
+{
+    VirtIOGPU *g = opaque;
+    struct virtio_gpu_ctrl_command *cmd, *tmp;
+
+    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
+        /*
+         * The guest can end up emitting fences out of order,
+         * so we should check all fenced cmds, not just the first one.
+         */
+        if (cmd->cmd_hdr.fence_id > fence) {
+            continue;
+        }
+        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
+        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
+        QTAILQ_REMOVE(&g->fenceq, cmd, next);
+        g_free(cmd);
+        g->inflight--;
+        if (virtio_gpu_stats_enabled(g->conf)) {
+            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
+        }
+    }
+}
+
+static virgl_renderer_gl_context
+virgl_create_context(void *opaque, int scanout_idx,
+                     struct virgl_renderer_gl_ctx_param *params)
+{
+    VirtIOGPU *g = opaque;
+    QEMUGLContext ctx;
+    QEMUGLParams qparams;
+
+    qparams.major_ver = params->major_ver;
+    qparams.minor_ver = params->minor_ver;
+
+    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
+    return (virgl_renderer_gl_context)ctx;
+}
+
+static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
+{
+    VirtIOGPU *g = opaque;
+    QEMUGLContext qctx = (QEMUGLContext)ctx;
+
+    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
+}
+
+static int virgl_make_context_current(void *opaque, int scanout_idx,
+                                      virgl_renderer_gl_context ctx)
+{
+    VirtIOGPU *g = opaque;
+    QEMUGLContext qctx = (QEMUGLContext)ctx;
+
+    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
+}
+
+static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
+    .version             = 1,
+    .write_fence         = virgl_write_fence,
+    .create_gl_context   = virgl_create_context,
+    .destroy_gl_context  = virgl_destroy_context,
+    .make_current        = virgl_make_context_current,
+};
+
+static void virtio_gpu_print_stats(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+
+    if (g->stats.requests) {
+        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
+                g->stats.requests,
+                g->stats.max_inflight,
+                g->stats.req_3d,
+                g->stats.bytes_3d);
+        g->stats.requests     = 0;
+        g->stats.max_inflight = 0;
+        g->stats.req_3d       = 0;
+        g->stats.bytes_3d     = 0;
+    } else {
+        fprintf(stderr, "stats: idle\r");
+    }
+    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
+}
+
+static void virtio_gpu_fence_poll(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+
+    virgl_renderer_poll();
+    if (g->inflight) {
+        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
+    }
+}
+
+void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
+{
+    virtio_gpu_fence_poll(g);
+}
+
+void virtio_gpu_virgl_reset(VirtIOGPU *g)
+{
+    int i;
+
+    /* virgl_renderer_reset() ??? */
+    for (i = 0; i < g->conf.max_outputs; i++) {
+        if (i != 0) {
+            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
+        }
+        dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0);
+    }
+}
+
+int virtio_gpu_virgl_init(VirtIOGPU *g)
+{
+    int ret;
+
+    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
+    if (ret != 0) {
+        return ret;
+    }
+
+    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+                                 virtio_gpu_fence_poll, g);
+
+    if (virtio_gpu_stats_enabled(g->conf)) {
+        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
+                                      virtio_gpu_print_stats, g);
+        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
+    }
+    return 0;
+}
+
+#endif /* CONFIG_VIRGL */
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 8c35af3..0f8d35c 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -22,6 +22,23 @@
 static struct virtio_gpu_simple_resource*
 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
 
+#ifdef CONFIG_VIRGL
+#include "virglrenderer.h"
+#define VIRGL(_g, _virgl, _simple, ...)                     \
+    do {                                                    \
+        if (_g->use_virgl_renderer) {                       \
+            _virgl(__VA_ARGS__);                            \
+        } else {                                            \
+            _simple(__VA_ARGS__);                           \
+        }                                                   \
+    } while (0)
+#else
+#define VIRGL(_g, _virgl, _simple, ...)                 \
+    do {                                                \
+        _simple(__VA_ARGS__);                           \
+    } while (0)
+#endif
+
 static void update_cursor_data_simple(VirtIOGPU *g,
                                       struct virtio_gpu_scanout *s,
                                       uint32_t resource_id)
@@ -45,6 +62,32 @@ static void update_cursor_data_simple(VirtIOGPU *g,
            pixels * sizeof(uint32_t));
 }
 
+#ifdef CONFIG_VIRGL
+
+static void update_cursor_data_virgl(VirtIOGPU *g,
+                                     struct virtio_gpu_scanout *s,
+                                     uint32_t resource_id)
+{
+    uint32_t width, height;
+    uint32_t pixels, *data;
+
+    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
+    if (!data) {
+        return;
+    }
+
+    if (width != s->current_cursor->width ||
+        height != s->current_cursor->height) {
+        return;
+    }
+
+    pixels = s->current_cursor->width * s->current_cursor->height;
+    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
+    free(data);
+}
+
+#endif
+
 static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
 {
     struct virtio_gpu_scanout *s;
@@ -63,7 +106,8 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
         s->current_cursor->hot_y = cursor->hot_y;
 
         if (cursor->resource_id > 0) {
-            update_cursor_data_simple(g, s, cursor->resource_id);
+            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
+                  g, s, cursor->resource_id);
         }
         dpy_cursor_define(s->con, s->current_cursor);
     }
@@ -92,9 +136,23 @@ static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
 static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                         Error **errp)
 {
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+
+    if (virtio_gpu_virgl_enabled(g->conf)) {
+        features |= (1 << VIRTIO_GPU_FEATURE_VIRGL);
+    }
     return features;
 }
 
+static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
+{
+    static const uint32_t virgl = (1 << VIRTIO_GPU_FEATURE_VIRGL);
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+
+    g->use_virgl_renderer = ((features & virgl) == virgl);
+    trace_virtio_gpu_features(g->use_virgl_renderer);
+}
+
 static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
 {
     g->virtio_config.events_read |= event_type;
@@ -698,25 +756,43 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
         return;
     }
 
+#ifdef CONFIG_VIRGL
+    if (!g->renderer_inited && g->use_virgl_renderer) {
+        virtio_gpu_virgl_init(g);
+        g->renderer_inited = true;
+    }
+#endif
+
     cmd = g_new(struct virtio_gpu_ctrl_command, 1);
     while (virtqueue_pop(vq, &cmd->elem)) {
         cmd->vq = vq;
         cmd->error = 0;
         cmd->finished = false;
-        g->stats.requests++;
+        if (virtio_gpu_stats_enabled(g->conf)) {
+            g->stats.requests++;
+        }
 
-        virtio_gpu_simple_process_cmd(g, cmd);
+        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
+              g, cmd);
         if (!cmd->finished) {
             QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
-            g->stats.inflight++;
-            if (g->stats.max_inflight < g->stats.inflight) {
-                g->stats.max_inflight = g->stats.inflight;
+            g->inflight++;
+            if (virtio_gpu_stats_enabled(g->conf)) {
+                if (g->stats.max_inflight < g->inflight) {
+                    g->stats.max_inflight = g->inflight;
+                }
+                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
             }
-            fprintf(stderr, "inflight: %3d (+)\r", g->stats.inflight);
             cmd = g_new(struct virtio_gpu_ctrl_command, 1);
         }
     }
     g_free(cmd);
+
+#ifdef CONFIG_VIRGL
+    if (g->use_virgl_renderer) {
+        virtio_gpu_virgl_fence_poll(g);
+    }
+#endif
 }
 
 static void virtio_gpu_ctrl_bh(void *opaque)
@@ -803,6 +879,7 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
     VirtIOGPU *g = VIRTIO_GPU(qdev);
+    bool have_virgl;
     int i;
 
     g->config_size = sizeof(struct virtio_gpu_config);
@@ -813,8 +890,25 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
     g->req_state[0].width = 1024;
     g->req_state[0].height = 768;
 
-    g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
-    g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
+    g->use_virgl_renderer = false;
+#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
+    have_virgl = false;
+#else
+    have_virgl = display_opengl;
+#endif
+    if (!have_virgl) {
+        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
+    }
+
+    if (virtio_gpu_virgl_enabled(g->conf)) {
+        /* use larger control queue in 3d mode */
+        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
+        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
+        g->virtio_config.num_capsets = 1;
+    } else {
+        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
+        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
+    }
 
     g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
     g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
@@ -868,10 +962,23 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
         g->scanout[i].ds = NULL;
     }
     g->enabled_output_bitmask = 1;
+
+#ifdef CONFIG_VIRGL
+    if (g->use_virgl_renderer) {
+        virtio_gpu_virgl_reset(g);
+        g->use_virgl_renderer = 0;
+    }
+#endif
 }
 
 static Property virtio_gpu_properties[] = {
     DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
+#ifdef CONFIG_VIRGL
+    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
+                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
+    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
+                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
+#endif
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -884,6 +991,7 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
     vdc->get_config = virtio_gpu_get_config;
     vdc->set_config = virtio_gpu_set_config;
     vdc->get_features = virtio_gpu_get_features;
+    vdc->set_features = virtio_gpu_set_features;
 
     vdc->reset = virtio_gpu_reset;
 
@@ -916,3 +1024,14 @@ QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry)               != 16);
 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info)       != 408);
+
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d)        != 72);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d)      != 72);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create)              != 96);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy)             != 24);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource)            != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit)              != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info)         != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info)        != 40);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset)              != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset)             != 24);
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 8896761..9b279d7 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -56,8 +56,19 @@ struct virtio_gpu_requested_state {
     int x, y;
 };
 
+enum virtio_gpu_conf_flags {
+    VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
+    VIRTIO_GPU_FLAG_STATS_ENABLED,
+};
+
+#define virtio_gpu_virgl_enabled(_cfg) \
+    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED))
+#define virtio_gpu_stats_enabled(_cfg) \
+    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))
+
 struct virtio_gpu_conf {
     uint32_t max_outputs;
+    uint32_t flags;
 };
 
 struct virtio_gpu_ctrl_command {
@@ -92,11 +103,13 @@ typedef struct VirtIOGPU {
     int enabled_output_bitmask;
     struct virtio_gpu_config virtio_config;
 
+    bool use_virgl_renderer;
+    bool renderer_inited;
     QEMUTimer *fence_poll;
     QEMUTimer *print_stats;
 
+    uint32_t inflight;
     struct {
-        uint32_t inflight;
         uint32_t max_inflight;
         uint32_t requests;
         uint32_t req_3d;
@@ -139,4 +152,11 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                   struct iovec **iov);
 void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count);
 
+/* virtio-gpu-3d.c */
+void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
+                                  struct virtio_gpu_ctrl_command *cmd);
+void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
+void virtio_gpu_virgl_reset(VirtIOGPU *g);
+int virtio_gpu_virgl_init(VirtIOGPU *g);
+
 #endif
diff --git a/trace-events b/trace-events
index 36db793..02ff51b 100644
--- a/trace-events
+++ b/trace-events
@@ -1181,6 +1181,7 @@ vmware_scratch_write(uint32_t index, uint32_t value) "index %d, value 0x%x"
 vmware_setmode(uint32_t w, uint32_t h, uint32_t bpp) "%dx%d @ %d bpp"
 
 # hw/display/virtio-gpu.c
+virtio_gpu_features(bool virgl) "virgl %d"
 virtio_gpu_cmd_get_display_info(void) ""
 virtio_gpu_cmd_get_caps(void) ""
 virtio_gpu_cmd_set_scanout(uint32_t id, uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "id %d, res 0x%x, w %d, h %d, x %d, y %d"
@@ -1190,7 +1191,14 @@ virtio_gpu_cmd_res_unref(uint32_t res) "res 0x%x"
 virtio_gpu_cmd_res_back_attach(uint32_t res) "res 0x%x"
 virtio_gpu_cmd_res_back_detach(uint32_t res) "res 0x%x"
 virtio_gpu_cmd_res_xfer_toh_2d(uint32_t res) "res 0x%x"
+virtio_gpu_cmd_res_xfer_toh_3d(uint32_t res) "res 0x%x"
+virtio_gpu_cmd_res_xfer_fromh_3d(uint32_t res) "res 0x%x"
 virtio_gpu_cmd_res_flush(uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "res 0x%x, w %d, h %d, x %d, y %d"
+virtio_gpu_cmd_ctx_create(uint32_t ctx, const char *name) "ctx 0x%x, name %s"
+virtio_gpu_cmd_ctx_destroy(uint32_t ctx) "ctx 0x%x"
+virtio_gpu_cmd_ctx_res_attach(uint32_t ctx, uint32_t res) "ctx 0x%x, res 0x%x"
+virtio_gpu_cmd_ctx_res_detach(uint32_t ctx, uint32_t res) "ctx 0x%x, res 0x%x"
+virtio_gpu_cmd_ctx_submit(uint32_t ctx, uint32_t size) "ctx 0x%x, size %d"
 virtio_gpu_fence_ctrl(uint64_t fence, uint32_t type) "fence 0x%" PRIx64 ", type 0x%x"
 virtio_gpu_fence_resp(uint64_t fence) "fence 0x%" PRIx64
 
-- 
1.8.3.1



