From: Max Reitz
Subject: Re: [Qemu-devel] [PATCH v2 4/7] console-gl: add opengl rendering helper functions
Date: Mon, 19 Jan 2015 11:05:14 -0500

On 2015-01-19 at 08:36, Gerd Hoffmann wrote:
Signed-off-by: Gerd Hoffmann <address@hidden>
---
  configure            |   2 +-
  include/ui/console.h |  31 ++++++
  ui/Makefile.objs     |   5 +
  ui/console-gl.c      | 286 +++++++++++++++++++++++++++++++++++++++++++++++++++
  4 files changed, 323 insertions(+), 1 deletion(-)
  create mode 100644 ui/console-gl.c

diff --git a/configure b/configure
index 6d673c1..e6b29a6 100755
--- a/configure
+++ b/configure
@@ -3069,7 +3069,7 @@ libs_softmmu="$libs_softmmu $fdt_libs"
  ##########################################
  # opengl probe (for sdl2, milkymist-tmu2)
  if test "$opengl" != "no" ; then
-  opengl_pkgs="gl"
+  opengl_pkgs="gl glesv2"

I don't know anything about GLES except that it is (as I've been told) mostly similar to normal OpenGL. I hope that'll suffice…

    if $pkg_config $opengl_pkgs x11; then
    opengl_cflags="$($pkg_config --cflags $opengl_pkgs) $x11_cflags"
      opengl_libs="$($pkg_config --libs $opengl_pkgs) $x11_libs"
diff --git a/include/ui/console.h b/include/ui/console.h
index 22ef8ca..9157b64 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -9,6 +9,11 @@
  #include "qapi-types.h"
  #include "qapi/error.h"
+#ifdef CONFIG_OPENGL
+# include <GLES2/gl2.h>
+# include <GLES2/gl2ext.h>
+#endif
+
  /* keyboard/mouse support */
  #define MOUSE_EVENT_LBUTTON 0x01
@@ -118,6 +123,11 @@ struct DisplaySurface {
      pixman_format_code_t format;
      pixman_image_t *image;
      uint8_t flags;
+#ifdef CONFIG_OPENGL
+    GLenum glformat;
+    GLenum gltype;
+    GLuint texture;
+#endif
  };
  typedef struct QemuUIInfo {
@@ -320,6 +330,27 @@ void qemu_console_copy(QemuConsole *con, int src_x, int src_y,
  DisplaySurface *qemu_console_surface(QemuConsole *con);
  DisplayState *qemu_console_displaystate(QemuConsole *console);
+/* console-gl.c */
+typedef struct ConsoleGLState ConsoleGLState;
+#ifdef CONFIG_OPENGL
+ConsoleGLState *console_gl_init_context(void);
+void console_gl_fini_context(ConsoleGLState *gls);
+bool console_gl_check_format(DisplayChangeListener *dcl,
+                             pixman_format_code_t format);
+void surface_gl_create_texture(ConsoleGLState *gls,
+                               DisplaySurface *surface);
+void surface_gl_update_texture(ConsoleGLState *gls,
+                               DisplaySurface *surface,
+                               int x, int y, int w, int h);
+void surface_gl_render_texture(ConsoleGLState *gls,
+                               DisplaySurface *surface);
+void surface_gl_destroy_texture(ConsoleGLState *gls,
+                                DisplaySurface *surface);
+void surface_gl_setup_viewport(ConsoleGLState *gls,
+                               DisplaySurface *surface,
+                               int ww, int wh);
+#endif
+
  /* sdl.c */
  void sdl_display_init(DisplayState *ds, int full_screen, int no_frame);
diff --git a/ui/Makefile.objs b/ui/Makefile.objs
index 13b5cfb..3173778 100644
--- a/ui/Makefile.objs
+++ b/ui/Makefile.objs
@@ -24,4 +24,9 @@ sdl.mo-objs := sdl2.o sdl2-input.o sdl2-2d.o
  endif
  sdl.mo-cflags := $(SDL_CFLAGS)
+ifeq ($(CONFIG_OPENGL),y)
+common-obj-y += console-gl.o
+libs_softmmu += $(OPENGL_LIBS)
+endif
+
  gtk.o-cflags := $(GTK_CFLAGS) $(VTE_CFLAGS)
diff --git a/ui/console-gl.c b/ui/console-gl.c
new file mode 100644
index 0000000..589c682
--- /dev/null
+++ b/ui/console-gl.c
@@ -0,0 +1,286 @@
+/*
+ * QEMU graphical console -- opengl helper bits
+ *
+ * Copyright (c) 2014 Red Hat
+ *
+ * Authors:
+ *    Gerd Hoffmann <address@hidden>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu-common.h"
+#include "ui/console.h"
+
+struct ConsoleGLState {
+    GLint texture_blit_prog;
+};
+
+/* ---------------------------------------------------------------------- */
+
+static GLchar texture_blit_vert_src[] =
+    "\n"
+    "#version 300 es\n"
+    "\n"
+    "in vec2  in_position;\n"
+    "in vec2  in_tex_coord;\n"

You could calculate the texture coordinate from the position in the shader, but this is mostly my premature optimization instinct kicking in instead of a real performance difference considering how few vertices there are in this case.
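For example (untested sketch), the vertex shader could drop in_tex_coord entirely and compute

    ex_tex_coord = vec2(in_position.x, -in_position.y) * 0.5 + 0.5;

which maps the [-1, 1] quad onto [0, 1] texture space, including the vertical flip that the coordinate arrays below encode.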

+    "out vec2 ex_tex_coord;\n"
+    "\n"
+    "void main(void) {\n"
+    "    gl_Position = vec4(in_position.x, in_position.y, 0.0, 1.0);\n"

vec4(in_position, 0.0, 1.0) *cough*

Okay, perhaps it's OCD and not a premature optimization instinct.

+    "    ex_tex_coord = in_tex_coord;\n"
+    "}\n"
+    "\n";
+
+static GLchar texture_blit_frag_src[] =
+    "\n"
+    "#version 300 es\n"
+    "\n"
+    "uniform sampler2D image;\n"
+    "in  highp vec2 ex_tex_coord;\n"
+    "out highp vec4 out_frag_color;\n"
+    "\n"
+    "void main(void) {\n"
+    "     out_frag_color = texture(image, ex_tex_coord);\n"
+    "}\n"
+    "\n";
+
+static void gl_run_texture_blit(ConsoleGLState *gls)
+{
+    GLfloat in_position[] = {
+        -1,  1,
+        -1, -1,
+         1, -1,
+        -1,  1,
+         1,  1,
+         1, -1,

This looks like a vertex list for GL_TRIANGLES instead of GL_TRIANGLE_STRIP. With GL_TRIANGLE_STRIP, the first three vertices define one whole triangle, and each vertex after that produces a new triangle together with the two vertices immediately preceding it.

Therefore, for a quad, you only need four vertices in GL_TRIANGLE_STRIP mode. You will find that the following works just fine:

GLfloat in_position[] = {
    -1, -1,
     1, -1,
    -1,  1,
     1,  1,
};

(1)

+    };
+    GLfloat in_tex_coord[] = {
+        0, 0,
+        0, 1,
+        1, 1,
+        0, 0,
+        1, 0,
+        1, 1,
+    };

(1) and

GLfloat in_tex_coord[] = {
    0, 1,
    1, 1,
    0, 0,
    1, 0,
};

here.

+    GLint l_position;
+    GLint l_tex_coord;
+
+    glUseProgram(gls->texture_blit_prog);
+    l_position = glGetAttribLocation(gls->texture_blit_prog, "in_position");
+    l_tex_coord = glGetAttribLocation(gls->texture_blit_prog, "in_tex_coord");
+    glVertexAttribPointer(l_position, 2, GL_FLOAT, GL_FALSE, 0, in_position);
+    glVertexAttribPointer(l_tex_coord, 2, GL_FLOAT, GL_FALSE, 0, in_tex_coord);

I think referring to client memory rather than OpenGL buffer objects here is disallowed in the core profile. The 4.4 core specification states (section 10.3, vertex arrays): "Vertex data are placed into arrays that are stored in the server’s address space"; the 4.4 compatibility specification states: "Vertex data may also be placed into arrays that are stored in the client’s address space (described here) or in the server’s address space".

("client" refers to the main memory, "server" refers to the video memory, as far as I know)

As I said before, though, I am completely fine with going for the compatibility profile for now and making it core compliant later on.
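If/when that happens, the core-compliant route is to copy the vertex data into a buffer object first, roughly like this (untested):

GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(in_position), in_position, GL_STREAM_DRAW);
glVertexAttribPointer(l_position, 2, GL_FLOAT, GL_FALSE, 0, (void *)0);

With a buffer bound to GL_ARRAY_BUFFER, the last glVertexAttribPointer() argument becomes an offset into that buffer instead of a client pointer. (A vertex array object is needed on top of that for core contexts.)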

+    glEnableVertexAttribArray(l_position);
+    glEnableVertexAttribArray(l_tex_coord);
+    glDrawArrays(GL_TRIANGLE_STRIP, l_position, 6);

(1) and of course s/6/4/ here.
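Also, strictly speaking the second argument of glDrawArrays() is the index of the first vertex to draw, not an attribute location; passing l_position only works by accident when that attribute happens to get location 0. So the call would presumably become:

glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);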

+}
+
+/* ---------------------------------------------------------------------- */
+
+static GLuint gl_create_compile_shader(GLenum type, const GLchar *src)
+{
+    GLuint shader;
+    GLint status, length;
+    char *errmsg;
+
+    shader = glCreateShader(type);
+    glShaderSource(shader, 1, &src, 0);
+    glCompileShader(shader);
+
+    glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
+    if (!status) {
+        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &length);
+        errmsg = malloc(length);
+        glGetShaderInfoLog(shader, length, &length, errmsg);
+        fprintf(stderr, "%s: compile %s error\n%s\n", __func__,
+                (type == GL_VERTEX_SHADER) ? "vertex" : "fragment",
+                errmsg);
+        free(errmsg);
+        return 0;
+    }
+    return shader;
+}
+
+static GLuint gl_create_link_program(GLuint vert, GLuint frag)
+{
+    GLuint program;
+    GLint status, length;
+    char *errmsg;
+
+    program = glCreateProgram();
+    glAttachShader(program, vert);
+    glAttachShader(program, frag);
+    glLinkProgram(program);
+
+    glGetProgramiv(program, GL_LINK_STATUS, &status);
+    if (!status) {
+        glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length);
+        errmsg = malloc(length);
+        glGetProgramInfoLog(program, length, &length, errmsg);
+        fprintf(stderr, "%s: link program: %s\n", __func__, errmsg);
+        free(errmsg);
+        return 0;
+    }
+    return program;
+}
+
+static GLuint gl_create_compile_link_program(const GLchar *vert_src,
+                                             const GLchar *frag_src)
+{
+    GLuint vert_shader, frag_shader;
+
+    vert_shader = gl_create_compile_shader(GL_VERTEX_SHADER, vert_src);
+    frag_shader = gl_create_compile_shader(GL_FRAGMENT_SHADER, frag_src);
+    if (!vert_shader || !frag_shader) {
+        return 0;
+    }
+
+    return gl_create_link_program(vert_shader, frag_shader);

Minor thing: You are free to delete the shaders after they have been linked into the program (because the references will be lost when this function returns).
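That is, something like

glDeleteShader(vert_shader);
glDeleteShader(frag_shader);

right before the return; the GL merely flags attached shaders for deletion, so they stay alive for as long as the program needs them.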

+}
+
+/* ---------------------------------------------------------------------- */
+
+ConsoleGLState *console_gl_init_context(void)
+{
+    ConsoleGLState *gls = g_new0(ConsoleGLState, 1);
+
+    gls->texture_blit_prog = gl_create_compile_link_program
+        (texture_blit_vert_src, texture_blit_frag_src);
+    if (!gls->texture_blit_prog) {
+        exit(1);
+    }
+
+    return gls;
+}
+
+void console_gl_fini_context(ConsoleGLState *gls)
+{
+    if (!gls) {
+        return;
+    }
+    g_free(gls);
+}
+
+bool console_gl_check_format(DisplayChangeListener *dcl,
+                             pixman_format_code_t format)
+{
+    switch (format) {
+    case PIXMAN_BE_b8g8r8x8:
+    case PIXMAN_BE_b8g8r8a8:
+    case PIXMAN_r5g6b5:
+        return true;
+    default:
+        return false;
+    }
+}
+
+void surface_gl_create_texture(ConsoleGLState *gls,
+                               DisplaySurface *surface)
+{
+    assert(gls);
+
+    switch (surface->format) {
+    case PIXMAN_BE_b8g8r8x8:
+    case PIXMAN_BE_b8g8r8a8:
+        surface->glformat = GL_BGRA_EXT;

If you want to avoid the _EXT, you could use GL_RGBA here and texture().bgra in the fragment shader. However, this would require a different shader for the 32 bit and the 16 bit formats (because the 16 bit format has the right order, apparently).

I'm voting for keeping GL_BGRA_EXT until someone complains.
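(For the record, that alternative would mean uploading with GL_RGBA here and, in the 32 bit fragment shader, something like

out_frag_color = texture(image, ex_tex_coord).bgra;

while the 16 bit shader keeps the plain texture() call.)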

+        surface->gltype = GL_UNSIGNED_BYTE;
+        break;
+    case PIXMAN_r5g6b5:
+        surface->glformat = GL_RGB;
+        surface->gltype = GL_UNSIGNED_SHORT_5_6_5;

I haven't been able to really test 16 bit mode (forcing 16 bit mode in hw/display/vga.c doesn't count...), so I'm trusting you this works.

+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    glGenTextures(1, &surface->texture);
+    glEnable(GL_TEXTURE_2D);
+    glBindTexture(GL_TEXTURE_2D, surface->texture);
+    glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
+                  surface_stride(surface) / surface_bytes_per_pixel(surface));
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
+                 surface_width(surface),
+                 surface_height(surface),
+                 0, surface->glformat, surface->gltype,
+                 surface_data(surface));
+
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+}
+
+void surface_gl_update_texture(ConsoleGLState *gls,
+                               DisplaySurface *surface,
+                               int x, int y, int w, int h)
+{
+    uint8_t *data = (void *)surface_data(surface);
+
+    assert(gls);
+
+    glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
+                  surface_stride(surface) / surface_bytes_per_pixel(surface));

Maybe we should assert that surface_stride(surface) % surface_bytes_per_pixel(surface) == 0 here.
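i.e. something like

assert(surface_stride(surface) % surface_bytes_per_pixel(surface) == 0);

right before the division.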

+    glTexSubImage2D(GL_TEXTURE_2D, 0,
+                    x, y, w, h,
+                    surface->glformat, surface->gltype,
+                    data + surface_stride(surface) * y
+                    + surface_bytes_per_pixel(surface) * x);
+}
+
+void surface_gl_render_texture(ConsoleGLState *gls,
+                               DisplaySurface *surface)
+{
+    assert(gls);
+
+    glClearColor(0.1f, 0.1f, 0.1f, 0.0f);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    gl_run_texture_blit(gls);
+}
+
+void surface_gl_destroy_texture(ConsoleGLState *gls,
+                                DisplaySurface *surface)
+{
+    if (!surface || !surface->texture) {
+        return;
+    }
+    glDeleteTextures(1, &surface->texture);
+    surface->texture = 0;
+}
+
+void surface_gl_setup_viewport(ConsoleGLState *gls,
+                               DisplaySurface *surface,
+                               int ww, int wh)
+{
+    int gw, gh, stripe;
+    float sw, sh;
+
+    assert(gls);
+
+    gw = surface_width(surface);
+    gh = surface_height(surface);
+
+    sw = (float)ww/gw;
+    sh = (float)wh/gh;
+    if (sw < sh) {
+        stripe = wh - wh*sw/sh;
+        glViewport(0, stripe / 2, ww, wh - stripe);
+    } else {
+        stripe = ww - ww*sh/sw;
+        glViewport(stripe / 2, 0, ww - stripe, wh);
+    }
+}

With the vertex data changed to make use of GL_TRIANGLE_STRIP (with or without the stride assertion, and with or without deleting the shaders after the program has been linked):

Reviewed-by: Max Reitz <address@hidden>


