1 /*
2 * Copyright © 2012 Intel Corporation
3 * Copyright © 2015 Collabora, Ltd.
4 * Copyright © 2016 NVIDIA Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
22 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
23 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
24 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 * SOFTWARE.
26 */
27
28 #include "config.h"
29
30 #include <GLES2/gl2.h>
31 #include <GLES2/gl2ext.h>
32
33 #include <stdbool.h>
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <ctype.h>
38 #include <float.h>
39 #include <assert.h>
40 #include <linux/input.h>
41 #include <drm_fourcc.h>
42 #include <unistd.h>
43
44 #include "linux-sync-file.h"
45
46 #include "timeline.h"
47
48 #include "gl-renderer.h"
49 #include "vertex-clipping.h"
50 #include "linux-dmabuf.h"
51 #include "linux-dmabuf-unstable-v1-server-protocol.h"
52 #include "linux-explicit-synchronization.h"
53 #include "pixel-formats.h"
54
55 #include "shared/fd-util.h"
56 #include "shared/helpers.h"
57 #include "shared/platform.h"
58 #include "shared/timespec-util.h"
59 #include "weston-egl-ext.h"
60
61 #define GR_GL_VERSION(major, minor) \
62 (((uint32_t)(major) << 16) | (uint32_t)(minor))
63
64 #define GR_GL_VERSION_INVALID \
65 GR_GL_VERSION(0, 0)
66
/* A GLSL program plus the uniform locations the renderer needs at
 * draw time. Programs are compiled lazily on first use from the
 * stored source strings (see use_shader()). */
struct gl_shader {
	GLuint program;
	GLuint vertex_shader, fragment_shader;
	GLint proj_uniform;	/* projection matrix */
	GLint tex_uniforms[3];	/* one sampler per plane; up to 3 for YUV */
	GLint alpha_uniform;	/* per-view alpha */
	GLint color_uniform;	/* color for the solid shader */
	const char *vertex_source, *fragment_source;
};
76
77 #define BUFFER_DAMAGE_COUNT 2
78
/* Dirty flags for the per-output border (decoration) images; the
 * per-side bits are positioned by enum gl_renderer_border_side. */
enum gl_border_status {
	BORDER_STATUS_CLEAN = 0,
	BORDER_TOP_DIRTY = 1 << GL_RENDERER_BORDER_TOP,
	BORDER_LEFT_DIRTY = 1 << GL_RENDERER_BORDER_LEFT,
	BORDER_RIGHT_DIRTY = 1 << GL_RENDERER_BORDER_RIGHT,
	BORDER_BOTTOM_DIRTY = 1 << GL_RENDERER_BORDER_BOTTOM,
	BORDER_ALL_DIRTY = 0xf,
	/* A border's size changed, not just its contents. */
	BORDER_SIZE_CHANGED = 0x10
};
88
/* One output border (decoration) image: the CPU-side pixel data and
 * the GL texture it is uploaded to. */
struct gl_border_image {
	GLuint tex;
	int32_t width, height;	/* image size in pixels */
	int32_t tex_width;	/* texture width; tracked separately from width */
	void *data;
};
95
/* Per-output renderer state: the EGL surface rendered into, damage
 * bookkeeping across swapchain buffers, the border images, and
 * in-flight GPU timestamp points. */
struct gl_output_state {
	EGLSurface egl_surface;
	/* Damage accumulated per swapchain buffer, indexed by
	 * buffer_damage_index (presumably paired with
	 * EGL_EXT_buffer_age — confirm at the repaint site). */
	pixman_region32_t buffer_damage[BUFFER_DAMAGE_COUNT];
	int buffer_damage_index;
	enum gl_border_status border_damage[BUFFER_DAMAGE_COUNT];
	/* One image per GL_RENDERER_BORDER_* side. */
	struct gl_border_image borders[4];
	enum gl_border_status border_status;

	struct weston_matrix output_matrix;

	/* Native fence syncs bracketing the current repaint's GPU work,
	 * used to generate timeline timestamps. */
	EGLSyncKHR begin_render_sync, end_render_sync;

	/* struct timeline_render_point::link */
	struct wl_list timeline_render_point_list;
};
111
/* Classification of the buffer currently attached to a surface. */
enum buffer_type {
	BUFFER_TYPE_NULL,
	BUFFER_TYPE_SOLID, /* internal solid color surfaces without a buffer */
	BUFFER_TYPE_SHM,
	BUFFER_TYPE_EGL
};
118
119 struct gl_renderer;
120
/* Reference-counted wrapper around an EGLImageKHR; shared between
 * surface states and dmabuf imports (see egl_image_ref()/unref()). */
struct egl_image {
	struct gl_renderer *renderer;
	EGLImageKHR image;
	int refcount;
};
126
/* How a dmabuf was imported: directly as a textureable EGLImage, or
 * plane-by-plane with GL performing the color conversion. */
enum import_type {
	IMPORT_TYPE_INVALID,
	IMPORT_TYPE_DIRECT,
	IMPORT_TYPE_GL_CONVERSION
};
132
/* Renderer bookkeeping for an imported dmabuf: the EGLImages created
 * from its planes, how it was imported, and the shader/target used
 * to sample it. */
struct dmabuf_image {
	struct linux_dmabuf_buffer *dmabuf;
	int num_images;
	struct egl_image *images[3];
	struct wl_list link;	/* gl_renderer::dmabuf_images */

	enum import_type import_type;
	GLenum target;		/* GL texture target to bind */
	struct gl_shader *shader;
};
143
/* How one GL texture plane is carved out of a YUV buffer: the
 * subsampling divisors, the format to sample it as, and which input
 * plane it comes from. */
struct yuv_plane_descriptor {
	int width_divisor;
	int height_divisor;
	uint32_t format;
	int plane_index;
};
150
/* A supported YUV format: how many planes come in from the buffer,
 * how many GL textures come out, and the per-plane layout. */
struct yuv_format_descriptor {
	uint32_t format;
	int input_planes;
	int output_planes;
	int texture_type;
	struct yuv_plane_descriptor plane[4];
};
158
/* Per-surface renderer state: the textures/EGLImages holding the
 * surface contents, pending SHM upload damage, and references to the
 * attached buffer. */
struct gl_surface_state {
	GLfloat color[4];	/* color for BUFFER_TYPE_SOLID */
	struct gl_shader *shader;

	GLuint textures[3];
	int num_textures;
	bool needs_full_upload;	/* SHM: respecify the whole texture */
	pixman_region32_t texture_damage;

	/* These are only used by SHM surfaces to detect when we need
	 * to do a full upload to specify a new internal texture
	 * format */
	GLenum gl_format[3];
	GLenum gl_pixel_type;

	struct egl_image* images[3];
	GLenum target;		/* GL texture target for all planes */
	int num_images;

	struct weston_buffer_reference buffer_ref;
	struct weston_buffer_release_reference buffer_release_ref;
	enum buffer_type buffer_type;
	int pitch; /* in pixels */
	int height; /* in pixels */
	/* If set, texcoords are used as-is; otherwise they are flipped
	 * vertically (see texture_region()). */
	bool y_inverted;

	/* Extension needed for SHM YUV texture */
	int offset[3]; /* offset per plane */
	int hsub[3];  /* horizontal subsampling per plane */
	int vsub[3];  /* vertical subsampling per plane */

	struct weston_surface *surface;

	/* Whether this surface was used in the current output repaint.
	   Used only in the context of a gl_renderer_repaint_output call. */
	bool used_in_output_repaint;

	struct wl_listener surface_destroy_listener;
	struct wl_listener renderer_destroy_listener;
};
199
/* The GL renderer instance: core EGL objects, extension entry points
 * and availability flags resolved at init time, scratch vertex
 * arrays, and the built-in shader set. */
struct gl_renderer {
	struct weston_renderer base;
	bool fragment_shader_debug;
	bool fan_debug;		/* outline fans; see triangle_fan_debug() */
	struct weston_binding *fragment_binding;
	struct weston_binding *fan_binding;

	EGLDisplay egl_display;
	EGLContext egl_context;
	EGLConfig egl_config;

	EGLSurface dummy_surface;

	uint32_t gl_version;	/* packed with GR_GL_VERSION() */

	/* Scratch arrays filled by texture_region() and drained by
	 * repaint_region(). */
	struct wl_array vertices;
	struct wl_array vtxcnt;

	/* EGL/GL extension entry points, resolved at init. */
	PFNGLEGLIMAGETARGETTEXTURE2DOESPROC image_target_texture_2d;
	PFNEGLCREATEIMAGEKHRPROC create_image;
	PFNEGLDESTROYIMAGEKHRPROC destroy_image;
	PFNEGLSWAPBUFFERSWITHDAMAGEEXTPROC swap_buffers_with_damage;
	PFNEGLCREATEPLATFORMWINDOWSURFACEEXTPROC create_platform_window;

	bool has_unpack_subimage;

	PFNEGLBINDWAYLANDDISPLAYWL bind_display;
	PFNEGLUNBINDWAYLANDDISPLAYWL unbind_display;
	PFNEGLQUERYWAYLANDBUFFERWL query_buffer;
	bool has_bind_display;

	bool has_context_priority;

	bool has_egl_image_external;

	bool has_egl_buffer_age;
	bool has_egl_partial_update;
	PFNEGLSETDAMAGEREGIONKHRPROC set_damage_region;

	bool has_configless_context;

	bool has_surfaceless_context;

	bool has_dmabuf_import;
	struct wl_list dmabuf_images;	/* struct dmabuf_image::link */

	bool has_gl_texture_rg;

	/* Built-in shaders, one per texture/color layout. */
	struct gl_shader texture_shader_rgba;
	struct gl_shader texture_shader_rgbx;
	struct gl_shader texture_shader_egl_external;
	struct gl_shader texture_shader_y_uv;
	struct gl_shader texture_shader_y_u_v;
	struct gl_shader texture_shader_y_xuxv;
	struct gl_shader invert_color_shader;
	struct gl_shader solid_shader;
	struct gl_shader *current_shader;	/* last bound; see use_shader() */

	struct wl_signal destroy_signal;

	struct wl_listener output_destroy_listener;

	bool has_dmabuf_import_modifiers;
	PFNEGLQUERYDMABUFFORMATSEXTPROC query_dmabuf_formats;
	PFNEGLQUERYDMABUFMODIFIERSEXTPROC query_dmabuf_modifiers;

	bool has_native_fence_sync;
	PFNEGLCREATESYNCKHRPROC create_sync;
	PFNEGLDESTROYSYNCKHRPROC destroy_sync;
	PFNEGLDUPNATIVEFENCEFDANDROIDPROC dup_native_fence_fd;

	bool has_wait_sync;
	PFNEGLWAITSYNCKHRPROC wait_sync;
};
274
/* Whether a timeline point marks the start or end of a repaint's
 * GPU work. */
enum timeline_render_point_type {
	TIMELINE_RENDER_POINT_TYPE_BEGIN,
	TIMELINE_RENDER_POINT_TYPE_END
};
279
/* A pending GPU timestamp: a native fence fd watched by the event
 * loop; when readable, the timestamp is read and logged, then the
 * point is destroyed (see timeline_render_point_handler()). */
struct timeline_render_point {
	struct wl_list link; /* gl_output_state::timeline_render_point_list */

	enum timeline_render_point_type type;
	int fd;		/* native fence fd; owned by this struct */
	struct weston_output *output;
	struct wl_event_source *event_source;
};
288
289 static PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display = NULL;
290
static inline const char *
dump_format(uint32_t format, char out[4])
{
	/* Write the DRM fourcc as its four ASCII bytes, low byte first,
	 * into 'out' (not NUL-terminated) and return it for logging. */
	uint32_t le = format;

#if BYTE_ORDER == BIG_ENDIAN
	/* fourcc byte order is little-endian; swap on BE hosts. */
	le = __builtin_bswap32(le);
#endif
	memcpy(out, &le, sizeof le);

	return out;
}
300
301 static inline struct gl_output_state *
get_output_state(struct weston_output * output)302 get_output_state(struct weston_output *output)
303 {
304 return (struct gl_output_state *)output->renderer_state;
305 }
306
307 static int
308 gl_renderer_create_surface(struct weston_surface *surface);
309
/* Fetch the per-surface renderer state, lazily creating it on first
 * access.
 * NOTE(review): the return value of gl_renderer_create_surface() is
 * ignored; on failure renderer_state stays NULL and this returns
 * NULL — confirm all callers tolerate that. */
static inline struct gl_surface_state *
get_surface_state(struct weston_surface *surface)
{
	if (!surface->renderer_state)
		gl_renderer_create_surface(surface);

	return (struct gl_surface_state *)surface->renderer_state;
}
318
319 static inline struct gl_renderer *
get_renderer(struct weston_compositor * ec)320 get_renderer(struct weston_compositor *ec)
321 {
322 return (struct gl_renderer *)ec->renderer;
323 }
324
325 static void
timeline_render_point_destroy(struct timeline_render_point * trp)326 timeline_render_point_destroy(struct timeline_render_point *trp)
327 {
328 wl_list_remove(&trp->link);
329 wl_event_source_remove(trp->event_source);
330 close(trp->fd);
331 free(trp);
332 }
333
334 static int
timeline_render_point_handler(int fd,uint32_t mask,void * data)335 timeline_render_point_handler(int fd, uint32_t mask, void *data)
336 {
337 struct timeline_render_point *trp = data;
338 const char *tp_name = trp->type == TIMELINE_RENDER_POINT_TYPE_BEGIN ?
339 "renderer_gpu_begin" : "renderer_gpu_end";
340
341 if (mask & WL_EVENT_READABLE) {
342 struct timespec tspec = { 0 };
343
344 if (weston_linux_sync_file_read_timestamp(trp->fd,
345 &tspec) == 0) {
346 TL_POINT(tp_name, TLP_GPU(&tspec),
347 TLP_OUTPUT(trp->output), TLP_END);
348 }
349 }
350
351 timeline_render_point_destroy(trp);
352
353 return 0;
354 }
355
356 static EGLSyncKHR
create_render_sync(struct gl_renderer * gr)357 create_render_sync(struct gl_renderer *gr)
358 {
359 static const EGLint attribs[] = { EGL_NONE };
360
361 if (!gr->has_native_fence_sync)
362 return EGL_NO_SYNC_KHR;
363
364 return gr->create_sync(gr->egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID,
365 attribs);
366 }
367
368 static void
timeline_submit_render_sync(struct gl_renderer * gr,struct weston_compositor * ec,struct weston_output * output,EGLSyncKHR sync,enum timeline_render_point_type type)369 timeline_submit_render_sync(struct gl_renderer *gr,
370 struct weston_compositor *ec,
371 struct weston_output *output,
372 EGLSyncKHR sync,
373 enum timeline_render_point_type type)
374 {
375 struct gl_output_state *go;
376 struct wl_event_loop *loop;
377 int fd;
378 struct timeline_render_point *trp;
379
380 if (!weston_timeline_enabled_ ||
381 !gr->has_native_fence_sync ||
382 sync == EGL_NO_SYNC_KHR)
383 return;
384
385 go = get_output_state(output);
386 loop = wl_display_get_event_loop(ec->wl_display);
387
388 fd = gr->dup_native_fence_fd(gr->egl_display, sync);
389 if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID)
390 return;
391
392 trp = zalloc(sizeof *trp);
393 if (trp == NULL) {
394 close(fd);
395 return;
396 }
397
398 trp->type = type;
399 trp->fd = fd;
400 trp->output = output;
401 trp->event_source = wl_event_loop_add_fd(loop, fd,
402 WL_EVENT_READABLE,
403 timeline_render_point_handler,
404 trp);
405
406 wl_list_insert(&go->timeline_render_point_list, &trp->link);
407 }
408
409 static struct egl_image*
egl_image_create(struct gl_renderer * gr,EGLenum target,EGLClientBuffer buffer,const EGLint * attribs)410 egl_image_create(struct gl_renderer *gr, EGLenum target,
411 EGLClientBuffer buffer, const EGLint *attribs)
412 {
413 struct egl_image *img;
414
415 img = zalloc(sizeof *img);
416 img->renderer = gr;
417 img->refcount = 1;
418 img->image = gr->create_image(gr->egl_display, EGL_NO_CONTEXT,
419 target, buffer, attribs);
420
421 if (img->image == EGL_NO_IMAGE_KHR) {
422 free(img);
423 return NULL;
424 }
425
426 return img;
427 }
428
429 static struct egl_image*
egl_image_ref(struct egl_image * image)430 egl_image_ref(struct egl_image *image)
431 {
432 image->refcount++;
433
434 return image;
435 }
436
437 static int
egl_image_unref(struct egl_image * image)438 egl_image_unref(struct egl_image *image)
439 {
440 struct gl_renderer *gr = image->renderer;
441
442 assert(image->refcount > 0);
443
444 image->refcount--;
445 if (image->refcount > 0)
446 return image->refcount;
447
448 gr->destroy_image(gr->egl_display, image->image);
449 free(image);
450
451 return 0;
452 }
453
454 static struct dmabuf_image*
dmabuf_image_create(void)455 dmabuf_image_create(void)
456 {
457 struct dmabuf_image *img;
458
459 img = zalloc(sizeof *img);
460 wl_list_init(&img->link);
461
462 return img;
463 }
464
465 static void
dmabuf_image_destroy(struct dmabuf_image * image)466 dmabuf_image_destroy(struct dmabuf_image *image)
467 {
468 int i;
469
470 for (i = 0; i < image->num_images; ++i)
471 egl_image_unref(image->images[i]);
472
473 if (image->dmabuf)
474 linux_dmabuf_buffer_set_user_data(image->dmabuf, NULL, NULL);
475
476 wl_list_remove(&image->link);
477 free(image);
478 }
479
480 static const char *
egl_error_string(EGLint code)481 egl_error_string(EGLint code)
482 {
483 #define MYERRCODE(x) case x: return #x;
484 switch (code) {
485 MYERRCODE(EGL_SUCCESS)
486 MYERRCODE(EGL_NOT_INITIALIZED)
487 MYERRCODE(EGL_BAD_ACCESS)
488 MYERRCODE(EGL_BAD_ALLOC)
489 MYERRCODE(EGL_BAD_ATTRIBUTE)
490 MYERRCODE(EGL_BAD_CONTEXT)
491 MYERRCODE(EGL_BAD_CONFIG)
492 MYERRCODE(EGL_BAD_CURRENT_SURFACE)
493 MYERRCODE(EGL_BAD_DISPLAY)
494 MYERRCODE(EGL_BAD_SURFACE)
495 MYERRCODE(EGL_BAD_MATCH)
496 MYERRCODE(EGL_BAD_PARAMETER)
497 MYERRCODE(EGL_BAD_NATIVE_PIXMAP)
498 MYERRCODE(EGL_BAD_NATIVE_WINDOW)
499 MYERRCODE(EGL_CONTEXT_LOST)
500 default:
501 return "unknown";
502 }
503 #undef MYERRCODE
504 }
505
506 static void
gl_renderer_print_egl_error_state(void)507 gl_renderer_print_egl_error_state(void)
508 {
509 EGLint code;
510
511 code = eglGetError();
512 weston_log("EGL error state: %s (0x%04lx)\n",
513 egl_error_string(code), (long)code);
514 }
515
516 #define max(a, b) (((a) > (b)) ? (a) : (b))
517 #define min(a, b) (((a) > (b)) ? (b) : (a))
518
519 /*
520 * Compute the boundary vertices of the intersection of the global coordinate
521 * aligned rectangle 'rect', and an arbitrary quadrilateral produced from
522 * 'surf_rect' when transformed from surface coordinates into global coordinates.
523 * The vertices are written to 'ex' and 'ey', and the return value is the
524 * number of vertices. Vertices are produced in clockwise winding order.
525 * Guarantees to produce either zero vertices, or 3-8 vertices with non-zero
526 * polygon area.
527 */
/* Compute the clipped polygon for one (clip rect, surface rect)
 * pair; contract is documented in the comment block above. Writes up
 * to 8 vertices into ex[]/ey[] and returns the vertex count, or 0
 * when the intersection is empty or degenerate. */
static int
calculate_edges(struct weston_view *ev, pixman_box32_t *rect,
		pixman_box32_t *surf_rect, GLfloat *ex, GLfloat *ey)
{

	struct clip_context ctx;
	int i, n;
	GLfloat min_x, max_x, min_y, max_y;
	/* The surface rect as a 4-vertex polygon, clockwise, initially
	 * in surface coordinates. */
	struct polygon8 surf = {
		{ surf_rect->x1, surf_rect->x2, surf_rect->x2, surf_rect->x1 },
		{ surf_rect->y1, surf_rect->y1, surf_rect->y2, surf_rect->y2 },
		4
	};

	ctx.clip.x1 = rect->x1;
	ctx.clip.y1 = rect->y1;
	ctx.clip.x2 = rect->x2;
	ctx.clip.y2 = rect->y2;

	/* transform surface to screen space: */
	for (i = 0; i < surf.n; i++)
		weston_view_to_global_float(ev, surf.x[i], surf.y[i],
					    &surf.x[i], &surf.y[i]);

	/* find bounding box: */
	min_x = max_x = surf.x[0];
	min_y = max_y = surf.y[0];

	for (i = 1; i < surf.n; i++) {
		min_x = min(min_x, surf.x[i]);
		max_x = max(max_x, surf.x[i]);
		min_y = min(min_y, surf.y[i]);
		max_y = max(max_y, surf.y[i]);
	}

	/* First, simple bounding box check to discard early transformed
	 * surface rects that do not intersect with the clip region:
	 */
	if ((min_x >= ctx.clip.x2) || (max_x <= ctx.clip.x1) ||
	    (min_y >= ctx.clip.y2) || (max_y <= ctx.clip.y1))
		return 0;

	/* Simple case, bounding box edges are parallel to surface edges,
	 * there will be only four edges. We just need to clip the surface
	 * vertices to the clip rect bounds:
	 */
	if (!ev->transform.enabled)
		return clip_simple(&ctx, &surf, ex, ey);

	/* Transformed case: use a general polygon clipping algorithm to
	 * clip the surface rectangle with each side of 'rect'.
	 * The algorithm is Sutherland-Hodgman, as explained in
	 * http://www.codeguru.com/cpp/misc/misc/graphics/article.php/c8965/Polygon-Clipping.htm
	 * but without looking at any of that code.
	 */
	n = clip_transformed(&ctx, &surf, ex, ey);

	/* Fewer than 3 vertices means zero area. */
	if (n < 3)
		return 0;

	return n;
}
590
591 static bool
merge_down(pixman_box32_t * a,pixman_box32_t * b,pixman_box32_t * merge)592 merge_down(pixman_box32_t *a, pixman_box32_t *b, pixman_box32_t *merge)
593 {
594 if (a->x1 == b->x1 && a->x2 == b->x2 && a->y1 == b->y2) {
595 merge->x1 = a->x1;
596 merge->x2 = a->x2;
597 merge->y1 = b->y1;
598 merge->y2 = a->y2;
599 return true;
600 }
601 return false;
602 }
603
604 static int
compress_bands(pixman_box32_t * inrects,int nrects,pixman_box32_t ** outrects)605 compress_bands(pixman_box32_t *inrects, int nrects,
606 pixman_box32_t **outrects)
607 {
608 bool merged = false;
609 pixman_box32_t *out, merge_rect;
610 int i, j, nout;
611
612 if (!nrects) {
613 *outrects = NULL;
614 return 0;
615 }
616
617 /* nrects is an upper bound - we're not too worried about
618 * allocating a little extra
619 */
620 out = malloc(sizeof(pixman_box32_t) * nrects);
621 out[0] = inrects[0];
622 nout = 1;
623 for (i = 1; i < nrects; i++) {
624 for (j = 0; j < nout; j++) {
625 merged = merge_down(&inrects[i], &out[j], &merge_rect);
626 if (merged) {
627 out[j] = merge_rect;
628 break;
629 }
630 }
631 if (!merged) {
632 out[nout] = inrects[i];
633 nout++;
634 }
635 }
636 *outrects = out;
637 return nout;
638 }
639
/* Build triangle-fan vertex data for the intersection of 'region'
 * (global coordinates) and 'surf_region' (surface coordinates).
 * Interleaved x, y, u, v vertices are appended to gr->vertices and
 * per-fan vertex counts to gr->vtxcnt; returns the number of fans
 * produced. */
static int
texture_region(struct weston_view *ev, pixman_region32_t *region,
	       pixman_region32_t *surf_region)
{
	struct gl_surface_state *gs = get_surface_state(ev->surface);
	struct weston_compositor *ec = ev->surface->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	GLfloat *v, inv_width, inv_height;
	unsigned int *vtxcnt, nvtx = 0;
	pixman_box32_t *rects, *surf_rects;
	pixman_box32_t *raw_rects;
	int i, j, k, nrects, nsurf, raw_nrects;
	bool used_band_compression;
	raw_rects = pixman_region32_rectangles(region, &raw_nrects);
	surf_rects = pixman_region32_rectangles(surf_region, &nsurf);

	/* Only compress bands when there are enough rects for the
	 * merge to plausibly pay off. */
	if (raw_nrects < 4) {
		used_band_compression = false;
		nrects = raw_nrects;
		rects = raw_rects;
	} else {
		nrects = compress_bands(raw_rects, raw_nrects, &rects);
		used_band_compression = true;
	}
	/* worst case we can have 8 vertices per rect (ie. clipped into
	 * an octagon):
	 */
	/* NOTE(review): wl_array_add() return values are unchecked; an
	 * allocation failure here would crash — confirm acceptable. */
	v = wl_array_add(&gr->vertices, nrects * nsurf * 8 * 4 * sizeof *v);
	vtxcnt = wl_array_add(&gr->vtxcnt, nrects * nsurf * sizeof *vtxcnt);

	/* Scale factors from buffer pixels to [0,1] texture coords. */
	inv_width = 1.0 / gs->pitch;
	inv_height = 1.0 / gs->height;

	for (i = 0; i < nrects; i++) {
		pixman_box32_t *rect = &rects[i];
		for (j = 0; j < nsurf; j++) {
			pixman_box32_t *surf_rect = &surf_rects[j];
			GLfloat sx, sy, bx, by;
			GLfloat ex[8], ey[8]; /* edge points in screen space */
			int n;

			/* The transformed surface, after clipping to the clip region,
			 * can have as many as eight sides, emitted as a triangle-fan.
			 * The first vertex in the triangle fan can be chosen arbitrarily,
			 * since the area is guaranteed to be convex.
			 *
			 * If a corner of the transformed surface falls outside of the
			 * clip region, instead of emitting one vertex for the corner
			 * of the surface, up to two are emitted for two corresponding
			 * intersection point(s) between the surface and the clip region.
			 *
			 * To do this, we first calculate the (up to eight) points that
			 * form the intersection of the clip rect and the transformed
			 * surface.
			 */
			n = calculate_edges(ev, rect, surf_rect, ex, ey);
			if (n < 3)
				continue;

			/* emit edge points: */
			for (k = 0; k < n; k++) {
				weston_view_from_global_float(ev, ex[k], ey[k],
							      &sx, &sy);
				/* position: */
				*(v++) = ex[k];
				*(v++) = ey[k];
				/* texcoord: */
				weston_surface_to_buffer_float(ev->surface,
							       sx, sy,
							       &bx, &by);
				*(v++) = bx * inv_width;
				if (gs->y_inverted) {
					*(v++) = by * inv_height;
				} else {
					*(v++) = (gs->height - by) * inv_height;
				}
			}

			vtxcnt[nvtx++] = n;
		}
	}

	if (used_band_compression)
		free(rects);
	return nvtx;
}
726
727 static void
triangle_fan_debug(struct weston_view * view,int first,int count)728 triangle_fan_debug(struct weston_view *view, int first, int count)
729 {
730 struct weston_compositor *compositor = view->surface->compositor;
731 struct gl_renderer *gr = get_renderer(compositor);
732 int i;
733 GLushort *buffer;
734 GLushort *index;
735 int nelems;
736 static int color_idx = 0;
737 static const GLfloat color[][4] = {
738 { 1.0, 0.0, 0.0, 1.0 },
739 { 0.0, 1.0, 0.0, 1.0 },
740 { 0.0, 0.0, 1.0, 1.0 },
741 { 1.0, 1.0, 1.0, 1.0 },
742 };
743
744 nelems = (count - 1 + count - 2) * 2;
745
746 buffer = malloc(sizeof(GLushort) * nelems);
747 index = buffer;
748
749 for (i = 1; i < count; i++) {
750 *index++ = first;
751 *index++ = first + i;
752 }
753
754 for (i = 2; i < count; i++) {
755 *index++ = first + i - 1;
756 *index++ = first + i;
757 }
758
759 glUseProgram(gr->solid_shader.program);
760 glUniform4fv(gr->solid_shader.color_uniform, 1,
761 color[color_idx++ % ARRAY_LENGTH(color)]);
762 glDrawElements(GL_LINES, nelems, GL_UNSIGNED_SHORT, buffer);
763 glUseProgram(gr->current_shader->program);
764 free(buffer);
765 }
766
/* Draw the parts of a view covered by both 'region' (global
 * coordinates) and 'surf_region' (surface coordinates), using the
 * currently bound shader. Consumes and resets the scratch arrays
 * filled by texture_region(). */
static void
repaint_region(struct weston_view *ev, pixman_region32_t *region,
	       pixman_region32_t *surf_region)
{
	struct weston_compositor *ec = ev->surface->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	GLfloat *v;
	unsigned int *vtxcnt;
	int i, first, nfans;

	/* The final region to be painted is the intersection of
	 * 'region' and 'surf_region'. However, 'region' is in the global
	 * coordinates, and 'surf_region' is in the surface-local
	 * coordinates. texture_region() will iterate over all pairs of
	 * rectangles from both regions, compute the intersection
	 * polygon for each pair, and store it as a triangle fan if
	 * it has a non-zero area (at least 3 vertices, actually).
	 */
	nfans = texture_region(ev, region, surf_region);

	v = gr->vertices.data;
	vtxcnt = gr->vtxcnt.data;

	/* position: */
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof *v, &v[0]);
	glEnableVertexAttribArray(0);

	/* texcoord: */
	glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof *v, &v[2]);
	glEnableVertexAttribArray(1);

	/* One glDrawArrays call per fan; 'first' walks the shared
	 * vertex array. */
	for (i = 0, first = 0; i < nfans; i++) {
		glDrawArrays(GL_TRIANGLE_FAN, first, vtxcnt[i]);
		if (gr->fan_debug)
			triangle_fan_debug(ev, first, vtxcnt[i]);
		first += vtxcnt[i];
	}

	glDisableVertexAttribArray(1);
	glDisableVertexAttribArray(0);

	/* Reset the scratch arrays (keeps their allocations). */
	gr->vertices.size = 0;
	gr->vtxcnt.size = 0;
}
811
812 static int
use_output(struct weston_output * output)813 use_output(struct weston_output *output)
814 {
815 static int errored;
816 struct gl_output_state *go = get_output_state(output);
817 struct gl_renderer *gr = get_renderer(output->compositor);
818 EGLBoolean ret;
819
820 ret = eglMakeCurrent(gr->egl_display, go->egl_surface,
821 go->egl_surface, gr->egl_context);
822
823 if (ret == EGL_FALSE) {
824 if (errored)
825 return -1;
826 errored = 1;
827 weston_log("Failed to make EGL context current.\n");
828 gl_renderer_print_egl_error_state();
829 return -1;
830 }
831
832 return 0;
833 }
834
835 static int
836 shader_init(struct gl_shader *shader, struct gl_renderer *gr,
837 const char *vertex_source, const char *fragment_source);
838
/* Bind 'shader', compiling and linking it on first use.
 * NOTE(review): on compile failure only a warning is logged and
 * glUseProgram() is still reached with an invalid program —
 * presumably subsequent draws fail visibly; confirm intended. */
static void
use_shader(struct gl_renderer *gr, struct gl_shader *shader)
{
	if (!shader->program) {
		int ret;

		ret = shader_init(shader, gr,
				  shader->vertex_source,
				  shader->fragment_source);

		if (ret < 0)
			weston_log("warning: failed to compile shader\n");
	}

	/* Skip the redundant glUseProgram when already bound. */
	if (gr->current_shader == shader)
		return;
	glUseProgram(shader->program);
	gr->current_shader = shader;
}
858
859 static void
shader_uniforms(struct gl_shader * shader,struct weston_view * view,struct weston_output * output)860 shader_uniforms(struct gl_shader *shader,
861 struct weston_view *view,
862 struct weston_output *output)
863 {
864 int i;
865 struct gl_surface_state *gs = get_surface_state(view->surface);
866 struct gl_output_state *go = get_output_state(output);
867
868 glUniformMatrix4fv(shader->proj_uniform,
869 1, GL_FALSE, go->output_matrix.d);
870 glUniform4fv(shader->color_uniform, 1, gs->color);
871 glUniform1f(shader->alpha_uniform, view->alpha);
872
873 for (i = 0; i < gs->num_textures; i++)
874 glUniform1i(shader->tex_uniforms[i], i);
875 }
876
/* If the client attached an acquire fence to the surface's buffer,
 * insert a GPU-side wait for it before the buffer is sampled.
 * Returns 0 when there is nothing to wait for or the wait was queued,
 * -1 on failure (after sending an explicit-sync protocol error). */
static int
ensure_surface_buffer_is_ready(struct gl_renderer *gr,
			       struct gl_surface_state *gs)
{
	EGLint attribs[] = {
		EGL_SYNC_NATIVE_FENCE_FD_ANDROID,
		-1,	/* filled in with the dup'd fence fd below */
		EGL_NONE
	};
	struct weston_surface *surface = gs->surface;
	struct weston_buffer *buffer = gs->buffer_ref.buffer;
	EGLSyncKHR sync;
	EGLint wait_ret;
	EGLint destroy_ret;

	if (!buffer)
		return 0;

	if (surface->acquire_fence_fd < 0)
		return 0;

	/* We should only get a fence if we support EGLSyncKHR, since
	 * we don't advertise the explicit sync protocol otherwise. */
	assert(gr->has_native_fence_sync);
	/* We should only get a fence for non-SHM buffers, since surface
	 * commit would have failed otherwise. */
	assert(wl_shm_buffer_get(buffer->resource) == NULL);

	/* Dup the fd: on success, ownership of the duplicate passes to
	 * the EGLSyncKHR (per EGL_ANDROID_native_fence_sync). */
	attribs[1] = dup(surface->acquire_fence_fd);
	if (attribs[1] == -1) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to dup acquire fence");
		return -1;
	}

	sync = gr->create_sync(gr->egl_display,
			       EGL_SYNC_NATIVE_FENCE_ANDROID,
			       attribs);
	if (sync == EGL_NO_SYNC_KHR) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to create EGLSyncKHR object");
		close(attribs[1]);
		return -1;
	}

	/* Server-side wait: blocks the GPU, not the compositor. */
	wait_ret = gr->wait_sync(gr->egl_display, sync, 0);
	if (wait_ret == EGL_FALSE) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to wait on EGLSyncKHR object");
		/* Continue to try to destroy the sync object. */
	}


	destroy_ret = gr->destroy_sync(gr->egl_display, sync);
	if (destroy_ret == EGL_FALSE) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to destroy on EGLSyncKHR object");
	}

	return (wait_ret == EGL_TRUE && destroy_ret == EGL_TRUE) ? 0 : -1;
}
942
/* Render one view on an output, clipped to 'damage' (global
 * coordinates). Opaque and blended parts of the surface are drawn
 * separately so blending can be disabled where it is not needed;
 * views whose protection requirements exceed the output's current
 * protection are drawn as a solid color instead of their content. */
static void
draw_view(struct weston_view *ev, struct weston_output *output,
	  pixman_region32_t *damage) /* in global coordinates */
{
	struct weston_compositor *ec = ev->surface->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	struct gl_surface_state *gs = get_surface_state(ev->surface);
	/* repaint bounding region in global coordinates: */
	pixman_region32_t repaint;
	/* opaque region in surface coordinates: */
	pixman_region32_t surface_opaque;
	/* non-opaque region in surface coordinates: */
	pixman_region32_t surface_blend;
	GLint filter;
	int i;
	bool shader_replaced = false;
	struct gl_shader *tmp_shader = NULL;

	/* In case of a runtime switch of renderers, we may not have received
	 * an attach for this surface since the switch. In that case we don't
	 * have a valid buffer or a proper shader set up so skip rendering. */
	if (!gs->shader)
		return;

	pixman_region32_init(&repaint);
	pixman_region32_intersect(&repaint,
				  &ev->transform.boundingbox, damage);
	pixman_region32_subtract(&repaint, &repaint, &ev->clip);

	if (!pixman_region32_not_empty(&repaint))
		goto out;

	if (ensure_surface_buffer_is_ready(gr, gs) < 0)
		goto out;

	/* Censor protected content: temporarily swap in the solid
	 * shader with a dark red fill; the original shader is restored
	 * at the end of this function. */
	if (ev->surface->protection_mode == WESTON_SURFACE_PROTECTION_MODE_ENFORCED &&
	    ev->surface->desired_protection > output->current_protection) {
		tmp_shader = gs->shader;
		shader_replaced = true;
		gs->color[0] = 0.40;
		gs->color[1] = 0.0;
		gs->color[2] = 0.0;
		gs->color[3] = 1.0;
		gs->shader = &gr->solid_shader;
	}

	/* Pre-multiplied alpha blending. */
	glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);

	if (gr->fan_debug) {
		use_shader(gr, &gr->solid_shader);
		shader_uniforms(&gr->solid_shader, ev, output);
	}

	use_shader(gr, gs->shader);
	shader_uniforms(gs->shader, ev, output);

	/* Only use linear filtering when sampling won't be 1:1. */
	if (ev->transform.enabled || output->zoom.active ||
	    output->current_scale != ev->surface->buffer_viewport.buffer.scale)
		filter = GL_LINEAR;
	else
		filter = GL_NEAREST;

	for (i = 0; i < gs->num_textures; i++) {
		glActiveTexture(GL_TEXTURE0 + i);
		glBindTexture(gs->target, gs->textures[i]);
		glTexParameteri(gs->target, GL_TEXTURE_MIN_FILTER, filter);
		glTexParameteri(gs->target, GL_TEXTURE_MAG_FILTER, filter);
	}

	/* blended region is whole surface minus opaque region: */
	pixman_region32_init_rect(&surface_blend, 0, 0,
				  ev->surface->width, ev->surface->height);
	if (ev->geometry.scissor_enabled)
		pixman_region32_intersect(&surface_blend, &surface_blend,
					  &ev->geometry.scissor);
	pixman_region32_subtract(&surface_blend, &surface_blend,
				 &ev->surface->opaque);

	/* XXX: Should we be using ev->transform.opaque here? */
	pixman_region32_init(&surface_opaque);
	if (ev->geometry.scissor_enabled)
		pixman_region32_intersect(&surface_opaque,
					  &ev->surface->opaque,
					  &ev->geometry.scissor);
	else
		pixman_region32_copy(&surface_opaque, &ev->surface->opaque);

	if (pixman_region32_not_empty(&surface_opaque)) {
		if (gs->shader == &gr->texture_shader_rgba) {
			/* Special case for RGBA textures with possibly
			 * bad data in alpha channel: use the shader
			 * that forces texture alpha = 1.0.
			 * Xwayland surfaces need this.
			 */
			use_shader(gr, &gr->texture_shader_rgbx);
			shader_uniforms(&gr->texture_shader_rgbx, ev, output);
		}

		if (ev->alpha < 1.0)
			glEnable(GL_BLEND);
		else
			glDisable(GL_BLEND);

		repaint_region(ev, &repaint, &surface_opaque);
		gs->used_in_output_repaint = true;
	}

	if (pixman_region32_not_empty(&surface_blend)) {
		use_shader(gr, gs->shader);
		glEnable(GL_BLEND);
		repaint_region(ev, &repaint, &surface_blend);
		gs->used_in_output_repaint = true;
	}

	pixman_region32_fini(&surface_blend);
	pixman_region32_fini(&surface_opaque);

out:
	pixman_region32_fini(&repaint);

	/* Restore the shader if it was swapped for content protection. */
	if (shader_replaced)
		gs->shader = tmp_shader;
}
1066
1067 static void
repaint_views(struct weston_output * output,pixman_region32_t * damage)1068 repaint_views(struct weston_output *output, pixman_region32_t *damage)
1069 {
1070 struct weston_compositor *compositor = output->compositor;
1071 struct weston_view *view;
1072
1073 wl_list_for_each_reverse(view, &compositor->view_list, link)
1074 if (view->plane == &compositor->primary_plane)
1075 draw_view(view, output, damage);
1076 }
1077
1078 static int
1079 gl_renderer_create_fence_fd(struct weston_output *output);
1080
1081 /* Updates the release fences of surfaces that were used in the current output
1082 * repaint. Should only be used from gl_renderer_repaint_output, so that the
1083 * information in gl_surface_state.used_in_output_repaint is accurate.
1084 */
1085 static void
update_buffer_release_fences(struct weston_compositor * compositor,struct weston_output * output)1086 update_buffer_release_fences(struct weston_compositor *compositor,
1087 struct weston_output *output)
1088 {
1089 struct weston_view *view;
1090
1091 wl_list_for_each_reverse(view, &compositor->view_list, link) {
1092 struct gl_surface_state *gs;
1093 struct weston_buffer_release *buffer_release;
1094 int fence_fd;
1095
1096 if (view->plane != &compositor->primary_plane)
1097 continue;
1098
1099 gs = get_surface_state(view->surface);
1100 buffer_release = gs->buffer_release_ref.buffer_release;
1101
1102 if (!gs->used_in_output_repaint || !buffer_release)
1103 continue;
1104
1105 fence_fd = gl_renderer_create_fence_fd(output);
1106
1107 /* If we have a buffer_release then it means we support fences,
1108 * and we should be able to create the release fence. If we
1109 * can't, something has gone horribly wrong, so disconnect the
1110 * client.
1111 */
1112 if (fence_fd == -1) {
1113 linux_explicit_synchronization_send_server_error(
1114 buffer_release->resource,
1115 "Failed to create release fence");
1116 fd_clear(&buffer_release->fence_fd);
1117 continue;
1118 }
1119
1120 /* At the moment it is safe to just replace the fence_fd,
1121 * discarding the previous one:
1122 *
1123 * 1. If the previous fence fd represents a sync fence from
1124 * a previous repaint cycle, that fence fd is now not
1125 * sufficient to provide the release guarantee and should
1126 * be replaced.
1127 *
1128 * 2. If the fence fd represents a sync fence from another
1129 * output in the same repaint cycle, it's fine to replace
1130 * it since we are rendering to all outputs using the same
1131 * EGL context, so a fence issued for a later output rendering
1132 * is guaranteed to signal after fences for previous output
1133 * renderings.
1134 *
1135 * Note that the above is only valid if the buffer_release
1136 * fences only originate from the GL renderer, which guarantees
1137 * a total order of operations and fences. If we introduce
1138 * fences from other sources (e.g., plane out-fences), we will
1139 * need to merge fences instead.
1140 */
1141 fd_update(&buffer_release->fence_fd, fence_fd);
1142 }
1143 }
1144
1145 static void
draw_output_border_texture(struct gl_output_state * go,enum gl_renderer_border_side side,int32_t x,int32_t y,int32_t width,int32_t height)1146 draw_output_border_texture(struct gl_output_state *go,
1147 enum gl_renderer_border_side side,
1148 int32_t x, int32_t y,
1149 int32_t width, int32_t height)
1150 {
1151 struct gl_border_image *img = &go->borders[side];
1152 static GLushort indices [] = { 0, 1, 3, 3, 1, 2 };
1153
1154 if (!img->data) {
1155 if (img->tex) {
1156 glDeleteTextures(1, &img->tex);
1157 img->tex = 0;
1158 }
1159
1160 return;
1161 }
1162
1163 if (!img->tex) {
1164 glGenTextures(1, &img->tex);
1165 glBindTexture(GL_TEXTURE_2D, img->tex);
1166
1167 glTexParameteri(GL_TEXTURE_2D,
1168 GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
1169 glTexParameteri(GL_TEXTURE_2D,
1170 GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
1171 glTexParameteri(GL_TEXTURE_2D,
1172 GL_TEXTURE_MIN_FILTER, GL_NEAREST);
1173 glTexParameteri(GL_TEXTURE_2D,
1174 GL_TEXTURE_MAG_FILTER, GL_NEAREST);
1175 } else {
1176 glBindTexture(GL_TEXTURE_2D, img->tex);
1177 }
1178
1179 if (go->border_status & (1 << side)) {
1180 glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 0);
1181 glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0);
1182 glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0);
1183 glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT,
1184 img->tex_width, img->height, 0,
1185 GL_BGRA_EXT, GL_UNSIGNED_BYTE, img->data);
1186 }
1187
1188 GLfloat texcoord[] = {
1189 0.0f, 0.0f,
1190 (GLfloat)img->width / (GLfloat)img->tex_width, 0.0f,
1191 (GLfloat)img->width / (GLfloat)img->tex_width, 1.0f,
1192 0.0f, 1.0f,
1193 };
1194
1195 GLfloat verts[] = {
1196 x, y,
1197 x + width, y,
1198 x + width, y + height,
1199 x, y + height
1200 };
1201
1202 glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts);
1203 glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texcoord);
1204 glEnableVertexAttribArray(0);
1205 glEnableVertexAttribArray(1);
1206
1207 glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
1208
1209 glDisableVertexAttribArray(1);
1210 glDisableVertexAttribArray(0);
1211 }
1212
1213 static int
output_has_borders(struct weston_output * output)1214 output_has_borders(struct weston_output *output)
1215 {
1216 struct gl_output_state *go = get_output_state(output);
1217
1218 return go->borders[GL_RENDERER_BORDER_TOP].data ||
1219 go->borders[GL_RENDERER_BORDER_RIGHT].data ||
1220 go->borders[GL_RENDERER_BORDER_BOTTOM].data ||
1221 go->borders[GL_RENDERER_BORDER_LEFT].data;
1222 }
1223
1224 static void
draw_output_borders(struct weston_output * output,enum gl_border_status border_status)1225 draw_output_borders(struct weston_output *output,
1226 enum gl_border_status border_status)
1227 {
1228 struct gl_output_state *go = get_output_state(output);
1229 struct gl_renderer *gr = get_renderer(output->compositor);
1230 struct gl_shader *shader = &gr->texture_shader_rgba;
1231 struct gl_border_image *top, *bottom, *left, *right;
1232 struct weston_matrix matrix;
1233 int full_width, full_height;
1234
1235 if (border_status == BORDER_STATUS_CLEAN)
1236 return; /* Clean. Nothing to do. */
1237
1238 top = &go->borders[GL_RENDERER_BORDER_TOP];
1239 bottom = &go->borders[GL_RENDERER_BORDER_BOTTOM];
1240 left = &go->borders[GL_RENDERER_BORDER_LEFT];
1241 right = &go->borders[GL_RENDERER_BORDER_RIGHT];
1242
1243 full_width = output->current_mode->width + left->width + right->width;
1244 full_height = output->current_mode->height + top->height + bottom->height;
1245
1246 glDisable(GL_BLEND);
1247 use_shader(gr, shader);
1248
1249 glViewport(0, 0, full_width, full_height);
1250
1251 weston_matrix_init(&matrix);
1252 weston_matrix_translate(&matrix, -full_width/2.0, -full_height/2.0, 0);
1253 weston_matrix_scale(&matrix, 2.0/full_width, -2.0/full_height, 1);
1254 glUniformMatrix4fv(shader->proj_uniform, 1, GL_FALSE, matrix.d);
1255
1256 glUniform1i(shader->tex_uniforms[0], 0);
1257 glUniform1f(shader->alpha_uniform, 1);
1258 glActiveTexture(GL_TEXTURE0);
1259
1260 if (border_status & BORDER_TOP_DIRTY)
1261 draw_output_border_texture(go, GL_RENDERER_BORDER_TOP,
1262 0, 0,
1263 full_width, top->height);
1264 if (border_status & BORDER_LEFT_DIRTY)
1265 draw_output_border_texture(go, GL_RENDERER_BORDER_LEFT,
1266 0, top->height,
1267 left->width, output->current_mode->height);
1268 if (border_status & BORDER_RIGHT_DIRTY)
1269 draw_output_border_texture(go, GL_RENDERER_BORDER_RIGHT,
1270 full_width - right->width, top->height,
1271 right->width, output->current_mode->height);
1272 if (border_status & BORDER_BOTTOM_DIRTY)
1273 draw_output_border_texture(go, GL_RENDERER_BORDER_BOTTOM,
1274 0, full_height - bottom->height,
1275 full_width, bottom->height);
1276 }
1277
1278 static void
output_get_border_damage(struct weston_output * output,enum gl_border_status border_status,pixman_region32_t * damage)1279 output_get_border_damage(struct weston_output *output,
1280 enum gl_border_status border_status,
1281 pixman_region32_t *damage)
1282 {
1283 struct gl_output_state *go = get_output_state(output);
1284 struct gl_border_image *top, *bottom, *left, *right;
1285 int full_width, full_height;
1286
1287 if (border_status == BORDER_STATUS_CLEAN)
1288 return; /* Clean. Nothing to do. */
1289
1290 top = &go->borders[GL_RENDERER_BORDER_TOP];
1291 bottom = &go->borders[GL_RENDERER_BORDER_BOTTOM];
1292 left = &go->borders[GL_RENDERER_BORDER_LEFT];
1293 right = &go->borders[GL_RENDERER_BORDER_RIGHT];
1294
1295 full_width = output->current_mode->width + left->width + right->width;
1296 full_height = output->current_mode->height + top->height + bottom->height;
1297 if (border_status & BORDER_TOP_DIRTY)
1298 pixman_region32_union_rect(damage, damage,
1299 0, 0,
1300 full_width, top->height);
1301 if (border_status & BORDER_LEFT_DIRTY)
1302 pixman_region32_union_rect(damage, damage,
1303 0, top->height,
1304 left->width, output->current_mode->height);
1305 if (border_status & BORDER_RIGHT_DIRTY)
1306 pixman_region32_union_rect(damage, damage,
1307 full_width - right->width, top->height,
1308 right->width, output->current_mode->height);
1309 if (border_status & BORDER_BOTTOM_DIRTY)
1310 pixman_region32_union_rect(damage, damage,
1311 0, full_height - bottom->height,
1312 full_width, bottom->height);
1313 }
1314
1315 static void
output_get_damage(struct weston_output * output,pixman_region32_t * buffer_damage,uint32_t * border_damage)1316 output_get_damage(struct weston_output *output,
1317 pixman_region32_t *buffer_damage, uint32_t *border_damage)
1318 {
1319 struct gl_output_state *go = get_output_state(output);
1320 struct gl_renderer *gr = get_renderer(output->compositor);
1321 EGLint buffer_age = 0;
1322 EGLBoolean ret;
1323 int i;
1324
1325 if (gr->has_egl_buffer_age) {
1326 ret = eglQuerySurface(gr->egl_display, go->egl_surface,
1327 EGL_BUFFER_AGE_EXT, &buffer_age);
1328 if (ret == EGL_FALSE) {
1329 weston_log("buffer age query failed.\n");
1330 gl_renderer_print_egl_error_state();
1331 }
1332 }
1333
1334 if (buffer_age == 0 || buffer_age - 1 > BUFFER_DAMAGE_COUNT) {
1335 pixman_region32_copy(buffer_damage, &output->region);
1336 *border_damage = BORDER_ALL_DIRTY;
1337 } else {
1338 for (i = 0; i < buffer_age - 1; i++)
1339 *border_damage |= go->border_damage[(go->buffer_damage_index + i) % BUFFER_DAMAGE_COUNT];
1340
1341 if (*border_damage & BORDER_SIZE_CHANGED) {
1342 /* If we've had a resize, we have to do a full
1343 * repaint. */
1344 *border_damage |= BORDER_ALL_DIRTY;
1345 pixman_region32_copy(buffer_damage, &output->region);
1346 } else {
1347 for (i = 0; i < buffer_age - 1; i++)
1348 pixman_region32_union(buffer_damage,
1349 buffer_damage,
1350 &go->buffer_damage[(go->buffer_damage_index + i) % BUFFER_DAMAGE_COUNT]);
1351 }
1352 }
1353 }
1354
1355 static void
output_rotate_damage(struct weston_output * output,pixman_region32_t * output_damage,enum gl_border_status border_status)1356 output_rotate_damage(struct weston_output *output,
1357 pixman_region32_t *output_damage,
1358 enum gl_border_status border_status)
1359 {
1360 struct gl_output_state *go = get_output_state(output);
1361 struct gl_renderer *gr = get_renderer(output->compositor);
1362
1363 if (!gr->has_egl_buffer_age)
1364 return;
1365
1366 go->buffer_damage_index += BUFFER_DAMAGE_COUNT - 1;
1367 go->buffer_damage_index %= BUFFER_DAMAGE_COUNT;
1368
1369 pixman_region32_copy(&go->buffer_damage[go->buffer_damage_index], output_damage);
1370 go->border_damage[go->buffer_damage_index] = border_status;
1371 }
1372
1373 /**
1374 * Given a region in Weston's (top-left-origin) global co-ordinate space,
1375 * translate it to the co-ordinate space used by GL for our output
1376 * rendering. This requires shifting it into output co-ordinate space:
1377 * translating for output offset within the global co-ordinate space,
1378 * multiplying by output scale to get buffer rather than logical size.
1379 *
1380 * Finally, if borders are drawn around the output, we translate the area
1381 * to account for the border region around the outside, and add any
1382 * damage if the borders have been redrawn.
1383 *
1384 * @param output The output whose co-ordinate space we are after
1385 * @param global_region The affected region in global co-ordinate space
1386 * @param[out] rects Y-inverted quads in {x,y,w,h} order; caller must free
1387 * @param[out] nrects Number of quads (4x number of co-ordinates)
1388 */
1389 static void
pixman_region_to_egl_y_invert(struct weston_output * output,struct pixman_region32 * global_region,EGLint ** rects,EGLint * nrects)1390 pixman_region_to_egl_y_invert(struct weston_output *output,
1391 struct pixman_region32 *global_region,
1392 EGLint **rects,
1393 EGLint *nrects)
1394 {
1395 struct gl_output_state *go = get_output_state(output);
1396 pixman_region32_t transformed;
1397 struct pixman_box32 *box;
1398 int buffer_height;
1399 EGLint *d;
1400 int i;
1401
1402 /* Translate from global to output co-ordinate space. */
1403 pixman_region32_init(&transformed);
1404 pixman_region32_copy(&transformed, global_region);
1405 pixman_region32_translate(&transformed, -output->x, -output->y);
1406 weston_transformed_region(output->width, output->height,
1407 output->transform,
1408 output->current_scale,
1409 &transformed, &transformed);
1410
1411 /* If we have borders drawn around the output, shift our output damage
1412 * to account for borders being drawn around the outside, adding any
1413 * damage resulting from borders being redrawn. */
1414 if (output_has_borders(output)) {
1415 pixman_region32_translate(&transformed,
1416 go->borders[GL_RENDERER_BORDER_LEFT].width,
1417 go->borders[GL_RENDERER_BORDER_TOP].height);
1418 output_get_border_damage(output, go->border_status,
1419 &transformed);
1420 }
1421
1422 /* Convert from a Pixman region into {x,y,w,h} quads, flipping in the
1423 * Y axis to account for GL's lower-left-origin co-ordinate space. */
1424 box = pixman_region32_rectangles(&transformed, nrects);
1425 *rects = malloc(*nrects * 4 * sizeof(EGLint));
1426
1427 buffer_height = go->borders[GL_RENDERER_BORDER_TOP].height +
1428 output->current_mode->height +
1429 go->borders[GL_RENDERER_BORDER_BOTTOM].height;
1430
1431 d = *rects;
1432 for (i = 0; i < *nrects; ++i) {
1433 *d++ = box[i].x1;
1434 *d++ = buffer_height - box[i].y2;
1435 *d++ = box[i].x2 - box[i].x1;
1436 *d++ = box[i].y2 - box[i].y1;
1437 }
1438
1439 pixman_region32_fini(&transformed);
1440 }
1441
/* NOTE: We now allow falling back to ARGB gl visuals when XRGB is
 * unavailable, so we're assuming the background has no transparency
 * and that everything with a blend, like drop shadows, will have something
 * opaque (like the background) drawn underneath it.
 *
 * Depending on the underlying hardware, violating that assumption could
 * result in seeing through to another display plane.
 */
static void
gl_renderer_repaint_output(struct weston_output *output,
			   pixman_region32_t *output_damage)
{
	struct gl_output_state *go = get_output_state(output);
	struct weston_compositor *compositor = output->compositor;
	struct gl_renderer *gr = get_renderer(compositor);
	EGLBoolean ret;
	static int errored;
	/* areas we've damaged since we last used this buffer */
	pixman_region32_t previous_damage;
	/* total area we need to repaint this time */
	pixman_region32_t total_damage;
	enum gl_border_status border_status = BORDER_STATUS_CLEAN;
	struct weston_view *view;

	/* Make this output's EGL surface/context current; bail if that
	 * fails, since no GL call below would be valid. */
	if (use_output(output) < 0)
		return;

	/* Clear the used_in_output_repaint flag, so that we can properly track
	 * which surfaces were used in this output repaint. */
	wl_list_for_each_reverse(view, &compositor->view_list, link) {
		if (view->plane == &compositor->primary_plane) {
			struct gl_surface_state *gs =
				get_surface_state(view->surface);
			gs->used_in_output_repaint = false;
		}
	}

	/* Drop the sync objects left over from the previous repaint before
	 * creating this frame's begin sync. */
	if (go->begin_render_sync != EGL_NO_SYNC_KHR)
		gr->destroy_sync(gr->egl_display, go->begin_render_sync);
	if (go->end_render_sync != EGL_NO_SYNC_KHR)
		gr->destroy_sync(gr->egl_display, go->end_render_sync);

	go->begin_render_sync = create_render_sync(gr);

	/* Calculate the viewport: the output content area sits inside the
	 * border strips, offset by the left/bottom border sizes. */
	glViewport(go->borders[GL_RENDERER_BORDER_LEFT].width,
		   go->borders[GL_RENDERER_BORDER_BOTTOM].height,
		   output->current_mode->width,
		   output->current_mode->height);

	/* Calculate the global GL matrix: output matrix followed by a
	 * translate/scale that maps pixels to clip space, flipping Y. */
	go->output_matrix = output->matrix;
	weston_matrix_translate(&go->output_matrix,
				-(output->current_mode->width / 2.0),
				-(output->current_mode->height / 2.0), 0);
	weston_matrix_scale(&go->output_matrix,
			    2.0 / output->current_mode->width,
			    -2.0 / output->current_mode->height, 1);

	/* In fan debug mode, redraw everything to make sure that we clear any
	 * fans left over from previous draws on this buffer.
	 * This precludes the use of EGL_EXT_swap_buffers_with_damage and
	 * EGL_KHR_partial_update, since we damage the whole area. */
	if (gr->fan_debug) {
		pixman_region32_t undamaged;
		pixman_region32_init(&undamaged);
		pixman_region32_subtract(&undamaged, &output->region,
					 output_damage);
		/* Temporarily disable fan_debug so the undamaged area is
		 * drawn without debug fans, then restore it. */
		gr->fan_debug = false;
		repaint_views(output, &undamaged);
		gr->fan_debug = true;
		pixman_region32_fini(&undamaged);
	}

	/* previous_damage covers regions damaged in previous paints since we
	 * last used this buffer */
	pixman_region32_init(&previous_damage);
	pixman_region32_init(&total_damage); /* total area to redraw */

	/* Update previous_damage using buffer_age (if available), and store
	 * current damaged region for future use. */
	output_get_damage(output, &previous_damage, &border_status);
	output_rotate_damage(output, output_damage, go->border_status);

	/* Redraw both areas which have changed since we last used this buffer,
	 * as well as the areas we now want to repaint, to make sure the
	 * buffer is up to date. */
	pixman_region32_union(&total_damage, &previous_damage, output_damage);
	border_status |= go->border_status;

	if (gr->has_egl_partial_update && !gr->fan_debug) {
		int n_egl_rects;
		EGLint *egl_rects;

		/* For partial_update, we need to pass the region which has
		 * changed since we last rendered into this specific buffer;
		 * this is total_damage. */
		pixman_region_to_egl_y_invert(output, &total_damage,
					      &egl_rects, &n_egl_rects);
		gr->set_damage_region(gr->egl_display, go->egl_surface,
				      egl_rects, n_egl_rects);
		free(egl_rects);
	}

	repaint_views(output, &total_damage);

	pixman_region32_fini(&total_damage);
	pixman_region32_fini(&previous_damage);

	draw_output_borders(output, border_status);

	pixman_region32_copy(&output->previous_damage, output_damage);
	wl_signal_emit(&output->frame_signal, output);

	go->end_render_sync = create_render_sync(gr);

	if (gr->swap_buffers_with_damage && !gr->fan_debug) {
		int n_egl_rects;
		EGLint *egl_rects;

		/* For swap_buffers_with_damage, we need to pass the region
		 * which has changed since the previous SwapBuffers on this
		 * surface - this is output_damage. */
		pixman_region_to_egl_y_invert(output, output_damage,
					      &egl_rects, &n_egl_rects);
		ret = gr->swap_buffers_with_damage(gr->egl_display,
						   go->egl_surface,
						   egl_rects, n_egl_rects);
		free(egl_rects);
	} else {
		ret = eglSwapBuffers(gr->egl_display, go->egl_surface);
	}

	/* Log the first swap failure only, to avoid flooding the log. */
	if (ret == EGL_FALSE && !errored) {
		errored = 1;
		weston_log("Failed in eglSwapBuffers.\n");
		gl_renderer_print_egl_error_state();
	}

	go->border_status = BORDER_STATUS_CLEAN;

	/* We have to submit the render sync objects after swap buffers, since
	 * the objects get assigned a valid sync file fd only after a gl flush.
	 */
	timeline_submit_render_sync(gr, compositor, output,
				    go->begin_render_sync,
				    TIMELINE_RENDER_POINT_TYPE_BEGIN);
	timeline_submit_render_sync(gr, compositor, output, go->end_render_sync,
				    TIMELINE_RENDER_POINT_TYPE_END);

	update_buffer_release_fences(compositor, output);
}
1594
1595 static int
gl_renderer_read_pixels(struct weston_output * output,pixman_format_code_t format,void * pixels,uint32_t x,uint32_t y,uint32_t width,uint32_t height)1596 gl_renderer_read_pixels(struct weston_output *output,
1597 pixman_format_code_t format, void *pixels,
1598 uint32_t x, uint32_t y,
1599 uint32_t width, uint32_t height)
1600 {
1601 GLenum gl_format;
1602 struct gl_output_state *go = get_output_state(output);
1603
1604 x += go->borders[GL_RENDERER_BORDER_LEFT].width;
1605 y += go->borders[GL_RENDERER_BORDER_BOTTOM].height;
1606
1607 switch (format) {
1608 case PIXMAN_a8r8g8b8:
1609 gl_format = GL_BGRA_EXT;
1610 break;
1611 case PIXMAN_a8b8g8r8:
1612 gl_format = GL_RGBA;
1613 break;
1614 default:
1615 return -1;
1616 }
1617
1618 if (use_output(output) < 0)
1619 return -1;
1620
1621 glPixelStorei(GL_PACK_ALIGNMENT, 1);
1622 glReadPixels(x, y, width, height, gl_format,
1623 GL_UNSIGNED_BYTE, pixels);
1624
1625 return 0;
1626 }
1627
gl_format_from_internal(GLenum internal_format)1628 static GLenum gl_format_from_internal(GLenum internal_format)
1629 {
1630 switch (internal_format) {
1631 case GL_R8_EXT:
1632 return GL_RED_EXT;
1633 case GL_RG8_EXT:
1634 return GL_RG_EXT;
1635 default:
1636 return internal_format;
1637 }
1638 }
1639
/* Upload the accumulated SHM damage of a surface into its GL texture(s).
 *
 * Damage is first merged into gs->texture_damage. The actual upload is
 * skipped while no view of the surface is on the primary plane; the
 * damage and the buffer reference are kept so the upload can happen if
 * the surface migrates back. On completion the damage is reset and the
 * buffer references are dropped. */
static void
gl_renderer_flush_damage(struct weston_surface *surface)
{
	struct gl_renderer *gr = get_renderer(surface->compositor);
	struct gl_surface_state *gs = get_surface_state(surface);
	struct weston_buffer *buffer = gs->buffer_ref.buffer;
	struct weston_view *view;
	bool texture_used;
	pixman_box32_t *rectangles;
	uint8_t *data;
	int i, j, n;

	pixman_region32_union(&gs->texture_damage,
			      &gs->texture_damage, &surface->damage);

	if (!buffer)
		return;

	/* Avoid upload, if the texture won't be used this time.
	 * We still accumulate the damage in texture_damage, and
	 * hold the reference to the buffer, in case the surface
	 * migrates back to the primary plane.
	 */
	texture_used = false;
	wl_list_for_each(view, &surface->views, surface_link) {
		if (view->plane == &surface->compositor->primary_plane) {
			texture_used = true;
			break;
		}
	}
	if (!texture_used)
		return;

	/* Nothing to upload: no damage and no full upload requested. */
	if (!pixman_region32_not_empty(&gs->texture_damage) &&
	    !gs->needs_full_upload)
		goto done;

	data = wl_shm_buffer_get_data(buffer->shm_buffer);

	/* Without the unpack-subimage capability we cannot upload
	 * sub-rectangles, so re-upload every plane in full. */
	if (!gr->has_unpack_subimage) {
		wl_shm_buffer_begin_access(buffer->shm_buffer);
		for (j = 0; j < gs->num_textures; j++) {
			glBindTexture(GL_TEXTURE_2D, gs->textures[j]);
			glTexImage2D(GL_TEXTURE_2D, 0,
				     gs->gl_format[j],
				     gs->pitch / gs->hsub[j],
				     buffer->height / gs->vsub[j],
				     0,
				     gl_format_from_internal(gs->gl_format[j]),
				     gs->gl_pixel_type,
				     data + gs->offset[j]);
		}
		wl_shm_buffer_end_access(buffer->shm_buffer);

		goto done;
	}

	/* Full upload path: reset skip offsets, set the per-plane row
	 * length, and upload each plane whole. */
	if (gs->needs_full_upload) {
		glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0);
		glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0);
		wl_shm_buffer_begin_access(buffer->shm_buffer);
		for (j = 0; j < gs->num_textures; j++) {
			glBindTexture(GL_TEXTURE_2D, gs->textures[j]);
			glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
				      gs->pitch / gs->hsub[j]);
			glTexImage2D(GL_TEXTURE_2D, 0,
				     gs->gl_format[j],
				     gs->pitch / gs->hsub[j],
				     buffer->height / gs->vsub[j],
				     0,
				     gl_format_from_internal(gs->gl_format[j]),
				     gs->gl_pixel_type,
				     data + gs->offset[j]);
		}
		wl_shm_buffer_end_access(buffer->shm_buffer);
		goto done;
	}

	/* Partial upload: walk the damage rectangles and upload each one
	 * per plane, scaling coordinates by the plane's subsampling. */
	rectangles = pixman_region32_rectangles(&gs->texture_damage, &n);
	wl_shm_buffer_begin_access(buffer->shm_buffer);
	for (i = 0; i < n; i++) {
		pixman_box32_t r;

		/* Convert from surface to buffer coordinates. */
		r = weston_surface_to_buffer_rect(surface, rectangles[i]);

		for (j = 0; j < gs->num_textures; j++) {
			glBindTexture(GL_TEXTURE_2D, gs->textures[j]);
			glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
				      gs->pitch / gs->hsub[j]);
			glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT,
				      r.x1 / gs->hsub[j]);
			glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT,
				      r.y1 / gs->hsub[j]);
			glTexSubImage2D(GL_TEXTURE_2D, 0,
					r.x1 / gs->hsub[j],
					r.y1 / gs->vsub[j],
					(r.x2 - r.x1) / gs->hsub[j],
					(r.y2 - r.y1) / gs->vsub[j],
					gl_format_from_internal(gs->gl_format[j]),
					gs->gl_pixel_type,
					data + gs->offset[j]);
		}
	}
	wl_shm_buffer_end_access(buffer->shm_buffer);

done:
	/* Reset the accumulated damage and drop the buffer references now
	 * that the texture is (or does not need to be) up to date. */
	pixman_region32_fini(&gs->texture_damage);
	pixman_region32_init(&gs->texture_damage);
	gs->needs_full_upload = false;

	weston_buffer_reference(&gs->buffer_ref, NULL);
	weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
}
1753
1754 static void
ensure_textures(struct gl_surface_state * gs,int num_textures)1755 ensure_textures(struct gl_surface_state *gs, int num_textures)
1756 {
1757 int i;
1758
1759 if (num_textures <= gs->num_textures)
1760 return;
1761
1762 for (i = gs->num_textures; i < num_textures; i++) {
1763 glGenTextures(1, &gs->textures[i]);
1764 glBindTexture(gs->target, gs->textures[i]);
1765 glTexParameteri(gs->target,
1766 GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
1767 glTexParameteri(gs->target,
1768 GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
1769 }
1770 gs->num_textures = num_textures;
1771 glBindTexture(gs->target, 0);
1772 }
1773
/* Set up GL surface state for a newly attached wl_shm buffer: pick the
 * shader, pixel formats, plane offsets and subsampling for the buffer's
 * format, and (re)allocate textures if the layout changed. Unknown
 * formats are logged and ignored. */
static void
gl_renderer_attach_shm(struct weston_surface *es, struct weston_buffer *buffer,
		       struct wl_shm_buffer *shm_buffer)
{
	struct weston_compositor *ec = es->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	struct gl_surface_state *gs = get_surface_state(es);
	GLenum gl_format[3] = {0, 0, 0};
	GLenum gl_pixel_type;
	int pitch;
	int num_planes;

	buffer->shm_buffer = shm_buffer;
	buffer->width = wl_shm_buffer_get_width(shm_buffer);
	buffer->height = wl_shm_buffer_get_height(shm_buffer);

	/* Defaults for single-plane formats; multi-plane formats override
	 * the per-plane offset and subsampling below. */
	num_planes = 1;
	gs->offset[0] = 0;
	gs->hsub[0] = 1;
	gs->vsub[0] = 1;

	switch (wl_shm_buffer_get_format(shm_buffer)) {
	case WL_SHM_FORMAT_XRGB8888:
		gs->shader = &gr->texture_shader_rgbx;
		/* stride is in bytes; pitch is in pixels (4 bytes each) */
		pitch = wl_shm_buffer_get_stride(shm_buffer) / 4;
		gl_format[0] = GL_BGRA_EXT;
		gl_pixel_type = GL_UNSIGNED_BYTE;
		es->is_opaque = true;
		break;
	case WL_SHM_FORMAT_ARGB8888:
		gs->shader = &gr->texture_shader_rgba;
		pitch = wl_shm_buffer_get_stride(shm_buffer) / 4;
		gl_format[0] = GL_BGRA_EXT;
		gl_pixel_type = GL_UNSIGNED_BYTE;
		es->is_opaque = false;
		break;
	case WL_SHM_FORMAT_RGB565:
		gs->shader = &gr->texture_shader_rgbx;
		/* 2 bytes per pixel */
		pitch = wl_shm_buffer_get_stride(shm_buffer) / 2;
		gl_format[0] = GL_RGB;
		gl_pixel_type = GL_UNSIGNED_SHORT_5_6_5;
		es->is_opaque = true;
		break;
	case WL_SHM_FORMAT_YUV420:
		/* Three planes: Y at full resolution, U and V subsampled
		 * 2x2; offsets computed from the preceding plane's size. */
		gs->shader = &gr->texture_shader_y_u_v;
		pitch = wl_shm_buffer_get_stride(shm_buffer);
		gl_pixel_type = GL_UNSIGNED_BYTE;
		num_planes = 3;
		gs->offset[1] = gs->offset[0] + (pitch / gs->hsub[0]) *
				(buffer->height / gs->vsub[0]);
		gs->hsub[1] = 2;
		gs->vsub[1] = 2;
		gs->offset[2] = gs->offset[1] + (pitch / gs->hsub[1]) *
				(buffer->height / gs->vsub[1]);
		gs->hsub[2] = 2;
		gs->vsub[2] = 2;
		/* Prefer single-channel R8 textures when GL supports them;
		 * fall back to luminance otherwise. */
		if (gr->has_gl_texture_rg) {
			gl_format[0] = GL_R8_EXT;
			gl_format[1] = GL_R8_EXT;
			gl_format[2] = GL_R8_EXT;
		} else {
			gl_format[0] = GL_LUMINANCE;
			gl_format[1] = GL_LUMINANCE;
			gl_format[2] = GL_LUMINANCE;
		}
		es->is_opaque = true;
		break;
	case WL_SHM_FORMAT_NV12:
		/* Two planes: Y, then interleaved UV subsampled 2x2. */
		pitch = wl_shm_buffer_get_stride(shm_buffer);
		gl_pixel_type = GL_UNSIGNED_BYTE;
		num_planes = 2;
		gs->offset[1] = gs->offset[0] + (pitch / gs->hsub[0]) *
				(buffer->height / gs->vsub[0]);
		gs->hsub[1] = 2;
		gs->vsub[1] = 2;
		if (gr->has_gl_texture_rg) {
			gs->shader = &gr->texture_shader_y_uv;
			gl_format[0] = GL_R8_EXT;
			gl_format[1] = GL_RG8_EXT;
		} else {
			gs->shader = &gr->texture_shader_y_xuxv;
			gl_format[0] = GL_LUMINANCE;
			gl_format[1] = GL_LUMINANCE_ALPHA;
		}
		es->is_opaque = true;
		break;
	case WL_SHM_FORMAT_YUYV:
		/* Packed YUYV viewed as two "planes" sharing offset 0 with
		 * different horizontal subsampling. */
		gs->shader = &gr->texture_shader_y_xuxv;
		pitch = wl_shm_buffer_get_stride(shm_buffer) / 2;
		gl_pixel_type = GL_UNSIGNED_BYTE;
		num_planes = 2;
		gs->offset[1] = 0;
		gs->hsub[1] = 2;
		gs->vsub[1] = 1;
		if (gr->has_gl_texture_rg)
			gl_format[0] = GL_RG8_EXT;
		else
			gl_format[0] = GL_LUMINANCE_ALPHA;
		gl_format[1] = GL_BGRA_EXT;
		es->is_opaque = true;
		break;
	default:
		weston_log("warning: unknown shm buffer format: %08x\n",
			   wl_shm_buffer_get_format(shm_buffer));
		return;
	}

	/* Only allocate a texture if it doesn't match existing one.
	 * If a switch from DRM allocated buffer to a SHM buffer is
	 * happening, we need to allocate a new texture buffer. */
	if (pitch != gs->pitch ||
	    buffer->height != gs->height ||
	    gl_format[0] != gs->gl_format[0] ||
	    gl_format[1] != gs->gl_format[1] ||
	    gl_format[2] != gs->gl_format[2] ||
	    gl_pixel_type != gs->gl_pixel_type ||
	    gs->buffer_type != BUFFER_TYPE_SHM) {
		gs->pitch = pitch;
		gs->height = buffer->height;
		gs->target = GL_TEXTURE_2D;
		gs->gl_format[0] = gl_format[0];
		gs->gl_format[1] = gl_format[1];
		gs->gl_format[2] = gl_format[2];
		gs->gl_pixel_type = gl_pixel_type;
		gs->buffer_type = BUFFER_TYPE_SHM;
		/* New layout: the whole buffer must be uploaded. */
		gs->needs_full_upload = true;
		gs->y_inverted = true;

		gs->surface = es;

		ensure_textures(gs, num_planes);
	}
}
1907
/* Set up GL surface state for an EGL (wl_drm-style) buffer: query its
 * size and orientation, release previously held EGLImages, choose the
 * shader and texture target for the given EGL texture format, then
 * create one EGLImage per plane and bind it to a texture. */
static void
gl_renderer_attach_egl(struct weston_surface *es, struct weston_buffer *buffer,
		       uint32_t format)
{
	struct weston_compositor *ec = es->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	struct gl_surface_state *gs = get_surface_state(es);
	EGLint attribs[3];
	int i, num_planes;

	buffer->legacy_buffer = (struct wl_buffer *)buffer->resource;
	/* Query buffer dimensions and Y-orientation from EGL. */
	gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
			 EGL_WIDTH, &buffer->width);
	gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
			 EGL_HEIGHT, &buffer->height);
	gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
			 EGL_WAYLAND_Y_INVERTED_WL, &buffer->y_inverted);

	/* Drop the EGLImages from any previously attached buffer. */
	for (i = 0; i < gs->num_images; i++) {
		egl_image_unref(gs->images[i]);
		gs->images[i] = NULL;
	}
	gs->num_images = 0;
	gs->target = GL_TEXTURE_2D;
	es->is_opaque = false;
	switch (format) {
	case EGL_TEXTURE_RGB:
		es->is_opaque = true;
		/* fallthrough */
	case EGL_TEXTURE_RGBA:
	default:
		num_planes = 1;
		gs->shader = &gr->texture_shader_rgba;
		break;
	case EGL_TEXTURE_EXTERNAL_WL:
		/* External textures need the OES target and shader. */
		num_planes = 1;
		gs->target = GL_TEXTURE_EXTERNAL_OES;
		gs->shader = &gr->texture_shader_egl_external;
		break;
	case EGL_TEXTURE_Y_UV_WL:
		num_planes = 2;
		gs->shader = &gr->texture_shader_y_uv;
		es->is_opaque = true;
		break;
	case EGL_TEXTURE_Y_U_V_WL:
		num_planes = 3;
		gs->shader = &gr->texture_shader_y_u_v;
		es->is_opaque = true;
		break;
	case EGL_TEXTURE_Y_XUXV_WL:
		num_planes = 2;
		gs->shader = &gr->texture_shader_y_xuxv;
		es->is_opaque = true;
		break;
	}

	/* Create an EGLImage per plane and bind it to its texture.
	 * On per-plane failure we log and continue with the other planes. */
	ensure_textures(gs, num_planes);
	for (i = 0; i < num_planes; i++) {
		attribs[0] = EGL_WAYLAND_PLANE_WL;
		attribs[1] = i;
		attribs[2] = EGL_NONE;
		gs->images[i] = egl_image_create(gr,
						 EGL_WAYLAND_BUFFER_WL,
						 buffer->legacy_buffer,
						 attribs);
		if (!gs->images[i]) {
			weston_log("failed to create img for plane %d\n", i);
			continue;
		}
		gs->num_images++;

		glActiveTexture(GL_TEXTURE0 + i);
		glBindTexture(gs->target, gs->textures[i]);
		gr->image_target_texture_2d(gs->target,
					    gs->images[i]->image);
	}

	gs->pitch = buffer->width;
	gs->height = buffer->height;
	gs->buffer_type = BUFFER_TYPE_EGL;
	gs->y_inverted = buffer->y_inverted;
}
1990
/* Buffer user-data destructor: tears down the cached dmabuf_image
 * when the linux_dmabuf buffer itself goes away. */
static void
gl_renderer_destroy_dmabuf(struct linux_dmabuf_buffer *dmabuf)
{
	struct dmabuf_image *img;

	img = linux_dmabuf_buffer_get_user_data(dmabuf);
	dmabuf_image_destroy(img);
}
1998
1999 static struct egl_image *
import_simple_dmabuf(struct gl_renderer * gr,struct dmabuf_attributes * attributes)2000 import_simple_dmabuf(struct gl_renderer *gr,
2001 struct dmabuf_attributes *attributes)
2002 {
2003 struct egl_image *image;
2004 EGLint attribs[50];
2005 int atti = 0;
2006 bool has_modifier;
2007
2008 /* This requires the Mesa commit in
2009 * Mesa 10.3 (08264e5dad4df448e7718e782ad9077902089a07) or
2010 * Mesa 10.2.7 (55d28925e6109a4afd61f109e845a8a51bd17652).
2011 * Otherwise Mesa closes the fd behind our back and re-importing
2012 * will fail.
2013 * https://bugs.freedesktop.org/show_bug.cgi?id=76188
2014 */
2015
2016 attribs[atti++] = EGL_WIDTH;
2017 attribs[atti++] = attributes->width;
2018 attribs[atti++] = EGL_HEIGHT;
2019 attribs[atti++] = attributes->height;
2020 attribs[atti++] = EGL_LINUX_DRM_FOURCC_EXT;
2021 attribs[atti++] = attributes->format;
2022
2023 if (attributes->modifier[0] != DRM_FORMAT_MOD_INVALID) {
2024 if (!gr->has_dmabuf_import_modifiers)
2025 return NULL;
2026 has_modifier = true;
2027 } else {
2028 has_modifier = false;
2029 }
2030
2031 if (attributes->n_planes > 0) {
2032 attribs[atti++] = EGL_DMA_BUF_PLANE0_FD_EXT;
2033 attribs[atti++] = attributes->fd[0];
2034 attribs[atti++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT;
2035 attribs[atti++] = attributes->offset[0];
2036 attribs[atti++] = EGL_DMA_BUF_PLANE0_PITCH_EXT;
2037 attribs[atti++] = attributes->stride[0];
2038 if (has_modifier) {
2039 attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
2040 attribs[atti++] = attributes->modifier[0] & 0xFFFFFFFF;
2041 attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
2042 attribs[atti++] = attributes->modifier[0] >> 32;
2043 }
2044 }
2045
2046 if (attributes->n_planes > 1) {
2047 attribs[atti++] = EGL_DMA_BUF_PLANE1_FD_EXT;
2048 attribs[atti++] = attributes->fd[1];
2049 attribs[atti++] = EGL_DMA_BUF_PLANE1_OFFSET_EXT;
2050 attribs[atti++] = attributes->offset[1];
2051 attribs[atti++] = EGL_DMA_BUF_PLANE1_PITCH_EXT;
2052 attribs[atti++] = attributes->stride[1];
2053 if (has_modifier) {
2054 attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT;
2055 attribs[atti++] = attributes->modifier[1] & 0xFFFFFFFF;
2056 attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT;
2057 attribs[atti++] = attributes->modifier[1] >> 32;
2058 }
2059 }
2060
2061 if (attributes->n_planes > 2) {
2062 attribs[atti++] = EGL_DMA_BUF_PLANE2_FD_EXT;
2063 attribs[atti++] = attributes->fd[2];
2064 attribs[atti++] = EGL_DMA_BUF_PLANE2_OFFSET_EXT;
2065 attribs[atti++] = attributes->offset[2];
2066 attribs[atti++] = EGL_DMA_BUF_PLANE2_PITCH_EXT;
2067 attribs[atti++] = attributes->stride[2];
2068 if (has_modifier) {
2069 attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT;
2070 attribs[atti++] = attributes->modifier[2] & 0xFFFFFFFF;
2071 attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT;
2072 attribs[atti++] = attributes->modifier[2] >> 32;
2073 }
2074 }
2075
2076 if (gr->has_dmabuf_import_modifiers) {
2077 if (attributes->n_planes > 3) {
2078 attribs[atti++] = EGL_DMA_BUF_PLANE3_FD_EXT;
2079 attribs[atti++] = attributes->fd[3];
2080 attribs[atti++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
2081 attribs[atti++] = attributes->offset[3];
2082 attribs[atti++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
2083 attribs[atti++] = attributes->stride[3];
2084 attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT;
2085 attribs[atti++] = attributes->modifier[3] & 0xFFFFFFFF;
2086 attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT;
2087 attribs[atti++] = attributes->modifier[3] >> 32;
2088 }
2089 }
2090
2091 attribs[atti++] = EGL_NONE;
2092
2093 image = egl_image_create(gr, EGL_LINUX_DMA_BUF_EXT, NULL,
2094 attribs);
2095
2096 return image;
2097 }
2098
2099 /* The kernel header drm_fourcc.h defines the DRM formats below. We duplicate
2100 * some of the definitions here so that building Weston won't require
2101 * bleeding-edge kernel headers.
2102 */
2103 #ifndef DRM_FORMAT_R8
2104 #define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
2105 #endif
2106
2107 #ifndef DRM_FORMAT_GR88
2108 #define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
2109 #endif
2110
/*
 * Table driving the GL-conversion fallback for YUV dmabufs that the
 * EGL implementation cannot import directly: each input format is
 * split into one EGLImage per output plane, re-importing the plane
 * memory under a single-plane format (R8 / GR88 / ARGB8888) with the
 * given width/height divisors, and sampled by the matching YUV shader.
 */
struct yuv_format_descriptor yuv_formats[] = {
	{
		/* Packed YUYV: one input plane exposed as two textures,
		 * a full-size GR88 view for Y and a half-width
		 * ARGB8888 view for the interleaved chroma. */
		.format = DRM_FORMAT_YUYV,
		.input_planes = 1,
		.output_planes = 2,
		.texture_type = EGL_TEXTURE_Y_XUXV_WL,
		{{
			.width_divisor = 1,
			.height_divisor = 1,
			.format = DRM_FORMAT_GR88,
			.plane_index = 0
		}, {
			.width_divisor = 2,
			.height_divisor = 1,
			.format = DRM_FORMAT_ARGB8888,
			.plane_index = 0
		}}
	}, {
		/* NV12: full-size Y plane plus 2x2-subsampled
		 * interleaved UV plane. */
		.format = DRM_FORMAT_NV12,
		.input_planes = 2,
		.output_planes = 2,
		.texture_type = EGL_TEXTURE_Y_UV_WL,
		{{
			.width_divisor = 1,
			.height_divisor = 1,
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.width_divisor = 2,
			.height_divisor = 2,
			.format = DRM_FORMAT_GR88,
			.plane_index = 1
		}}
	}, {
		/* YUV420: three planes, chroma 2x2 subsampled. */
		.format = DRM_FORMAT_YUV420,
		.input_planes = 3,
		.output_planes = 3,
		.texture_type = EGL_TEXTURE_Y_U_V_WL,
		{{
			.width_divisor = 1,
			.height_divisor = 1,
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.width_divisor = 2,
			.height_divisor = 2,
			.format = DRM_FORMAT_R8,
			.plane_index = 1
		}, {
			.width_divisor = 2,
			.height_divisor = 2,
			.format = DRM_FORMAT_R8,
			.plane_index = 2
		}}
	}, {
		/* YUV444: three full-resolution planes. */
		.format = DRM_FORMAT_YUV444,
		.input_planes = 3,
		.output_planes = 3,
		.texture_type = EGL_TEXTURE_Y_U_V_WL,
		{{
			.width_divisor = 1,
			.height_divisor = 1,
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.width_divisor = 1,
			.height_divisor = 1,
			.format = DRM_FORMAT_R8,
			.plane_index = 1
		}, {
			.width_divisor = 1,
			.height_divisor = 1,
			.format = DRM_FORMAT_R8,
			.plane_index = 2
		}}
	}
};
2188
2189 static struct egl_image *
import_dmabuf_single_plane(struct gl_renderer * gr,const struct dmabuf_attributes * attributes,struct yuv_plane_descriptor * descriptor)2190 import_dmabuf_single_plane(struct gl_renderer *gr,
2191 const struct dmabuf_attributes *attributes,
2192 struct yuv_plane_descriptor *descriptor)
2193 {
2194 struct dmabuf_attributes plane;
2195 struct egl_image *image;
2196 char fmt[4];
2197
2198 plane.width = attributes->width / descriptor->width_divisor;
2199 plane.height = attributes->height / descriptor->height_divisor;
2200 plane.format = descriptor->format;
2201 plane.n_planes = 1;
2202 plane.fd[0] = attributes->fd[descriptor->plane_index];
2203 plane.offset[0] = attributes->offset[descriptor->plane_index];
2204 plane.stride[0] = attributes->stride[descriptor->plane_index];
2205 plane.modifier[0] = attributes->modifier[descriptor->plane_index];
2206
2207 image = import_simple_dmabuf(gr, &plane);
2208 if (!image) {
2209 weston_log("Failed to import plane %d as %.4s\n",
2210 descriptor->plane_index,
2211 dump_format(descriptor->format, fmt));
2212 return NULL;
2213 }
2214
2215 return image;
2216 }
2217
/*
 * GL-conversion fallback import: split a YUV dmabuf into one EGLImage
 * per output plane as described by the matching yuv_formats entry, and
 * select the shader that recombines the planes at draw time.
 *
 * Returns false (with everything cleaned up) if the format is unknown,
 * the plane count does not match, or any plane fails to import.
 */
static bool
import_yuv_dmabuf(struct gl_renderer *gr,
		  struct dmabuf_image *image)
{
	unsigned i;
	int j;
	int ret;
	struct yuv_format_descriptor *format = NULL;
	struct dmabuf_attributes *attributes = &image->dmabuf->attributes;
	char fmt[4];

	/* Look up the conversion descriptor for this fourcc. */
	for (i = 0; i < ARRAY_LENGTH(yuv_formats); ++i) {
		if (yuv_formats[i].format == attributes->format) {
			format = &yuv_formats[i];
			break;
		}
	}

	if (!format) {
		weston_log("Error during import, and no known conversion for format "
			   "%.4s in the renderer\n",
			   dump_format(attributes->format, fmt));
		return false;
	}

	if (attributes->n_planes != format->input_planes) {
		weston_log("%.4s dmabuf must contain %d plane%s (%d provided)\n",
			   dump_format(format->format, fmt),
			   format->input_planes,
			   (format->input_planes > 1) ? "s" : "",
			   attributes->n_planes);
		return false;
	}

	for (j = 0; j < format->output_planes; ++j) {
		image->images[j] = import_dmabuf_single_plane(gr, attributes,
							      &format->plane[j]);
		if (!image->images[j]) {
			/* Unwind the planes imported so far; each unref
			 * must drop the last reference (ret == 0). */
			while (j) {
				ret = egl_image_unref(image->images[--j]);
				assert(ret == 0);
			}
			return false;
		}
	}

	image->num_images = format->output_planes;

	/* Pick the shader that reassembles this plane layout. */
	switch (format->texture_type) {
	case EGL_TEXTURE_Y_XUXV_WL:
		image->shader = &gr->texture_shader_y_xuxv;
		break;
	case EGL_TEXTURE_Y_UV_WL:
		image->shader = &gr->texture_shader_y_uv;
		break;
	case EGL_TEXTURE_Y_U_V_WL:
		image->shader = &gr->texture_shader_y_u_v;
		break;
	default:
		assert(false);
	}

	return true;
}
2282
2283 static GLenum
choose_texture_target(struct dmabuf_attributes * attributes)2284 choose_texture_target(struct dmabuf_attributes *attributes)
2285 {
2286 if (attributes->n_planes > 1)
2287 return GL_TEXTURE_EXTERNAL_OES;
2288
2289 switch (attributes->format & ~DRM_FORMAT_BIG_ENDIAN) {
2290 case DRM_FORMAT_YUYV:
2291 case DRM_FORMAT_YVYU:
2292 case DRM_FORMAT_UYVY:
2293 case DRM_FORMAT_VYUY:
2294 case DRM_FORMAT_AYUV:
2295 return GL_TEXTURE_EXTERNAL_OES;
2296 default:
2297 return GL_TEXTURE_2D;
2298 }
2299 }
2300
2301 static struct dmabuf_image *
import_dmabuf(struct gl_renderer * gr,struct linux_dmabuf_buffer * dmabuf)2302 import_dmabuf(struct gl_renderer *gr,
2303 struct linux_dmabuf_buffer *dmabuf)
2304 {
2305 struct egl_image *egl_image;
2306 struct dmabuf_image *image;
2307
2308 image = dmabuf_image_create();
2309 image->dmabuf = dmabuf;
2310
2311 egl_image = import_simple_dmabuf(gr, &dmabuf->attributes);
2312 if (egl_image) {
2313 image->num_images = 1;
2314 image->images[0] = egl_image;
2315 image->import_type = IMPORT_TYPE_DIRECT;
2316 image->target = choose_texture_target(&dmabuf->attributes);
2317
2318 switch (image->target) {
2319 case GL_TEXTURE_2D:
2320 image->shader = &gr->texture_shader_rgba;
2321 break;
2322 default:
2323 image->shader = &gr->texture_shader_egl_external;
2324 }
2325 } else {
2326 if (!import_yuv_dmabuf(gr, image)) {
2327 dmabuf_image_destroy(image);
2328 return NULL;
2329 }
2330 image->import_type = IMPORT_TYPE_GL_CONVERSION;
2331 image->target = GL_TEXTURE_2D;
2332 }
2333
2334 return image;
2335 }
2336
2337 static void
gl_renderer_query_dmabuf_formats(struct weston_compositor * wc,int ** formats,int * num_formats)2338 gl_renderer_query_dmabuf_formats(struct weston_compositor *wc,
2339 int **formats, int *num_formats)
2340 {
2341 struct gl_renderer *gr = get_renderer(wc);
2342 static const int fallback_formats[] = {
2343 DRM_FORMAT_ARGB8888,
2344 DRM_FORMAT_XRGB8888,
2345 DRM_FORMAT_YUYV,
2346 DRM_FORMAT_NV12,
2347 DRM_FORMAT_YUV420,
2348 DRM_FORMAT_YUV444,
2349 };
2350 bool fallback = false;
2351 EGLint num;
2352
2353 assert(gr->has_dmabuf_import);
2354
2355 if (!gr->has_dmabuf_import_modifiers ||
2356 !gr->query_dmabuf_formats(gr->egl_display, 0, NULL, &num)) {
2357 num = gr->has_gl_texture_rg ? ARRAY_LENGTH(fallback_formats) : 2;
2358 fallback = true;
2359 }
2360
2361 *formats = calloc(num, sizeof(int));
2362 if (*formats == NULL) {
2363 *num_formats = 0;
2364 return;
2365 }
2366
2367 if (fallback) {
2368 memcpy(*formats, fallback_formats, num * sizeof(int));
2369 *num_formats = num;
2370 return;
2371 }
2372
2373 if (!gr->query_dmabuf_formats(gr->egl_display, num, *formats, &num)) {
2374 *num_formats = 0;
2375 free(*formats);
2376 return;
2377 }
2378
2379 *num_formats = num;
2380 }
2381
2382 static void
gl_renderer_query_dmabuf_modifiers(struct weston_compositor * wc,int format,uint64_t ** modifiers,int * num_modifiers)2383 gl_renderer_query_dmabuf_modifiers(struct weston_compositor *wc, int format,
2384 uint64_t **modifiers,
2385 int *num_modifiers)
2386 {
2387 struct gl_renderer *gr = get_renderer(wc);
2388 int num;
2389
2390 assert(gr->has_dmabuf_import);
2391
2392 if (!gr->has_dmabuf_import_modifiers ||
2393 !gr->query_dmabuf_modifiers(gr->egl_display, format, 0, NULL,
2394 NULL, &num)) {
2395 *num_modifiers = 0;
2396 return;
2397 }
2398
2399 *modifiers = calloc(num, sizeof(uint64_t));
2400 if (*modifiers == NULL) {
2401 *num_modifiers = 0;
2402 return;
2403 }
2404 if (!gr->query_dmabuf_modifiers(gr->egl_display, format,
2405 num, *modifiers, NULL, &num)) {
2406 *num_modifiers = 0;
2407 free(*modifiers);
2408 return;
2409 }
2410
2411 *num_modifiers = num;
2412 }
2413
2414 static bool
gl_renderer_import_dmabuf(struct weston_compositor * ec,struct linux_dmabuf_buffer * dmabuf)2415 gl_renderer_import_dmabuf(struct weston_compositor *ec,
2416 struct linux_dmabuf_buffer *dmabuf)
2417 {
2418 struct gl_renderer *gr = get_renderer(ec);
2419 struct dmabuf_image *image;
2420 int i;
2421
2422 assert(gr->has_dmabuf_import);
2423
2424 for (i = 0; i < dmabuf->attributes.n_planes; i++) {
2425 /* return if EGL doesn't support import modifiers */
2426 if (dmabuf->attributes.modifier[i] != DRM_FORMAT_MOD_INVALID)
2427 if (!gr->has_dmabuf_import_modifiers)
2428 return false;
2429
2430 /* return if modifiers passed are unequal */
2431 if (dmabuf->attributes.modifier[i] !=
2432 dmabuf->attributes.modifier[0])
2433 return false;
2434 }
2435
2436 /* reject all flags we do not recognize or handle */
2437 if (dmabuf->attributes.flags & ~ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT)
2438 return false;
2439
2440 image = import_dmabuf(gr, dmabuf);
2441 if (!image)
2442 return false;
2443
2444 wl_list_insert(&gr->dmabuf_images, &image->link);
2445 linux_dmabuf_buffer_set_user_data(dmabuf, image,
2446 gl_renderer_destroy_dmabuf);
2447
2448 return true;
2449 }
2450
2451 static bool
import_known_dmabuf(struct gl_renderer * gr,struct dmabuf_image * image)2452 import_known_dmabuf(struct gl_renderer *gr,
2453 struct dmabuf_image *image)
2454 {
2455 switch (image->import_type) {
2456 case IMPORT_TYPE_DIRECT:
2457 image->images[0] = import_simple_dmabuf(gr, &image->dmabuf->attributes);
2458 if (!image->images[0])
2459 return false;
2460 image->num_images = 1;
2461 break;
2462
2463 case IMPORT_TYPE_GL_CONVERSION:
2464 if (!import_yuv_dmabuf(gr, image))
2465 return false;
2466 break;
2467
2468 default:
2469 weston_log("Invalid import type for dmabuf\n");
2470 return false;
2471 }
2472
2473 return true;
2474 }
2475
2476 static bool
dmabuf_is_opaque(struct linux_dmabuf_buffer * dmabuf)2477 dmabuf_is_opaque(struct linux_dmabuf_buffer *dmabuf)
2478 {
2479 const struct pixel_format_info *info;
2480
2481 info = pixel_format_get_info(dmabuf->attributes.format &
2482 ~DRM_FORMAT_BIG_ENDIAN);
2483 if (!info)
2484 return false;
2485
2486 return pixel_format_is_opaque(info);
2487 }
2488
/*
 * Attach a linux-dmabuf buffer to a surface.
 *
 * The buffer must have gone through gl_renderer_import_dmabuf()
 * already, which stored a dmabuf_image in its user data. The images
 * are dropped and re-imported on every attach (see comment below), and
 * each resulting plane is bound to its own texture unit. On import
 * failure the client receives a protocol error.
 */
static void
gl_renderer_attach_dmabuf(struct weston_surface *surface,
			  struct weston_buffer *buffer,
			  struct linux_dmabuf_buffer *dmabuf)
{
	struct gl_renderer *gr = get_renderer(surface->compositor);
	struct gl_surface_state *gs = get_surface_state(surface);
	struct dmabuf_image *image;
	int i;
	int ret;

	if (!gr->has_dmabuf_import) {
		linux_dmabuf_buffer_send_server_error(dmabuf,
				"EGL dmabuf import not supported");
		return;
	}

	buffer->width = dmabuf->attributes.width;
	buffer->height = dmabuf->attributes.height;

	/*
	 * GL-renderer uses the OpenGL convention of texture coordinates, where
	 * the origin is at bottom-left. Because dmabuf buffers have the origin
	 * at top-left, we must invert the Y_INVERT flag to get the image right.
	 */
	buffer->y_inverted =
		!(dmabuf->attributes.flags & ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT);

	/* Drop the surface's references from the previous attach. */
	for (i = 0; i < gs->num_images; i++)
		egl_image_unref(gs->images[i]);
	gs->num_images = 0;

	/*
	 * We try to always hold an imported EGLImage from the dmabuf
	 * to prevent the client from preventing re-imports. But, we also
	 * need to re-import every time the contents may change because
	 * GL driver's caching may need flushing.
	 *
	 * Here we release the cache reference which has to be final.
	 */
	image = linux_dmabuf_buffer_get_user_data(dmabuf);

	/* The dmabuf_image should have been created during the import */
	assert(image != NULL);

	/* Each unref here must drop the last reference (ret == 0),
	 * since the surface references were released above. */
	for (i = 0; i < image->num_images; ++i) {
		ret = egl_image_unref(image->images[i]);
		assert(ret == 0);
	}

	if (!import_known_dmabuf(gr, image)) {
		linux_dmabuf_buffer_send_server_error(dmabuf, "EGL dmabuf import failed");
		return;
	}

	/* Take fresh surface-side references on the new images... */
	gs->num_images = image->num_images;
	for (i = 0; i < gs->num_images; ++i)
		gs->images[i] = egl_image_ref(image->images[i]);

	/* ...and bind one texture per plane. */
	gs->target = image->target;
	ensure_textures(gs, gs->num_images);
	for (i = 0; i < gs->num_images; ++i) {
		glActiveTexture(GL_TEXTURE0 + i);
		glBindTexture(gs->target, gs->textures[i]);
		gr->image_target_texture_2d(gs->target, gs->images[i]->image);
	}

	gs->shader = image->shader;
	gs->pitch = buffer->width;
	gs->height = buffer->height;
	gs->buffer_type = BUFFER_TYPE_EGL;
	gs->y_inverted = buffer->y_inverted;
	surface->is_opaque = dmabuf_is_opaque(dmabuf);
}
2563
/*
 * Renderer entry point for wl_surface.attach: take references on the
 * new buffer, then dispatch to the SHM, EGL (wl_drm) or dmabuf attach
 * path. A NULL buffer detaches and releases all renderer state for the
 * surface. An unrecognized buffer type releases the references taken
 * here and disconnects the client.
 */
static void
gl_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer)
{
	struct weston_compositor *ec = es->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	struct gl_surface_state *gs = get_surface_state(es);
	struct wl_shm_buffer *shm_buffer;
	struct linux_dmabuf_buffer *dmabuf;
	EGLint format;
	int i;

	/* Reference the new buffer (and its buffer_release) before
	 * dispatching; the error path below undoes this. */
	weston_buffer_reference(&gs->buffer_ref, buffer);
	weston_buffer_release_reference(&gs->buffer_release_ref,
					es->buffer_release_ref.buffer_release);

	/* Detach: drop images and textures, reset to the no-buffer state. */
	if (!buffer) {
		for (i = 0; i < gs->num_images; i++) {
			egl_image_unref(gs->images[i]);
			gs->images[i] = NULL;
		}
		gs->num_images = 0;
		glDeleteTextures(gs->num_textures, gs->textures);
		gs->num_textures = 0;
		gs->buffer_type = BUFFER_TYPE_NULL;
		gs->y_inverted = true;
		es->is_opaque = false;
		return;
	}

	shm_buffer = wl_shm_buffer_get(buffer->resource);

	/* Probe the buffer type: SHM first, then EGL-importable
	 * (eglQueryWaylandBufferWL succeeding identifies a wl_drm
	 * buffer), then linux-dmabuf. */
	if (shm_buffer)
		gl_renderer_attach_shm(es, buffer, shm_buffer);
	else if (gr->has_bind_display &&
		 gr->query_buffer(gr->egl_display, (void *)buffer->resource,
				  EGL_TEXTURE_FORMAT, &format))
		gl_renderer_attach_egl(es, buffer, format);
	else if ((dmabuf = linux_dmabuf_buffer_get(buffer->resource)))
		gl_renderer_attach_dmabuf(es, buffer, dmabuf);
	else {
		weston_log("unhandled buffer type!\n");
		if (gr->has_bind_display) {
			weston_log("eglQueryWaylandBufferWL failed\n");
			gl_renderer_print_egl_error_state();
		}
		weston_buffer_reference(&gs->buffer_ref, NULL);
		weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
		gs->buffer_type = BUFFER_TYPE_NULL;
		gs->y_inverted = true;
		es->is_opaque = false;
		weston_buffer_send_server_error(buffer,
			"disconnecting due to unhandled buffer type");
	}
}
2618
2619 static void
gl_renderer_surface_set_color(struct weston_surface * surface,float red,float green,float blue,float alpha)2620 gl_renderer_surface_set_color(struct weston_surface *surface,
2621 float red, float green, float blue, float alpha)
2622 {
2623 struct gl_surface_state *gs = get_surface_state(surface);
2624 struct gl_renderer *gr = get_renderer(surface->compositor);
2625
2626 gs->color[0] = red;
2627 gs->color[1] = green;
2628 gs->color[2] = blue;
2629 gs->color[3] = alpha;
2630 gs->buffer_type = BUFFER_TYPE_SOLID;
2631 gs->pitch = 1;
2632 gs->height = 1;
2633
2634 gs->shader = &gr->solid_shader;
2635 }
2636
2637 static void
gl_renderer_surface_get_content_size(struct weston_surface * surface,int * width,int * height)2638 gl_renderer_surface_get_content_size(struct weston_surface *surface,
2639 int *width, int *height)
2640 {
2641 struct gl_surface_state *gs = get_surface_state(surface);
2642
2643 if (gs->buffer_type == BUFFER_TYPE_NULL) {
2644 *width = 0;
2645 *height = 0;
2646 } else {
2647 *width = gs->pitch;
2648 *height = gs->height;
2649 }
2650 }
2651
2652 static uint32_t
pack_color(pixman_format_code_t format,float * c)2653 pack_color(pixman_format_code_t format, float *c)
2654 {
2655 uint8_t r = round(c[0] * 255.0f);
2656 uint8_t g = round(c[1] * 255.0f);
2657 uint8_t b = round(c[2] * 255.0f);
2658 uint8_t a = round(c[3] * 255.0f);
2659
2660 switch (format) {
2661 case PIXMAN_a8b8g8r8:
2662 return (a << 24) | (b << 16) | (g << 8) | r;
2663 default:
2664 assert(0);
2665 return 0;
2666 }
2667 }
2668
/*
 * Read back the surface contents into 'target' as PIXMAN_a8b8g8r8
 * pixels, by drawing the surface's textures into a temporary FBO and
 * calling glReadPixels on the (src_x, src_y, width, height) rectangle.
 *
 * Returns 0 on success, -1 when there is no content or the FBO cannot
 * be completed. For solid-color surfaces only a single pixel is
 * written. NOTE(review): 'size' is never checked against the requested
 * rectangle here — the caller must guarantee the buffer is large
 * enough. src_x/src_y appear to be in GL (bottom-left origin)
 * coordinates since they are fed straight to glReadPixels — confirm
 * against callers.
 */
static int
gl_renderer_surface_copy_content(struct weston_surface *surface,
				 void *target, size_t size,
				 int src_x, int src_y,
				 int width, int height)
{
	/* A unit quad; the same array doubles as the texture
	 * coordinates below, so the full buffer is rendered. */
	static const GLfloat verts[4 * 2] = {
		0.0f, 0.0f,
		1.0f, 0.0f,
		1.0f, 1.0f,
		0.0f, 1.0f
	};
	/* Maps [0,1]x[0,1] to the full NDC viewport. */
	static const GLfloat projmat_normal[16] = { /* transpose */
		 2.0f,  0.0f, 0.0f, 0.0f,
		 0.0f,  2.0f, 0.0f, 0.0f,
		 0.0f,  0.0f, 1.0f, 0.0f,
		-1.0f, -1.0f, 0.0f, 1.0f
	};
	/* Same, but flips Y for non-y-inverted buffers. */
	static const GLfloat projmat_yinvert[16] = { /* transpose */
		 2.0f,  0.0f, 0.0f, 0.0f,
		 0.0f, -2.0f, 0.0f, 0.0f,
		 0.0f,  0.0f, 1.0f, 0.0f,
		-1.0f,  1.0f, 0.0f, 1.0f
	};
	const pixman_format_code_t format = PIXMAN_a8b8g8r8;
	const size_t bytespp = 4; /* PIXMAN_a8b8g8r8 */
	const GLenum gl_format = GL_RGBA; /* PIXMAN_a8b8g8r8 little-endian */
	struct gl_renderer *gr = get_renderer(surface->compositor);
	struct gl_surface_state *gs = get_surface_state(surface);
	int cw, ch;
	GLuint fbo;
	GLuint tex;
	GLenum status;
	const GLfloat *proj;
	int i;

	gl_renderer_surface_get_content_size(surface, &cw, &ch);

	switch (gs->buffer_type) {
	case BUFFER_TYPE_NULL:
		return -1;
	case BUFFER_TYPE_SOLID:
		*(uint32_t *)target = pack_color(format, gs->color);
		return 0;
	case BUFFER_TYPE_SHM:
		/* Make sure pending SHM damage reaches the texture
		 * before we sample it. */
		gl_renderer_flush_damage(surface);
		/* fall through */
	case BUFFER_TYPE_EGL:
		break;
	}

	/* Render target: a texture the size of the full content. */
	glGenTextures(1, &tex);
	glBindTexture(GL_TEXTURE_2D, tex);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, cw, ch,
		     0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
	glBindTexture(GL_TEXTURE_2D, 0);

	glGenFramebuffers(1, &fbo);
	glBindFramebuffer(GL_FRAMEBUFFER, fbo);
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
			       GL_TEXTURE_2D, tex, 0);

	status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
	if (status != GL_FRAMEBUFFER_COMPLETE) {
		weston_log("%s: fbo error: %#x\n", __func__, status);
		glDeleteFramebuffers(1, &fbo);
		glDeleteTextures(1, &tex);
		return -1;
	}

	glViewport(0, 0, cw, ch);
	glDisable(GL_BLEND);
	use_shader(gr, gs->shader);
	if (gs->y_inverted)
		proj = projmat_normal;
	else
		proj = projmat_yinvert;

	glUniformMatrix4fv(gs->shader->proj_uniform, 1, GL_FALSE, proj);
	glUniform1f(gs->shader->alpha_uniform, 1.0f);

	/* Bind every plane of the surface with nearest filtering so
	 * the copy is pixel-exact. */
	for (i = 0; i < gs->num_textures; i++) {
		glUniform1i(gs->shader->tex_uniforms[i], i);

		glActiveTexture(GL_TEXTURE0 + i);
		glBindTexture(gs->target, gs->textures[i]);
		glTexParameteri(gs->target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		glTexParameteri(gs->target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	}

	/* position: */
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts);
	glEnableVertexAttribArray(0);

	/* texcoord: */
	glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, verts);
	glEnableVertexAttribArray(1);

	glDrawArrays(GL_TRIANGLE_FAN, 0, 4);

	glDisableVertexAttribArray(1);
	glDisableVertexAttribArray(0);

	glPixelStorei(GL_PACK_ALIGNMENT, bytespp);
	glReadPixels(src_x, src_y, width, height, gl_format,
		     GL_UNSIGNED_BYTE, target);

	glDeleteFramebuffers(1, &fbo);
	glDeleteTextures(1, &tex);

	return 0;
}
2781
/*
 * Free all renderer state for a surface: detach both destroy
 * listeners, delete the GL textures, drop the EGLImage references and
 * the buffer references, and free the state itself. Called from both
 * the surface-destroy and renderer-destroy listeners; the 'gr'
 * parameter is currently unused in this body.
 */
static void
surface_state_destroy(struct gl_surface_state *gs, struct gl_renderer *gr)
{
	int i;

	wl_list_remove(&gs->surface_destroy_listener.link);
	wl_list_remove(&gs->renderer_destroy_listener.link);

	/* Make sure the surface cannot reach the freed state. */
	gs->surface->renderer_state = NULL;

	glDeleteTextures(gs->num_textures, gs->textures);

	for (i = 0; i < gs->num_images; i++)
		egl_image_unref(gs->images[i]);

	weston_buffer_reference(&gs->buffer_ref, NULL);
	weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
	pixman_region32_fini(&gs->texture_damage);
	free(gs);
}
2802
2803 static void
surface_state_handle_surface_destroy(struct wl_listener * listener,void * data)2804 surface_state_handle_surface_destroy(struct wl_listener *listener, void *data)
2805 {
2806 struct gl_surface_state *gs;
2807 struct gl_renderer *gr;
2808
2809 gs = container_of(listener, struct gl_surface_state,
2810 surface_destroy_listener);
2811
2812 gr = get_renderer(gs->surface->compositor);
2813
2814 surface_state_destroy(gs, gr);
2815 }
2816
2817 static void
surface_state_handle_renderer_destroy(struct wl_listener * listener,void * data)2818 surface_state_handle_renderer_destroy(struct wl_listener *listener, void *data)
2819 {
2820 struct gl_surface_state *gs;
2821 struct gl_renderer *gr;
2822
2823 gr = data;
2824
2825 gs = container_of(listener, struct gl_surface_state,
2826 renderer_destroy_listener);
2827
2828 surface_state_destroy(gs, gr);
2829 }
2830
2831 static int
gl_renderer_create_surface(struct weston_surface * surface)2832 gl_renderer_create_surface(struct weston_surface *surface)
2833 {
2834 struct gl_surface_state *gs;
2835 struct gl_renderer *gr = get_renderer(surface->compositor);
2836
2837 gs = zalloc(sizeof *gs);
2838 if (gs == NULL)
2839 return -1;
2840
2841 /* A buffer is never attached to solid color surfaces, yet
2842 * they still go through texcoord computations. Do not divide
2843 * by zero there.
2844 */
2845 gs->pitch = 1;
2846 gs->y_inverted = true;
2847
2848 gs->surface = surface;
2849
2850 pixman_region32_init(&gs->texture_damage);
2851 surface->renderer_state = gs;
2852
2853 gs->surface_destroy_listener.notify =
2854 surface_state_handle_surface_destroy;
2855 wl_signal_add(&surface->destroy_signal,
2856 &gs->surface_destroy_listener);
2857
2858 gs->renderer_destroy_listener.notify =
2859 surface_state_handle_renderer_destroy;
2860 wl_signal_add(&gr->destroy_signal,
2861 &gs->renderer_destroy_listener);
2862
2863 if (surface->buffer_ref.buffer) {
2864 gl_renderer_attach(surface, surface->buffer_ref.buffer);
2865 gl_renderer_flush_damage(surface);
2866 }
2867
2868 return 0;
2869 }
2870
/* Shared vertex shader: transforms 2D positions by the projection
 * matrix and passes texture coordinates through to the fragment
 * stage. Attribute locations 0 (position) and 1 (texcoord) are bound
 * in shader_init(). */
static const char vertex_shader[] =
	"uniform mat4 proj;\n"
	"attribute vec2 position;\n"
	"attribute vec2 texcoord;\n"
	"varying vec2 v_texcoord;\n"
	"void main()\n"
	"{\n"
	"   gl_Position = proj * vec4(position, 0.0, 1.0);\n"
	"   v_texcoord = texcoord;\n"
	"}\n";
2881
/* Shared fragment-shader tail for the YUV shaders: premultiplies the
 * sampled y/u/v values by alpha and converts them to RGB (the
 * coefficients match the common BT.601 YUV-to-RGB transform). Each
 * YUV shader below ends with this snippet. */
#define FRAGMENT_CONVERT_YUV						\
	"  y *= alpha;\n"						\
	"  u *= alpha;\n"						\
	"  v *= alpha;\n"						\
	"  gl_FragColor.r = y + 1.59602678 * v;\n"			\
	"  gl_FragColor.g = y - 0.39176229 * u - 0.81296764 * v;\n"	\
	"  gl_FragColor.b = y + 2.01723214 * u;\n"			\
	"  gl_FragColor.a = alpha;\n"

/* Optional debug tint appended before the closing brace when
 * fragment-shader debugging is enabled (see shader_init()). */
static const char fragment_debug[] =
	"  gl_FragColor = vec4(0.0, 0.3, 0.0, 0.2) + gl_FragColor * 0.8;\n";

/* Closing brace for main(); every fragment shader source below
 * deliberately omits it so shader_init() can splice fragment_debug
 * in between. */
static const char fragment_brace[] =
	"}\n";
2897
/* NOTE: all fragment shaders below end without the closing "}" —
 * shader_init() appends fragment_brace (and optionally
 * fragment_debug) as extra source strings. */

/* Sample a premultiplied-alpha RGBA texture. */
static const char texture_fragment_shader_rgba[] =
	"precision mediump float;\n"
	"varying vec2 v_texcoord;\n"
	"uniform sampler2D tex;\n"
	"uniform float alpha;\n"
	"void main()\n"
	"{\n"
	"   gl_FragColor = alpha * texture2D(tex, v_texcoord)\n;"
	;

/* Sample an RGB texture, ignoring its alpha channel. */
static const char texture_fragment_shader_rgbx[] =
	"precision mediump float;\n"
	"varying vec2 v_texcoord;\n"
	"uniform sampler2D tex;\n"
	"uniform float alpha;\n"
	"void main()\n"
	"{\n"
	"   gl_FragColor.rgb = alpha * texture2D(tex, v_texcoord).rgb\n;"
	"   gl_FragColor.a = alpha;\n"
	;

/* Sample via GL_OES_EGL_image_external (external/YUV EGLImages). */
static const char texture_fragment_shader_egl_external[] =
	"#extension GL_OES_EGL_image_external : require\n"
	"precision mediump float;\n"
	"varying vec2 v_texcoord;\n"
	"uniform samplerExternalOES tex;\n"
	"uniform float alpha;\n"
	"void main()\n"
	"{\n"
	"   gl_FragColor = alpha * texture2D(tex, v_texcoord)\n;"
	;

/* Two planes: Y in tex, interleaved UV in tex1 (e.g. NV12). */
static const char texture_fragment_shader_y_uv[] =
	"precision mediump float;\n"
	"uniform sampler2D tex;\n"
	"uniform sampler2D tex1;\n"
	"varying vec2 v_texcoord;\n"
	"uniform float alpha;\n"
	"void main() {\n"
	"  float y = 1.16438356 * (texture2D(tex, v_texcoord).x - 0.0625);\n"
	"  float u = texture2D(tex1, v_texcoord).r - 0.5;\n"
	"  float v = texture2D(tex1, v_texcoord).g - 0.5;\n"
	FRAGMENT_CONVERT_YUV
	;

/* Three planes: Y, U and V each in their own texture (planar YUV). */
static const char texture_fragment_shader_y_u_v[] =
	"precision mediump float;\n"
	"uniform sampler2D tex;\n"
	"uniform sampler2D tex1;\n"
	"uniform sampler2D tex2;\n"
	"varying vec2 v_texcoord;\n"
	"uniform float alpha;\n"
	"void main() {\n"
	"  float y = 1.16438356 * (texture2D(tex, v_texcoord).x - 0.0625);\n"
	"  float u = texture2D(tex1, v_texcoord).x - 0.5;\n"
	"  float v = texture2D(tex2, v_texcoord).x - 0.5;\n"
	FRAGMENT_CONVERT_YUV
	;

/* Two planes for packed YUYV: Y in tex, U/V in the g/a channels of
 * the second (XUXV) view. */
static const char texture_fragment_shader_y_xuxv[] =
	"precision mediump float;\n"
	"uniform sampler2D tex;\n"
	"uniform sampler2D tex1;\n"
	"varying vec2 v_texcoord;\n"
	"uniform float alpha;\n"
	"void main() {\n"
	"  float y = 1.16438356 * (texture2D(tex, v_texcoord).x - 0.0625);\n"
	"  float u = texture2D(tex1, v_texcoord).g - 0.5;\n"
	"  float v = texture2D(tex1, v_texcoord).a - 0.5;\n"
	FRAGMENT_CONVERT_YUV
	;

/* Flat color fill for solid (buffer-less) surfaces. */
static const char solid_fragment_shader[] =
	"precision mediump float;\n"
	"uniform vec4 color;\n"
	"uniform float alpha;\n"
	"void main()\n"
	"{\n"
	"   gl_FragColor = alpha * color\n;"
	;
2978
/*
 * Compile a GL shader from 'count' source strings.
 *
 * Returns the shader object name on success, or GL_NONE (0) on
 * compile failure with the info log written to the weston log.
 *
 * Fix: on failure the shader object is now deleted before returning;
 * previously the GL object leaked, since GL_NONE gave the caller
 * nothing to delete.
 */
static int
compile_shader(GLenum type, int count, const char **sources)
{
	GLuint s;
	char msg[512];
	GLint status;

	s = glCreateShader(type);
	glShaderSource(s, count, sources, NULL);
	glCompileShader(s);
	glGetShaderiv(s, GL_COMPILE_STATUS, &status);
	if (!status) {
		glGetShaderInfoLog(s, sizeof msg, NULL, msg);
		weston_log("shader info: %s\n", msg);
		glDeleteShader(s);
		return GL_NONE;
	}

	return s;
}
2998
2999 static int
shader_init(struct gl_shader * shader,struct gl_renderer * renderer,const char * vertex_source,const char * fragment_source)3000 shader_init(struct gl_shader *shader, struct gl_renderer *renderer,
3001 const char *vertex_source, const char *fragment_source)
3002 {
3003 char msg[512];
3004 GLint status;
3005 int count;
3006 const char *sources[3];
3007
3008 shader->vertex_shader =
3009 compile_shader(GL_VERTEX_SHADER, 1, &vertex_source);
3010
3011 if (renderer->fragment_shader_debug) {
3012 sources[0] = fragment_source;
3013 sources[1] = fragment_debug;
3014 sources[2] = fragment_brace;
3015 count = 3;
3016 } else {
3017 sources[0] = fragment_source;
3018 sources[1] = fragment_brace;
3019 count = 2;
3020 }
3021
3022 shader->fragment_shader =
3023 compile_shader(GL_FRAGMENT_SHADER, count, sources);
3024
3025 shader->program = glCreateProgram();
3026 glAttachShader(shader->program, shader->vertex_shader);
3027 glAttachShader(shader->program, shader->fragment_shader);
3028 glBindAttribLocation(shader->program, 0, "position");
3029 glBindAttribLocation(shader->program, 1, "texcoord");
3030
3031 glLinkProgram(shader->program);
3032 glGetProgramiv(shader->program, GL_LINK_STATUS, &status);
3033 if (!status) {
3034 glGetProgramInfoLog(shader->program, sizeof msg, NULL, msg);
3035 weston_log("link info: %s\n", msg);
3036 return -1;
3037 }
3038
3039 shader->proj_uniform = glGetUniformLocation(shader->program, "proj");
3040 shader->tex_uniforms[0] = glGetUniformLocation(shader->program, "tex");
3041 shader->tex_uniforms[1] = glGetUniformLocation(shader->program, "tex1");
3042 shader->tex_uniforms[2] = glGetUniformLocation(shader->program, "tex2");
3043 shader->alpha_uniform = glGetUniformLocation(shader->program, "alpha");
3044 shader->color_uniform = glGetUniformLocation(shader->program, "color");
3045
3046 return 0;
3047 }
3048
3049 static void
shader_release(struct gl_shader * shader)3050 shader_release(struct gl_shader *shader)
3051 {
3052 glDeleteShader(shader->vertex_shader);
3053 glDeleteShader(shader->fragment_shader);
3054 glDeleteProgram(shader->program);
3055
3056 shader->vertex_shader = 0;
3057 shader->fragment_shader = 0;
3058 shader->program = 0;
3059 }
3060
3061 static void
log_extensions(const char * name,const char * extensions)3062 log_extensions(const char *name, const char *extensions)
3063 {
3064 const char *p, *end;
3065 int l;
3066 int len;
3067
3068 l = weston_log("%s:", name);
3069 p = extensions;
3070 while (*p) {
3071 end = strchrnul(p, ' ');
3072 len = end - p;
3073 if (l + len > 78)
3074 l = weston_log_continue("\n" STAMP_SPACE "%.*s",
3075 len, p);
3076 else
3077 l += weston_log_continue(" %.*s", len, p);
3078 for (p = end; isspace(*p); p++)
3079 ;
3080 }
3081 weston_log_continue("\n");
3082 }
3083
3084 static void
log_egl_info(EGLDisplay egldpy)3085 log_egl_info(EGLDisplay egldpy)
3086 {
3087 const char *str;
3088
3089 str = eglQueryString(egldpy, EGL_VERSION);
3090 weston_log("EGL version: %s\n", str ? str : "(null)");
3091
3092 str = eglQueryString(egldpy, EGL_VENDOR);
3093 weston_log("EGL vendor: %s\n", str ? str : "(null)");
3094
3095 str = eglQueryString(egldpy, EGL_CLIENT_APIS);
3096 weston_log("EGL client APIs: %s\n", str ? str : "(null)");
3097
3098 str = eglQueryString(egldpy, EGL_EXTENSIONS);
3099 log_extensions("EGL extensions", str ? str : "(null)");
3100 }
3101
3102 static void
log_gl_info(void)3103 log_gl_info(void)
3104 {
3105 const char *str;
3106
3107 str = (char *)glGetString(GL_VERSION);
3108 weston_log("GL version: %s\n", str ? str : "(null)");
3109
3110 str = (char *)glGetString(GL_SHADING_LANGUAGE_VERSION);
3111 weston_log("GLSL version: %s\n", str ? str : "(null)");
3112
3113 str = (char *)glGetString(GL_VENDOR);
3114 weston_log("GL vendor: %s\n", str ? str : "(null)");
3115
3116 str = (char *)glGetString(GL_RENDERER);
3117 weston_log("GL renderer: %s\n", str ? str : "(null)");
3118
3119 str = (char *)glGetString(GL_EXTENSIONS);
3120 log_extensions("GL extensions", str ? str : "(null)");
3121 }
3122
3123 static void
log_egl_config_info(EGLDisplay egldpy,EGLConfig eglconfig)3124 log_egl_config_info(EGLDisplay egldpy, EGLConfig eglconfig)
3125 {
3126 EGLint r, g, b, a;
3127
3128 weston_log("Chosen EGL config details:\n");
3129
3130 weston_log_continue(STAMP_SPACE "RGBA bits");
3131 if (eglGetConfigAttrib(egldpy, eglconfig, EGL_RED_SIZE, &r) &&
3132 eglGetConfigAttrib(egldpy, eglconfig, EGL_GREEN_SIZE, &g) &&
3133 eglGetConfigAttrib(egldpy, eglconfig, EGL_BLUE_SIZE, &b) &&
3134 eglGetConfigAttrib(egldpy, eglconfig, EGL_ALPHA_SIZE, &a))
3135 weston_log_continue(": %d %d %d %d\n", r, g, b, a);
3136 else
3137 weston_log_continue(" unknown\n");
3138
3139 weston_log_continue(STAMP_SPACE "swap interval range");
3140 if (eglGetConfigAttrib(egldpy, eglconfig, EGL_MIN_SWAP_INTERVAL, &a) &&
3141 eglGetConfigAttrib(egldpy, eglconfig, EGL_MAX_SWAP_INTERVAL, &b))
3142 weston_log_continue(": %d - %d\n", a, b);
3143 else
3144 weston_log_continue(" unknown\n");
3145 }
3146
3147 static int
match_config_to_visual(EGLDisplay egl_display,EGLint visual_id,EGLConfig * configs,int count)3148 match_config_to_visual(EGLDisplay egl_display,
3149 EGLint visual_id,
3150 EGLConfig *configs,
3151 int count)
3152 {
3153 int i;
3154
3155 for (i = 0; i < count; ++i) {
3156 EGLint id;
3157
3158 if (!eglGetConfigAttrib(egl_display,
3159 configs[i], EGL_NATIVE_VISUAL_ID,
3160 &id))
3161 continue;
3162
3163 if (id == visual_id)
3164 return i;
3165 }
3166
3167 return -1;
3168 }
3169
/*
 * Choose an EGL config matching the given attributes and, optionally,
 * one of the given native visual IDs.
 *
 * \param gr The renderer (supplies the EGL display).
 * \param attribs Attribute list for eglChooseConfig().
 * \param visual_id Array of acceptable native visual IDs, tried in
 *                  order of preference; may be NULL.
 * \param n_ids Number of entries in \a visual_id.
 * \param config_out Receives the chosen config on success.
 * \return 0 on success, -1 on failure (no configs, allocation failure,
 *         or no config matching any requested visual).
 */
static int
egl_choose_config(struct gl_renderer *gr, const EGLint *attribs,
		  const EGLint *visual_id, const int n_ids,
		  EGLConfig *config_out)
{
	EGLint count = 0;
	EGLint matched = 0;
	EGLConfig *configs;
	int i, config_index = -1;

	if (!eglGetConfigs(gr->egl_display, NULL, 0, &count) || count < 1) {
		weston_log("No EGL configs to choose from.\n");
		return -1;
	}
	configs = calloc(count, sizeof *configs);
	if (!configs)
		return -1;

	if (!eglChooseConfig(gr->egl_display, attribs, configs,
			     count, &matched) || !matched) {
		weston_log("No EGL configs with appropriate attributes.\n");
		goto out;
	}

	/* No visual preference: take the first matching config. */
	if (!visual_id || n_ids == 0)
		config_index = 0;

	/* Try each requested visual ID in order until one matches.
	 * Note i is always initialized to 0 even when the loop body
	 * never runs, so the i > 1 test below is safe. */
	for (i = 0; config_index == -1 && i < n_ids; i++)
		config_index = match_config_to_visual(gr->egl_display,
						      visual_id[i],
						      configs,
						      matched);

	if (config_index != -1)
		*config_out = configs[config_index];

out:
	free(configs);
	if (config_index == -1)
		return -1;

	/* i > 1 implies visual_id is non-NULL and a fallback ID (not
	 * the first choice) was the one that matched. */
	if (i > 1)
		weston_log("Unable to use first choice EGL config with id"
			   " 0x%x, succeeded with alternate id 0x%x.\n",
			   visual_id[0], visual_id[i - 1]);
	return 0;
}
3217
/*
 * Install (or clear, when data is NULL) the decoration texture for one
 * border side of an output.
 *
 * \param output The output whose border changes.
 * \param side Which border (enum gl_renderer_border_side), also used
 *             as the bit index into border_status.
 * \param width,height Dimensions of the border image; forced to 0x0
 *                     when \a data is NULL.
 * \param tex_width Stride of the texture data in pixels.
 * \param data Pixel data; ownership/lifetime is the caller's — only
 *             the pointer is stored here (TODO confirm against the
 *             code that consumes go->borders).
 */
static void
gl_renderer_output_set_border(struct weston_output *output,
			      enum gl_renderer_border_side side,
			      int32_t width, int32_t height,
			      int32_t tex_width, unsigned char *data)
{
	struct gl_output_state *go = get_output_state(output);

	if (go->borders[side].width != width ||
	    go->borders[side].height != height)
		/* In this case, we have to blow everything and do a full
		 * repaint. */
		go->border_status |= BORDER_SIZE_CHANGED | BORDER_ALL_DIRTY;

	if (data == NULL) {
		width = 0;
		height = 0;
	}

	go->borders[side].width = width;
	go->borders[side].height = height;
	go->borders[side].tex_width = tex_width;
	go->borders[side].data = data;
	/* Mark just this side dirty; side doubles as the bit index. */
	go->border_status |= 1 << side;
}
3243
3244 static int
3245 gl_renderer_setup(struct weston_compositor *ec, EGLSurface egl_surface);
3246
/*
 * Create an EGL window surface for an output.
 *
 * Chooses a config from \a config_attribs / \a visual_id, verifies it
 * is compatible with the renderer's context config (unless a
 * configless context is available), and creates the surface via the
 * platform entry point when present, falling back to the legacy
 * eglCreateWindowSurface().
 *
 * \return The new surface, or EGL_NO_SURFACE on failure.
 */
static EGLSurface
gl_renderer_create_window_surface(struct gl_renderer *gr,
				  EGLNativeWindowType window_for_legacy,
				  void *window_for_platform,
				  const EGLint *config_attribs,
				  const EGLint *visual_id,
				  int n_ids)
{
	EGLSurface egl_surface = EGL_NO_SURFACE;
	EGLConfig egl_config;

	if (egl_choose_config(gr, config_attribs, visual_id,
			      n_ids, &egl_config) == -1) {
		weston_log("failed to choose EGL config for output\n");
		return EGL_NO_SURFACE;
	}

	/* Without no-config/configless context support, every surface
	 * must use the same config the context was created with. */
	if (egl_config != gr->egl_config &&
	    !gr->has_configless_context) {
		weston_log("attempted to use a different EGL config for an "
			   "output but EGL_KHR_no_config_context or "
			   "EGL_MESA_configless_context is not supported\n");
		return EGL_NO_SURFACE;
	}

	log_egl_config_info(gr->egl_display, egl_config);

	if (gr->create_platform_window)
		egl_surface = gr->create_platform_window(gr->egl_display,
							 egl_config,
							 window_for_platform,
							 NULL);
	else
		egl_surface = eglCreateWindowSurface(gr->egl_display,
						     egl_config,
						     window_for_legacy, NULL);

	return egl_surface;
}
3286
3287 static int
gl_renderer_output_create(struct weston_output * output,EGLSurface surface)3288 gl_renderer_output_create(struct weston_output *output,
3289 EGLSurface surface)
3290 {
3291 struct gl_output_state *go;
3292 int i;
3293
3294 go = zalloc(sizeof *go);
3295 if (go == NULL)
3296 return -1;
3297
3298 go->egl_surface = surface;
3299
3300 for (i = 0; i < BUFFER_DAMAGE_COUNT; i++)
3301 pixman_region32_init(&go->buffer_damage[i]);
3302
3303 wl_list_init(&go->timeline_render_point_list);
3304
3305 go->begin_render_sync = EGL_NO_SYNC_KHR;
3306 go->end_render_sync = EGL_NO_SYNC_KHR;
3307
3308 output->renderer_state = go;
3309
3310 return 0;
3311 }
3312
3313 static int
gl_renderer_output_window_create(struct weston_output * output,EGLNativeWindowType window_for_legacy,void * window_for_platform,const EGLint * config_attribs,const EGLint * visual_id,int n_ids)3314 gl_renderer_output_window_create(struct weston_output *output,
3315 EGLNativeWindowType window_for_legacy,
3316 void *window_for_platform,
3317 const EGLint *config_attribs,
3318 const EGLint *visual_id,
3319 int n_ids)
3320 {
3321 struct weston_compositor *ec = output->compositor;
3322 struct gl_renderer *gr = get_renderer(ec);
3323 EGLSurface egl_surface = EGL_NO_SURFACE;
3324 int ret = 0;
3325
3326 egl_surface = gl_renderer_create_window_surface(gr,
3327 window_for_legacy,
3328 window_for_platform,
3329 config_attribs,
3330 visual_id, n_ids);
3331 if (egl_surface == EGL_NO_SURFACE) {
3332 weston_log("failed to create egl surface\n");
3333 return -1;
3334 }
3335
3336 ret = gl_renderer_output_create(output, egl_surface);
3337 if (ret < 0)
3338 weston_platform_destroy_egl_surface(gr->egl_display, egl_surface);
3339
3340 return ret;
3341 }
3342
3343 static void
gl_renderer_output_destroy(struct weston_output * output)3344 gl_renderer_output_destroy(struct weston_output *output)
3345 {
3346 struct gl_renderer *gr = get_renderer(output->compositor);
3347 struct gl_output_state *go = get_output_state(output);
3348 struct timeline_render_point *trp, *tmp;
3349 int i;
3350
3351 for (i = 0; i < 2; i++)
3352 pixman_region32_fini(&go->buffer_damage[i]);
3353
3354 eglMakeCurrent(gr->egl_display,
3355 EGL_NO_SURFACE, EGL_NO_SURFACE,
3356 EGL_NO_CONTEXT);
3357
3358 weston_platform_destroy_egl_surface(gr->egl_display, go->egl_surface);
3359
3360 if (!wl_list_empty(&go->timeline_render_point_list))
3361 weston_log("warning: discarding pending timeline render"
3362 "objects at output destruction");
3363
3364 wl_list_for_each_safe(trp, tmp, &go->timeline_render_point_list, link)
3365 timeline_render_point_destroy(trp);
3366
3367 if (go->begin_render_sync != EGL_NO_SYNC_KHR)
3368 gr->destroy_sync(gr->egl_display, go->begin_render_sync);
3369 if (go->end_render_sync != EGL_NO_SYNC_KHR)
3370 gr->destroy_sync(gr->egl_display, go->end_render_sync);
3371
3372 free(go);
3373 }
3374
3375 static EGLSurface
gl_renderer_output_surface(struct weston_output * output)3376 gl_renderer_output_surface(struct weston_output *output)
3377 {
3378 return get_output_state(output)->egl_surface;
3379 }
3380
3381 static int
gl_renderer_create_fence_fd(struct weston_output * output)3382 gl_renderer_create_fence_fd(struct weston_output *output)
3383 {
3384 struct gl_output_state *go = get_output_state(output);
3385 struct gl_renderer *gr = get_renderer(output->compositor);
3386 int fd;
3387
3388 if (go->end_render_sync == EGL_NO_SYNC_KHR)
3389 return -1;
3390
3391 fd = gr->dup_native_fence_fd(gr->egl_display, go->end_render_sync);
3392 if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID)
3393 return -1;
3394
3395 return fd;
3396 }
3397
/*
 * Destroy the GL renderer: unbind the wl_display from EGL, free dmabuf
 * images, destroy the dummy surface, terminate EGL and free the
 * renderer.  The teardown order matters: listeners are notified first,
 * EGL objects are released before eglTerminate(), and gr is freed last.
 */
static void
gl_renderer_destroy(struct weston_compositor *ec)
{
	struct gl_renderer *gr = get_renderer(ec);
	struct dmabuf_image *image, *next;

	/* Let users of the renderer (e.g. per-surface state) clean up
	 * while the renderer is still valid. */
	wl_signal_emit(&gr->destroy_signal, gr);

	if (gr->has_bind_display)
		gr->unbind_display(gr->egl_display, ec->wl_display);

	/* NOTE(review): inherited workaround for a crash in egl_dri2.c's
	 * dri2_make_current() — exact conditions unconfirmed; releasing
	 * the context before teardown is harmless regardless. */
	eglMakeCurrent(gr->egl_display,
		       EGL_NO_SURFACE, EGL_NO_SURFACE,
		       EGL_NO_CONTEXT);


	wl_list_for_each_safe(image, next, &gr->dmabuf_images, link)
		dmabuf_image_destroy(image);

	if (gr->dummy_surface != EGL_NO_SURFACE)
		weston_platform_destroy_egl_surface(gr->egl_display,
						    gr->dummy_surface);

	eglTerminate(gr->egl_display);
	eglReleaseThread();

	wl_list_remove(&gr->output_destroy_listener.link);

	wl_array_release(&gr->vertices);
	wl_array_release(&gr->vtxcnt);

	if (gr->fragment_binding)
		weston_binding_destroy(gr->fragment_binding);
	if (gr->fan_binding)
		weston_binding_destroy(gr->fan_binding);

	free(gr);
}
3437
3438 static void
renderer_setup_egl_client_extensions(struct gl_renderer * gr)3439 renderer_setup_egl_client_extensions(struct gl_renderer *gr)
3440 {
3441 const char *extensions;
3442
3443 extensions = eglQueryString(EGL_NO_DISPLAY, EGL_EXTENSIONS);
3444 if (!extensions) {
3445 weston_log("Retrieving EGL client extension string failed.\n");
3446 return;
3447 }
3448
3449 if (weston_check_egl_extension(extensions, "EGL_EXT_platform_base"))
3450 gr->create_platform_window =
3451 (void *) eglGetProcAddress("eglCreatePlatformWindowSurfaceEXT");
3452 else
3453 weston_log("warning: EGL_EXT_platform_base not supported.\n");
3454 }
3455
3456 static int
gl_renderer_setup_egl_extensions(struct weston_compositor * ec)3457 gl_renderer_setup_egl_extensions(struct weston_compositor *ec)
3458 {
3459 static const struct {
3460 char *extension, *entrypoint;
3461 } swap_damage_ext_to_entrypoint[] = {
3462 {
3463 .extension = "EGL_EXT_swap_buffers_with_damage",
3464 .entrypoint = "eglSwapBuffersWithDamageEXT",
3465 },
3466 {
3467 .extension = "EGL_KHR_swap_buffers_with_damage",
3468 .entrypoint = "eglSwapBuffersWithDamageKHR",
3469 },
3470 };
3471 struct gl_renderer *gr = get_renderer(ec);
3472 const char *extensions;
3473 EGLBoolean ret;
3474 unsigned i;
3475
3476 gr->create_image = (void *) eglGetProcAddress("eglCreateImageKHR");
3477 gr->destroy_image = (void *) eglGetProcAddress("eglDestroyImageKHR");
3478
3479 gr->bind_display =
3480 (void *) eglGetProcAddress("eglBindWaylandDisplayWL");
3481 gr->unbind_display =
3482 (void *) eglGetProcAddress("eglUnbindWaylandDisplayWL");
3483 gr->query_buffer =
3484 (void *) eglGetProcAddress("eglQueryWaylandBufferWL");
3485 gr->set_damage_region =
3486 (void *) eglGetProcAddress("eglSetDamageRegionKHR");
3487
3488 extensions =
3489 (const char *) eglQueryString(gr->egl_display, EGL_EXTENSIONS);
3490 if (!extensions) {
3491 weston_log("Retrieving EGL extension string failed.\n");
3492 return -1;
3493 }
3494
3495 if (weston_check_egl_extension(extensions, "EGL_IMG_context_priority"))
3496 gr->has_context_priority = true;
3497
3498 if (weston_check_egl_extension(extensions, "EGL_WL_bind_wayland_display"))
3499 gr->has_bind_display = true;
3500 if (gr->has_bind_display) {
3501 ret = gr->bind_display(gr->egl_display, ec->wl_display);
3502 if (!ret)
3503 gr->has_bind_display = false;
3504 }
3505
3506 if (weston_check_egl_extension(extensions, "EGL_EXT_buffer_age"))
3507 gr->has_egl_buffer_age = true;
3508
3509 if (weston_check_egl_extension(extensions, "EGL_KHR_partial_update"))
3510 gr->has_egl_partial_update = true;
3511
3512 for (i = 0; i < ARRAY_LENGTH(swap_damage_ext_to_entrypoint); i++) {
3513 if (weston_check_egl_extension(extensions,
3514 swap_damage_ext_to_entrypoint[i].extension)) {
3515 gr->swap_buffers_with_damage =
3516 (void *) eglGetProcAddress(
3517 swap_damage_ext_to_entrypoint[i].entrypoint);
3518 break;
3519 }
3520 }
3521
3522 if (weston_check_egl_extension(extensions, "EGL_KHR_no_config_context") ||
3523 weston_check_egl_extension(extensions, "EGL_MESA_configless_context"))
3524 gr->has_configless_context = true;
3525
3526 if (weston_check_egl_extension(extensions, "EGL_KHR_surfaceless_context"))
3527 gr->has_surfaceless_context = true;
3528
3529 if (weston_check_egl_extension(extensions, "EGL_EXT_image_dma_buf_import"))
3530 gr->has_dmabuf_import = true;
3531
3532 if (weston_check_egl_extension(extensions,
3533 "EGL_EXT_image_dma_buf_import_modifiers")) {
3534 gr->query_dmabuf_formats =
3535 (void *) eglGetProcAddress("eglQueryDmaBufFormatsEXT");
3536 gr->query_dmabuf_modifiers =
3537 (void *) eglGetProcAddress("eglQueryDmaBufModifiersEXT");
3538 gr->has_dmabuf_import_modifiers = true;
3539 }
3540
3541 if (weston_check_egl_extension(extensions, "EGL_KHR_fence_sync") &&
3542 weston_check_egl_extension(extensions, "EGL_ANDROID_native_fence_sync")) {
3543 gr->create_sync =
3544 (void *) eglGetProcAddress("eglCreateSyncKHR");
3545 gr->destroy_sync =
3546 (void *) eglGetProcAddress("eglDestroySyncKHR");
3547 gr->dup_native_fence_fd =
3548 (void *) eglGetProcAddress("eglDupNativeFenceFDANDROID");
3549 gr->has_native_fence_sync = true;
3550 } else {
3551 weston_log("warning: Disabling render GPU timeline and explicit "
3552 "synchronization due to missing "
3553 "EGL_ANDROID_native_fence_sync extension\n");
3554 }
3555
3556 if (weston_check_egl_extension(extensions, "EGL_KHR_wait_sync")) {
3557 gr->wait_sync = (void *) eglGetProcAddress("eglWaitSyncKHR");
3558 gr->has_wait_sync = true;
3559 } else {
3560 weston_log("warning: Disabling explicit synchronization due"
3561 "to missing EGL_KHR_wait_sync extension\n");
3562 }
3563
3564 renderer_setup_egl_client_extensions(gr);
3565
3566 return 0;
3567 }
3568
/* Config attributes for outputs without an alpha channel: at least
 * 1 bit each of R/G/B, no alpha, GLES2-renderable window surfaces. */
static const EGLint gl_renderer_opaque_attribs[] = {
	EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
	EGL_RED_SIZE, 1,
	EGL_GREEN_SIZE, 1,
	EGL_BLUE_SIZE, 1,
	EGL_ALPHA_SIZE, 0,
	EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
	EGL_NONE
};
3578
/* Config attributes for outputs with an alpha channel: same as the
 * opaque set but requiring at least 1 alpha bit. */
static const EGLint gl_renderer_alpha_attribs[] = {
	EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
	EGL_RED_SIZE, 1,
	EGL_GREEN_SIZE, 1,
	EGL_BLUE_SIZE, 1,
	EGL_ALPHA_SIZE, 1,
	EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
	EGL_NONE
};
3588
3589
3590 /** Checks whether a platform EGL client extension is supported
3591 *
3592 * \param ec The weston compositor
3593 * \param extension_suffix The EGL client extension suffix
3594 * \return 1 if supported, 0 if using fallbacks, -1 unsupported
3595 *
3596 * This function checks whether a specific platform_* extension is supported
3597 * by EGL.
3598 *
3599 * The extension suffix should be the suffix of the platform extension (that
3600 * specifies a platform argument as defined in EGL_EXT_platform_base). For
3601 * example, passing "foo" will check whether either "EGL_KHR_platform_foo",
3602 * "EGL_EXT_platform_foo", or "EGL_MESA_platform_foo" is supported.
3603 *
3604 * The return value is 1:
3605 * - if the supplied EGL client extension is supported.
3606 * The return value is 0:
3607 * - if the platform_base client extension isn't supported so will
3608 * fallback to eglGetDisplay and friends.
3609 * The return value is -1:
3610 * - if the supplied EGL client extension is not supported.
3611 */
static int
gl_renderer_supports(struct weston_compositor *ec,
		     const char *extension_suffix)
{
	/* Client extension string is display-independent, so query it
	 * (and log it) only once per process. */
	static const char *extensions = NULL;
	char s[64];

	if (!extensions) {
		extensions = (const char *) eglQueryString(
			EGL_NO_DISPLAY, EGL_EXTENSIONS);

		if (!extensions)
			return 0;

		log_extensions("EGL client extensions",
			       extensions);
	}

	/* Without platform_base there is no platform selection at all;
	 * caller falls back to eglGetDisplay(). */
	if (!weston_check_egl_extension(extensions, "EGL_EXT_platform_base"))
		return 0;

	/* Try the KHR, EXT and MESA vendor prefixes in turn. */
	snprintf(s, sizeof s, "EGL_KHR_platform_%s", extension_suffix);
	if (weston_check_egl_extension(extensions, s))
		return 1;

	snprintf(s, sizeof s, "EGL_EXT_platform_%s", extension_suffix);
	if (weston_check_egl_extension(extensions, s))
		return 1;

	snprintf(s, sizeof s, "EGL_MESA_platform_%s", extension_suffix);
	if (weston_check_egl_extension(extensions, s))
		return 1;

	/* at this point we definitely have some platform extensions but
	 * haven't found the supplied platform, so chances are it's
	 * not supported. */

	return -1;
}
3651
3652 static const char *
platform_to_extension(EGLenum platform)3653 platform_to_extension(EGLenum platform)
3654 {
3655 switch (platform) {
3656 case EGL_PLATFORM_GBM_KHR:
3657 return "gbm";
3658 case EGL_PLATFORM_WAYLAND_KHR:
3659 return "wayland";
3660 case EGL_PLATFORM_X11_KHR:
3661 return "x11";
3662 default:
3663 assert(0 && "bad EGL platform enum");
3664 }
3665 }
3666
/*
 * Output-destroy listener: when the last output goes away, rebind the
 * context to the dummy surface so the renderer keeps a current context
 * even with no window surfaces left.  With surfaceless-context support
 * gr->dummy_surface is EGL_NO_SURFACE, which is still a valid binding
 * in that case.
 */
static void
output_handle_destroy(struct wl_listener *listener, void *data)
{
	struct gl_renderer *gr;
	struct weston_output *output = data;

	gr = container_of(listener, struct gl_renderer,
			  output_destroy_listener);

	if (wl_list_empty(&output->compositor->output_list))
		eglMakeCurrent(gr->egl_display, gr->dummy_surface,
			       gr->dummy_surface, gr->egl_context);
}
3680
/*
 * Create a tiny (10x10) pbuffer surface used as a dummy binding target
 * when EGL_KHR_surfaceless_context is not available, so the renderer
 * can make its context current without any real output surface.
 *
 * \return 0 on success, -1 on failure.
 */
static int
gl_renderer_create_pbuffer_surface(struct gl_renderer *gr) {
	EGLConfig pbuffer_config;

	static const EGLint pbuffer_config_attribs[] = {
		EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
		EGL_RED_SIZE, 1,
		EGL_GREEN_SIZE, 1,
		EGL_BLUE_SIZE, 1,
		EGL_ALPHA_SIZE, 0,
		EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
		EGL_NONE
	};

	/* Size is arbitrary; the surface is never rendered to for
	 * display, it only satisfies eglMakeCurrent(). */
	static const EGLint pbuffer_attribs[] = {
		EGL_WIDTH, 10,
		EGL_HEIGHT, 10,
		EGL_NONE
	};

	if (egl_choose_config(gr, pbuffer_config_attribs, NULL, 0, &pbuffer_config) < 0) {
		weston_log("failed to choose EGL config for PbufferSurface\n");
		return -1;
	}

	gr->dummy_surface = eglCreatePbufferSurface(gr->egl_display,
						    pbuffer_config,
						    pbuffer_attribs);

	if (gr->dummy_surface == EGL_NO_SURFACE) {
		weston_log("failed to create PbufferSurface\n");
		return -1;
	}

	return 0;
}
3717
/*
 * Create and initialize the GL renderer for a compositor.
 *
 * Obtains an EGL display for the requested platform (via
 * eglGetPlatformDisplayEXT when available, otherwise the legacy
 * eglGetDisplay), initializes EGL, chooses the base config, sets up
 * extensions, registers dmabuf import hooks and SHM formats, and
 * finally creates the GL context via gl_renderer_setup().
 *
 * \param ec The compositor; ec->renderer is set on success.
 * \param platform EGL platform enum, or 0 to skip platform selection.
 * \param native_window Platform-specific native display/window handle.
 * \param platform_attribs Attributes for eglGetPlatformDisplayEXT.
 * \param config_attribs,visual_id,n_ids Passed to egl_choose_config().
 * \return 0 on success, -1 on failure (resources released via the
 *         goto-cleanup ladder below).
 */
static int
gl_renderer_display_create(struct weston_compositor *ec, EGLenum platform,
	void *native_window, const EGLint *platform_attribs,
	const EGLint *config_attribs, const EGLint *visual_id, int n_ids)
{
	struct gl_renderer *gr;
	EGLint major, minor;
	int supports = 0;

	if (platform) {
		supports = gl_renderer_supports(
			ec, platform_to_extension(platform));
		/* -1 means platform extensions exist but not this one. */
		if (supports < 0)
			return -1;
	}

	gr = zalloc(sizeof *gr);
	if (gr == NULL)
		return -1;

	gr->base.read_pixels = gl_renderer_read_pixels;
	gr->base.repaint_output = gl_renderer_repaint_output;
	gr->base.flush_damage = gl_renderer_flush_damage;
	gr->base.attach = gl_renderer_attach;
	gr->base.surface_set_color = gl_renderer_surface_set_color;
	gr->base.destroy = gl_renderer_destroy;
	gr->base.surface_get_content_size =
		gl_renderer_surface_get_content_size;
	gr->base.surface_copy_content = gl_renderer_surface_copy_content;
	gr->egl_display = NULL;

	/* extension_suffix is supported */
	if (supports) {
		if (!get_platform_display) {
			get_platform_display = (void *) eglGetProcAddress(
					"eglGetPlatformDisplayEXT");
		}

		/* also wrap this in the supports check because
		 * eglGetProcAddress can return non-NULL and still not
		 * support the feature at runtime, so ensure the
		 * appropriate extension checks have been done. */
		if (get_platform_display && platform) {
			gr->egl_display = get_platform_display(platform,
							       native_window,
							       platform_attribs);
		}
	}

	if (!gr->egl_display) {
		weston_log("warning: either no EGL_EXT_platform_base "
			   "support or specific platform support; "
			   "falling back to eglGetDisplay.\n");
		gr->egl_display = eglGetDisplay(native_window);
	}

	if (gr->egl_display == EGL_NO_DISPLAY) {
		weston_log("failed to create display\n");
		goto fail;
	}

	if (!eglInitialize(gr->egl_display, &major, &minor)) {
		weston_log("failed to initialize display\n");
		goto fail_with_error;
	}

	log_egl_info(gr->egl_display);

	if (egl_choose_config(gr, config_attribs, visual_id,
			      n_ids, &gr->egl_config) < 0) {
		weston_log("failed to choose EGL config\n");
		goto fail_terminate;
	}

	ec->renderer = &gr->base;

	if (gl_renderer_setup_egl_extensions(ec) < 0)
		goto fail_with_error;

	ec->capabilities |= WESTON_CAP_ROTATION_ANY;
	ec->capabilities |= WESTON_CAP_CAPTURE_YFLIP;
	ec->capabilities |= WESTON_CAP_VIEW_CLIP_MASK;
	/* Explicit sync needs both fence creation and server-side waits. */
	if (gr->has_native_fence_sync && gr->has_wait_sync)
		ec->capabilities |= WESTON_CAP_EXPLICIT_SYNC;

	wl_list_init(&gr->dmabuf_images);
	if (gr->has_dmabuf_import) {
		gr->base.import_dmabuf = gl_renderer_import_dmabuf;
		gr->base.query_dmabuf_formats =
			gl_renderer_query_dmabuf_formats;
		gr->base.query_dmabuf_modifiers =
			gl_renderer_query_dmabuf_modifiers;
	}

	/* A dummy surface is only needed when the context cannot be made
	 * current without a surface. */
	if (gr->has_surfaceless_context) {
		weston_log("EGL_KHR_surfaceless_context available\n");
		gr->dummy_surface = EGL_NO_SURFACE;
	} else {
		weston_log("EGL_KHR_surfaceless_context unavailable. "
			   "Trying PbufferSurface\n");

		if (gl_renderer_create_pbuffer_surface(gr) < 0)
			goto fail_with_error;
	}

	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_RGB565);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUV420);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_NV12);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUYV);

	wl_signal_init(&gr->destroy_signal);

	if (gl_renderer_setup(ec, gr->dummy_surface) < 0) {
		if (gr->dummy_surface != EGL_NO_SURFACE)
			weston_platform_destroy_egl_surface(gr->egl_display,
							    gr->dummy_surface);
		goto fail_with_error;
	}

	return 0;

fail_with_error:
	gl_renderer_print_egl_error_state();
fail_terminate:
	eglTerminate(gr->egl_display);
fail:
	free(gr);
	return -1;
}
3847
3848 static EGLDisplay
gl_renderer_display(struct weston_compositor * ec)3849 gl_renderer_display(struct weston_compositor *ec)
3850 {
3851 return get_renderer(ec)->egl_display;
3852 }
3853
3854 static int
compile_shaders(struct weston_compositor * ec)3855 compile_shaders(struct weston_compositor *ec)
3856 {
3857 struct gl_renderer *gr = get_renderer(ec);
3858
3859 gr->texture_shader_rgba.vertex_source = vertex_shader;
3860 gr->texture_shader_rgba.fragment_source = texture_fragment_shader_rgba;
3861
3862 gr->texture_shader_rgbx.vertex_source = vertex_shader;
3863 gr->texture_shader_rgbx.fragment_source = texture_fragment_shader_rgbx;
3864
3865 gr->texture_shader_egl_external.vertex_source = vertex_shader;
3866 gr->texture_shader_egl_external.fragment_source =
3867 texture_fragment_shader_egl_external;
3868
3869 gr->texture_shader_y_uv.vertex_source = vertex_shader;
3870 gr->texture_shader_y_uv.fragment_source = texture_fragment_shader_y_uv;
3871
3872 gr->texture_shader_y_u_v.vertex_source = vertex_shader;
3873 gr->texture_shader_y_u_v.fragment_source =
3874 texture_fragment_shader_y_u_v;
3875
3876 gr->texture_shader_y_xuxv.vertex_source = vertex_shader;
3877 gr->texture_shader_y_xuxv.fragment_source =
3878 texture_fragment_shader_y_xuxv;
3879
3880 gr->solid_shader.vertex_source = vertex_shader;
3881 gr->solid_shader.fragment_source = solid_fragment_shader;
3882
3883 return 0;
3884 }
3885
/*
 * Key binding: toggle the fragment-shader debug tint.  Releases every
 * compiled shader so each is lazily rebuilt with (or without) the
 * fragment_debug stage appended by shader_init(), then damages all
 * outputs to force a repaint with the new shaders.
 */
static void
fragment_debug_binding(struct weston_keyboard *keyboard,
		       const struct timespec *time,
		       uint32_t key, void *data)
{
	struct weston_compositor *ec = data;
	struct gl_renderer *gr = get_renderer(ec);
	struct weston_output *output;

	gr->fragment_shader_debug = !gr->fragment_shader_debug;

	shader_release(&gr->texture_shader_rgba);
	shader_release(&gr->texture_shader_rgbx);
	shader_release(&gr->texture_shader_egl_external);
	shader_release(&gr->texture_shader_y_uv);
	shader_release(&gr->texture_shader_y_u_v);
	shader_release(&gr->texture_shader_y_xuxv);
	shader_release(&gr->solid_shader);

	/* Force use_shader() to call glUseProgram(), since we need to use
	 * the recompiled version of the shader. */
	gr->current_shader = NULL;

	wl_list_for_each(output, &ec->output_list, link)
		weston_output_damage(output);
}
3912
3913 static void
fan_debug_repaint_binding(struct weston_keyboard * keyboard,const struct timespec * time,uint32_t key,void * data)3914 fan_debug_repaint_binding(struct weston_keyboard *keyboard,
3915 const struct timespec *time,
3916 uint32_t key, void *data)
3917 {
3918 struct weston_compositor *compositor = data;
3919 struct gl_renderer *gr = get_renderer(compositor);
3920
3921 gr->fan_debug = !gr->fan_debug;
3922 weston_compositor_damage_all(compositor);
3923 }
3924
3925 static uint32_t
get_gl_version(void)3926 get_gl_version(void)
3927 {
3928 const char *version;
3929 int major, minor;
3930
3931 version = (const char *) glGetString(GL_VERSION);
3932 if (version &&
3933 (sscanf(version, "%d.%d", &major, &minor) == 2 ||
3934 sscanf(version, "OpenGL ES %d.%d", &major, &minor) == 2)) {
3935 return GR_GL_VERSION(major, minor);
3936 }
3937
3938 return GR_GL_VERSION_INVALID;
3939 }
3940
/* One-time GL state bring-up for the renderer: binds the GLES API,
 * creates and makes current an EGL context (GLES 3 preferred, GLES 2
 * fallback), probes GL extensions, compiles the shaders and installs
 * the debug key bindings. Returns 0 on success, -1 on failure.
 *
 * NOTE(review): the call order here matters — glGetString()/extension
 * probing is only valid after eglMakeCurrent() succeeds. */
static int
gl_renderer_setup(struct weston_compositor *ec, EGLSurface egl_surface)
{
	struct gl_renderer *gr = get_renderer(ec);
	const char *extensions;
	EGLConfig context_config;
	EGLBoolean ret;

	/* Slot [1] (the client version value) is patched below; nattr is
	 * the index of the next free attribute slot. */
	EGLint context_attribs[16] = {
		EGL_CONTEXT_CLIENT_VERSION, 0,
	};
	unsigned int nattr = 2;

	if (!eglBindAPI(EGL_OPENGL_ES_API)) {
		weston_log("failed to bind EGL_OPENGL_ES_API\n");
		gl_renderer_print_egl_error_state();
		return -1;
	}

	/*
	 * Being the compositor we require minimum output latency,
	 * so request a high priority context for ourselves - that should
	 * reschedule all of our rendering and its dependencies to be completed
	 * first. If the driver doesn't permit us to create a high priority
	 * context, it will fallback to the default priority (MEDIUM).
	 */
	if (gr->has_context_priority) {
		context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_LEVEL_IMG;
		context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_HIGH_IMG;
	}

	/* Leave room for the EGL_NONE terminator. */
	assert(nattr < ARRAY_LENGTH(context_attribs));
	context_attribs[nattr] = EGL_NONE;

	context_config = gr->egl_config;

	/* With EGL_KHR_no_config_context the context is not tied to any
	 * particular EGLConfig, letting outputs use differing configs. */
	if (gr->has_configless_context)
		context_config = EGL_NO_CONFIG_KHR;

	/* try to create an OpenGLES 3 context first */
	context_attribs[1] = 3;
	gr->egl_context = eglCreateContext(gr->egl_display, context_config,
					   EGL_NO_CONTEXT, context_attribs);
	if (gr->egl_context == NULL) {
		/* and then fallback to OpenGLES 2 */
		context_attribs[1] = 2;
		gr->egl_context = eglCreateContext(gr->egl_display,
						   context_config,
						   EGL_NO_CONTEXT,
						   context_attribs);
		if (gr->egl_context == NULL) {
			weston_log("failed to create context\n");
			gl_renderer_print_egl_error_state();
			return -1;
		}
	}

	/* The driver may silently downgrade the requested priority; query
	 * what we actually got and log — but do not fail — on a miss. */
	if (gr->has_context_priority) {
		EGLint value = EGL_CONTEXT_PRIORITY_MEDIUM_IMG;

		eglQueryContext(gr->egl_display, gr->egl_context,
				EGL_CONTEXT_PRIORITY_LEVEL_IMG, &value);

		if (value != EGL_CONTEXT_PRIORITY_HIGH_IMG) {
			weston_log("Failed to obtain a high priority context.\n");
			/* Not an error, continue on as normal */
		}
	}

	ret = eglMakeCurrent(gr->egl_display, egl_surface,
			     egl_surface, gr->egl_context);
	if (ret == EGL_FALSE) {
		weston_log("Failed to make EGL context current.\n");
		gl_renderer_print_egl_error_state();
		return -1;
	}

	gr->gl_version = get_gl_version();
	if (gr->gl_version == GR_GL_VERSION_INVALID) {
		weston_log("warning: failed to detect GLES version, "
			   "defaulting to 2.0.\n");
		gr->gl_version = GR_GL_VERSION(2, 0);
	}

	log_gl_info();

	gr->image_target_texture_2d =
		(void *) eglGetProcAddress("glEGLImageTargetTexture2DOES");

	extensions = (const char *) glGetString(GL_EXTENSIONS);
	if (!extensions) {
		weston_log("Retrieving GL extension string failed.\n");
		return -1;
	}

	/* BGRA texture support is mandatory: wl_shm ARGB8888/XRGB8888
	 * buffers are uploaded with this format. */
	if (!weston_check_egl_extension(extensions, "GL_EXT_texture_format_BGRA8888")) {
		weston_log("GL_EXT_texture_format_BGRA8888 not available\n");
		return -1;
	}

	/* Pick the glReadPixels format used for screenshots/read-back. */
	if (weston_check_egl_extension(extensions, "GL_EXT_read_format_bgra"))
		ec->read_format = PIXMAN_a8r8g8b8;
	else
		ec->read_format = PIXMAN_a8b8g8r8;

	/* GLES 3 provides GL_UNPACK_ROW_LENGTH and R/RG textures in core,
	 * so the version check stands in for the extension. */
	if (gr->gl_version >= GR_GL_VERSION(3, 0) ||
	    weston_check_egl_extension(extensions, "GL_EXT_unpack_subimage"))
		gr->has_unpack_subimage = true;

	if (gr->gl_version >= GR_GL_VERSION(3, 0) ||
	    weston_check_egl_extension(extensions, "GL_EXT_texture_rg"))
		gr->has_gl_texture_rg = true;

	/* Needed to sample EGLImages from e.g. hardware video decoders. */
	if (weston_check_egl_extension(extensions, "GL_OES_EGL_image_external"))
		gr->has_egl_image_external = true;

	glActiveTexture(GL_TEXTURE0);

	if (compile_shaders(ec))
		return -1;

	/* Debug bindings: KEY_S toggles fragment-shader debug tinting,
	 * KEY_F toggles the triangle-fan overdraw visualization. */
	gr->fragment_binding =
		weston_compositor_add_debug_binding(ec, KEY_S,
						    fragment_debug_binding,
						    ec);
	gr->fan_binding =
		weston_compositor_add_debug_binding(ec, KEY_F,
						    fan_debug_repaint_binding,
						    ec);

	gr->output_destroy_listener.notify = output_handle_destroy;
	wl_signal_add(&ec->output_destroyed_signal,
		      &gr->output_destroy_listener);

	weston_log("GL ES 2 renderer features:\n");
	weston_log_continue(STAMP_SPACE "read-back format: %s\n",
			    ec->read_format == PIXMAN_a8r8g8b8 ? "BGRA" : "RGBA");
	weston_log_continue(STAMP_SPACE "wl_shm sub-image to texture: %s\n",
			    gr->has_unpack_subimage ? "yes" : "no");
	weston_log_continue(STAMP_SPACE "EGL Wayland extension: %s\n",
			    gr->has_bind_display ? "yes" : "no");


	return 0;
}
4086
4087 WL_EXPORT struct gl_renderer_interface gl_renderer_interface = {
4088 .opaque_attribs = gl_renderer_opaque_attribs,
4089 .alpha_attribs = gl_renderer_alpha_attribs,
4090
4091 .display_create = gl_renderer_display_create,
4092 .display = gl_renderer_display,
4093 .output_window_create = gl_renderer_output_window_create,
4094 .output_destroy = gl_renderer_output_destroy,
4095 .output_surface = gl_renderer_output_surface,
4096 .output_set_border = gl_renderer_output_set_border,
4097 .create_fence_fd = gl_renderer_create_fence_fd,
4098 .print_egl_error_state = gl_renderer_print_egl_error_state
4099 };
4100