1 /*
2 * copyright 2014-2020, björn ståhl
3 * license: 3-clause bsd, see copying file in arcan source repository.
4 * reference: http://arcan-fe.com
5 */
6 #include <stdio.h>
7 #include <string.h>
8 #include <stdlib.h>
9 #include <stdint.h>
10 #include <stdbool.h>
11 #include <math.h>
12 #include <unistd.h>
13 #include <errno.h>
14 #include <signal.h>
15 #include <glob.h>
16 #include <ctype.h>
17 #include <dlfcn.h>
18 #include <inttypes.h>
19 #include <sys/mman.h>
20 #include <sys/ioctl.h>
21
22 #include <sys/types.h>
23 #include <sys/stat.h>
24 #include <poll.h>
25
26 #include <fcntl.h>
27 #include <assert.h>
28
29 #include <libdrm/drm.h>
30 #include <libdrm/drm_mode.h>
31 #include <libdrm/drm_fourcc.h>
32
33 #include <xf86drm.h>
34 #include <xf86drmMode.h>
35 #include <gbm.h>
36
37 #include "arcan_math.h"
38 #include "arcan_general.h"
39 #include "arcan_video.h"
40 #include "arcan_audio.h"
41 #include "arcan_videoint.h"
42 #include "arcan_led.h"
43 #include "arcan_shmif.h"
44 #include "arcan_frameserver.h"
45 #include "../agp/glfun.h"
46 #include "arcan_event.h"
47 #include "libbacklight.h"
48
49 /*
50 * Current details / notes:
51 *
52 * - The EGL Context management rules have mutated over the years, currently
53 * we mess around with a shared 'device' context and then per display contexts
54 * that we render to based on the buffer format properties desired for that
55 * device.
56 *
57 * - The crutch to that is that when different format options are desired, say
58 * a 565 one and a 10bit on EGL will choke since it does not fit the config on
59 * the shared context. There is an extension to this, EGL_KHR_no_config_context
60 * where we can simply stop and remove/refactor all the device_context/display
61 * context settings.
62 *
63 * - But this is not the entire truth - enter multiGPU where we actually have
64 * discrete and different contexts and possible different GL implementations
65 * living in the same TLS.
66 *
67 * - For that case we need to track afinity to a agp_vstore and upload/synch
68 * into each context based on that affinity.
69 */
70
71 /*
72 * mask out these types as they won't be useful,
73 */
74 #define VIDEO_PLATFORM_IMPL
75 #include "../../engine/arcan_conductor.h"
76
77 #ifdef _DEBUG
78 #define DEBUG 1
79 #else
80 #define DEBUG 0
81 #endif
82
83 /*
84 * same debugging / tracing setup as in egl-dri.c
85 */
86 #define debug_print(fmt, ...) \
87 do { if (DEBUG) arcan_warning("%lld:%s:%d:%s(): " fmt "\n",\
88 arcan_timemillis(), "egl-dri:", __LINE__, __func__,##__VA_ARGS__); } while (0)
89
90 #ifndef verbose_print
91 #define verbose_print
92 #endif
93
94 #include "egl.h"
95
/* alias so the shmif-ext helper headers and the agp layer agree on the
 * plane descriptor type without pulling in the full shmifext header */
#define shmifext_buffer_plane agp_buffer_plane

/* minimal stand-in for the shmif-ext color buffer tracking structure,
 * only the members used by the helpers included below */
struct shmifext_color_buffer {
	union {
		unsigned int gl; /* GL texture name when the buffer is GL-backed */
	} id;

/* opaque per-allocator bookkeeping slots */
	void* alloc_tags[4];

/* allocator/buffer kind discriminator */
	int type;
};
105
106 #include "egl_gbm_helper.h"
107
108 static bool lookup_drm_propval(int fd,
109 uint32_t oid, uint32_t otype, const char* name, uint64_t* val, bool id);
110
111 static const char* egl_errstr();
lookup(void * tag,const char * sym,bool req)112 static void* lookup(void* tag, const char* sym, bool req)
113 {
114 dlerror();
115 void* res = dlsym(tag ? tag : RTLD_DEFAULT, sym);
116 if (dlerror() != NULL && req){
117 arcan_fatal("agp lookup(%s) failed, missing req. symbol.\n", sym);
118 }
119 return res;
120 }
121
lookup_call(void * tag,const char * sym,bool req)122 static void* lookup_call(void* tag, const char* sym, bool req)
123 {
124 PFNEGLGETPROCADDRESSPROC getproc = tag;
125 void* res = getproc(sym);
126 if (!res && req)
127 arcan_fatal("agp lookup(%s) failed, missing req. symbol.\n", sym);
128 return res;
129 }
130
/* key/description pairs surfaced to the user as the set of supported
 * video_* configuration options for this platform, NULL terminated */
static char* egl_envopts[] = {
	"[ for multiple devices, append _n to key (e.g. device_2=) ]", "",
	"display_device=/path/to/dev", "for multiple devices suffix with _n (n = 2,3..)",
	"draw_device=/path/to/dev", "set to display device unless provided",
	"device_buffer=method", "set buffer transfer method (gbm, streams)",
	"device_libs=lib1:lib2", "libs used for device",
	"device_connector=ind", "primary display connector index",
	"device_wait", "loop until an active connector is found",
	"device_nodpms", "set to disable power management controls",
	"device_direct_scanout", "enable direct rendertarget scanout",
	"display_context=1", "set outer shared headless context, per display contexts",
	NULL
};
144
/* how output buffers are produced / handed to KMS for a device */
enum buffer_method {
	BUF_GBM,
/* There is another option to running a 'display-less' EGL context and
 * that is to build the display around a pbuffer, but there seem to be
 * little utility to having that over this form of 'headless' */
	BUF_HEADLESS,
	BUF_STREAM
};

/* how frame pacing / completion is detected for a device */
enum vsynch_method {
	VSYNCH_FLIP = 0,   /* page-flip events drive synch */
	VSYNCH_CLOCK = 1,  /* fall back to a timer based clock */
	VSYNCH_IGNORE = 2  /* no synchronization at all */
};

/* per-frame decision on how a display gets its new contents */
enum display_update_state {
	UPDATE_FLIP, /* swap between front and back bo */
	UPDATE_DIRECT, /* scan out the mapped store directly */
	UPDATE_FRONT, /* draw into the front buffer */
	UPDATE_SKIP /* nothing to do this pass */
};
166
167 /*
168 * Each open output device, can be shared between displays
169 */
/*
 * Each open output device (GPU / card), can be shared between displays
 */
struct dev_node {
	int active; /*tristate, 0 = not used, 1 = active, 2 = displayless, 3 = inactive */
	int draw_fd; /* node used for rendering, may differ from scanout */
	int disp_fd; /* node used for modesetting / scanout */
	char* pathref; /* privsep path handle, released on close_devices */

/* things we need to track to be able to forward devices to a client */
	struct {
		int fd;
		uint8_t* metadata;
		size_t metadata_sz;
	} client_meta;
	int refc; /* number of displays allocated on this node */

/* dev_node to use instead of this when performing reset */
	int gpu_index;
	bool have_altgpu;

	enum vsynch_method vsynch_method;

/* card_id is some unique sequential identifier for this card
 * crtc is an allocation bitmap for output port<->display allocation
 * atomic is set if the driver kms side supports/needs atomic modesetting */
	bool wait_connector;
	bool explicit_synch;
	int card_id;
	bool atomic;

/*
 * method is the key driver for most paths in here, see the M_ enum values
 * above to indicate which of the elements here that are valid.
 */
	enum buffer_method buftype;
	union {
		EGLDeviceEXT egldev;
		struct gbm_device* gbm;
	} buffer;

/* Display is the display system connection, not to be confused with our normal
 * display, for that we have configs derived from the display which match the
 * visual of our underlying buffer method - these combined give the surface
 * within the context */
	EGLint attrtbl[24];
	EGLConfig config;
	EGLDisplay display;

/* Each display has its own context in order to have different framebuffer out
 * configuration, then an outer headless context that all the other resources
 * are allocated with */
	EGLContext context;
	int context_refc; /* displays sharing the device context */
	const char* context_state; /* "device" or "display", trace aid only */

/*
 * to deal with multiple GPUs and multiple vendor libraries, these contexts are
 * managed per display and explicitly referenced / switched when we need to.
 */
	char* egllib;
	char* agplib;
	struct egl_env eglenv;
};
231
/* lifecycle of a dispout slot in the static displays[] table */
enum disp_state {
	DISP_UNUSED = 0,  /* free slot */
	DISP_KNOWN = 1,   /* allocated, connector known but not mapped */
	DISP_MAPPED = 2,  /* actively scanning out */
	DISP_CLEANUP = 3, /* pending deferred destruction */
	DISP_EXTSUSP = 4  /* externally suspended (e.g. VT switch) */
};
239
240 /*
241 * only the setup_cards_db() initialization path can handle more than one
242 * device node, and it is incomplete still until we can maintain affinity
243 * for all resources.
244 */
245 #ifndef VIDEO_MAX_NODES
246 #define VIDEO_MAX_NODES 4
247 #endif
248 static struct dev_node nodes[VIDEO_MAX_NODES];
249
250 /*
251 * we don't go with the normal GBM_ buffer names in order to have something
252 * that maps between EGLStreams/... and covers the set of 'common' display
253 * formats
254 */
/*
 * we don't go with the normal GBM_ buffer names in order to have something
 * that maps between EGLStreams/... and covers the set of 'common' display
 * formats
 */
enum output_format {
	OUTPUT_DEFAULT = 0, /* RGB888 */
	OUTPUT_DEEP = 1, /* 10-bit per channel */
	OUTPUT_LOW = 2,  /* RGB565 */
	OUTPUT_HDR = 3   /* FP16 per channel */
};
261
262 /*
263 * aggregation struct that represent one triple of display, card, bindings
264 */
/*
 * aggregation struct that represent one triple of display, card, bindings
 */
struct dispout {
/* connect drm, gbm and EGLs idea of a device */
	struct dev_node* device;
	unsigned long long last_update; /* timestamp of last buffer update */
	int output_format; /* enum output_format request for this display */
	uint64_t frame_cookie; /* detects re-blit of the same frame */

	struct monitor_mode* mode_cache;
	size_t mode_cache_sz;

/* the output buffers, actual fields use will vary with underlying
 * method, i.e. different for normal gbm, headless gbm and eglstreams */
	struct {
		int in_flip, in_destroy, in_dumb_set;
		EGLConfig config;
		EGLContext context;
		EGLSurface esurf;
		EGLStreamKHR stream;
		EGLSyncKHR synch;

		struct gbm_bo* cur_bo, (* next_bo);
		uint32_t cur_fb;
		int format;
		struct gbm_surface* surface;

/* If a vobj- has been set to be directly mapped and is of a compatible type
 * (e.g. shm or tui mapped) we use a single dumb buffer as our fb and draw
 * directly into it (the shmpage itself is our "back buffer") - the store
 * pointer will be our reference. With TUI, the rasterizer draws right into the
 * front buffer. This will currently cause tearing. The improvement there is a
 * synch interface so that we can have a worker thread that does the final
 * blit/upload from the text screen buffer.
 */
		struct {
			bool enabled;
			size_t sz;
			struct agp_vstore agp;
			struct agp_vstore* ref;
			int fd;
			uint32_t fb;
		} dumb;
	} buffer;

/* KMS-side state: connector, mode, crtc/plane bindings and restore data */
	struct {
		bool reset_mode, primary;
		drmModeConnector* con;
		uint32_t con_id;
		drmModeModeInfo mode;
		int mode_set;
		drmModeCrtcPtr old_crtc; /* saved so the original mode can be restored */
		int crtc;
		int crtc_index;
		int plane_id;
		enum dpms_state dpms;
		char* edid_blob;
		size_t blob_sz;
		size_t gamma_size;
		uint16_t* orig_gamma; /* saved LUT for restore on release */

/* should track a small amount of possible overlay planes (one or two) and
 * allow the platform_map call to set them and their offsets individually */
	} display;

/* internal v-store and system mappings, rules for drawing final output */
	arcan_vobj_id vid;
	bool force_compose; /* disable direct scanout, always composite */
	bool skip_blit;
	size_t dispw, disph, dispx, dispy;

	_Alignas(16) float projection[16];
	_Alignas(16) float txcos[8];
	enum blitting_hint hint;
	enum disp_state state;
	platform_display_id id;

/* backlight is "a bit" quirky, we register a custom led controller that
 * is shared for all displays and processed while we're busy synching.
 * Subleds on this controller match the displayid of the display */
	struct backlight* backlight;
	long backlight_brightness;
};
346
/* platform-global state: last active display (for error lookup), canvas
 * dimensions, deferred destruction timing and the backlight led bridge */
static struct {
	struct dispout* last_display;
	size_t canvasw, canvash;
	long long destroy_pending; /* bitmask of displays awaiting cleanup */
	int ledid, ledind; /* registered backlight led controller / sub-led */
	uint8_t ledval[3];
	int ledpair[2]; /* pipe used by the led controller thread */

/* on rendertarget rebuild and so on the decay is set to a certain number
 * of frames (typically 3 - current, in-flight, draw-dst) so that the full
 * swapchain reflects the same contents and format */
	size_t decay;

	long long last_card_scan;
	bool scan_pending;
} egl_dri = {
	.ledind = 255
};
365
366 #ifndef CARD_RESCAN_DELAY_MS
367 #define CARD_RESCAN_DELAY_MS 500
368 #endif
369
370 #ifndef MAX_DISPLAYS
371 #define MAX_DISPLAYS 16
372 #endif
373
374 static struct dispout displays[MAX_DISPLAYS];
375
allocate_display(struct dev_node * node)376 static struct dispout* allocate_display(struct dev_node* node)
377 {
378 uintptr_t tag;
379 cfg_lookup_fun get_config = platform_config_lookup(&tag);
380
381 for (size_t i = 0; i < MAX_DISPLAYS; i++){
382 if (displays[i].state == DISP_UNUSED){
383 displays[i].device = node;
384 displays[i].display.primary = false;
385 displays[i].id = i;
386
387 node->refc++;
388 displays[i].state = DISP_KNOWN;
389
390 /* we currently force composition on all displays unless
391 * explicitly turned on, as there seem to be some driver
392 * issues with scanning out fbo color attachments */
393 displays[i].force_compose = !get_config(
394 "video_device_direct_scanout", 0, NULL, tag);
395 debug_print("(%zu) added, force composition? %d",
396 i, (int) displays[i].force_compose);
397 return &displays[i];
398 }
399 }
400
401 return NULL;
402 }
403
get_display(size_t index)404 static struct dispout* get_display(size_t index)
405 {
406 if (index >= MAX_DISPLAYS)
407 return NULL;
408 else
409 return &displays[index];
410 }
411
adpms_to_dpms(enum dpms_state state)412 static int adpms_to_dpms(enum dpms_state state)
413 {
414 switch (state){
415 case ADPMS_ON: return DRM_MODE_DPMS_ON;
416 case ADPMS_STANDBY: return DRM_MODE_DPMS_STANDBY;
417 case ADPMS_SUSPEND: return DRM_MODE_DPMS_SUSPEND;
418 case ADPMS_OFF: return DRM_MODE_DPMS_OFF;
419 default:
420 return -1;
421 }
422 }
423
/* make the device-level (headless, surfaceless) context current, used for
 * all resource allocation that is not tied to a specific display surface */
static void set_device_context(struct dev_node* node)
{
	verbose_print("context_state=device(%"PRIxPTR")", (uintptr_t)node->context);
	node->eglenv.make_current(node->display,
		EGL_NO_SURFACE, EGL_NO_SURFACE, node->context);
	node->context_state = "device";
}
431
/* make the display's drawing context current; a display either shares the
 * device context (buffer.context == EGL_NO_CONTEXT) or has its own private
 * context when its surface format differs from the device one */
static void set_display_context(struct dispout* d)
{
	if (d->buffer.context == EGL_NO_CONTEXT){
		verbose_print("context_state=display(%"PRIxPTR")", (uintptr_t)d->device->context);
		d->device->eglenv.make_current(
			d->device->display, d->buffer.esurf, d->buffer.esurf, d->device->context);
	}
	else {
		verbose_print("context_state=display(uniq_%"PRIxPTR")", (uintptr_t)d->buffer.context);
		d->device->eglenv.make_current(
			d->device->display, d->buffer.esurf, d->buffer.esurf, d->buffer.context);
	}

	d->device->context_state = "display";
}
447
448 /*
449 * Same example as on khronos.org/registry/OpenGL/docs/rules.html
450 */
/*
 * Exact token match of [needle] within a space separated extension string,
 * same approach as on khronos.org/registry/OpenGL/docs/rules.html - plain
 * strstr would false-positive on prefixes (e.g. EGL_KHR_stream matching
 * EGL_KHR_stream_fifo).
 *
 * Fix: token length kept as size_t (strcspn returns size_t, comparing it
 * against an int mixed signedness) and NULL inputs are tolerated.
 */
static bool check_ext(const char* needle, const char* haystack)
{
	if (!needle || !haystack)
		return false;

	const char* cpos = haystack;
	size_t len = strlen(needle);
	const char* eoe = haystack + strlen(haystack);

	while (cpos < eoe){
		size_t n = strcspn(cpos, " ");
		if (len == n && strncmp(needle, cpos, n) == 0)
			return true;
/* skip token and the separator (stepping past eoe just ends the loop) */
		cpos += (n+1);
	}

	return false;
}
466
dpms_set(struct dispout * d,int level)467 static void dpms_set(struct dispout* d, int level)
468 {
469 uintptr_t tag;
470 cfg_lookup_fun get_config = platform_config_lookup(&tag);
471 if (get_config("video_device_nodpms", 0, NULL, tag)){
472 return;
473 }
474
475 /*
476 * FIXME: this needs to be deferred in the same way as disable / etc.
477 */
478 drmModePropertyPtr prop;
479 debug_print("dpms_set(%d) to %d", d->device->disp_fd, level);
480 for (size_t i = 0; i < d->display.con->count_props; i++){
481 prop = drmModeGetProperty(d->device->disp_fd, d->display.con->props[i]);
482 if (!prop)
483 continue;
484
485 if (strcmp(prop->name, "DPMS") == 0){
486 drmModeConnectorSetProperty(d->device->disp_fd,
487 d->display.con->connector_id, prop->prop_id, level);
488 i = d->display.con->count_props;
489 }
490
491 drmModeFreeProperty(prop);
492 }
493 }
494
495 /*
496 * free, dealloc, possibly re-index displays
497 */
498 static void disable_display(struct dispout*, bool dealloc);
499
500 /*
501 * assumes that the video pipeline is in a state to safely
502 * blit, will take the mapped objects and schedule buffer transfer
503 */
504 static bool update_display(struct dispout*);
505
506 static bool set_dumb_fb(struct dispout* d);
507
close_devices(struct dev_node * node)508 static void close_devices(struct dev_node* node)
509 {
510 /* we might have a different device for drawing than for scanout */
511 int disp_fd = node->disp_fd;
512 if (-1 != disp_fd){
513
514 /* the privsep- parent still has the device open in master */
515 if (node->pathref){
516 platform_device_release(node->pathref, 0);
517 free(node->pathref);
518 node->pathref = NULL;
519 }
520 close(disp_fd);
521 node->disp_fd = -1;
522 }
523
524 /* another node might be used for drawing, assumed this does not
525 * actually need master, if that turns out incorrect - duplicate the
526 * pathref parts to drawref as well */
527 if (node->draw_fd != -1 && node->draw_fd != disp_fd){
528 close(node->draw_fd);
529 node->draw_fd = -1;
530 }
531
532 /* render node */
533 if (node->client_meta.fd != -1){
534 close(node->client_meta.fd);
535 node->client_meta.fd = -1;
536 }
537 }
538
539 /*
540 * Assumes that the individual displays allocated on the card have already
541 * been properly disabled(disable_display(ptr, true))
542 */
release_card(size_t i)543 static void release_card(size_t i)
544 {
545 if (!nodes[i].active)
546 return;
547
548 debug_print("release card (%d)", i);
549 nodes[i].eglenv.make_current(nodes[i].display,
550 EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
551
552 if (nodes[i].context != EGL_NO_CONTEXT){
553 nodes[i].eglenv.destroy_context(nodes[i].display, nodes[i].context);
554 nodes[i].context = EGL_NO_CONTEXT;
555 }
556
557 switch (nodes[i].buftype){
558 case BUF_GBM:
559 if (nodes[i].buffer.gbm){
560 debug_print("destroying device/gbm buffers");
561 gbm_device_destroy(nodes[i].buffer.gbm);
562 nodes[i].buffer.gbm = NULL;
563 }
564 break;
565 case BUF_HEADLESS:
566 /* Should be destroyed with the EGL context */
567 break;
568 case BUF_STREAM:
569 /* Should be destroyed with the EGL context */
570 break;
571 }
572
573 close_devices(&nodes[i]);
574
575 if (nodes[i].display != EGL_NO_DISPLAY){
576 debug_print("terminating card-egl display");
577 nodes[i].eglenv.terminate(nodes[i].display);
578 nodes[i].display = EGL_NO_DISPLAY;
579 }
580
581 nodes[i].context_refc = 0;
582 nodes[i].active = false;
583 }
584
585 /* the criterion for direct- mapping is a bit weird:
586 * if the backing is entirely GPU based, then we need to juggle / queue buffers.
587 *
588 * The 'refcount' property is somewhat problematic as it means that the backing
589 * store might be locked with scanout while we are also waiting to update it for
590 * another consumer.
591 */
/* the criterion for direct- mapping is a bit weird:
 * if the backing is entirely GPU based, then we need to juggle / queue buffers.
 *
 * The 'refcount' property is somewhat problematic as it means that the backing
 * store might be locked with scanout while we are also waiting to update it for
 * another consumer.
 *
 * Fix: the debug trace dereferenced vobj / vobj->vstore before the NULL
 * checks that the return expression performs, crashing debug builds when
 * called with a NULL or store-less vobj - guard every dereference.
 */
static bool sane_direct_vobj(arcan_vobject* vobj, const char* domain)
{
	debug_print(
		"direct=%s:sane_direct=%d:vobj=%d:no_txcos=%d:default_prg=%d:2d=%d",
		domain,
		(int)(vobj != NULL),
		(int)(vobj && vobj->vstore != NULL),
		(int)(vobj && vobj->txcos == NULL),
		(int)(vobj &&
			(!vobj->program || vobj->program == agp_default_shader(BASIC_2D))),
		(int)(vobj && vobj->vstore &&
			vobj->vstore->txmapped == TXSTATE_TEX2D)
	);

	return vobj
		&& vobj->vstore
		&& !vobj->txcos
		&& (!vobj->program || vobj->program == agp_default_shader(BASIC_2D))
		&& vobj->vstore->txmapped == TXSTATE_TEX2D;
}
610
/* Import a set of dmabuf planes as an EGLImage and bind it to the texture
 * backing [vs]. Returns false when the EGL image extensions are missing or
 * the import itself fails. Note: always imports on the primary node (nodes[0]),
 * multi-GPU affinity is not handled here yet. */
bool platform_video_map_buffer(
	struct agp_vstore* vs, struct agp_buffer_plane* planes, size_t n_planes)
{
	if (!nodes[0].eglenv.create_image || !nodes[0].eglenv.image_target_texture2D)
		return false;

	struct dev_node* device = &nodes[0];
	EGLDisplay dpy = device->display;
	struct egl_env* egl = &device->eglenv;

	EGLImage img = helper_dmabuf_eglimage(agp_env(), egl, dpy, planes, n_planes);
	if (!img){
		debug_print("buffer import failed (%s)", egl_errstr());
		return false;
	}

/* might have an old eglImage around */
	if (0 != vs->vinf.text.tag){
		egl->destroy_image(dpy, (EGLImageKHR) vs->vinf.text.tag);
	}

/* dimensions follow the first plane, assumes all planes agree */
	vs->w = planes[0].w;
	vs->h = planes[0].h;
	vs->bpp = sizeof(shmif_pixel);
	vs->txmapped = TXSTATE_TEX2D;

/* bind the image to the store's GL texture */
	agp_activate_vstore(vs);
		egl->image_target_texture2D(GL_TEXTURE_2D, img);
	agp_deactivate_vstore(vs);

/* keep the image so it can be destroyed on the next re-import */
	vs->vinf.text.tag = (uintptr_t) img;

	return true;
}
645
/* Register a custom LED controller that proxies backlight control; sub-led
 * indices match display ids. Communication runs over a pipe pair that is
 * set non-blocking and close-on-exec. Silently does nothing if the pipe or
 * the controller registration fails. */
void setup_backlight_ledmap()
{
	if (pipe(egl_dri.ledpair) == -1)
		return;

	egl_dri.ledid = arcan_led_register(egl_dri.ledpair[1], -1,
		"backlight", (struct led_capabilities){
		.nleds = MAX_DISPLAYS,
		.variable_brightness = true,
		.rgb = false
	});

/* prepare the pipe-pair to be non-block and close-on-exit */
	if (-1 == egl_dri.ledid){
		close(egl_dri.ledpair[0]);
		close(egl_dri.ledpair[1]);
		egl_dri.ledpair[0] = egl_dri.ledpair[1] = -1;
	}
	else{
		for (size_t i = 0; i < 2; i++){
			int flags = fcntl(egl_dri.ledpair[i], F_GETFL);
			if (-1 != flags)
				fcntl(egl_dri.ledpair[i], F_SETFL, flags | O_NONBLOCK);

			flags = fcntl(egl_dri.ledpair[i], F_GETFD);
			if (-1 != flags)
				fcntl(egl_dri.ledpair[i], F_SETFD, flags | FD_CLOEXEC);
		}
	}
}
676
/* crash-context message emitted from the SIGSEGV handler, updated via
 * SET_SEGV_MSG before entering driver calls known to crash */
static char* last_err = "unknown";
static size_t err_sz = 0;
#define SET_SEGV_MSG(X) last_err = (X); err_sz = sizeof(X);

/* async-signal-safe SIGSEGV handler: only write() + _exit() are used */
static void sigsegv_errmsg(int sign)
{
	size_t nw __attribute__((unused));
	nw = write(STDOUT_FILENO, last_err, err_sz);
	_exit(EXIT_FAILURE);
}
687
egl_errstr()688 static const char* egl_errstr()
689 {
690 if (!egl_dri.last_display)
691 return "No EGL display";
692
693 EGLint errc = egl_dri.last_display->device->eglenv.get_error();
694 switch(errc){
695 case EGL_SUCCESS:
696 return "Success";
697 case EGL_NOT_INITIALIZED:
698 return "Not initialize for the specific display connection";
699 case EGL_BAD_ACCESS:
700 return "Cannot access the requested resource (wrong thread?)";
701 case EGL_BAD_ALLOC:
702 return "Couldn't allocate resources for the requested operation";
703 case EGL_BAD_ATTRIBUTE:
704 return "Unrecognized attribute or attribute value";
705 case EGL_BAD_CONTEXT:
706 return "Context argument does not name a valid context";
707 case EGL_BAD_CONFIG:
708 return "EGLConfig argument did not match a valid config";
709 case EGL_BAD_CURRENT_SURFACE:
710 return "Current surface refers to an invalid destination";
711 case EGL_BAD_DISPLAY:
712 return "The EGLDisplay argument does not match a valid display";
713 case EGL_BAD_SURFACE:
714 return "EGLSurface argument does not name a valid surface";
715 case EGL_BAD_MATCH:
716 return "Inconsistent arguments";
717 case EGL_BAD_PARAMETER:
718 return "Invalid parameter passed to function";
719 case EGL_BAD_NATIVE_PIXMAP:
720 return "NativePixmapType is invalid";
721 case EGL_BAD_NATIVE_WINDOW:
722 return "Native Window Type does not refer to a valid window";
723 case EGL_CONTEXT_LOST:
724 return "Power-management event has forced the context to drop";
725 default:
726 return "Uknown Error";
727 }
728 }
729
/* Build the EGLStreams based scanout path for display [d]:
 * resolve an EGLOutputLayer for the crtc/plane, create a stream, attach its
 * consumer end to the layer and its producer end to an EGL surface we render
 * into. Returns 0 on success, -1 on failure (with partial state rolled back). */
static int setup_buffers_stream(struct dispout* d)
{
	debug_print("EGLStream, building buffers for display");
	if (!d->device->eglenv.create_stream ||
		!d->device->eglenv.query_devices ||
		!d->device->eglenv.query_device_string ||
		!d->device->eglenv.get_platform_display ||
		!d->device->eglenv.get_output_layers ||
		!d->device->eglenv.create_stream ||
		!d->device->eglenv.stream_consumer_output ||
		!d->device->eglenv.create_stream_producer_surface){
		debug_print("EGLStreams, buffers failed, missing functions");
		return -1;
	}

	const char* extstr =
		d->device->eglenv.query_string(d->device->display, EGL_EXTENSIONS);

/* lastext keeps the name of the extension that failed for the trace below */
	const char* lastext;
	if (!check_ext(lastext = "EGL_EXT_output_base", extstr) ||
		!check_ext(lastext = "EGL_EXT_output_drm", extstr) ||
		!check_ext(lastext = "EGL_KHR_stream", extstr) ||
		!check_ext(lastext = "EGL_EXT_stream_consumer_egloutput", extstr) ||
		!check_ext(lastext = "EGL_KHR_stream_producer_eglsurface", extstr)){
		debug_print("EGLstreams, couldn't find extension (%s)", lastext);
		return -1;
	}

/* select the layer either by explicit plane or by crtc */
	EGLAttrib layer_attrs[3] = {};
	if (d->display.plane_id){
		debug_print("(%d) match layer to drm plane (%d)", d->id, d->display.plane_id);
		layer_attrs[0] = EGL_DRM_PLANE_EXT;
		layer_attrs[1] = d->display.plane_id;
		layer_attrs[2] = EGL_NONE;
	}
	else {
		debug_print("(%d) match layer to crtc (%d)", d->id, d->display.crtc);
		layer_attrs[0] = EGL_DRM_CRTC_EXT;
		layer_attrs[1] = d->display.crtc;
		layer_attrs[2] = EGL_NONE;
	};

/*
 * 1. Match output layer to KMS plane
 */
	EGLOutputLayerEXT layer;
	EGLint n_layers = 0;
	if (!d->device->eglenv.get_output_layers(
		d->device->display, layer_attrs, &layer, 1, &n_layers) || !n_layers){
		debug_print("EGLstreams, couldn't get output layer for display");
		return -1;
	}

/*
 * 2. Create stream
 */
	EGLint stream_attrs[] = {
/*
 *		EGL_STREAM_FIFO_LENGTH_KHR, 1,
 *		EGL_CONSUMER_AUTO_ACQUIRE_EXT, EGL_TRUE,
 */
		EGL_NONE
	};
	d->buffer.stream =
		d->device->eglenv.create_stream(d->device->display, stream_attrs);
	if (d->buffer.stream == EGL_NO_STREAM_KHR){
		debug_print("EGLstreams - couldn't create output stream");
		return -1;
	}

/*
 * 3. Map stream output
 */
	if (!d->device->eglenv.stream_consumer_output(
		d->device->display, d->buffer.stream, layer)){
		d->device->eglenv.destroy_stream(d->device->display, d->buffer.stream);
		d->buffer.stream = EGL_NO_STREAM_KHR;
		debug_print("EGLstreams - couldn't map output stream");
		return -1;
	}

/* 4. Get config and context,
 * two possible variants here - the default:
 * all displays uses the device context, the 'mixed HDR/SDR' setup is a
 * different display context that shares parts of the device context */
	EGLint nc;
	if (!d->device->eglenv.choose_config(
		d->device->display, d->device->attrtbl, &d->buffer.config, 1, &nc)){
		debug_print("couldn't chose a configuration (%s)", egl_errstr());
		return -1;
	}

	const EGLint context_attribs[] = {
		EGL_CONTEXT_CLIENT_VERSION, 2,
		EGL_NONE
	};

/* resolve the device index so the per-device config key can be queried */
	uintptr_t tag;
	cfg_lookup_fun get_config = platform_config_lookup(&tag);
	size_t devind = 0;
	for (; devind < COUNT_OF(nodes); devind++)
		if (&nodes[devind] == d->device)
			break;

	bool shared_dev = !get_config("video_display_context", devind, NULL, tag);
/* shared device context, no display context */
	if (shared_dev){
		debug_print("per-device context for display");
		d->buffer.context = EGL_NO_CONTEXT;

/* First display on the device re-creates the context, difference to the gbm
 * version here is that we still need to create the stream-bound surface even
 * if we have a context on the device. The reason for re-creating the context
 * is that the config the display needs is un-known at the time of first
 * creation */
		if (!d->device->context_refc){
			debug_print("first display on device, rebuild context to match");
			d->device->eglenv.destroy_context(d->device->display, d->device->context);
			d->device->context = d->device->eglenv.create_context(
				d->device->display, d->buffer.config, EGL_NO_CONTEXT, context_attribs);
		}
		d->device->context_refc++;
	}
	else {
		verbose_print("creating NEW context for display with SHARED device context");
		d->buffer.context = d->device->eglenv.create_context(
			d->device->display, d->buffer.config, d->device->context, context_attribs);

		if (d->buffer.context == NULL) {
			debug_print("couldn't create display context");
			return false;
		}
	}

/*
 * 5. Create stream-bound surface
 */
	EGLint surface_attrs[] = {
		EGL_WIDTH, d->display.mode.hdisplay,
		EGL_HEIGHT, d->display.mode.vdisplay,
		EGL_NONE
	};
	d->buffer.esurf = d->device->eglenv.create_stream_producer_surface(
		d->device->display, d->buffer.config, d->buffer.stream, surface_attrs);

	if (!d->buffer.esurf){
		d->device->eglenv.destroy_stream(d->device->display, d->buffer.stream);
		d->buffer.stream = EGL_NO_STREAM_KHR;
		d->device->eglenv.destroy_context(d->device->display, d->buffer.context);
		d->buffer.context = EGL_NO_CONTEXT;
		debug_print("EGLstreams - couldn't create output surface");
		return -1;
	}

/*
 * 6. Activate context and buffers
 */
	egl_dri.last_display = d;
	set_display_context(d);

/*
 * 7. Set synchronization attributes on stream
 */
	if (d->device->eglenv.stream_consumer_acquire_attrib){
		EGLAttrib attr[] = {
			EGL_DRM_FLIP_EVENT_DATA_NV, (EGLAttrib) d,
			EGL_NONE,
		};
		d->device->eglenv.stream_consumer_acquire_attrib(
			d->device->display, d->buffer.stream, attr);
	}

/*
 * 8. make the drm node non-blocking (might be needed for
 * multiscreen, somewhat uncertain)
 */
	int flags = fcntl(d->device->disp_fd, F_GETFL);
	if (-1 != flags)
		fcntl(d->device->disp_fd, F_SETFL, flags | O_NONBLOCK);

/*
 * 9. we don't have a path for streaming the FBO directly,
 * (seems to be reasonably trivial though), so disable the
 * optimization for now
 */
	d->force_compose = true;

	set_device_context(d->device);
	return 0;
}
920
setup_buffers_gbm(struct dispout * d)921 static int setup_buffers_gbm(struct dispout* d)
922 {
923 SET_SEGV_MSG("libgbm(), creating scanout buffer"
924 " failed catastrophically.\n")
925
926 if (!d->device->eglenv.create_image){
927 map_eglext_functions(&d->device->eglenv,
928 lookup_call, d->device->eglenv.get_proc_address);
929 }
930
931 /* preference order with -1 omitted. whatever is user-set on the display will
932 * be added as preference, with the safe-bets at the end */
933 int gbm_formats[] = {
934 -1, /* 565 */
935 -1, /* 10-bit, X */
936 -1, /* 10-bit, A */
937 -1, /* 64-bpp, XBGR16F */
938 -1, /* 64-bpp, ABGR16F */
939 GBM_FORMAT_XRGB8888,
940 GBM_FORMAT_ARGB8888
941 };
942
943 const char* fmt_lbls[] = {
944 "RGB565",
945 "R10G10B10X",
946 "R10G10B10A2",
947 "F16X",
948 "F16A",
949 "xR8G8B8",
950 "A8R8G8B8",
951 };
952
953 /*
954 * 10-bit output has very spotty driver support, so only allow it if it has
955 * been explicitly set as interesting - big note is that the creation order
956 * is somewhat fucked, the gbm output buffer defines the configuration of
957 * the EGL node, note the other way around.
958 */
959 if (d->output_format == OUTPUT_LOW){
960 gbm_formats[0] = GBM_FORMAT_RGB565;
961 }
962
963 if (d->output_format == OUTPUT_DEEP){
964 gbm_formats[1] = GBM_FORMAT_XRGB2101010;
965 gbm_formats[2] = GBM_FORMAT_ARGB2101010;
966 }
967 else if (d->output_format == OUTPUT_HDR){
968 /* older distributions may still carry a header without this one so go the
969 * preprocessor route for enabling */
970 #ifdef GBM_FORMAT_XBGR16161616F
971 gbm_formats[3] = GBM_FORMAT_XBGR16161616F;
972 gbm_formats[4] = GBM_FORMAT_ABGR16161616F;
973 #endif
974 }
975
976 /* first get the set of configs from the display */
977 EGLint nc;
978 d->device->eglenv.get_configs(d->device->display, NULL, 0, &nc);
979 if (nc < 1){
980 debug_print("no configurations found for display, (%s)", egl_errstr());
981 return false;
982 }
983
984 EGLConfig configs[nc];
985 EGLint match = 0;
986
987 /* filter them based on the desired attributes from the device itself */
988 d->device->eglenv.choose_config(
989 d->device->display, d->device->attrtbl, configs, nc, &match);
990 if (!match)
991 return -1;
992
993 /* then sweep the formats in desired order and look for a matching visual */
994 for (size_t i = 0; i < COUNT_OF(gbm_formats); i++){
995 if (gbm_formats[i] == -1)
996 continue;
997
998 bool got_config = false;
999 for (size_t j = 0; j < nc && !got_config; j++){
1000 EGLint id;
1001 if (!d->device->eglenv.get_config_attrib(
1002 d->device->display, configs[j], EGL_NATIVE_VISUAL_ID, &id))
1003 continue;
1004
1005 if (id == gbm_formats[i]){
1006 d->buffer.config = configs[j];
1007 got_config = true;
1008 }
1009 }
1010
1011 if (!got_config){
1012 debug_print("no matching gbm-format <-> visual "
1013 "<-> egl config for fmt: %d", (int)gbm_formats[i]);
1014 continue;
1015 }
1016
1017 /* first time device setup will call this function in two stages, so there
1018 * might be a buffer already set when we get called the second time and it is
1019 * safe to actually bind the buffer to an EGL surface as the config should be
1020 * the right one */
1021 if (!d->buffer.surface)
1022 d->buffer.surface = gbm_surface_create(d->device->buffer.gbm,
1023 d->display.mode.hdisplay, d->display.mode.vdisplay,
1024 gbm_formats[i], GBM_BO_USE_SCANOUT | GBM_BO_USE_RENDERING);
1025
1026 if (!d->buffer.surface)
1027 continue;
1028
1029 if (!d->buffer.esurf)
1030 d->buffer.esurf = d->device->eglenv.create_window_surface(
1031 d->device->display, d->buffer.config,(uintptr_t)d->buffer.surface,NULL);
1032
1033 /* we can accept buffer setup failure in this stage if we are being init:ed
1034 * and the EGL configuration doesn't exist */
1035 if (d->buffer.esurf != EGL_NO_SURFACE){
1036 d->buffer.format = gbm_formats[i];
1037 debug_print("(gbm) picked output buffer format %s", fmt_lbls[i]);
1038 break;
1039 }
1040
1041 gbm_surface_destroy(d->buffer.surface);
1042 d->buffer.surface = NULL;
1043 }
1044 if (!d->buffer.surface){
1045 debug_print("couldn't find a gbm buffer format matching EGL display");
1046 return -1;
1047 }
1048
1049 /* finally, build the display- specific context with the new surface and
1050 * context - might not always use it due to direct-scanout vs. shaders etc.
1051 * but still needed */
1052 EGLint context_attribs[] = {
1053 EGL_CONTEXT_CLIENT_VERSION, 2,
1054 EGL_NONE
1055 };
1056
1057 /*
1058 * Unfortunately there seem to be many strange driver issues with using a
1059 * headless shared context and doing the buffer swaps and scanout on the
1060 * others, we can solve that in two ways, one is simply force even the WORLDID
1061 * to be a FBO - which was already the default in the lwa backend. This would
1062 * require making the vint_world() RT double-buffered with a possible 'do I
1063 * have a non-default shader' extra blit stage and then the drm_add_fb2 call.
1064 * It's a scanout path that we likely need anyway.
1065 */
1066 uintptr_t tag;
1067 cfg_lookup_fun get_config = platform_config_lookup(&tag);
1068 size_t devind = 0;
1069 for (; devind < COUNT_OF(nodes); devind++)
1070 if (&nodes[devind] == d->device)
1071 break;
1072
1073 /* DEFAULT: per device context: let first display drive choice of config
1074 * and hope that other displays have the same preferred format */
1075 bool shared_dev = !get_config("video_display_context", devind, NULL, tag);
1076 if (shared_dev && d->device->context_refc > 0){
1077 d->buffer.context = EGL_NO_CONTEXT;
1078 egl_dri.last_display = d;
1079 set_device_context(d->device);
1080 d->device->context_refc++;
1081 return 0;
1082 }
1083
1084 /* LEGACY: EGL_NO_CONFIG_KHR used to point to d->buffer.config but now
1085 * assumes that we can create contexts where any valid surface is also
1086 * valid as the context format */
1087 EGLContext device = shared_dev ? NULL : d->device->context;
1088 EGLContext context = d->device->eglenv.create_context(
1089 d->device->display, EGL_NO_CONFIG_KHR, device, context_attribs
1090 );
1091
1092 if (!context){
1093 debug_print("couldn't create egl context for display");
1094 gbm_surface_destroy(d->buffer.surface);
1095 return -1;
1096 }
1097 set_device_context(d->device);
1098
1099 /* per device context */
1100 if (shared_dev){
1101 d->buffer.context = EGL_NO_CONTEXT;
1102 d->device->context_refc++;
1103 d->device->eglenv.destroy_context(d->device->display, d->device->context);
1104 d->device->context = context;
1105 egl_dri.last_display = d;
1106 return 0;
1107 }
1108
1109 /* per display (not EGLDisplay) context */
1110 d->buffer.context = context;
1111
1112 egl_dri.last_display = d;
1113 set_device_context(d->device);
1114
1115 return 0;
1116 }
1117
setup_buffers(struct dispout * d)1118 static int setup_buffers(struct dispout* d)
1119 {
1120 switch (d->device->buftype){
1121 case BUF_GBM:
1122 return setup_buffers_gbm(d);
1123 break;
1124 case BUF_HEADLESS:
1125 /* won't be needed, we only ever accept FBO management */
1126 return 0;
1127 break;
1128 case BUF_STREAM:
1129 return setup_buffers_stream(d);
1130 break;
1131 }
1132 return 0;
1133 }
1134
/*
 * Enumerate active displays: write up to *lim display indices into [dids]
 * (when both out- parameters are provided), set *lim to the platform display
 * limit and return the total number of in-use displays (may exceed *lim).
 */
size_t platform_video_displays(platform_display_id* dids, size_t* lim)
{
	size_t rv = 0;

	for (size_t i = 0; i < MAX_DISPLAYS; i++){
		if (displays[i].state == DISP_UNUSED)
			continue;

/* only write within the caller- provided capacity; the original tested
 * (*lim < rv) which both skipped valid slots and overran [dids] */
		if (dids && lim && rv < *lim)
			dids[rv] = i;
		rv++;
	}

	if (lim)
		*lim = MAX_DISPLAYS;

	return rv;
}
1153
platform_video_cardhandle(int cardn,int * buffer_method,size_t * metadata_sz,uint8_t ** metadata)1154 int platform_video_cardhandle(int cardn,
1155 int* buffer_method, size_t* metadata_sz, uint8_t** metadata)
1156 {
1157 if (cardn < 0 || cardn > COUNT_OF(nodes))
1158 return -1;
1159
1160 if (metadata_sz && metadata &&
1161 nodes[cardn].eglenv.query_dmabuf_formats &&
1162 nodes[cardn].eglenv.query_dmabuf_modifiers){
1163 *metadata_sz = 0;
1164 *metadata = NULL;
1165 }
1166 else if (metadata_sz && metadata){
1167 debug_print("no format/modifiers query support, sending simple card");
1168 *metadata_sz = 0;
1169 *metadata = NULL;
1170 }
1171
1172 if (buffer_method)
1173 *buffer_method = nodes[cardn].buftype;
1174
1175 return nodes[cardn].client_meta.fd;
1176 }
1177
realloc_buffers(struct dispout * d)1178 static bool realloc_buffers(struct dispout* d)
1179 {
1180 switch (d->device->buftype){
1181 case BUF_GBM:
1182 gbm_surface_destroy(d->buffer.surface);
1183 d->buffer.surface = NULL;
1184 if (setup_buffers_gbm(d) != 0)
1185 return false;
1186 break;
1187 case BUF_HEADLESS:
1188 break;
1189 case BUF_STREAM:
1190 d->device->eglenv.destroy_stream(d->device->display, d->buffer.stream);
1191 d->buffer.stream = EGL_NO_STREAM_KHR;
1192 if (setup_buffers_stream(d) != 0)
1193 return false;
1194 break;
1195 }
1196 return true;
1197 }
1198
deadline_for_display(struct dispout * d)1199 static float deadline_for_display(struct dispout* d)
1200 {
1201 /* [FIX-VRR: the actual target including 'slew' rate stepping should be
1202 * presented / calculated here based on the last synch, the
1203 * target and the stepping rate ] */
1204 return 1000.0f / (float)
1205 (d->display.mode.vrefresh ? d->display.mode.vrefresh : 60.0);
1206 }
1207
/*
 * Switch [disp] to the connector- provided mode slot [mode], rebuild the
 * scanout buffers accordingly and apply the depth / variable- refresh hints
 * in [opts]. Returns false on an unmapped display, invalid mode index or
 * buffer re- allocation failure; true on success or when the mode is
 * already current.
 */
bool platform_video_set_mode(platform_display_id disp,
	platform_mode_id mode, struct platform_mode_opts opts)
{
	struct dispout* d = get_display(disp);

/* guard - the original was missing the return here, making the memcmp
 * below the body of this if and dereferencing a possibly NULL [d] */
	if (!d || d->state != DISP_MAPPED || mode >= d->display.con->count_modes)
		return false;

/* no-op when the requested mode matches the active one */
	if (memcmp(&d->display.mode,
		&d->display.con->modes[mode], sizeof(drmModeModeInfo)) == 0)
		return true;

	d->display.reset_mode = true;
	d->display.mode = d->display.con->modes[mode];
	d->display.mode_set = mode;

/* changes to the output format are reflected first in rebuild_buffers, if that
 * fails (e.g. the buffers do not fit the qualities of the display) it reverts
 * back to whatever OUTPUT_DEFAULT is set to rather than failing */
	switch(opts.depth){
	case VSTORE_HINT_LODEF:
		d->output_format = OUTPUT_LOW;
	break;
	case VSTORE_HINT_HIDEF:
		d->output_format = OUTPUT_DEEP;
	break;
	case VSTORE_HINT_F16:
	case VSTORE_HINT_F32:
		d->output_format = OUTPUT_HDR;
	break;
	default:
		d->output_format = OUTPUT_DEFAULT;
	}

/* toggle variable refresh via a CRTC property, enabled when opts.vrr is
 * non-zero. NOTE(review): the property name "type" looks suspicious for a
 * VRR toggle (typically "VRR_ENABLED") - confirm against intended driver */
	uint64_t pid;
	if (lookup_drm_propval(d->device->disp_fd,
		d->display.crtc, DRM_MODE_OBJECT_CRTC, "type", &pid, true)){
		debug_print("setting_vrr: %f", opts.vrr);
		drmModeObjectSetProperty(d->device->disp_fd,
			d->display.crtc, DRM_MODE_OBJECT_CRTC, pid, fabs(opts.vrr) > EPSILON);
	}
	else
		debug_print("vrr_ignored:missing_vrr_property");

/* ATOMIC test goes here */
	debug_print("(%d) schedule mode switch to %zu * %zu", (int) disp,
		d->display.mode.hdisplay, d->display.mode.vdisplay);

	build_orthographic_matrix(d->projection,
		0, d->display.mode.hdisplay, d->display.mode.vdisplay, 0, 0, 1);
	d->dispw = d->display.mode.hdisplay;
	d->disph = d->display.mode.vdisplay;

/*
 * reset scanout buffers to match new crtc mode
	if (d->buffer.cur_bo){
		gbm_surface_release_buffer(d->buffer.surface, d->buffer.cur_bo);
		d->buffer.cur_bo = NULL;
	}

	if (d->buffer.next_bo){
		gbm_surface_release_buffer(d->buffer.surface, d->buffer.next_bo);
		d->buffer.next_bo = NULL;
	}
*/
/* the BOs should die with the surface */
	debug_print("modeset, destroy surface");
	d->state = DISP_CLEANUP;
	d->device->eglenv.destroy_surface(d->device->display, d->buffer.esurf);
	d->buffer.esurf = EGL_NO_SURFACE;
/*
 * drop current framebuffers
	if (d->buffer.cur_fb){
		drmModeRmFB(d->device->fd, d->buffer.cur_fb);
		d->buffer.cur_fb = 0;
	}

	if(d->buffer.next_fb){
		drmModeRmFB(d->device->fd, d->buffer.next_fb);
		d->buffer.next_fb = 0;
	}
*/

/*
 * setup / allocate a new set of buffers that match the new mode
 */
	if (!realloc_buffers(d))
		return false;

	d->state = DISP_MAPPED;

	return true;
}
1300
/*
 * Install new gamma ramps ([n_ramps] entries per channel) on the CRTC that
 * drives [did]. The first successful call stashes the pre- existing ramps in
 * d->display.orig_gamma so they can be restored when the display is
 * deallocated. Returns false on a bad id, missing CRTC, failed backup or a
 * ramp- size mismatch with the hardware.
 */
bool platform_video_set_display_gamma(platform_display_id did,
	size_t n_ramps, uint16_t* r, uint16_t* g, uint16_t* b)
{
	struct dispout* d = get_display(did);
	if (!d)
		return false;

	drmModeCrtc* inf = drmModeGetCrtc(d->device->disp_fd, d->display.crtc);

	if (!inf)
		return false;

	int rv = -1;
	if (inf->gamma_size > 0 && n_ramps == inf->gamma_size){
/* first time we get called, save the original gamma for the display
 * so that we can restore it when the display gets deallocated */
		if (!d->display.orig_gamma){
			if (!platform_video_get_display_gamma(did,
				&d->display.gamma_size, &d->display.orig_gamma)){
				drmModeFreeCrtc(inf);
				return false;
			}
		}
		rv = drmModeCrtcSetGamma(d->device->disp_fd, d->display.crtc, n_ramps, r, g, b);
	}

	drmModeFreeCrtc(inf);
	return rv == 0;
}
1330
/*
 * Read the current gamma ramps for [did] into a heap allocation handed to
 * the caller: *outb gets n_ramps red, then n_ramps green, then n_ramps blue
 * values (caller frees). Returns false (leaving *outb NULL) on bad display,
 * missing/zero- sized CRTC gamma, allocation failure or a failed read.
 */
bool platform_video_get_display_gamma(
	platform_display_id did, size_t* n_ramps, uint16_t** outb)
{
	struct dispout* d = get_display(did);
	if (!d || !n_ramps || !outb)
		return false;

	drmModeCrtc* inf = drmModeGetCrtc(d->device->disp_fd, d->display.crtc);
	if (!inf)
		return false;

	if (inf->gamma_size <= 0){
		drmModeFreeCrtc(inf);
		return false;
	}

	*n_ramps = inf->gamma_size;
	uint16_t* ramps = malloc(*n_ramps * 3 * sizeof(uint16_t));
	if (!ramps){
		drmModeFreeCrtc(inf);
		return false;
	}

	bool rv = true;
	memset(ramps, '\0', *n_ramps * 3 * sizeof(uint16_t));
	if (drmModeCrtcGetGamma(d->device->disp_fd, d->display.crtc, *n_ramps,
		&ramps[0], &ramps[*n_ramps], &ramps[2 * *n_ramps])){
		free(ramps);
/* the original handed the freed pointer back through *outb here */
		ramps = NULL;
		rv = false;
	}
	*outb = ramps;
	drmModeFreeCrtc(inf);
	return rv;
}
1365
get_connector_property(struct dispout * d,const char * name,size_t * i)1366 static drmModePropertyPtr get_connector_property(
1367 struct dispout* d, const char* name, size_t* i)
1368 {
1369 for (; *i < d->display.con->count_props; *i++){
1370 drmModePropertyPtr prop =
1371 drmModeGetProperty(d->device->disp_fd, d->display.con->props[*i]);
1372 if (!prop)
1373 continue;
1374 if (strcmp(prop->name, name) == 0)
1375 return prop;
1376 drmModeFreeProperty(prop);
1377 }
1378 return NULL;
1379 }
1380
fetch_edid(struct dispout * d)1381 static void fetch_edid(struct dispout* d)
1382 {
1383 drmModePropertyPtr prop;
1384 bool done = false;
1385
1386 /* stick with the cached one */
1387 if (d->display.edid_blob){
1388 return;
1389 }
1390
1391 for (size_t i = 0; i < d->display.con->count_props && !done; i++){
1392 prop = drmModeGetProperty(d->device->disp_fd, d->display.con->props[i]);
1393 if (!prop)
1394 continue;
1395
1396 if (!(prop->flags&DRM_MODE_PROP_BLOB) || strcmp(prop->name, "EDID") != 0){
1397 drmModeFreeProperty(prop);
1398 continue;
1399 }
1400
1401 drmModePropertyBlobPtr blob = drmModeGetPropertyBlob(
1402 d->device->disp_fd, d->display.con->prop_values[i]);
1403
1404 if (!blob || (int)blob->length <= 0){
1405 drmModeFreeProperty(prop);
1406 continue;
1407 }
1408
1409 if ((d->display.edid_blob = malloc(blob->length))){
1410 d->display.blob_sz = blob->length;
1411 memcpy(d->display.edid_blob, blob->data, blob->length);
1412 done = true;
1413 }
1414
1415 drmModeFreePropertyBlob(blob);
1416 drmModeFreeProperty(prop);
1417 }
1418 }
1419
/*
 * Hand out a heap- allocated copy of the cached EDID blob for [did] via
 * [out]/[sz] (caller frees). Returns false on a bad/unused display, no
 * cached blob or allocation failure - in all failure cases *out stays NULL
 * and *sz stays 0 (the original returned true with a NULL *out when the
 * copy allocation failed).
 */
bool platform_video_display_edid(
	platform_display_id did, char** out, size_t* sz)
{
	struct dispout* d = get_display(did);
	if (!d || d->state == DISP_UNUSED)
		return false;

	*out = NULL;
	*sz = 0;

/* attempt to re-acquire the blob */
	fetch_edid(d);

/* allocate a new scratch copy of the cached blob */
	if (d->display.edid_blob){
		*out = malloc(d->display.blob_sz);
		if (!*out)
			return false;

		*sz = d->display.blob_sz;
		memcpy(*out, d->display.edid_blob, d->display.blob_sz);
		return true;
	}

	return false;
}
1446
1447 /*
1448 * this platform does not currently support dynamic modes
1449 * (this should well be possible for old CRTs though)..
1450 */
bool platform_video_specify_mode(
	platform_display_id disp, struct monitor_mode mode)
{
/* custom modelines are unsupported on this platform - always reject */
	return false;
}
1456
drm_mode_tos(FILE * dst,unsigned val)1457 static void drm_mode_tos(FILE* dst, unsigned val)
1458 {
1459 if ( (val & DRM_MODE_TYPE_BUILTIN) > 0){
1460 fprintf(dst, "/builtin");
1461 val &= ~DRM_MODE_TYPE_BUILTIN;
1462 }
1463
1464 if ( (val & DRM_MODE_TYPE_CLOCK_C) > 0){
1465 fprintf(dst, "/clock");
1466 val &= ~DRM_MODE_TYPE_CLOCK_C;
1467 }
1468
1469 if ( (val & DRM_MODE_TYPE_CRTC_C) > 0){
1470 fprintf(dst, "/crtc");
1471 val &= ~DRM_MODE_TYPE_CRTC_C;
1472 }
1473
1474 if ( (val & DRM_MODE_TYPE_PREFERRED) > 0){
1475 fprintf(dst, "/preferred");
1476 val &= ~DRM_MODE_TYPE_PREFERRED;
1477 }
1478
1479 if ( (val & DRM_MODE_TYPE_DEFAULT) > 0){
1480 fprintf(dst, "/default");
1481 val &= ~DRM_MODE_TYPE_DEFAULT;
1482 }
1483
1484 if ( (val & DRM_MODE_TYPE_USERDEF) > 0){
1485 fprintf(dst, "/userdef");
1486 val &= ~DRM_MODE_TYPE_USERDEF;
1487 }
1488
1489 if ( (val & DRM_MODE_TYPE_DRIVER) > 0){
1490 fprintf(dst, "/driver");
1491 val &= ~DRM_MODE_TYPE_DRIVER;
1492 }
1493
1494 if ( val > 0 )
1495 fprintf(dst, "/unknown(%d)", (int)val);
1496 }
1497
drm_mode_flag(FILE * dst,unsigned val)1498 static void drm_mode_flag(FILE* dst, unsigned val)
1499 {
1500 if ( (val & DRM_MODE_FLAG_PHSYNC) > 0 ){
1501 fprintf(dst, "/phsync");
1502 val &= ~DRM_MODE_FLAG_PHSYNC;
1503 }
1504
1505 if ( (val & DRM_MODE_FLAG_NHSYNC) > 0 ){
1506 fprintf(dst, "/nhsync");
1507 val &= ~DRM_MODE_FLAG_NHSYNC;
1508 }
1509
1510 if ( (val & DRM_MODE_FLAG_PVSYNC) > 0 ){
1511 fprintf(dst, "/pvsync");
1512 val &= ~DRM_MODE_FLAG_PVSYNC;
1513 }
1514
1515 if ( (val & DRM_MODE_FLAG_NVSYNC) > 0 ){
1516 fprintf(dst, "/nvsync");
1517 val &= ~DRM_MODE_FLAG_NVSYNC;
1518 }
1519
1520 if ( (val & DRM_MODE_FLAG_INTERLACE) > 0 ){
1521 fprintf(dst, "/interlace");
1522 val &= ~DRM_MODE_FLAG_INTERLACE;
1523 }
1524
1525 if ( (val & DRM_MODE_FLAG_DBLSCAN) > 0 ){
1526 fprintf(dst, "/dblscan");
1527 val &= ~DRM_MODE_FLAG_DBLSCAN;
1528 }
1529
1530 if ( (val & DRM_MODE_FLAG_CSYNC) > 0 ){
1531 fprintf(dst, "/csync");
1532 val &= ~DRM_MODE_FLAG_CSYNC;
1533 }
1534
1535 if ( (val & DRM_MODE_FLAG_PCSYNC) > 0 ){
1536 fprintf(dst, "/pcsync");
1537 val &= ~DRM_MODE_FLAG_PCSYNC;
1538 }
1539
1540 if ( (val & DRM_MODE_FLAG_NCSYNC) > 0 ){
1541 fprintf(dst, "/ncsync");
1542 val &= ~DRM_MODE_FLAG_NCSYNC;
1543 }
1544
1545 if ( (val & DRM_MODE_FLAG_HSKEW) > 0 ){
1546 fprintf(dst, "/hskew");
1547 val &= ~DRM_MODE_FLAG_HSKEW;
1548 }
1549
1550 if ( (val & DRM_MODE_FLAG_BCAST) > 0 ){
1551 fprintf(dst, "/bcast");
1552 val &= ~DRM_MODE_FLAG_BCAST;
1553 }
1554
1555 if ( (val & DRM_MODE_FLAG_PIXMUX) > 0 ){
1556 fprintf(dst, "/pixmux");
1557 val &= ~DRM_MODE_FLAG_PIXMUX;
1558 }
1559
1560 if ( (val & DRM_MODE_FLAG_DBLCLK) > 0 ){
1561 fprintf(dst, "/dblclk");
1562 val &= ~DRM_MODE_FLAG_DBLCLK;
1563 }
1564
1565 if ( (val & DRM_MODE_FLAG_CLKDIV2) > 0 ){
1566 fprintf(dst, "/clkdiv2");
1567 val &= ~DRM_MODE_FLAG_CLKDIV2;
1568 }
1569
1570 if ( (val & DRM_MODE_FLAG_3D_MASK) > 0 ){
1571 fprintf(dst, "/3dmask");
1572 val &= ~DRM_MODE_FLAG_3D_MASK;
1573 }
1574
1575 if ( (val & DRM_MODE_FLAG_3D_NONE) > 0 ){
1576 fprintf(dst, "/3dnone");
1577 val &= ~DRM_MODE_FLAG_3D_NONE;
1578 }
1579
1580 if ( (val & DRM_MODE_FLAG_3D_FRAME_PACKING) > 0 ){
1581 fprintf(dst, "/3dframep");
1582 val &= ~DRM_MODE_FLAG_3D_FRAME_PACKING;
1583 }
1584
1585 if ( (val & DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE) > 0 ){
1586 fprintf(dst, "/3dfield_alt");
1587 val &= ~DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE;
1588 }
1589
1590 if ( (val & DRM_MODE_FLAG_3D_LINE_ALTERNATIVE) > 0 ){
1591 fprintf(dst, "/3dline_alt");
1592 val &= ~DRM_MODE_FLAG_3D_LINE_ALTERNATIVE;
1593 }
1594
1595 if ( (val & DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL) > 0 ){
1596 fprintf(dst, "/3dsbs");
1597 val &= ~DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL;
1598 }
1599
1600 if ( (val & DRM_MODE_FLAG_3D_L_DEPTH) > 0 ){
1601 fprintf(dst, "/3dldepth");
1602 val &= ~DRM_MODE_FLAG_3D_L_DEPTH;
1603 }
1604
1605 if ( (val & DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH) > 0 ){
1606 fprintf(dst, "/3dldepth_gfx2_depth");
1607 val &= ~DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH;
1608 }
1609
1610 if ( (val & DRM_MODE_FLAG_3D_TOP_AND_BOTTOM) > 0 ){
1611 fprintf(dst, "/3dt&b");
1612 val &= ~DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
1613 }
1614
1615 if ( (val & DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF) > 0 ){
1616 fprintf(dst, "/3dsbs-h");
1617 val &= ~DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
1618 }
1619
1620 if (val > 0){
1621 fprintf(dst, "/unknown(%d)", (int) val);
1622 }
1623 }
1624
drm_mode_connector(FILE * fpek,int val)1625 static void drm_mode_connector(FILE* fpek, int val)
1626 {
1627 switch(val){
1628 case DRM_MODE_CONNECTOR_Unknown:
1629 fprintf(fpek, "unknown");
1630 break;
1631
1632 case DRM_MODE_CONNECTOR_VGA:
1633 fprintf(fpek, "vga");
1634 break;
1635
1636 case DRM_MODE_CONNECTOR_DVII:
1637 fprintf(fpek, "dvii");
1638 break;
1639
1640 case DRM_MODE_CONNECTOR_DVID:
1641 fprintf(fpek, "dvid");
1642 break;
1643
1644 case DRM_MODE_CONNECTOR_DVIA:
1645 fprintf(fpek, "dvia");
1646 break;
1647
1648 case DRM_MODE_CONNECTOR_Composite:
1649 fprintf(fpek, "composite");
1650 break;
1651
1652 case DRM_MODE_CONNECTOR_SVIDEO:
1653 fprintf(fpek, "s-video");
1654 break;
1655
1656 case DRM_MODE_CONNECTOR_Component:
1657 fprintf(fpek, "component");
1658 break;
1659
1660 case DRM_MODE_CONNECTOR_9PinDIN:
1661 fprintf(fpek, "9-pin din");
1662 break;
1663
1664 case DRM_MODE_CONNECTOR_DisplayPort:
1665 fprintf(fpek, "displayPort");
1666 break;
1667
1668 case DRM_MODE_CONNECTOR_HDMIA:
1669 fprintf(fpek, "hdmi-a");
1670 break;
1671
1672 case DRM_MODE_CONNECTOR_HDMIB:
1673 fprintf(fpek, "hdmi-b");
1674 break;
1675
1676 case DRM_MODE_CONNECTOR_TV:
1677 fprintf(fpek, "tv");
1678 break;
1679
1680 case DRM_MODE_CONNECTOR_eDP:
1681 fprintf(fpek, "eDP");
1682 break;
1683
1684 default:
1685 fprintf(fpek, "unknown");
1686 }
1687 }
1688
1689 /* should be passed on to font rendering */
subpixel_type(int val)1690 static const char* subpixel_type(int val)
1691 {
1692 switch(val){
1693 case DRM_MODE_SUBPIXEL_UNKNOWN:
1694 return "unknown";
1695
1696 case DRM_MODE_SUBPIXEL_HORIZONTAL_RGB:
1697 return "horiz- RGB";
1698
1699 case DRM_MODE_SUBPIXEL_HORIZONTAL_BGR:
1700 return "horiz- BGR";
1701
1702 case DRM_MODE_SUBPIXEL_VERTICAL_RGB:
1703 return "vert- RGB";
1704
1705 case DRM_MODE_SUBPIXEL_VERTICAL_BGR:
1706 return "vert- BGR";
1707
1708 default:
1709 return "unsupported";
1710 }
1711 }
1712
connection_type(int conn)1713 static const char* connection_type(int conn)
1714 {
1715 switch(conn){
1716 case DRM_MODE_CONNECTED:
1717 return "connected";
1718
1719 case DRM_MODE_DISCONNECTED:
1720 return "not connected";
1721
1722 case DRM_MODE_UNKNOWNCONNECTION:
1723 return "unknown";
1724
1725 default:
1726 return "undefined";
1727 }
1728 }
1729
/*
 * Debug- dump the connector layout of [node] to [dst]: one line per
 * connector (id, encoder, type, physical size, connection and subpixel
 * hinting) and, unless [shorth] is set, one line per mode with its flag and
 * type breakdown.
 */
static void dump_connectors(FILE* dst, struct dev_node* node, bool shorth)
{
	drmModeRes* res = drmModeGetResources(node->disp_fd);
	if (!res){
		fprintf(dst, "DRM dump, couldn't acquire resource list\n");
		return;
	}

	fprintf(dst, "DRM Dump: \n\tConnectors: %d\n", res->count_connectors);
	for (size_t i = 0; i < res->count_connectors; i++){
		drmModeConnector* conn = drmModeGetConnector(
			node->disp_fd, res->connectors[i]);
		if (!conn)
			continue;

		fprintf(dst, "\t(%d), id:(%d), encoder:(%d), type: ",
			(int) i, conn->connector_id, conn->encoder_id);
		drm_mode_connector(dst, conn->connector_type);
		fprintf(dst, " phy(%d * %d), mode: %s, hinting: %s\n",
			(int)conn->mmWidth, (int)conn->mmHeight,
			connection_type(conn->connection),
			subpixel_type(conn->subpixel));

/* per- mode details only in the long form */
		if (!shorth)
			for (size_t j = 0; j < conn->count_modes; j++){
				fprintf(dst, "\t\t Mode (%d:%s): clock@%d, refresh@%d\n\t\tflags : ",
					(int)j, conn->modes[j].name,
					conn->modes[j].clock, conn->modes[j].vrefresh
				) ;
				drm_mode_flag(dst, conn->modes[j].flags);
				fprintf(dst, " type : ");
				drm_mode_tos(dst, conn->modes[j].type);
			}

		fprintf(dst, "\n\n");
		drmModeFreeConnector(conn);
	}

	drmModeFreeResources(res);
}
1770
/*
 * call order: first a successful setup_node_egl or setup_node_gbm,
 * then setup_node to finish API selection and context creation
 */
setup_node(struct dev_node * node)1774 static bool setup_node(struct dev_node* node)
1775 {
1776 EGLint context_attribs[] = {
1777 EGL_CONTEXT_CLIENT_VERSION, 2,
1778 EGL_NONE, /* pad for robustness */
1779 EGL_NONE, /* pad for robustness */
1780 EGL_NONE, /* pad for strategy */
1781 EGL_NONE, /* pad for strategy */
1782 EGL_NONE, /* pad for PRIORITY */
1783 EGL_NONE, /* pad for PRIORTIY */
1784 EGL_NONE
1785 };
1786 int ca_offset = 2;
1787
1788 EGLint apiv;
1789 const char* ident = agp_ident();
1790 EGLint attrtbl[24] = {
1791 EGL_RENDERABLE_TYPE, 0,
1792 EGL_RED_SIZE, 5,
1793 EGL_GREEN_SIZE, 6,
1794 EGL_BLUE_SIZE, 5,
1795 EGL_ALPHA_SIZE, 0,
1796 EGL_DEPTH_SIZE, 1,
1797 EGL_STENCIL_SIZE, 1,
1798 /* this only allows the context to return CONFIGs with floating point outputs,
1799 * the actual selection of such a config happens based on the GBM surface type
1800 * in setup_buffers */
1801 EGL_COLOR_COMPONENT_TYPE_EXT, EGL_COLOR_COMPONENT_TYPE_FLOAT_EXT,
1802 };
1803 int attrofs = 14;
1804
1805 switch (node->buftype){
1806 case BUF_GBM:
1807 attrtbl[attrofs++] = EGL_SURFACE_TYPE;
1808 attrtbl[attrofs++] = EGL_WINDOW_BIT;
1809 break;
1810 case BUF_HEADLESS:
1811 break;
1812 case BUF_STREAM:
1813 attrtbl[attrofs++] = EGL_SURFACE_TYPE;
1814 attrtbl[attrofs++] = EGL_STREAM_BIT_KHR;
1815 break;
1816 }
1817 attrtbl[attrofs++] = EGL_NONE;
1818
1819 /* right now, this platform won't support anything that isn't rendering using
1820 * xGL,VK/EGL which will be a problem for a software based AGP. When we get
1821 * one, we need a fourth scanout path (ffs) where the worldid rendertarget
1822 * writes into the scanout buffer immediately. It might be possibly for that
1823 * AGP to provide a 'faux' EGL implementation though. There also is the path
1824 * in here already for special calls to map_video_display via dumb buffers so
1825 * the best way forward is probably the fake EGL one. */
1826 size_t i = 0;
1827 bool gles = true;
1828
1829 if (strcmp(ident, "OPENGL21") == 0){
1830 apiv = EGL_OPENGL_API;
1831 for (i = 0; attrtbl[i] != EGL_RENDERABLE_TYPE; i++);
1832 attrtbl[i+1] = EGL_OPENGL_BIT;
1833 gles = false;
1834 }
1835 else if (strcmp(ident, "GLES3") == 0 ||
1836 strcmp(ident, "GLES2") == 0){
1837 for (i = 0; attrtbl[i] != EGL_RENDERABLE_TYPE; i++);
1838 #ifndef EGL_OPENGL_ES2_BIT
1839 debug_print("EGL implementation do not support GLESv2, "
1840 "yet AGP platform requires it, use a different AGP platform.");
1841 return false;
1842 #endif
1843
1844 #ifndef EGL_OPENGL_ES3_BIT
1845 #define EGL_OPENGL_ES3_BIT EGL_OPENGL_ES2_BIT
1846 #endif
1847 attrtbl[i+1] = EGL_OPENGL_ES3_BIT;
1848 apiv = EGL_OPENGL_ES_API;
1849 }
1850 else
1851 return false;
1852
1853 SET_SEGV_MSG("EGL-dri(), getting the display failed\n");
1854
1855 if (!node->eglenv.initialize(node->display, NULL, NULL)){
1856 debug_print("failed to initialize EGL");
1857 return false;
1858 }
1859
1860 /*
1861 * make sure the API we've selected match the AGP platform
1862 */
1863 if (!node->eglenv.bind_api(apiv)){
1864 debug_print("couldn't bind GL API");
1865 return false;
1866 }
1867
1868 /*
1869 * now copy the attributes that match our choice in API etc. so that the
1870 * correct buffers can be selected
1871 */
1872 memcpy(node->attrtbl, attrtbl, sizeof(attrtbl));
1873
1874 EGLint match = 0;
1875
1876 node->eglenv.choose_config(node->display, node->attrtbl, &node->config, 1, &match);
1877 node->context = node->eglenv.create_context(
1878 node->display, EGL_NO_CONFIG_KHR, EGL_NO_CONTEXT, context_attribs);
1879
1880 bool priority = false;
1881
1882 const char* extstr =
1883 node->eglenv.query_string(node->display, EGL_EXTENSIONS);
1884
1885 if (check_ext("EGL_IMG_context_priority", extstr)){
1886 priority = true;
1887 context_attribs[ca_offset++] = EGL_CONTEXT_PRIORITY_LEVEL_IMG;
1888 context_attribs[ca_offset++] = EGL_CONTEXT_PRIORITY_HIGH_IMG;
1889 }
1890
1891 /* Context creation can fail on an unavailable high priority level - then
1892 * try to downgrade and try again */
1893 if (!node->context && priority){
1894 context_attribs[--ca_offset] = EGL_NONE;
1895 context_attribs[--ca_offset] = EGL_NONE;
1896 node->context = node->eglenv.create_context(
1897 node->display, EGL_NO_CONFIG_KHR, node, context_attribs);
1898 }
1899
1900 if (!node->context){
1901 debug_print(
1902 "couldn't build an EGL context on the display, (%s)", egl_errstr());
1903 return false;
1904 }
1905
1906 set_device_context(node);
1907 return true;
1908 }
1909
1910 /*
1911 * We have a circular dependency problem here "kind of": we need to know what
1912 * driver to pick in order to setup EGL for the node, but there is also an Egl
1913 * function to setup the EGLDevice from which we can get the node. For most
1914 * cases the driver would just resolve to a GLvnd implementation anyhow, but we
1915 * don't know and don't want the restriction when it comes to switching between
1916 * test drivers etc.
1917 */
setup_node_egl(int dst_ind,struct dev_node * node,int fd)1918 static int setup_node_egl(int dst_ind, struct dev_node* node, int fd)
1919 {
1920 if (!node->eglenv.query_string){
1921 debug_print("EGLStreams, couldn't get EGL extension string");
1922 return -1;
1923 }
1924
1925 const char* extstr = node->eglenv.query_string(EGL_NO_DISPLAY,EGL_EXTENSIONS);
1926 const char* lastext;
1927 if (!check_ext(lastext = "EGL_EXT_platform_base", extstr)){
1928 debug_print("EGLStreams, missing extension (%s)", lastext);
1929 return -1;
1930 }
1931
1932 if (!node->eglenv.query_devices){
1933 debug_print("EGLStreams, couldn't find extensions/functions");
1934 return -1;
1935 }
1936
1937 EGLint numdev;
1938 if (!node->eglenv.query_devices(0, NULL, &numdev) || numdev < 1){
1939 debug_print("EGLStreams, query failed or no devices found");
1940 return -1;
1941 }
1942
1943 EGLDeviceEXT devs[numdev];
1944 if (!node->eglenv.query_devices(numdev, devs, &numdev)){
1945 debug_print("EGLStreams, couldn't query device data");
1946 return -1;
1947 }
1948
1949 /*
1950 * sweep all devices that matches and expose the necessary extensions and pick
1951 * the first one (should possibly change that to a counter for multiple cards
1952 * again) or, if fd is provided, the one with stat data that match.
1953 */
1954 bool found = false;
1955
1956 for (size_t i = 0; i < numdev && !found; i++){
1957 const char* ext = node->eglenv.query_device_string(devs[i], EGL_EXTENSIONS);
1958 if (!check_ext(lastext = "EGL_EXT_device_drm", ext))
1959 continue;
1960
1961 const char* fn =
1962 node->eglenv.query_device_string(devs[i], EGL_DRM_DEVICE_FILE_EXT);
1963
1964 if (!fn)
1965 continue;
1966
1967 int lfd = platform_device_open(fn, O_RDWR);
1968
1969 /* no caller provided device, go with the one we found */
1970 if (-1 == fd){
1971 fd = lfd;
1972 found = true;
1973 node->buffer.egldev = devs[i];
1974 }
1975 /* we we want to pair the incoming descriptor with the suggested one */
1976 else {
1977 struct stat s1, s2;
1978 if (-1 == fstat(lfd, &s2) || -1 == fstat(fd, &s1) ||
1979 s1.st_ino != s2.st_ino || s1.st_dev != s2.st_dev){
1980 close(lfd);
1981 }
1982 else{
1983 found = true;
1984 node->buffer.egldev = devs[i];
1985 }
1986 }
1987 }
1988
1989 /*
1990 * 1:1 for card-node:egldisplay might not be correct for the setup here
1991 * (with normal GBM, this doesn't really matter that much as we have
1992 * finer control over buffer scanout)
1993 */
1994 if (found){
1995 EGLint attribs[] = {EGL_DRM_MASTER_FD_EXT, fd, EGL_NONE};
1996 node->disp_fd = node->draw_fd = fd;
1997 node->buftype = BUF_STREAM;
1998 node->atomic =
1999 drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1) == 0 &&
2000 drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1) == 0;
2001 node->display = node->eglenv.get_platform_display(
2002 EGL_PLATFORM_DEVICE_EXT, node->buffer.egldev, attribs);
2003 node->eglenv.swap_interval(node->display, 0);
2004 return 0;
2005 }
2006
2007 return -1;
2008 }
2009
cleanup_node_gbm(struct dev_node * node)2010 static void cleanup_node_gbm(struct dev_node* node)
2011 {
2012 close_devices(node);
2013
2014 if (node->buffer.gbm)
2015 gbm_device_destroy(node->buffer.gbm);
2016 node->buffer.gbm = NULL;
2017 }
2018
setup_node_gbm(int devind,struct dev_node * node,int draw_fd,int disp_fd)2019 static int setup_node_gbm(int devind,
2020 struct dev_node* node, int draw_fd, int disp_fd)
2021 {
2022 SET_SEGV_MSG("libdrm(), open device failed (check permissions) "
2023 " or use ARCAN_VIDEO_DEVICE environment.\n");
2024
2025 node->client_meta.fd = -1;
2026 node->client_meta.metadata = NULL;
2027 node->client_meta.metadata_sz = 0;
2028 node->disp_fd = disp_fd;
2029 node->draw_fd = draw_fd;
2030
2031 SET_SEGV_MSG("libgbm(), create device failed catastrophically.\n");
2032 node->buffer.gbm = gbm_create_device(node->draw_fd);
2033
2034 if (!node->buffer.gbm){
2035 debug_print("gbm, couldn't create gbm device on node");
2036 cleanup_node_gbm(node);
2037 return -1;
2038 }
2039
2040 node->buftype = BUF_GBM;
2041
2042 if (node->eglenv.get_platform_display){
2043 debug_print("gbm, using eglGetPlatformDisplayEXT");
2044 node->display = node->eglenv.get_platform_display(
2045 EGL_PLATFORM_GBM_KHR, (void*)(node->buffer.gbm), NULL);
2046 }
2047 else{
2048 debug_print("gbm, building display using native handle only");
2049 node->display = node->eglenv.get_display((void*)(node->buffer.gbm));
2050 }
2051
2052 /* This is kept optional as not all drivers have it, and not all drivers
2053 * work well with it. The current state is opt-in at a certain cost, but
2054 * don't want the bug reports. */
2055 uintptr_t tag;
2056 cfg_lookup_fun get_config = platform_config_lookup(&tag);
2057 char* devstr, (* cfgstr), (* altstr);
2058 node->atomic =
2059 get_config("video_device_atomic", devind, NULL, tag) && (
2060 drmSetClientCap(node->disp_fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1) == 0 &&
2061 drmSetClientCap(node->disp_fd, DRM_CLIENT_CAP_ATOMIC, 1) == 0
2062 );
2063 debug_print("gbm, node in atomic mode: %s", node->atomic ? "yes" : "no");
2064
2065 /* Set the render node environment variable here, this is primarily for legacy
2066 * clients that gets launched through arcan - the others should get the
2067 * descriptor from DEVICEHINT. It also won't work for multiple cards as the
2068 * last one would just overwrite */
2069 char pbuf[24] = "/dev/dri/renderD128";
2070
2071 char* rdev = drmGetRenderDeviceNameFromFd(node->draw_fd);
2072 if (rdev){
2073 debug_print("derived render-node: %s", rdev);
2074 node->client_meta.fd = open(rdev, O_RDWR | O_CLOEXEC);
2075 if (-1 != node->client_meta.fd)
2076 setenv("ARCAN_RENDER_NODE", rdev, 1);
2077 free(rdev);
2078 }
2079
2080 /* If this fails for some reason (e.g. libdrm packaging on OpenBSD, then
2081 * try to fallback to a hardcoded default */
2082 if (-1 == node->client_meta.fd){
2083 setenv("ARCAN_RENDER_NODE", pbuf, 1);
2084 node->client_meta.fd = open(pbuf, O_RDWR | O_CLOEXEC);
2085 }
2086
2087 return 0;
2088 }
2089
2090 /*
2091 * foreach prop on object(id:type):
2092 * foreach modprob on prop:
2093 * found if name matches modprob -> true:set_val
2094 */
lookup_drm_propval(int fd,uint32_t oid,uint32_t otype,const char * name,uint64_t * val,bool id)2095 static bool lookup_drm_propval(int fd,
2096 uint32_t oid, uint32_t otype, const char* name, uint64_t* val, bool id)
2097 {
2098 drmModeObjectPropertiesPtr oprops =
2099 drmModeObjectGetProperties(fd, oid, otype);
2100
2101 for (size_t i = 0; i < oprops->count_props; i++){
2102 drmModePropertyPtr mprops = drmModeGetProperty(fd, oprops->props[i]);
2103 if (!mprops)
2104 continue;
2105
2106 if (strcmp(name, mprops->name) == 0){
2107 if (id){
2108 *val = mprops->prop_id;
2109 }
2110 else
2111 *val = oprops->prop_values[i];
2112
2113 drmModeFreeObjectProperties(oprops);
2114 drmModeFreeProperty(mprops);
2115 return true;
2116 }
2117
2118 drmModeFreeProperty(mprops);
2119 }
2120
2121 drmModeFreeObjectProperties(oprops);
2122 return false;
2123 }
2124
2125 /*
2126 * called once per updated display per frame, as part of the normal
2127 * draw / flip / ... cycle, bo is the returned gbm_surface_lock_front
2128 */
get_gbm_fb(struct dispout * d,enum display_update_state dstate,struct gbm_bo * bo,uint32_t * dst)2129 static int get_gbm_fb(struct dispout* d,
2130 enum display_update_state dstate, struct gbm_bo* bo, uint32_t* dst)
2131 {
2132 uint32_t new_fb;
2133
2134 /* convert the currently mapped object */
2135 if (dstate == UPDATE_DIRECT){
2136 arcan_vobject* vobj = arcan_video_getobject(d->vid);
2137 struct rendertarget* newtgt = arcan_vint_findrt(vobj);
2138 if (!newtgt)
2139 return -1;
2140
2141 /* though the rendertarget might not be ready for the first frame */
2142 bool swap;
2143 struct agp_vstore* vs = agp_rendertarget_swap(newtgt->art, &swap);
2144 if (!swap){
2145 verbose_print("(%d) no-swap on rtgt", d->id);
2146 return 0;
2147 }
2148
2149 if (!vs->vinf.text.handle){
2150 TRACE_MARK_ONESHOT(
2151 "egl-dri", "rendertarget-swap", TRACE_SYS_ERROR, 0, 0, "no allocator handle");
2152 return -1;
2153 }
2154
2155 struct shmifext_color_buffer* buf =
2156 (struct shmifext_color_buffer*) vs->vinf.text.handle;
2157
2158 /* Now buf represents what we want, but it might still be pending - so create a fence
2159 * if we have fencing enabled and flush-out. */
2160 if (d->device->explicit_synch){
2161 d->buffer.synch =
2162 d->device->eglenv.create_synch(d->device->display,
2163 EGL_SYNC_NATIVE_FENCE_ANDROID, (EGLint[]){
2164 EGL_SYNC_NATIVE_FENCE_FD_ANDROID,
2165 EGL_NO_NATIVE_FENCE_FD_ANDROID, EGL_NONE}
2166 );
2167 }
2168
2169 struct agp_fenv* env = agp_env();
2170 env->flush();
2171
2172 bo = (struct gbm_bo*) buf->alloc_tags[0];
2173 TRACE_MARK_ONESHOT("egl-dri", "rendertarget-swap", TRACE_SYS_DEFAULT, 0, 0, "");
2174 }
2175
2176 if (!bo){
2177 TRACE_MARK_ONESHOT("egl-dri", "vobj-bo-fail", TRACE_SYS_DEFAULT, d->vid, 0, "");
2178 return -1;
2179 }
2180
2181 /* Three possible paths for getting the framebuffer id that can then be
2182 * scanned out: drmModeAddFB2WithModifiers, drmModeAddFB2 and drmModeAddFB
2183 * success rate depend on driver and overall config */
2184 ssize_t n_planes = gbm_bo_get_plane_count(bo);
2185 if (n_planes < 0)
2186 n_planes = 1;
2187
2188 uint32_t handles[n_planes];
2189 uint32_t strides[n_planes];
2190 uint32_t offsets[n_planes];
2191 uint64_t modifiers[n_planes];
2192
2193 TRACE_MARK_ONESHOT("egl-dri", "bo-gbm-planes", TRACE_SYS_DEFAULT, n_planes, 0, "");
2194 if (gbm_bo_get_handle_for_plane(bo, 0).s32 == -1){
2195 handles[0] = gbm_bo_get_handle(bo).u32;
2196 strides[0] = gbm_bo_get_stride(bo);
2197 modifiers[0] = DRM_FORMAT_MOD_INVALID;
2198 TRACE_MARK_ONESHOT("egl-dri", "bo-handle", TRACE_SYS_ERROR, 0, 0, "");
2199 }
2200 else {
2201 for (ssize_t i = 0; i < n_planes; i++){
2202 strides[i] = gbm_bo_get_stride_for_plane(bo, i);
2203 handles[i] = gbm_bo_get_handle_for_plane(bo, i).u32;
2204 offsets[i] = gbm_bo_get_offset(bo, i);
2205 modifiers[i] = gbm_bo_get_modifier(bo);
2206 }
2207 }
2208
2209 size_t bo_width = gbm_bo_get_width(bo);
2210 size_t bo_height = gbm_bo_get_height(bo);
2211
2212 /* nop:ed for now, but the path for dealing with modifiers should be
2213 * considered as soon as we have the other setup for direct-scanout
2214 * of a client and metadata packing across the interface */
2215 if (0){
2216 if (drmModeAddFB2WithModifiers(d->device->disp_fd,
2217 bo_width, bo_height, gbm_bo_get_format(bo),
2218 handles, strides, offsets, modifiers, dst, 0)){
2219 TRACE_MARK_ONESHOT("egl-dri", "drm-gbm-addfb2-mods", TRACE_SYS_ERROR, 0, 0, "");
2220 return -1;
2221 }
2222 TRACE_MARK_ONESHOT("egl-dri", "drm-gbm-addfb2-mods", TRACE_SYS_DEFAULT, 0, 0, "");
2223 }
2224 else if (drmModeAddFB2(d->device->disp_fd, bo_width, bo_height,
2225 gbm_bo_get_format(bo), handles, strides, offsets, dst, 0)){
2226
2227 if (drmModeAddFB(d->device->disp_fd,
2228 bo_width, bo_height, 24, 32, strides[0], handles[0], dst)){
2229 TRACE_MARK_ONESHOT("egl-dri", "drm-gbm-addfb", TRACE_SYS_ERROR, 0, 0, "");
2230 debug_print(
2231 "(%d) failed to add framebuffer (%s)", (int)d->id, strerror(errno));
2232 return -1;
2233 }
2234 TRACE_MARK_ONESHOT("egl-dri", "drm-gbm-addfb", TRACE_SYS_DEFAULT, 0, 0, "");
2235 }
2236 else {
2237 TRACE_MARK_ONESHOT("egl-dri", "drm-gbm-addfb2", TRACE_SYS_DEFAULT, 0, 0, "");
2238 }
2239
2240 return 1;
2241 }
2242
/* switch the display to work in 'dumb' mode with a single 'direct-out' buffer:
 * create a CPU-mappable scanout buffer matching the current mode, map it and
 * fill it with a debug pattern. Returns false if any of the create/map/mmap
 * steps fail. */
static bool set_dumb_fb(struct dispout* d)
{
/* buffer dimensions are taken from the currently set mode, fixed 32bpp */
	struct drm_mode_create_dumb create = {
		.width = d->display.mode.hdisplay,
		.height = d->display.mode.vdisplay,
		.bpp = 32
	};

/* caller is expected to have released any previous dumb buffer first */
	assert(!d->buffer.dumb.enabled);

	int fd = d->device->disp_fd;
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0){
		TRACE_MARK_ONESHOT("egl-dri", "create-dumb", TRACE_SYS_ERROR, 0, 0, "");
		debug_print("(%d) create dumb-fb (%d*%d@%d bpp) failed",
			(int) d->id, create.width, create.height, create.bpp);
		return false;
	}

/* track the buffer through the agp store in the display so the rest of the
 * engine can treat it like any other vstore.
 * NOTE(review): the dumb handle only ends up in agp.vinf.text.handle here,
 * while release_dumb_fb tears down via d->buffer.dumb.fd - confirm where
 * that field is assigned */
	struct agp_vstore* buf = &d->buffer.dumb.agp;

	buf->vinf.text.handle = create.handle;
	buf->vinf.text.stride = create.pitch;
	buf->vinf.text.s_raw = create.size;
	buf->w = d->display.mode.hdisplay;
	buf->h = d->display.mode.vdisplay;
	d->buffer.dumb.enabled = true;

/* mark to switch on the next flip */
	d->buffer.in_dumb_set = true;

/* ask the driver for an mmap:able offset to the buffer */
	struct drm_mode_map_dumb mreq = {
		.handle = create.handle
	};
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) < 0){
		TRACE_MARK_ONESHOT("egl-dri", "create-dumb-fbmap", TRACE_SYS_ERROR, 0, 0, "");
		debug_print("(%d) couldn't map dumb-fb: %s", (int) d->id, strerror(errno));
		return false;
	}

/* note, do we get an offset here? */
	d->buffer.dumb.agp.vinf.text.raw = mmap(0,
		create.size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, mreq.offset);

	if (MAP_FAILED == d->buffer.dumb.agp.vinf.text.raw){
		debug_print("(%d) couldn't mmap dumb-fb: %s", (int) d->id, strerror(errno));
		TRACE_MARK_ONESHOT("egl-dri", "create-dumb-mmap", TRACE_SYS_ERROR, 0, 0, "");

		return false;
	}

	TRACE_MARK_ONESHOT("egl-dri", "create-dumb", TRACE_SYS_DEFAULT, 0, 0, "");

/* 0xaa fill gives a visible grey debug pattern until real contents arrive */
	memset(d->buffer.dumb.agp.vinf.text.raw, 0xaa, create.size);

	return true;
}
2299
/* tear down the 'dumb' direct-out buffer state of the display: destroy the
 * kernel-side buffer, unreference the backing store, unmap the CPU mapping
 * and - if a temporary framebuffer was installed - restore scanout to the
 * previously current one. No-op if no dumb buffer is enabled. */
static void release_dumb_fb(struct dispout* d)
{
	if (!d->buffer.dumb.enabled)
		return;

/* NOTE(review): [dumb.fd] is used here as a framebuffer id (RmFB), as the
 * dumb buffer handle (DESTROY_DUMB) and as a file descriptor (close) -
 * verify which of those it actually holds, set_dumb_fb does not assign it */
	drmModeRmFB(d->device->disp_fd, d->buffer.dumb.fd);
	struct drm_mode_destroy_dumb dreq = {
		.handle = d->buffer.dumb.fd
	};

	drmIoctl(d->device->disp_fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
	close(d->buffer.dumb.fd);
	d->buffer.dumb.fd = -1;

/* unref- the store, unlikely that we are the last consumer of this but some
 * edge (map -> vlayer deletes -> maps different, it can happen.
 * NOTE(review): assumes [ref] is always non-NULL while [enabled] is set -
 * confirm against the code that sets up the dumb mapping */
	d->buffer.dumb.ref->refcount--;
	if (!d->buffer.dumb.ref->refcount){

		if (d->buffer.dumb.ref->vinf.text.raw)
			arcan_mem_free(d->buffer.dumb.ref->vinf.text.raw);

		agp_drop_vstore(d->buffer.dumb.ref);
	}
	d->buffer.dumb.ref = NULL;

/* drop the CPU mapping and reset the local store descriptor */
	munmap(d->buffer.dumb.agp.vinf.text.raw, d->buffer.dumb.sz);
	d->buffer.dumb.agp = (struct agp_vstore){};
	d->buffer.dumb.enabled = false;

/* if we have succeeded with switching to another framebuffer, restore
 * the old one by setting whatever was in that slot */
	if (d->buffer.dumb.fb){
		drmModeRmFB(d->device->disp_fd, d->buffer.dumb.fb);
		d->buffer.dumb.fb = 0;
		drmModeSetCrtc(d->device->disp_fd, d->display.crtc,
			d->buffer.cur_fb, 0, 0, &d->display.con_id, 1, &d->display.mode);
	}

	verbose_print("(%d) released dumb framebuffer", d->id);
	d->device->vsynch_method = VSYNCH_FLIP;
}
2342
/*
 * Search the property set [pptr] for a property named [name] and, on a hit,
 * queue an atomic update of that property to [val] for object [obj_id] on
 * the request [dst]. Returns false if the object has no such property.
 */
static bool resolve_add(int fd, drmModeAtomicReqPtr dst, uint32_t obj_id,
	drmModeObjectPropertiesPtr pptr, const char* name, uint32_t val)
{
	bool found = false;

	for (size_t ind = 0; ind < pptr->count_props && !found; ind++){
		drmModePropertyPtr cur = drmModeGetProperty(fd, pptr->props[ind]);
		if (!cur)
			continue;

		if (0 == strcmp(cur->name, name)){
			drmModeAtomicAddProperty(dst, obj_id, cur->prop_id, val);
			found = true;
		}

		drmModeFreeProperty(cur);
	}

	return found;
}
2361
atomic_set_mode(struct dispout * d)2362 static bool atomic_set_mode(struct dispout* d)
2363 {
2364 uint32_t mode;
2365 bool rv = false;
2366 int fd = d->device->disp_fd;
2367
2368 if (0 != drmModeCreatePropertyBlob(fd,
2369 &d->display.mode, sizeof(drmModeModeInfo), &mode)){
2370 debug_print("(%d) atomic-modeset, failed to create mode-prop");
2371 return false;
2372 }
2373
2374 drmModeAtomicReqPtr aptr = drmModeAtomicAlloc();
2375
2376 #define AADD(ID, LBL, VAL) if (!resolve_add(fd,aptr,(ID),pptr,(LBL),(VAL))){\
2377 debug_print("(%d) atomic-modeset, failed to resolve prop %s", (int) d->id, (LBL));\
2378 goto cleanup;\
2379 }
2380
2381 drmModeObjectPropertiesPtr pptr =
2382 drmModeObjectGetProperties(fd, d->display.crtc, DRM_MODE_OBJECT_CRTC);
2383 if (!pptr){
2384 debug_print("(%d) atomic-modeset, failed to get crtc prop", (int) d->id);
2385 goto cleanup;
2386 }
2387 AADD(d->display.crtc, "MODE_ID", mode);
2388 AADD(d->display.crtc, "ACTIVE", 1);
2389 drmModeFreeObjectProperties(pptr);
2390
2391 pptr = drmModeObjectGetProperties(fd,
2392 d->display.con->connector_id, DRM_MODE_OBJECT_CONNECTOR);
2393 if (!pptr){
2394 debug_print("(%d) atomic-modeset, failed to get connector prop", (int)d->id);
2395 goto cleanup;
2396 }
2397 AADD(d->display.con->connector_id, "CRTC_ID", d->display.crtc);
2398 drmModeFreeObjectProperties(pptr);
2399
2400 pptr =
2401 drmModeObjectGetProperties(fd, d->display.plane_id, DRM_MODE_OBJECT_PLANE);
2402 if (!pptr){
2403 debug_print("(%d) atomic-modeset, failed to get plane props", (int)d->id);
2404 goto cleanup;
2405 }
2406
2407 unsigned width = d->display.mode.hdisplay << 16;
2408 unsigned height = d->display.mode.vdisplay << 16;
2409
2410 AADD(d->display.plane_id, "SRC_X", 0);
2411 AADD(d->display.plane_id, "SRC_Y", 0);
2412 AADD(d->display.plane_id, "SRC_W", width);
2413 AADD(d->display.plane_id, "SRC_H", height);
2414 AADD(d->display.plane_id, "CRTC_X", 0);
2415 AADD(d->display.plane_id, "CRTC_Y", 0);
2416 AADD(d->display.plane_id, "CRTC_W", width);
2417 AADD(d->display.plane_id, "CRTC_H", height);
2418 AADD(d->display.plane_id, "FB_ID", d->buffer.cur_fb);
2419 AADD(d->display.plane_id, "CRTC_ID", d->display.crtc);
2420 #undef AADD
2421
2422 /* resolve sym:id for the properties on the objects we need:
2423 */
2424
2425 if (0 != drmModeAtomicCommit(fd,aptr, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL)){
2426 goto cleanup;
2427 }
2428 else
2429 rv = true;
2430
2431 cleanup:
2432 drmModeAtomicFree(aptr);
2433 drmModeDestroyPropertyBlob(fd, mode);
2434
2435 return rv;
2436 }
2437
2438 /*
2439 * foreach plane in plane-resources(dev):
2440 * if plane.crtc == display.crtc:
2441 * find type
2442 * if type is primary, set and true
2443 */
find_plane(struct dispout * d)2444 static bool find_plane(struct dispout* d)
2445 {
2446 drmModePlaneResPtr plane_res = drmModeGetPlaneResources(d->device->disp_fd);
2447 d->display.plane_id = 0;
2448 if (!plane_res){
2449 debug_print("(%d) atomic-modeset, couldn't find plane on device",(int)d->id);
2450 return false;
2451 }
2452 for (size_t i = 0; i < plane_res->count_planes; i++){
2453 drmModePlanePtr plane =
2454 drmModeGetPlane(d->device->disp_fd, plane_res->planes[i]);
2455 if (!plane){
2456 debug_print("(%d) atomic-modeset, couldn't get plane (%zu)",(int)d->id,i);
2457 return false;
2458 }
2459 uint32_t crtcs = plane->possible_crtcs;
2460 drmModeFreePlane(plane);
2461 if (0 == (crtcs & (1 << d->display.crtc_index)))
2462 continue;
2463
2464 uint64_t val;
2465 if (!lookup_drm_propval(d->device->disp_fd,
2466 plane_res->planes[i], DRM_MODE_OBJECT_PLANE, "type", &val, false))
2467 continue;
2468
2469 /* NOTE: There are additional constraints for PRIMARY planes that don't
2470 * apply to OVERLAY planes - we can't do scaling, plane size must cover
2471 * all of CRTC etc. If we use this wrong, check dmesg for something like
2472 * 'DRM: plane must cover entire CRTC' */
2473 if (val == DRM_PLANE_TYPE_PRIMARY){
2474 d->display.plane_id = plane_res->planes[i];
2475 break;
2476 }
2477 }
2478 drmModeFreePlaneResources(plane_res);
2479 return d->display.plane_id != 0;
2480 }
2481
2482 /*
2483 * sweep all displays, and see if the referenced CRTC id is in use.
2484 */
crtc_used(struct dev_node * dev,int crtc)2485 static struct dispout* crtc_used(struct dev_node* dev, int crtc)
2486 {
2487 for (size_t i = 0; i < MAX_DISPLAYS; i++){
2488 if (displays[i].state == DISP_UNUSED)
2489 continue;
2490
2491 if (displays[i].device != dev)
2492 continue;
2493
2494 if (displays[i].display.crtc == crtc)
2495 return &displays[i];
2496 }
2497
2498 return NULL;
2499 }
2500
setup_kms(struct dispout * d,int conn_id,size_t w,size_t h)2501 static int setup_kms(struct dispout* d, int conn_id, size_t w, size_t h)
2502 {
2503 SET_SEGV_MSG("egl-dri(), enumerating connectors on device failed.\n");
2504 drmModeRes* res;
2505
2506 retry:
2507 res = drmModeGetResources(d->device->disp_fd);
2508 if (!res){
2509 debug_print("(%d) setup_kms, couldn't get resources (fd:%d)",
2510 (int)d->id, (int)d->device->disp_fd);
2511 return -1;
2512 }
2513
2514 /* for the default case we don't have a connector, but for
2515 * newly detected displays we store / reserve */
2516 if (!d->display.con)
2517 for (int i = 0; i < res->count_connectors; i++){
2518 d->display.con = drmModeGetConnector(d->device->disp_fd, res->connectors[i]);
2519
2520 if (d->display.con->connection == DRM_MODE_CONNECTED &&
2521 (conn_id == -1 || conn_id == d->display.con->connector_id))
2522 break;
2523
2524 drmModeFreeConnector(d->display.con);
2525 d->display.con = NULL;
2526 }
2527
2528 /*
2529 * No connector in place, set a retry- timer or give up. The other
2530 * option would be to switch over to display/headless mode
2531 */
2532 if (!d->display.con){
2533 drmModeFreeResources(res);
2534
2535 /* only wait for the first display */
2536 if (d == &displays[0] && d->device->wait_connector){
2537 debug_print("(%d) setup-kms, no display - retry in 5s", (int)d->id);
2538 sleep(5);
2539 goto retry;
2540 }
2541
2542 debug_print("(%d) setup-kms, no connected displays", (int)d->id);
2543 return -1;
2544 }
2545 d->display.con_id = d->display.con->connector_id;
2546 SET_SEGV_MSG("egl-dri(), enumerating connector/modes failed.\n");
2547
2548 /*
2549 * If dimensions are specified, find the closest match and on collision,
2550 * the one with the highest refresh rate.
2551 */
2552 bool try_inherited_mode = true;
2553 int vrefresh = 0;
2554
2555 /*
2556 * will just nop- out unless verbose defined
2557 */
2558 for (ssize_t i = 0; i < d->display.con->count_modes; i++){
2559 drmModeModeInfo* cm = &d->display.con->modes[i];
2560 verbose_print("(%d) mode (%zu): %d*%d@%d Hz",
2561 d->id, i, cm->hdisplay, cm->vdisplay, cm->vrefresh);
2562 }
2563
2564 /*
2565 * w and h comes from the old- style command-line to video_init calls
2566 * sets to 0 if the user didn't explicitly request anything else
2567 */
2568 if (w != 0 && h != 0){
2569 bool found = false;
2570
2571 for (ssize_t i = 0; i < d->display.con->count_modes; i++){
2572 drmModeModeInfo* cm = &d->display.con->modes[i];
2573 /*
2574 * prefer exact match at highest vrefresh, otherwise we'll fall back to
2575 * whatever we inherit from the console or 'first best'
2576 */
2577 if (cm->hdisplay == w && cm->vdisplay == h && cm->vrefresh > vrefresh){
2578 d->display.mode = *cm;
2579 d->display.mode_set = i;
2580 d->dispw = cm->hdisplay;
2581 d->disph = cm->vdisplay;
2582 vrefresh = cm->vrefresh;
2583 try_inherited_mode = false;
2584 found = true;
2585 debug_print("(%d) hand-picked (-w, -h): "
2586 "%d*%d@%dHz", d->id, d->dispw, d->disph, vrefresh);
2587 }
2588 }
2589
2590 if (!found){
2591 /* but if not, drop the presets and return to auto-detect */
2592 w = 0;
2593 h = 0;
2594 }
2595 }
2596
2597 /*
2598 * If no dimensions are specified, grab the first one. (according to drm
2599 * documentation, that should be the most 'fitting') but also allow the
2600 * 'try_inherited_mode' using what is already on the connector.
2601 *
2602 * Note for ye who ventures in here, seems like some drivers still enjoy
2603 * returning ones that are actually 0*0, skip those.
2604 */
2605 if (w == 0 && d->display.con->count_modes >= 1){
2606 bool found = false;
2607
2608 for (ssize_t i = 0; i < d->display.con->count_modes; i++){
2609 drmModeModeInfo* cm = &d->display.con->modes[i];
2610 if (!cm->hdisplay || !cm->vdisplay)
2611 continue;
2612
2613 d->display.mode = *cm;
2614 d->display.mode_set = 0;
2615 d->dispw = cm->hdisplay;
2616 d->disph = cm->vdisplay;
2617 vrefresh = cm->vrefresh;
2618 found = true;
2619 debug_print("(%d) default connector mode: %d*%d@%dHz",
2620 d->id, d->dispw, d->disph, vrefresh);
2621 break;
2622 }
2623
2624 /* everything is broken, just set a bad mode and let the rest of the error-
2625 * paths take care of the less-than-graceful exit */
2626 if (!found){
2627 d->display.mode = d->display.con->modes[0];
2628 d->display.mode_set = 0;
2629 d->dispw = d->display.mode.hdisplay;
2630 d->disph = d->display.mode.vdisplay;
2631 debug_print("(%d) setup-kms, couldn't find any useful mode");
2632 }
2633
2634 debug_print("(%d) setup-kms, default-picked %zu*%zu", (int)d->id,
2635 (size_t)d->display.mode.hdisplay, (size_t)d->display.mode.vdisplay);
2636 }
2637
2638 /*
2639 * Grab any EDID data now as we've had issues trying to query it on some
2640 * displays later while buffers etc. are queued(?). Some reports have hinted
2641 * that it's more dependent on race conditions on the kernel-driver side when
2642 * there are multiple EDID queries in flight which can happen as part of
2643 * on_hotplug(func) style event storms in independent software.
2644 */
2645 fetch_edid(d);
2646
2647 /*
2648 * foreach(encoder)
2649 * check_default_crtc -> not used ? allocate -> go
2650 * foreach possible_crtc -> first not used ? allocate -> go
2651 *
2652 * note that practically, the crtc search must also find a crtcs that supports
2653 * a certain 'plane configuration' that match the render configuration we want
2654 * to perform, which triggers on each setup where we have a change in vid to
2655 * display mappings (similar to defered modeset).
2656 */
2657 SET_SEGV_MSG("libdrm(), setting matching encoder failed.\n");
2658 bool crtc_found = false;
2659
2660 /* mimic x11 modesetting driver use, sweep all encoders and pick the crtcs
2661 * that all of them support, and bias against inherited crtc on the first
2662 * encoder that aren't already mapped */
2663 uint64_t mask = 0;
2664 mask = ~mask;
2665 uint64_t join = 0;
2666
2667 for (int i = 0; i < res->count_encoders; i++){
2668 drmModeEncoder* enc = drmModeGetEncoder(d->device->disp_fd, res->encoders[i]);
2669 if (!enc)
2670 continue;
2671
2672 for (int j = 0; j < res->count_crtcs; j++){
2673 if (!(enc->possible_crtcs & (1 << j)))
2674 mask &= enc->possible_crtcs;
2675 join |= enc->possible_crtcs;
2676 }
2677
2678 drmModeFreeEncoder(enc);
2679 }
2680
2681 if (!mask){
2682 debug_print("libdrm(), no shared mask of crtcs, take full set");
2683 mask = join;
2684 }
2685
2686 /* now sweep the list of possible crtcs and pick the first one we don't have
2687 * already allocated to a display, uncertain if the crtc size was 32 or 64
2688 * bit so might as well go for the higher */
2689 for (uint64_t i = 0; i < 64 && i < res->count_crtcs; i++){
2690 if (mask & ((uint64_t)1 << i)){
2691 uint32_t crtc_val = res->crtcs[i];
2692 struct dispout* crtc_disp = crtc_used(d->device, crtc_val);
2693 if (crtc_disp)
2694 continue;
2695
2696 d->display.crtc = crtc_val;
2697 d->display.crtc_index = i;
2698 crtc_found = true;
2699 break;
2700 }
2701 }
2702
2703 debug_print("(%d) picked crtc (%d) from encoder", (int)d->id, d->display.crtc);
2704 drmModeCrtc* crtc = drmModeGetCrtc(d->device->disp_fd, d->display.crtc);
2705 if (!crtc){
2706 debug_print("couldn't retrieve chose crtc, giving up");
2707 goto drop_disp;
2708 }
2709
2710 /* sanity-check inherited mode (weird drivers + "non-graphical" defaults? */
2711 if (crtc->mode_valid && try_inherited_mode && crtc->mode.hdisplay){
2712 d->display.mode = crtc->mode;
2713 d->display.mode_set = 0;
2714
2715 /* find the matching index */
2716 for (size_t i = 0; i < d->display.con->count_modes; i++){
2717 if (memcmp(&d->display.con->modes[i],
2718 &d->display.mode, sizeof(drmModeModeInfo)) == 0){
2719 d->display.mode_set = i;
2720 break;
2721 }
2722 }
2723
2724 d->dispw = d->display.mode.hdisplay;
2725 d->disph = d->display.mode.vdisplay;
2726 debug_print("(%d) trying tty- inherited mode ", (int)d->id);
2727 }
2728
2729 if (!crtc_found){
2730 debug_print("(%d) setup-kms, no working encoder/crtc", (int)d->id);
2731 goto drop_disp;
2732 return -1;
2733 }
2734
2735 /* now we have a mode that is either hand-picked, inherited from the TTY or the
2736 * DRM default - alas in many cases this is not the one with the highest refresh
2737 * at that resolution, so sweep yet again and try and find one for that */
2738 for (size_t i = 0; i < d->display.con->count_modes; i++){
2739 drmModeModeInfo* cm = &d->display.con->modes[i];
2740 if (cm->hdisplay == d->dispw &&
2741 cm->vdisplay == d->disph && cm->vrefresh > vrefresh){
2742 d->display.mode = *cm;
2743 d->display.mode_set = i;
2744 vrefresh = cm->vrefresh;
2745 debug_print(
2746 "(%d) higher refresh (%d) found at set resolution", vrefresh, (int)d->id);
2747 }
2748 }
2749
2750 /* find a matching output-plane for atomic/streams */
2751 if (d->device->atomic){
2752 bool ok = true;
2753 if (!find_plane(d)){
2754 debug_print("(%d) setup_kms, atomic-find_plane fail", (int)d->id);
2755 ok = false;
2756 }
2757 /* map a buffer to it so we get it to behave */
2758 else if (!set_dumb_fb(d)){
2759 debug_print("(%d) setup_kms, atomic dumb-fb fail", (int)d->id);
2760 ok = false;
2761 }
2762 /* and finally commit */
2763 else if (!atomic_set_mode(d)){
2764 debug_print("(%d) setup_kms, atomic modeset fail", (int)d->id);
2765 drmModeRmFB(d->device->disp_fd, d->buffer.cur_fb);
2766 d->buffer.cur_fb = 0;
2767 ok = false;
2768 }
2769 /* just disable atomic */
2770 if (!ok){
2771 debug_print("(%d) setup_kms, "
2772 "atomic modeset failed, fallback to legacy", d->id);
2773 d->device->atomic = false;
2774 drmModeFreeConnector(d->display.con);
2775 d->display.con = NULL;
2776 drmModeFreeResources(res);
2777 drmSetClientCap(d->device->disp_fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 0);
2778 drmSetClientCap(d->device->disp_fd, DRM_CLIENT_CAP_ATOMIC, 0);
2779 return setup_kms(d, conn_id, w, h);
2780 }
2781 else if (!ok)
2782 goto drop_disp;
2783 }
2784
2785 build_orthographic_matrix(d->projection,
2786 0, d->display.mode.hdisplay, d->display.mode.vdisplay, 0, 0, 1);
2787
2788 dpms_set(d, DRM_MODE_DPMS_ON);
2789 d->display.dpms = ADPMS_ON;
2790
2791 drmModeFreeResources(res);
2792 return 0;
2793
2794 drop_disp:
2795 drmModeFreeConnector(d->display.con);
2796 d->display.con = NULL;
2797 drmModeFreeResources(res);
2798 return -1;
2799 }
2800
2801 /* this is the deprecated interface - import buffer has replaced it */
map_handle_gbm(struct agp_vstore * dst,int64_t handle)2802 static bool map_handle_gbm(struct agp_vstore* dst, int64_t handle)
2803 {
2804 uint64_t invalid = DRM_FORMAT_MOD_INVALID;
2805 uint32_t hi = invalid >> 32;
2806 uint32_t lo = invalid & 0xffffffff;
2807
2808 if (-1 == handle){
2809 struct dispout* d = &displays[0];
2810 d->device->eglenv.destroy_image(
2811 d->device->display, (EGLImage) dst->vinf.text.tag);
2812 dst->vinf.text.tag = 0;
2813 return true;
2814 }
2815
2816 struct agp_buffer_plane plane = {
2817 .fd = handle,
2818 .gbm = {
2819 .mod_hi = DRM_FORMAT_MOD_INVALID >> 32,
2820 .mod_lo = DRM_FORMAT_MOD_INVALID & 0xffffffff,
2821 .offset = 0,
2822 .stride = dst->vinf.text.stride,
2823 .format = dst->vinf.text.format
2824 }
2825 };
2826
2827 return platform_video_map_buffer(dst, &plane, 1);
2828 }
2829
2830 /*
2831 * There's a really ugly GLES- inheritance GOTCHA here that makes streams
2832 * a ******** pain to work with. 99.9% of all existing code works against
2833 * GL_TEXTURE_2D as the target for 2D buffers. Of course, there's a 'special'
2834 * GL_TEXTURE_EXTERNAL_OES target that also requires a different sampler in
2835 * the shader code. This leaves us with 3-ish options.
2836 * This is actually true for GBM- buffers as well, but so far MESA doesn't
2837 * really give a duck.
2838 *
2839 * 1. Mimic 'CopyTextureCHROMIUM' - explicit render-to-texture pass.
2840 * 2. The 'Cogl' approach - Make shader management exponentially worse by
2841 * tracking the dst- and when binding to a backend, compile/generate variants
2842 * where the sampler references are replaced with the 'right' target by
2843 * basically doing string- replacement on the program source.
2844 * 3. Move the complexity to the script level, breaking the opaqueness of
2845 * VIDs by forcing separate rules and gotcha's when the VID comes from an
2846 * external source. Possibly on the shader level by allowing additional
2847 * rule-slots.
2848 * +. Find some long-lost OpenGL function that relies on semi-defined behavior
2849 * to get rid of the _OES sampler type. There's probably something in
2850 * EGLImage.
2851 * +. Nasty hybrid: have a special external version of the 'default' and
2852 * when/if that one fails, fall back to 1/2.
2853 *
2854 * Granted, we need something similar for dma-buf when the source is one of
2855 * the many YUUVUUVUV formats.
2856 *
2857 * OpenGL: an arcane language with 400 different words for memcpy, where you
2858 * won't be sure of what src is, or dst, how much will actually be copied or
2859 * what happens to data in transit.
2860 */
bool map_handle_stream(struct agp_vstore* dst, int64_t handle)
{
/*
 * Unimplemented. An EGLStream- based mapping would roughly be:
 *
 * 1. eglCreateStreamFromFileDescriptorKHR(dpy, handle)
 * 2. glBindTexture (GL_TEXTURE_EXTERNAL_OES)
 * 3. eglStreamConsumerGLTextureExternalKHR(dpy, stream)
 *
 * to 'poll' the stream, we can go with eglStreamConsumerQueryStream and
 * check that for EGL_STREAM_STATE_NEW_FRAME_AVAILABLE_KHR. We need semantics
 * for the BUFFER_ calls in shmif and the corresponding place in engine/
 * arcan_event.c to use a reserved identifier for matching against the
 * handle.
 *
 * then we have eglStreamConsumerAcquireKHR(dpy, stream)
 */
	return false;
}
2879
platform_video_map_handle(struct agp_vstore * dst,int64_t handle)2880 bool platform_video_map_handle(
2881 struct agp_vstore* dst, int64_t handle)
2882 {
2883 /*
2884 * MULTIGPU:FAIL
2885 * we need to follow the affinity for the specific [dst], and run the procedure
2886 * for each set bit in the field, if it is even possible to do with PRIME etc.
2887 * otherwise we need a mechanism to activate the DEVICEHINT event for the
2888 * provider to indicate that we need a portable handle.
2889 */
2890 switch (nodes[0].buftype){
2891 case BUF_GBM:
2892 return map_handle_gbm(dst, handle);
2893 break;
2894 case BUF_STREAM:
2895 return map_handle_stream(dst, handle);
2896 break;
2897 case BUF_HEADLESS:
2898 return false;
2899 break;
2900 }
2901 return false;
2902 }
2903
update_mode_cache(struct dispout * d)2904 static void update_mode_cache(struct dispout* d)
2905 {
2906 debug_print("(%d) issuing mode scan", (int) d->id);
2907 drmModeConnector* conn = d->display.con;
2908 drmModeRes* res = drmModeGetResources(d->device->disp_fd);
2909
2910 int count = conn->count_modes;
2911 if (!count)
2912 return;
2913
2914 d->mode_cache_sz = count;
2915 d->mode_cache = arcan_alloc_mem(
2916 sizeof(struct monitor_mode) * d->mode_cache_sz,
2917 ARCAN_MEM_VSTRUCT, ARCAN_MEM_BZERO, ARCAN_MEMALIGN_NATURAL
2918 );
2919
2920 for (size_t i = 0; i < conn->count_modes; i++){
2921 d->mode_cache[i].refresh = conn->modes[i].vrefresh;
2922 d->mode_cache[i].width = conn->modes[i].hdisplay;
2923 d->mode_cache[i].height = conn->modes[i].vdisplay;
2924 d->mode_cache[i].subpixel = subpixel_type(conn->subpixel);
2925 d->mode_cache[i].phy_width = conn->mmWidth;
2926 d->mode_cache[i].phy_height = conn->mmHeight;
2927 d->mode_cache[i].dynamic = false;
2928 d->mode_cache[i].id = i;
2929 d->mode_cache[i].depth = sizeof(av_pixel) * 8;
2930 }
2931
2932 drmModeFreeResources(res);
2933 }
2934
/*
 * Return the cached set of modes for display [id], refreshing the cache
 * first. [count] receives the number of entries; set to 0 (and NULL is
 * returned) when the display is unknown or has no modes.
 */
struct monitor_mode* platform_video_query_modes(
	platform_display_id id, size_t* count)
{
/* fix: removed the unused local 'free_conn' */
	struct dispout* d = get_display(id);
	if (!d || d->state == DISP_UNUSED){
/* fix: don't leave [count] uninitialized for the caller on this path */
		*count = 0;
		return NULL;
	}

	update_mode_cache(d);

	if (d->mode_cache){
		*count = d->mode_cache_sz;
		return d->mode_cache;
	}

	*count = 0;
	return NULL;
}
2954
/*
 * Find the display (if any) that is bound to [con] on the device with
 * descriptor [fd]. Falls back to the saved con_id when the connector
 * reference itself has been released.
 */
static struct dispout* match_connector(int fd, drmModeConnector* con)
{
/* fix: removed the unused local 'j' */
	for (size_t i=0; i < MAX_DISPLAYS; i++){
		struct dispout* d = &displays[i];
		if (d->state == DISP_UNUSED)
			continue;

		if (d->device->disp_fd == fd &&
			(d->display.con ?
				d->display.con->connector_id : d->display.con_id) == con->connector_id)
			return d;
	}

	return NULL;
}
2971
2972 /*
2973 * The cost for this function is rather unsavory, nouveau testing has shown
2974 * somewhere around ~110+ ms stalls for one re-scan
2975 */
query_card(struct dev_node * node)2976 static void query_card(struct dev_node* node)
2977 {
2978 debug_print("check resources on %i", node->disp_fd);
2979
2980 drmModeRes* res = drmModeGetResources(node->disp_fd);
2981 if (!res){
2982 debug_print("couldn't get resources for rescan on %i", node->disp_fd);
2983 return;
2984 }
2985
2986 for (size_t i = 0; i < res->count_connectors; i++){
2987 drmModeConnector* con = drmModeGetConnector(node->disp_fd, res->connectors[i]);
2988 struct dispout* d = match_connector(node->disp_fd, con);
2989
2990 /* no display on connector */
2991 if (con->connection != DRM_MODE_CONNECTED){
2992 /* if there was one known, remove it and notify */
2993 debug_print("(%zu) lost, disabled", (int)i);
2994 if (d){
2995 debug_print("(%d) display lost, disabling", (int)d->id);
2996 disable_display(d, true);
2997 arcan_event ev = {
2998 .category = EVENT_VIDEO,
2999 .vid.kind = EVENT_VIDEO_DISPLAY_REMOVED,
3000 .vid.displayid = d->id,
3001 .vid.ledctrl = egl_dri.ledid,
3002 .vid.ledid = d->id
3003 };
3004 arcan_event_enqueue(arcan_event_defaultctx(), &ev);
3005 arcan_conductor_release_display(d->device->card_id, d->id);
3006 }
3007 drmModeFreeConnector(con);
3008 continue;
3009 }
3010
3011 /* do we already know about the connector? then do nothing */
3012 if (d){
3013 debug_print("(%d) already known", (int)d->id);
3014 drmModeFreeConnector(con);
3015 continue;
3016 }
3017
3018 /* allocate display and mark as known but not mapped, give up
3019 * if we're out of display slots */
3020 debug_print("unknown display detected");
3021 d = allocate_display(&nodes[0]);
3022 if (!d){
3023 debug_print("failed to allocate new display");
3024 drmModeFreeConnector(con);
3025 continue;
3026 }
3027
3028 /* save the ID for later so that we can match in match_connector */
3029 d->display.con = con;
3030 d->display.con_id = con->connector_id;
3031 d->backlight = backlight_init(
3032 d->device->card_id, d->display.con->connector_type,
3033 d->display.con->connector_type_id
3034 );
3035 debug_print(
3036 "(%d) assigned connector id (%d)",(int)d->id,(int)con->connector_id);
3037 if (d->backlight){
3038 debug_print("(%d) display backlight assigned", (int)d->id);
3039 d->backlight_brightness = backlight_get_brightness(d->backlight);
3040 }
3041 arcan_event ev = {
3042 .category = EVENT_VIDEO,
3043 .vid.kind = EVENT_VIDEO_DISPLAY_ADDED,
3044 .vid.displayid = d->id,
3045 .vid.ledctrl = egl_dri.ledid,
3046 .vid.ledid = d->id,
3047 .vid.cardid = d->device->card_id
3048 };
3049 arcan_conductor_register_display(
3050 d->device->card_id, d->id, SYNCH_STATIC, d->display.mode.vrefresh, d->vid);
3051
3052 update_mode_cache(d);
3053 arcan_event_enqueue(arcan_event_defaultctx(), &ev);
3054 continue; /* don't want to free con */
3055 }
3056 drmModeFreeResources(res);
3057 }
3058
/*
 * Request a connector rescan. The actual (expensive, see query_card) scan is
 * deferred: only a flag is raised here and the synch loop performs the scan,
 * rate-limited, on its next pass.
 */
void platform_video_query_displays()
{
	debug_print("issuing display requery");
	egl_dri.scan_pending = true;
}
3064
/*
 * Tear down a display's GPU/KMS resources. With dealloc == false the display
 * is parked in DISP_EXTSUSP (external suspend, e.g. VT switch) keeping
 * connector/CRTC bookkeeping; with dealloc == true it is fully returned to
 * the unused pool. If a page-flip is still in flight, destruction is
 * deferred to the synch loop via egl_dri.destroy_pending.
 */
static void disable_display(struct dispout* d, bool dealloc)
{
/* fix: the old combined guard dereferenced d->id in its debug message even
 * when d was NULL - split the checks */
	if (!d){
		debug_print("disable_display called with broken display reference");
		return;
	}

	if (d->state == DISP_UNUSED){
		debug_print("disable_display called on unused display (%d)", (int)d->id);
		return;
	}

	debug_print("(%d) trying to disable", (int)d->id);
	if (d->buffer.in_flip){
		debug_print("(%d) flip pending, deferring destruction", (int)d->id);
		d->buffer.in_destroy = true;
		egl_dri.destroy_pending |= 1 << d->id;
		return;
	}

	if (d->mode_cache){
		arcan_mem_free(d->mode_cache);
		d->mode_cache = NULL;
	}

	d->device->refc--;
	if (d->buffer.in_destroy){
		egl_dri.destroy_pending &= ~(1 << d->id);
	}
	d->buffer.in_destroy = false;

	if (d->display.edid_blob){
		free(d->display.edid_blob);
		d->display.edid_blob = NULL;
		d->display.blob_sz = 0;
	}

/* a KNOWN (discovered but never mapped) display has no EGL/GBM resources */
	if (d->state == DISP_KNOWN){
		d->device = NULL;
		d->state = DISP_UNUSED;
		return;
	}

	d->state = DISP_CLEANUP;

	set_display_context(d);
	debug_print("(%d) destroying EGL surface", (int)d->id);
	d->device->eglenv.destroy_surface(d->device->display, d->buffer.esurf);
	d->buffer.esurf = NULL;

/* destroying the context has triggered driver bugs and hard to attribute UAFs
 * in the past, monitor this closely */
	if (d->buffer.cur_fb){
		debug_print("(%d) removing framebuffer", (int)d->id);
		drmModeRmFB(d->device->disp_fd, d->buffer.cur_fb);
		d->buffer.cur_fb = 0;
	}

	if (d->buffer.context != EGL_NO_CONTEXT){
		debug_print("(%d) EGL - set device"
			"context, destroy display context", (int)d->id);
		set_device_context(d->device);
		d->device->eglenv.destroy_context(d->device->display, d->buffer.context);
		d->buffer.context = EGL_NO_CONTEXT;
	}

	if (d->device->buftype == BUF_GBM){
		if (d->buffer.surface){
			debug_print("destroy gbm surface");

			if (d->buffer.cur_bo)
				gbm_surface_release_buffer(d->buffer.surface, d->buffer.cur_bo);
			gbm_surface_destroy(d->buffer.surface);
		}

/* fix: dropped the redundant 'if (cur_bo)' guard around the NULL store */
		d->buffer.cur_bo = NULL;
		d->buffer.surface = NULL;
	}
	else
		debug_print("EGL- display");

/* restore the color LUTs, not 100% certain that this is the best approach here
 * since an external- launch then needs to figure out / manipulate them on its
 * own, losing color calibration and so on in the process */
	if (d->display.orig_gamma){
/* fix: format string took an argument (%d) but none was supplied */
		debug_print("(%d) restoring device color LUTs", (int)d->id);
		drmModeCrtcSetGamma(d->device->disp_fd, d->display.crtc,
			d->display.gamma_size, d->display.orig_gamma,
			&d->display.orig_gamma[1*d->display.gamma_size],
			&d->display.orig_gamma[2*d->display.gamma_size]
		);
	}

/* in extended suspend, we have no idea which displays we are returning to so
 * the only real option is to fully deallocate even in EXTSUSP */
	debug_print("(%d) release crtc id (%d)", (int)d->id,(int)d->display.crtc);
	if (d->display.old_crtc){
		debug_print("(%d) old mode found, trying to reset", (int)d->id);
		if (d->device->atomic){
			d->display.mode = d->display.old_crtc->mode;
			if (atomic_set_mode(d)){
				debug_print("(%d) atomic-modeset failed on (%d)",
					(int)d->id, (int)d->display.con_id);
			}
		}
		else if (0 > drmModeSetCrtc(
			d->device->disp_fd,
			d->display.old_crtc->crtc_id,
			d->display.old_crtc->buffer_id,
			d->display.old_crtc->x,
			d->display.old_crtc->y,
			&d->display.con_id, 1,
			&d->display.old_crtc->mode
		)){
			debug_print("Error setting old CRTC on %d", d->display.con_id);
		}
	}

/* in the no-dealloc state we still want to remember which CRTCs etc were
 * set as those might have been changed as part of a modeset request */
	if (!dealloc){
		debug_print("(%d) switched state to EXTSUSP", (int)d->id);
		d->state = DISP_EXTSUSP;
		return;
	}

/* gamma has already been restored above, but we need to free the resources */
	debug_print("(%d) full deallocation requested", (int)d->id);
	if (d->display.orig_gamma){
		free(d->display.orig_gamma);
		d->display.orig_gamma = NULL;
	}

	debug_print("(%d) freeing display connector", (int)d->id);
	drmModeFreeConnector(d->display.con);
	d->display.con = NULL;
	d->display.con_id = -1;
	d->display.mode_set = -1;

	drmModeFreeCrtc(d->display.old_crtc);
	d->display.old_crtc = NULL;

/* d->device = NULL; */
	d->state = DISP_UNUSED;

	if (d->backlight){
		debug_print("(%d) resetting display backlight", (int)d->id);
		backlight_set_brightness(d->backlight, d->backlight_brightness);
		backlight_destroy(d->backlight);
		d->backlight = NULL;
	}
}
3213
platform_video_dimensions()3214 struct monitor_mode platform_video_dimensions()
3215 {
3216 struct monitor_mode res = {
3217 .width = egl_dri.canvasw,
3218 .height = egl_dri.canvash
3219 };
3220
3221 /*
3222 * this is done to work around how gl- agp handles scissoring as there's no
3223 * version of platform_video_dimension that worked for the display out
3224 */
3225 if (egl_dri.last_display && egl_dri.last_display->display.mode_set != -1){
3226 res.width = egl_dri.last_display->display.mode.hdisplay;
3227 res.height = egl_dri.last_display->display.mode.vdisplay;
3228 if (egl_dri.last_display->display.con){
3229 res.phy_width = egl_dri.last_display->display.con->mmWidth;
3230 res.phy_height = egl_dri.last_display->display.con->mmHeight;
3231 }
3232 }
3233 /*
3234 * fake dimensions to provide an OK default PPCM (say 72)
3235 */
3236 if (!res.phy_width)
3237 res.phy_width = (float)egl_dri.canvasw / (float)28.34645 * 10.0;
3238
3239 if (!res.phy_height)
3240 res.phy_height = (float)egl_dri.canvash / (float)28.34645 * 10.0;
3241
3242 return res;
3243 }
3244
/*
 * Resolve a GL/EGL symbol by name through the primary node's EGL
 * get_proc_address entry point.
 */
void* platform_video_gfxsym(const char* sym)
{
	return nodes[0].eglenv.get_proc_address(sym);
}
3249
do_led(struct dispout * disp,uint8_t val)3250 static void do_led(struct dispout* disp, uint8_t val)
3251 {
3252 if (disp && disp->backlight){
3253 float lvl = (float) val / 255.0;
3254 float max_brightness = backlight_get_max_brightness(disp->backlight);
3255 backlight_set_brightness(disp->backlight, lvl * max_brightness);
3256 }
3257 }
3258
3259 /* read-end of ledpair pipe is in nonblocking, so just run through
3260 * it and update the corresponding backlights */
flush_leds()3261 static void flush_leds()
3262 {
3263 if (egl_dri.ledid < 0)
3264 return;
3265
3266 uint8_t buf[2];
3267 while (2 == read(egl_dri.ledpair[0], buf, 2)){
3268 switch (tolower(buf[0])){
3269 case 'A': egl_dri.ledind = 255; break;
3270 case 'a': egl_dri.ledind = buf[1]; break;
3271 case 'r': egl_dri.ledval[0] = buf[1]; break;
3272 case 'g': egl_dri.ledval[1] = buf[1]; break;
3273 case 'b': egl_dri.ledval[2] = buf[1]; break;
3274 case 'i': egl_dri.ledval[0] = egl_dri.ledval[1]=egl_dri.ledval[2]=buf[1];
3275 case 'c':
3276 /*
3277 * don't expose an RGB capable backlight (are there such displays out there?
3278 * the other option would be to weight the gamma channel of the ramps but if
3279 * someone wants that behavior it is probably better to change all the ramps
3280 */
3281 if (egl_dri.ledind != 255)
3282 do_led(get_display(egl_dri.ledind), egl_dri.ledval[0]);
3283 else
3284 for (size_t i = 0; i < MAX_DISPLAYS; i++)
3285 do_led(get_display(i), buf[1]);
3286 break;
3287 }
3288 }
3289 }
3290
/*
 * Configure a device node slot [dst_ind] from the supplied draw/display
 * descriptors using buffer transfer mechanism [method], then (unless
 * ignore_display) allocate, modeset (connid/w/h) and buffer-setup its first
 * display. On failure the card slot is released (which also covers the
 * descriptors). Returns true when the node (and display) is usable.
 */
static bool try_node(int draw_fd, int disp_fd, const char* pathref,
	int dst_ind, enum buffer_method method, int connid, int w, int h,
	bool ignore_display)
{
/* set default lookup function if none has been provided */
	if (!nodes[dst_ind].eglenv.get_proc_address){
		nodes[dst_ind].eglenv.get_proc_address =
			(PFNEGLGETPROCADDRESSPROC)eglGetProcAddress;
	}
	struct dev_node* node = &nodes[dst_ind];
	node->active = true;
	map_egl_functions(&node->eglenv, lookup, NULL);
	map_eglext_functions(
		&node->eglenv, lookup_call, node->eglenv.get_proc_address);

	switch (method){
	case BUF_GBM:
		if (0 != setup_node_gbm(dst_ind, node, draw_fd, disp_fd)){
			node->eglenv.get_proc_address = NULL;
			debug_print("couldn't open (%d:%s) in GBM mode",
				draw_fd, pathref ? pathref : "(no path)");
			release_card(dst_ind);
			return false;
		}
		break;
	case BUF_STREAM:
		if (0 != setup_node_egl(dst_ind, node, disp_fd)){
			debug_print("couldn't open (%d:%s) in EGLStreams mode",
				draw_fd, pathref ? pathref : "(no path)");
			release_card(dst_ind);
			return false;
		}
		break;
	case BUF_HEADLESS:
		break;
	}

	if (!setup_node(node)){
		debug_print("setup/configure [%d](%d:%s)",
			dst_ind, draw_fd, pathref ? pathref : "(no path)");
		release_card(dst_ind);
		return false;
	}

/* used when we already have one */
	if (ignore_display)
		return true;

	struct dispout* d = allocate_display(node);

/* fix: allocate_display can return NULL when all display slots are taken
 * (it is checked everywhere else); previously dereferenced unconditionally */
	if (!d){
		debug_print("no free display slots for card");
		release_card(dst_ind);
		return false;
	}

	d->display.primary = dst_ind == 0;
	egl_dri.last_display = d;

	if (setup_kms(d, connid, w, h) != 0){
		disable_display(d, true);
		debug_print("card found, but no working/connected display");
		release_card(dst_ind);
		return false;
	}

	if (setup_buffers(d) == -1){
		disable_display(d, true);
		release_card(dst_ind);
		return false;
	}

	d->backlight = backlight_init(d->device->card_id,
		d->display.con->connector_type, d->display.con->connector_type_id);

	if (d->backlight)
		d->backlight_brightness = backlight_get_brightness(d->backlight);

	return true;
}
3364
3365 /*
3366 * config/profile matching derived approach, for use when something more
3367 * specific and sophisticated is desired
3368 */
try_card(size_t devind,int w,int h,size_t * dstind)3369 static bool try_card(size_t devind, int w, int h, size_t* dstind)
3370 {
3371 uintptr_t tag;
3372 cfg_lookup_fun get_config = platform_config_lookup(&tag);
3373 char* dispdevstr, (* cfgstr), (* altstr);
3374 int connind = -1;
3375
3376 bool gbm = true;
3377
3378 /* basic device, device_1, device_2 etc. search path */
3379 if (!get_config("video_display_device", devind, &dispdevstr, tag))
3380 return false;
3381
3382 char* drawdevstr = NULL;
3383 get_config("video_draw_device", devind, &drawdevstr, tag);
3384
3385 /* reference to another card_id, only one is active at any one moment
3386 * and card_1 should reference card_2 and vice versa. */
3387 if (get_config("video_device_alternate", devind, &cfgstr, tag)){
3388 /* sweep from devind down to 0 and see if there is a card with the
3389 * specified path, if so, open but don't activate this one */
3390 }
3391
3392 if (get_config("video_device_buffer", devind, &cfgstr, tag)){
3393 if (strcmp(cfgstr, "streams") == 0){
3394 gbm = false;
3395 debug_print("device_buffer forced to EGLstreams");
3396 }
3397 else {
3398 debug_print("device_buffer fallback to GBM");
3399 }
3400 free(cfgstr);
3401 }
3402
3403 /* reload any possible library references */
3404 if (nodes[devind].agplib){
3405 free(nodes[devind].agplib);
3406 nodes[devind].agplib = NULL;
3407 }
3408 if (nodes[devind].egllib){
3409 free(nodes[devind].egllib);
3410 nodes[devind].egllib = NULL;
3411 }
3412 get_config("video_device_egllib", devind, &nodes[devind].egllib, tag);
3413 get_config("video_device_agplib", devind, &nodes[devind].agplib, tag);
3414
3415 /* hard- connector index set */
3416 if (get_config("video_device_connector", devind, &cfgstr, tag)){
3417 connind = strtol(cfgstr, NULL, 10) % INT_MAX;
3418 free(cfgstr);
3419 }
3420
3421 nodes[devind].wait_connector =
3422 get_config("video_device_wait", devind, NULL, tag);
3423
3424 int dispfd = platform_device_open(dispdevstr, O_RDWR | O_CLOEXEC);
3425 int drawfd = (drawdevstr ?
3426 platform_device_open(drawdevstr, O_RDWR | O_CLOEXEC) : dispfd);
3427
3428 if (try_node(drawfd, dispfd, dispdevstr,
3429 *dstind, gbm ? BUF_GBM : BUF_STREAM, connind, w, h, false)){
3430 debug_print("card at %d added", *dstind);
3431 nodes[*dstind].pathref = dispdevstr;
3432 nodes[*dstind].card_id = *dstind;
3433 *dstind++;
3434 return true;
3435 }
3436 /* don't need to close the disp/draw descriptors here, it is done in try_node */
3437 else{
3438 free(nodes[devind].egllib);
3439 free(nodes[devind].agplib);
3440 nodes[devind].egllib = nodes[devind].agplib = NULL;
3441 free(dispdevstr);
3442 free(drawdevstr);
3443 return false;
3444 }
3445 }
3446
setup_cards_db(int w,int h)3447 static bool setup_cards_db(int w, int h)
3448 {
3449 size_t dstind = 0;
3450 for (size_t devind = 0; devind < COUNT_OF(nodes); devind++){
3451 if (!try_card(devind, w, h, &dstind))
3452 return dstind > 0;
3453 dstind++;
3454 }
3455
3456 return dstind > 0;
3457 }
3458
3459 /*
3460 * naive approach - for when there's no explicit configuration set.
3461 * This just globs a preset/os-specific path and takes the first
3462 * device that appears and can be opened.
3463 */
setup_cards_basic(int w,int h)3464 static bool setup_cards_basic(int w, int h)
3465 {
3466 #define DEVICE_PATH "/dev/dri/card%zu"
3467
3468 /* sweep as there might be more GPUs but without any connected
3469 * display, indicating that it's not a valid target for autodetect. */
3470 for (size_t i = 0; i < 4; i++){
3471 char buf[sizeof(DEVICE_PATH)];
3472 snprintf(buf, sizeof(buf), DEVICE_PATH, i);
3473 int fd = platform_device_open(buf, O_RDWR | O_CLOEXEC);
3474 debug_print("trying [basic/auto] setup on %s", buf);
3475 if (-1 != fd){
3476 /* on Linux (basically only place that'd be relevant for now) we could
3477 * check proc/modules for nvidia, then go the sysfs route for the card node,
3478 * grab the vendor string and switch to streams that way - if so we could
3479 * dynamically load the GL libs etc. */
3480 if (try_node(fd, fd, buf, 0, BUF_GBM, -1, w, h, false) ||
3481 try_node(fd, fd, buf, 0, BUF_GBM, -1, w, h, false)){
3482 nodes[0].pathref = strdup(buf);
3483 return true;
3484 }
3485 else{
3486 debug_print("node setup failed");
3487 close(fd);
3488 }
3489 }
3490 else
3491 debug_print("could not open %s - %s", buf, strerror(errno));
3492 }
3493
3494 /* in the no-dealloc state we still want to remember which CRTCs etc were
3495 * set as those might have been changed as part of a modeset request */
3496 return false;
3497 }
3498
/*
 * Hook invoked before privilege separation / full init; this backend has
 * nothing to prepare at that stage, so it is intentionally empty.
 */
void platform_video_preinit()
{
}
3502
/*
 * Bring up the video platform: probe cards (configuration database first,
 * then naive autodetection), map the last discovered display as the world
 * output and announce it to the event layer.
 *
 * w/h seed the preferred canvas dimensions; bpp, fs, frames and title are
 * unused by this body (kept for the shared platform API signature).
 * Returns true when at least one display was mapped.
 */
bool platform_video_init(uint16_t w, uint16_t h,
	uint8_t bpp, bool fs, bool frames, const char* title)
{
	bool rv = false;
	struct sigaction old_sh;
	struct sigaction err_sh = {
		.sa_handler = sigsegv_errmsg
	};

/* spin here so a debugger can be attached and flip 'spinwait' */
	if (getenv("ARCAN_VIDEO_DEBUGSTALL")){
		volatile static bool spinwait = true;
		while (spinwait){}
	}

/*
 * init after recovery etc. won't need seeding
 */
	static bool seeded;
	if (!seeded){
		for (size_t i = 0; i < VIDEO_MAX_NODES; i++){
			nodes[i].disp_fd = -1;
			nodes[i].draw_fd = -1;
		}
		seeded = true;
	}

/*
 * temporarily override segmentation fault handler here because it has happened
 * in libdrm for a number of "user-managable" settings (i.e. drm locked to X,
 * wrong permissions etc.)
 */
	sigaction(SIGSEGV, &err_sh, &old_sh);

	if (setup_cards_db(w, h) || setup_cards_basic(w, h)){
/* last_display is set by try_node on the successful path above */
		struct dispout* d = egl_dri.last_display;
		set_display_context(d);
		egl_dri.canvasw = d->display.mode.hdisplay;
		egl_dri.canvash = d->display.mode.vdisplay;
		build_orthographic_matrix(d->projection, 0,
			egl_dri.canvasw, egl_dri.canvash, 0, 0, 1);
		memcpy(d->txcos, arcan_video_display.mirror_txcos, sizeof(float) * 8);
		d->vid = ARCAN_VIDEO_WORLDID;
		d->state = DISP_MAPPED;
		debug_print("(%d) mapped/default display at %zu*%zu",
			(int)d->id, (size_t)egl_dri.canvasw, (size_t)egl_dri.canvash);

		setup_backlight_ledmap();
/*
 * send a first 'added' event for display tracking as the
 * primary / connected display will not show up in the rescan
 */
		arcan_event ev = {
			.category = EVENT_VIDEO,
			.vid.kind = EVENT_VIDEO_DISPLAY_ADDED,
			.vid.ledctrl = egl_dri.ledid,
			.vid.cardid = d->device->card_id
		};
		arcan_event_enqueue(arcan_event_defaultctx(), &ev);
		platform_video_query_displays();
		rv = true;
	}

/* restore whatever SIGSEGV disposition was active before probing */
	sigaction(SIGSEGV, &old_sh, NULL);
	return rv;
}
3568
/* guards against re-entry while a prepare/restore external cycle is active;
 * NOTE(review): only cleared in this translation unit's visible code - the
 * set-to-true side presumably lives in the VT-switch path, confirm */
static bool in_external;

/*
 * Reset the video platform by bouncing through the external-suspend path
 * (prepare + restore), which releases and reacquires GPU resources.
 * [id]/[swap] select a GPU swap scenario; that part is not yet implemented.
 */
void platform_video_reset(int id, int swap)
{
/* protect against some possible circular calls with VT switch invoked
 * while we are also trying to already go external */
	if (in_external)
		return;

	arcan_video_prepare_external(true);

/* at this stage, all the GPU resources should be non-volatile */
	if (id != -1 || swap){
/* these case are more difficult as we also need to modify vstore affinity
 * masks (so we don't synch to multiple GPUs), and verify that the swapped
 * in GPU actually works */
	}

/* this is slightly incorrect as the agp_init function- environment,
 * ideally we'd also terminate the agp environment and rebuild on the
 * new / different card */
	arcan_video_restore_external(true);

	in_external = false;
}
3593
3594 /*
3595 * for recovery, first emit a display added for the default 0 display, then
3596 * interate all already known displays and do the same. Lastly, do a rescan for
3597 * good measure
3598 */
platform_video_recovery()3599 void platform_video_recovery()
3600 {
3601 arcan_event ev = {
3602 .category = EVENT_VIDEO,
3603 .vid.kind = EVENT_VIDEO_DISPLAY_ADDED,
3604 .vid.ledctrl = egl_dri.ledid
3605 };
3606 debug_print("video_recovery, injecting 'added' for mapped displays");
3607 arcan_evctx* evctx = arcan_event_defaultctx();
3608 arcan_event_enqueue(evctx, &ev);
3609
3610 for (size_t i = 0; i < MAX_DISPLAYS; i++){
3611 if (displays[i].state == DISP_MAPPED){
3612 platform_video_map_display(
3613 ARCAN_VIDEO_WORLDID, displays[i].id, HINT_NONE);
3614 displays[i].vid = ARCAN_VIDEO_WORLDID;
3615 ev.vid.displayid = displays[i].id;
3616 ev.vid.cardid = displays[i].device->card_id;
3617 arcan_event_enqueue(evctx, &ev);
3618 }
3619 }
3620
3621 /*
3622 * commented as it is quite expensive and shouldn't be necessary in short
3623 * resets, and the VT switching now has full card rebuild
3624 * platform_video_query_displays();
3625 */
3626
3627 /* rebuild so that we guaranteed have a rendertarget */
3628 arcan_video_display.no_stdout = false;
3629 arcan_video_resize_canvas(displays[0].dispw, displays[0].disph);
3630 }
3631
/*
 * drmHandleEvent callback: a queued page-flip for the display in [data] has
 * completed at frame/sec/usec. Clears the in-flip state, recycles the GBM
 * buffer object that just left the screen, optionally completes a deferred
 * switch to a single dumb framebuffer, and feeds the next deadline to the
 * conductor.
 */
static void page_flip_handler(int fd, unsigned int frame,
	unsigned int sec, unsigned int usec, void* data)
{
	struct dispout* d = data;
	d->buffer.in_flip = 0;
	TRACE_MARK_ONESHOT("egl-dri", "flip-ack", TRACE_SYS_DEFAULT, d->id, frame, "flip");

	verbose_print("(%d) flip(frame: %u, @ %u.%u)", (int) d->id, frame, sec, usec);

	switch(d->device->buftype){
	case BUF_GBM:{

/* won't happen first frame or to-from dumb transition */
		if (d->buffer.cur_bo)
			gbm_surface_release_buffer(d->buffer.surface, d->buffer.cur_bo);

/* with in_dumb_set we have waited for flip to be released, then we will switch
 * to the 'dumb' flip handler in the future */
		if (d->buffer.in_dumb_set){
			debug_print("(%d) page-flip, switch to single-dumb buffer", d->id);
			struct agp_vstore* buf = &d->buffer.dumb.agp;

/* create the framebuffer tied to the dumb buffer and set to the Crtc,
 * if it fails just continue as 'normal' without the dumb buffer */
			if (0 == drmModeAddFB(d->device->disp_fd, buf->w, buf->h, 24, 32,
				buf->vinf.text.stride, buf->vinf.text.handle, &d->buffer.dumb.fb)){
				d->buffer.cur_bo = NULL;

/* dumb scanout has no flip events to wait on, so stop expecting vsynch */
				d->device->vsynch_method = VSYNCH_IGNORE;

				drmModeSetCrtc(d->device->disp_fd, d->display.crtc,
					d->buffer.dumb.fb, 0, 0, &d->display.con_id, 1, &d->display.mode);

				debug_print("(%d) dumb-fb on crtc", d->id);
			}
			else {
/* revert to the normal double-buffered GBM path on AddFB failure */
				debug_print("(%d) couldn't add dumb-fb, revert", d->id);
				release_dumb_fb(d);
				d->buffer.cur_bo = d->buffer.next_bo;
			}

			d->buffer.in_dumb_set = false;
		}
		else
			d->buffer.cur_bo = d->buffer.next_bo;
		d->buffer.next_bo = NULL;

		verbose_print("(%d) gbm-bo, release %"PRIxPTR" with %"PRIxPTR,
			(int)d->id, (uintptr_t) d->buffer.cur_bo, (uintptr_t) d->buffer.next_bo);
	}
	break;
	case BUF_STREAM:
	break;
	case BUF_HEADLESS:
	break;
	}

/* let the conductor know when this display next needs servicing */
	arcan_conductor_deadline(deadline_for_display(d));
}
3690
dirty_displays()3691 static bool dirty_displays()
3692 {
3693 struct dispout* d;
3694 int i = 0;
3695
3696 while((d = get_display(i++))){
3697 arcan_vobject* vobj = arcan_video_getobject(d->vid);
3698 if (!vobj)
3699 continue;
3700
3701 struct rendertarget* tgt = arcan_vint_findrt(vobj);
3702 if (!tgt)
3703 continue;
3704
3705 if (d->frame_cookie != tgt->frame_cookie){
3706 verbose_print("(%d) frame-cookie (%zu) "
3707 "changed to (%zu)", d->id, d->frame_cookie, tgt->frame_cookie);
3708 return true;
3709 }
3710 }
3711
3712 return false;
3713 }
3714
/*
 * Return true when any display (or, with primary_only, any display marked
 * as a primary synch target) still has a page-flip in flight.
 */
static bool get_pending(bool primary_only)
{
	int ind = 0;
	bool found = false;
	struct dispout* cur;

	while ((cur = get_display(ind++))){
		if (primary_only && !cur->display.primary)
			continue;

/* in_flip is used as a 0/1 flag elsewhere in this file */
		if (cur->buffer.in_flip > 0)
			found = true;
	}

	return found;
}
3728
3729 /*
3730 * Real synchronization work is in this function. Go through all mapped
3731 * displays and wait for any pending events to finish, or the specified
3732 * timeout(ms) to elapse.
3733 *
3734 * Timeout is typically used for shutdown / cleanup operations where
3735 * normal background processing need to be ignored anyhow.
3736 */
flush_display_events(int timeout,bool yield)3737 static void flush_display_events(int timeout, bool yield)
3738 {
3739 struct dispout* d;
3740 verbose_print("flush display events, timeout: %d", timeout);
3741
3742 unsigned long long start = arcan_timemillis();
3743
3744 int period = 4;
3745 if (timeout > 0){
3746 period = timeout;
3747 }
3748 /* only flush, don't iterate */
3749 else if (timeout == -1){
3750 period = 0;
3751 }
3752
3753 /*
3754 * NOTE: recent versions of DRM has added support to let us know which CRTC
3755 * actually provided a synch signal (through a .page_flip_handler2). When this
3756 * is more wide-spread, we should really switch to that kind of a system
3757 * because this is horrid.
3758 *
3759 * [SCANOUT-note] this is also where we have a .vblank_handler that would let
3760 * us re-raster / synch any directly mapped vobjs to the output.
3761 */
3762 drmEventContext evctx = {
3763 .version = DRM_EVENT_CONTEXT_VERSION,
3764 .page_flip_handler = page_flip_handler
3765 };
3766
3767 do{
3768 /* MULTICARD> for multiple cards, extend this pollset */
3769 struct pollfd fds = {
3770 .fd = nodes[0].disp_fd,
3771 .events = POLLIN | POLLERR | POLLHUP
3772 };
3773
3774 int rv = poll(&fds, 1, period);
3775 if (rv == 1){
3776 /* If we get HUP on a card we have open, it is basically as bad as a fatal
3777 * state, unless - we support hotplugging multi-GPUs, then that decision needs
3778 * to be re-evaluated as it is essentially a drop_card + drop all displays */
3779 if (fds.revents & (POLLHUP | POLLERR)){
3780 debug_print("(card-fd %d) broken/recovery missing", (int) nodes[0].disp_fd);
3781 arcan_fatal("GPU device lost / broken");
3782 }
3783 else
3784 drmHandleEvent(nodes[0].disp_fd, &evctx);
3785
3786 /* There is a special property here, 'PRIMARY'. The displays with this property
3787 * set are being used for synch, with the rest - we just accept the possibility
3788 * of tearing or ignore that display for another frame (extremes like 59.xx Hz
3789 * display main and a 30Hz secondary output) */
3790 }
3791 else if (yield) {
3792 /*
3793 * With VFR changes, we should start passing the responsibility for dealing with
3794 * synch period and timeout here before proceeding with the next pass / cycle.
3795 */
3796 int yv = arcan_conductor_yield(NULL, 0);
3797 if (-1 == yv)
3798 break;
3799 else
3800 period = yv;
3801 }
3802 }
3803 /* 3 possible timeouts: exit directly, wait indefinitely, wait for fixed period */
3804 while (timeout != -1 && get_pending(true) &&
3805 (!timeout || (timeout && arcan_timemillis() - start < timeout)));
3806 }
3807
/*
 * Drain control messages from the privileged parent/device supervisor and
 * act on them: connection loss or explicit termination enqueue a system
 * exit, a TTY switch request releases the GPU and blocks until a restore
 * arrives, and display-change notices are forwarded as video events.
 */
static void flush_parent_commands()
{
/* Changed but not enough information to actually specify which card we need
 * to rescan in order for the changes to be detected. This needs cooperation
 * with the scripts anyhow as they need to rate-limit / invoke rescan. This
 * only takes care of an invalid or severed connection, moving device disc.
 * to a supervisory process would also require something in event.c */
	int pv = platform_device_poll(NULL);
	switch(pv){
	case -1:
/* lost the parent connection entirely - treat as fatal and shut down */
		debug_print("parent connection severed");
		arcan_event_enqueue(arcan_event_defaultctx(), &(struct arcan_event){
			.category = EVENT_SYSTEM,
			.sys.kind = EVENT_SYSTEM_EXIT,
			.sys.errcode = EXIT_FAILURE
		});
	break;
	case 5:
/* explicit kill request from the parent - same exit path as severed */
		debug_print("parent requested termination");
		arcan_event_enqueue(arcan_event_defaultctx(), &(struct arcan_event){
			.category = EVENT_SYSTEM,
			.sys.kind = EVENT_SYSTEM_EXIT,
			.sys.errcode = EXIT_FAILURE
		});
	break;
	case 4:
/* restore / rebuild out of context */
		debug_print("received restore while not in suspend state");
	break;
	case 3:{
/* suspend / release, if the parent connection is severed while in this
 * state, we'll leave it to restore external to shutdown */
		debug_print("received tty switch request");

/* first release current resources, then ack the release on the tty */
		arcan_video_prepare_external(false);
		platform_device_release("TTY", -1);

		int sock = platform_device_pollfd();

/* block here until the parent tells us to come back (4) or goes away (-1) */
		while (true){
			poll(&(struct pollfd){
				.fd = sock, .events = POLLIN | POLLERR | POLLHUP | POLLNVAL} , 1, -1);

			pv = platform_device_poll(NULL);

			if (pv == 4 || pv == -1){
				debug_print("received restore request (%d)", pv);
				arcan_video_restore_external(false);
				break;
			}
		}
	}
	break;
	case 2:
/* new display event */
		arcan_event_enqueue(arcan_event_defaultctx(), &(struct arcan_event){
			.category = EVENT_VIDEO,
			.vid.kind = EVENT_VIDEO_DISPLAY_CHANGED,
		});
	break;
	default:
	break;
	}
}
3873
3874 /*
3875 * A lot more work / research is needed on this one to be able to handle all
3876 * weird edge-cases depending on the mapped displays and priorities (powersave?
3877 * tearfree? lowest possible latency in regards to other external clocks etc.)
3878 * - especially when mixing in future synch models that don't require VBlank
3879 */
/*
 * Run one full synchronization pass: flush deferred display destruction,
 * rescan cards if requested (rate-limited), refresh the video pipeline and
 * push dirty output to each mapped display, then service backlight LEDs and
 * any pending requests from the privileged parent process.
 */
void platform_video_synch(
	uint64_t tick_count, float fract, video_synchevent pre, video_synchevent post)
{
	if (pre)
		pre();

/*
 * Destruction of hotplugged displays are deferred to this stage as it is hard
 * to know when it is actually safe to do in other places in the pipeline.
 * Wait until the queued transfers to a display have been finished before it is
 * put down.
 */
	int i = 0;
	struct dispout* d;
	while (egl_dri.destroy_pending){
		flush_display_events(30, true);
		int ind = __builtin_ffsll(egl_dri.destroy_pending) - 1;
		debug_print("synch, destroy display %d", ind);
		disable_display(&displays[ind], true);

/* destroy_pending is a 64-bit mask (see __builtin_ffsll above) - (1 << ind)
 * is undefined behavior for ind >= 31 and could never clear the high bits */
		egl_dri.destroy_pending &= ~(1ull << ind);
	}

/* Some strategies might leave us with a display still in pending flip state
 * even though it has finished by now. If we don't flush those out, they will
 * skip updating one frame, so do a quick no-yield flush first */
	if (get_pending(false))
		flush_display_events(-1, false);

/*
 * Rescanning displays is binned to this as well along with a rate limiting
 * timeout. This is to mitigate storms from KVMs plugging in multiple displays
 * or display events causing appl to reissue scan causing events causing new
 * scans.
 */
	unsigned long long ts = arcan_timemillis();

	if (egl_dri.scan_pending &&
		((ts < egl_dri.last_card_scan) ||
		 (ts - egl_dri.last_card_scan) > CARD_RESCAN_DELAY_MS)){
		egl_dri.scan_pending = false;
		egl_dri.last_card_scan = arcan_timemillis();

		for (size_t j = 0; j < COUNT_OF(nodes); j++){
			debug_print("query_card: %zu", j);
			if (nodes[j].disp_fd != -1)
				query_card(&nodes[j]);
		}
	}

/* the 'nd' here is much too coarse-grained, we need the affected objects and
 * rendertargets so that we can properly decide which ones to synch or not -
 * this is basically a left-over from old / naive design */
	size_t nd;
	uint32_t cost_ms = arcan_vint_refresh(fract, &nd);

/*
 * At this stage, the contents of all RTs have been synched, with nd == 0,
 * nothing has changed from what was draw last time.
 */
	arcan_bench_register_cost( cost_ms );

	bool clocked = false;
	bool updated = false;
	int method = 0;

/*
 * If we have a real update, the display timing will request a deadline based
 * on whatever display that was updated so we can go with that
 */
	if (nd > 0 || dirty_displays()){
		while ( (d = get_display(i++)) ){
			if (d->state == DISP_MAPPED && d->buffer.in_flip == 0){
				updated |= update_display(d);
				clocked |= d->device->vsynch_method == VSYNCH_CLOCK;
			}
		}
/*
 * Finally check for the callbacks, synchronize with the conductor and so on
 * the clocked is a failsafe for devices that don't support giving a vsynch
 * signal.
 */
		if (get_pending(false) || updated)
			flush_display_events(clocked ? 16 : 0, true);
	}

/*
 * If there are no updates, just 'fake' synch against the shortest display
 * frame time (i.e. the highest vrefresh among mapped displays, 60Hz floor)
 * unless the yield function tells us to run in a processing- like state
 * (useful for displayless like processing).
 */
	else {
		float refresh = 60.0;
		i = 0;
		while ((d = get_display(i++))){
			if (d->state == DISP_MAPPED){
				if (d->display.mode.vrefresh && d->display.mode.vrefresh > refresh)
					refresh = d->display.mode.vrefresh;
			}
		}

/*
 * The other option would be to to set left as the deadline here, but that
 * makes the platform even worse when it comes to testing strategies etc.
 */
		int left = 1000.0f / refresh;
		arcan_conductor_deadline(-1);
		arcan_conductor_fakesynch(left);
	}

/*
 * The LEDs that are mapped as backlights via the internal pipe-led protocol
 * needs to be flushed separately, here is a decent time to get that out of the
 * way. [HDR-note] this might be insufficient for the HDR related backlight
 * control interfaces that seem to take a different path.
 */
	flush_leds();

/*
 * Since we outsource device access to a possibly privileged layer, here is
 * the time to check for requests from the parent itself.
 */
	flush_parent_commands();

	if (post)
		post();
}
4006
platform_video_auth(int cardn,unsigned token)4007 bool platform_video_auth(int cardn, unsigned token)
4008 {
4009 int fd = platform_video_cardhandle(cardn, NULL, NULL, NULL);
4010 if (fd != -1){
4011 bool auth_ok = drmAuthMagic(fd, token);
4012 debug_print("requested auth of (%u) on card (%d)", token, cardn);
4013 return auth_ok;
4014 }
4015 else
4016 return false;
4017 }
4018
platform_video_shutdown()4019 void platform_video_shutdown()
4020 {
4021 int rc = 10;
4022
4023 do{
4024 for(size_t i = 0; i < MAX_DISPLAYS; i++){
4025 unsigned long long start = arcan_timemillis();
4026 disable_display(&displays[i], true);
4027 debug_print("shutdown (%zu) took %d ms", i, (int)(arcan_timemillis() - start));
4028 }
4029 flush_display_events(30, false);
4030 } while (egl_dri.destroy_pending && rc-- > 0);
4031
4032 for (size_t i = 0; i < sizeof(nodes)/sizeof(nodes[0]); i++)
4033 release_card(i);
4034 }
4035
platform_video_capstr()4036 const char* platform_video_capstr()
4037 {
4038 static char* buf;
4039 static size_t buf_sz;
4040
4041 if (buf){
4042 free(buf);
4043 buf = NULL;
4044 }
4045
4046 FILE* stream = open_memstream(&buf, &buf_sz);
4047 if (!stream)
4048 return "platform/egl-dri capstr(), couldn't create memstream\n";
4049
4050 const char* vendor = (const char*) glGetString(GL_VENDOR);
4051 const char* render = (const char*) glGetString(GL_RENDERER);
4052 const char* version = (const char*) glGetString(GL_VERSION);
4053 const char* shading = (const char*)glGetString(GL_SHADING_LANGUAGE_VERSION);
4054 const char* exts = (const char*) glGetString(GL_EXTENSIONS);
4055
4056 const char* eglexts = "";
4057 struct dispout* disp = get_display(0);
4058
4059 if (disp){
4060 eglexts = (const char*)disp->device->eglenv.query_string(
4061 disp->device->display, EGL_EXTENSIONS);
4062 dump_connectors(stream, disp->device, true);
4063 }
4064 fprintf(stream, "Video Platform (EGL-DRI)\n"
4065 "Vendor: %s\nRenderer: %s\nGL Version: %s\n"
4066 "GLSL Version: %s\n\n Extensions Supported: \n%s\n\n"
4067 "EGL Extensions supported: \n%s\n\n",
4068 vendor, render, version, shading, exts, eglexts);
4069
4070 fclose(stream);
4071
4072 return buf;
4073 }
4074
/* Expose the platform-specific environment/config option table */
const char** platform_video_envopts()
{
	return (const char**) egl_envopts;
}
4079
/*
 * Query or change the DPMS (power-management) state of a display.
 * ADPMS_IGNORE acts as a getter; any other state is applied only when it
 * differs from the current one. Returns the resulting state, or
 * ADPMS_IGNORE for an unknown/unmapped display.
 */
enum dpms_state platform_video_dpms(
	platform_display_id disp, enum dpms_state state)
{
	struct dispout* out = get_display(disp);
	if (!out || out->state <= DISP_KNOWN)
		return ADPMS_IGNORE;

	if (state == ADPMS_IGNORE)
		return out->display.dpms;

	if (state != out->display.dpms){
/* log message typo fixed: 'dmps' -> 'dpms' */
		debug_print("dpms (%d) change to (%d)", (int)disp, state);
		dpms_set(out, adpms_to_dpms(state));
	}

	out->display.dpms = state;
	return state;
}
4098
platform_video_decay()4099 size_t platform_video_decay()
4100 {
4101 size_t ret = egl_dri.decay;
4102 egl_dri.decay = 0;
4103 return ret;
4104 }
4105
direct_scanout_alloc(struct agp_rendertarget * tgt,struct agp_vstore * vs,int action,void * tag)4106 static bool direct_scanout_alloc(
4107 struct agp_rendertarget* tgt, struct agp_vstore* vs, int action, void* tag)
4108 {
4109 struct dispout* display = tag;
4110 struct agp_fenv* env = agp_env();
4111
4112 if (action == RTGT_ALLOC_FREE){
4113 debug_print("scanout_free:display=%d", (int) display->id);
4114 struct shmifext_color_buffer* buf =
4115 (struct shmifext_color_buffer*) vs->vinf.text.handle;
4116
4117 /* slightly different to the one used in arcan/video.c as we keep the glid alive */
4118 if (buf){
4119 env->delete_textures(1, &vs->vinf.text.glid);
4120 display->device->eglenv.destroy_image(
4121 display->device->display, buf->alloc_tags[1]);
4122 gbm_bo_destroy(buf->alloc_tags[0]);
4123 buf->alloc_tags[0] = NULL;
4124 buf->alloc_tags[1] = NULL;
4125 free(buf);
4126 vs->vinf.text.glid = 0;
4127 vs->vinf.text.handle = 0;
4128 }
4129 else {
4130 agp_drop_vstore(vs);
4131 }
4132 }
4133 else if (action == RTGT_ALLOC_SETUP){
4134 debug_print("scanout_alloc:display=%d:w=%zu:h=%zu",
4135 (int) display->id, (size_t) display->dispw, (size_t) display->disph);
4136
4137 struct shmifext_color_buffer* buf =
4138 malloc(sizeof(struct shmifext_color_buffer));
4139 *buf = (struct shmifext_color_buffer){
4140 .id.gl = vs->vinf.text.glid
4141 };
4142
4143 if (!helper_alloc_color(env,
4144 &display->device->eglenv,
4145 display->device->buffer.gbm,
4146 display->device->display,
4147 buf,
4148 display->dispw,
4149 display->disph,
4150 display->buffer.format,
4151 4, /* becomes USE_SCANOUT */
4152 0, NULL
4153 )){
4154 debug_print("scanout_alloc:failed_fallback");
4155 agp_empty_vstore(vs, vs->w, vs->h);
4156 free(buf);
4157 }
4158 else{
4159 debug_print("scanout_alloc:ok:glid=%zu", (size_t) buf->id.gl);
4160 vs->vinf.text.glid = buf->id.gl;
4161 vs->vinf.text.handle = (uintptr_t) buf;
4162 }
4163 }
4164 return true;
4165 }
4166
platform_video_invalidate_map(struct agp_vstore * vstore,struct agp_region region)4167 void platform_video_invalidate_map(
4168 struct agp_vstore* vstore, struct agp_region region)
4169 {
4170 /* the same store could be mapped to multiple displays */
4171 for (size_t i = 0; i < MAX_DISPLAYS; i++){
4172 if (displays[i].state == DISP_UNUSED ||
4173 &displays[i].buffer.dumb.agp != vstore ||
4174 !displays[i].buffer.dumb.fb)
4175 continue;
4176
4177 drmModeClip reg = {
4178 .x1 = region.x1,
4179 .y1 = region.y1,
4180 .x2 = region.x2,
4181 .y2 = region.y2
4182 };
4183
4184 drmModeDirtyFB(
4185 displays[i].device->disp_fd, displays[i].buffer.dumb.fb, ®, 1);
4186 }
4187 }
4188
/*
 * Compatibility wrapper around the layered mapping interface: map 'vid'
 * as layer 0 on display 'id' at full opacity with the supplied hint.
 */
bool platform_video_map_display(
	arcan_vobj_id vid, platform_display_id id, enum blitting_hint hint)
{
	return platform_video_map_display_layer(vid, id, 0,
		(struct display_layer_cfg){
			.opacity = 1.0,
			.hint = hint
		}) >= 0;
}
4199
platform_video_map_display_layer(arcan_vobj_id id,platform_display_id disp,size_t layer,struct display_layer_cfg cfg)4200 ssize_t platform_video_map_display_layer(arcan_vobj_id id,
4201 platform_display_id disp, size_t layer, struct display_layer_cfg cfg)
4202
4203 {
4204 enum blitting_hint hint = cfg.hint;
4205 struct dispout* d = get_display(disp);
4206
4207 /* incomplete - should try and deal with cursor etc. */
4208 if (layer)
4209 return -1;
4210
4211 if (!d || d->state == DISP_UNUSED){
4212 debug_print(
4213 "map_display(%d->%d) attempted on unused disp", (int)id, (int)disp);
4214 return -1;
4215 }
4216
4217 /* we have a known but previously unmapped display, set it up */
4218 if (d->state == DISP_KNOWN){
4219 debug_print("map_display(%d->%d), known but unmapped", (int)id, (int)disp);
4220 if (setup_kms(d,
4221 d->display.con ? d->display.con->connector_id : -1,
4222 d->display.mode_set != -1 ? d->display.mode.hdisplay : 0,
4223 d->display.mode_set != -1 ? d->display.mode.vdisplay : 0) ||
4224 setup_buffers(d) == -1){
4225 debug_print("map_display(%d->%d) alloc/map failed", (int)id, (int)disp);
4226 return -1;
4227 }
4228 d->state = DISP_MAPPED;
4229 }
4230
4231 arcan_vobject* vobj = arcan_video_getobject(id);
4232 if (!vobj){
4233 debug_print("setting display(%d) to unmapped", (int) disp);
4234 d->display.dpms = ADPMS_OFF;
4235 d->vid = id;
4236 arcan_conductor_release_display(d->device->card_id, d->id);
4237
4238 return 0;
4239 }
4240
4241 /* remove any previous rendertarget scanout buffering */
4242 if (d->vid){
4243 arcan_vobject* old = arcan_video_getobject(id);
4244 struct rendertarget* tgt = NULL;
4245
4246 if (old)
4247 tgt = arcan_vint_findrt(vobj);
4248
4249 if (tgt)
4250 agp_rendertarget_dropswap(tgt->art);
4251 }
4252
4253 /* the more recent rpack- based mapping format could/should get special
4254 * consideration here as we could then raster into a buffer directly for a
4255 * non-GL scanout path, avoiding some of the possible driver fuzz and latency
4256 * */
4257 if (vobj->vstore->txmapped != TXSTATE_TEX2D){
4258 debug_print("map_display(%d->%d) rejected, source not a valid texture",
4259 (int) id, (int) disp);
4260 return -1;
4261 }
4262
4263 /* normal object may have origo in UL, WORLDID FBO in LL */
4264 float txcos[8];
4265 memcpy(txcos, vobj->txcos ? vobj->txcos :
4266 (vobj->vstore == arcan_vint_world() ?
4267 arcan_video_display.mirror_txcos :
4268 arcan_video_display.default_txcos), sizeof(float) * 8
4269 );
4270
4271 debug_print("map_display(%d->%d) ok @%zu*%zu+%zu,%zu, hint: %d",
4272 (int) id, (int) disp, (size_t) d->dispw, (size_t) d->disph,
4273 (size_t) d->dispx, (size_t) d->dispy, (int) hint);
4274
4275 d->frame_cookie = 0;
4276 d->display.primary = hint & HINT_FL_PRIMARY;
4277 memcpy(d->txcos, txcos, sizeof(float) * 8);
4278
4279 /* this kind of hint management is implemented through texture coordinate
4280 * tricks, on a direct mapping, the capabilities of the output needs to be
4281 * checked as well in the same 'sane direct vobj' style to see if there are
4282 * layer flags that can be set to avoid a forced composition pass */
4283 size_t iframes = 0;
4284 arcan_vint_applyhint(vobj, hint,
4285 txcos, d->txcos, &d->dispx, &d->dispy, &d->dispw, &d->disph, &iframes);
4286 arcan_video_display.ignore_dirty += iframes;
4287
4288 /* turn on the display on mapping if it isn't already */
4289 if (d->display.dpms == ADPMS_OFF){
4290 dpms_set(d, DRM_MODE_DPMS_ON);
4291 d->display.dpms = ADPMS_ON;
4292 }
4293
4294 if (hint & HINT_DIRECT)
4295 d->force_compose = false;
4296
4297 /* need to remove this from the mapping hint so that it doesn't
4298 * hit HINT_NONE tests */
4299 d->hint = hint & ~(HINT_FL_PRIMARY | HINT_DIRECT);
4300 d->vid = id;
4301 arcan_conductor_register_display(
4302 d->device->card_id, d->id, SYNCH_STATIC, d->display.mode.vrefresh, d->vid);
4303
4304 /* we might have messed around with the projection, rebuild it to be sure */
4305 struct rendertarget* newtgt = arcan_vint_findrt(vobj);
4306 release_dumb_fb(d);
4307
4308 if (newtgt){
4309 newtgt->inv_y = false;
4310
4311 build_orthographic_matrix(
4312 newtgt->projection, 0, vobj->origw, 0, vobj->origh, 0, 1);
4313
4314 if (!d->hint && !d->force_compose && sane_direct_vobj(vobj, "rtgt")){
4315 /* before swapping, set an allocator for the rendertarget so that we can ensure
4316 * that we allocate from scanout capable memory - note that in that case the
4317 * contents is invalidated and a new render pass on the target is needed. This
4318 * is not that problematic with the normal render loop as the map call will come
4319 * in a 'good enough' order. */
4320 bool swap;
4321 debug_print("(%d) setting up rtgt allocator for direct out");
4322 agp_rendertarget_allocator(newtgt->art, direct_scanout_alloc, d);
4323 (void*) agp_rendertarget_swap(newtgt->art, &swap);
4324 }
4325 }
4326 /* ok:
4327 * - handle based external backend with bo_use_scanout and the right
4328 * modifiers, if the object is ok but the allocation isn't, wait for
4329 * a resize and then retry- mapping
4330 *
4331 * - shm based backing where we can just blit into a dumb buffer
4332 *
4333 * - tui based contents where we can raster into a dumb buffer
4334 */
4335 else if (sane_direct_vobj(vobj, "simple_vid")){
4336 TRACE_MARK_ONESHOT("egl-dri", "dumb-bo", TRACE_SYS_DEFAULT, d->id, 0, "");
4337 debug_print("(%d) switching to dumb mode", d->id);
4338
4339 if (set_dumb_fb(d)){
4340 /* now swap in our 'real-object' - copy-blit as is for the first pass then
4341 * swap in our storage into the mapped source */
4342 struct arcan_frameserver* fsrv = vobj->feed.state.ptr;
4343
4344 struct agp_vstore* src = vobj->vstore;
4345 struct agp_vstore* dst = &d->buffer.dumb.agp;
4346
4347 /* refcount and track so it doesn't disappear before we have released */
4348 src->refcount++;
4349 d->buffer.dumb.ref = src;
4350
4351 /* ensure local copying when there is a buffer transfer */
4352 if (fsrv){
4353 if (!(fsrv->desc.hints & SHMIF_RHINT_TPACK))
4354 fsrv->flags.local_copy = true;
4355 src->dst_copy = dst;
4356
4357 /* is there a pre-existing local store? */
4358 if (src->vinf.text.raw)
4359 agp_vstore_copyreg(src, src->dst_copy, 0, 0, src->w, src->h);
4360 }
4361
4362 /* MISSING: setting locked flag and aligning to vsync can save tearing, should
4363 * not be needed for tui as we can just reraster from the source but that part
4364 * is not finished */
4365 }
4366 }
4367
4368 /* reset the 'force composition' output path, this may cost a frame
4369 * being slightly delayed on map operations should the direct-scanout
4370 * fail, causing force_composition to be set */
4371 uintptr_t tag;
4372 cfg_lookup_fun get_config = platform_config_lookup(&tag);
4373 d->force_compose = !(hint & HINT_DIRECT) &&
4374 !get_config("video_device_direct_scanout", 0, NULL, tag);
4375
4376 return 0;
4377 }
4378
drop_swapchain(struct dispout * d)4379 static void drop_swapchain(struct dispout* d)
4380 {
4381 arcan_vobject* vobj = arcan_video_getobject(d->vid);
4382 if (!vobj)
4383 return;
4384
4385 struct rendertarget* newtgt = arcan_vint_findrt(vobj);
4386 if (!newtgt)
4387 return;
4388
4389 /* this will also reset any allocator set */
4390 agp_rendertarget_dropswap(newtgt->art);
4391 arcan_video_display.ignore_dirty += 3;
4392 }
4393
draw_display(struct dispout * d)4394 static enum display_update_state draw_display(struct dispout* d)
4395 {
4396 bool swap_display = true;
4397 arcan_vobject* vobj = arcan_video_getobject(d->vid);
4398 agp_shader_id shid = agp_default_shader(BASIC_2D);
4399
4400 if (!d->buffer.in_dumb_set && d->buffer.dumb.enabled){
4401 return UPDATE_SKIP;
4402 }
4403
4404 /* if a rendertarget is mapped to the display, check so that that rtgt itself
4405 * has had actual drawing operations done to it - as the same target can be
4406 * mapped with different scissor rects yielding no change and so on */
4407 struct rendertarget* newtgt = arcan_vint_findrt(vobj);
4408 if (newtgt){
4409 size_t nd = agp_rendertarget_dirty(newtgt->art, NULL);
4410 verbose_print("(%d) draw display, dirty regions: %zu", nd);
4411 if (nd || newtgt->frame_cookie != d->frame_cookie){
4412 agp_rendertarget_dirty_reset(newtgt->art, NULL);
4413 }
4414 else{
4415 verbose_print("(%d) no dirty, skip");
4416 return UPDATE_SKIP;
4417 }
4418
4419 newtgt->frame_cookie = d->frame_cookie;
4420 }
4421
4422 /*
4423 * If the following conditions are valid, we can simply add the source vid
4424 * to the display directly, saving a full screen copy.
4425 */
4426 if (d->hint == HINT_NONE &&
4427 !d->force_compose && sane_direct_vobj(vobj, "rt_swap")){
4428 swap_display = false;
4429 goto out;
4430 }
4431
4432 /*
4433 * object invalid or mapped poorly, just reset to whatever the clear color is
4434 */
4435 if (!vobj) {
4436 agp_rendertarget_clear();
4437 goto out;
4438 }
4439 else{
4440 if (vobj->program > 0)
4441 shid = vobj->program;
4442
4443 agp_activate_vstore(
4444 d->vid == ARCAN_VIDEO_WORLDID ? arcan_vint_world() : vobj->vstore);
4445 }
4446
4447 if (d->skip_blit){
4448 verbose_print("(%d) skip draw, already composed", (int)d->id);
4449 d->skip_blit = false;
4450 }
4451 else {
4452 agp_shader_activate(shid);
4453 agp_shader_envv(PROJECTION_MATR, d->projection, sizeof(float)*16);
4454 agp_rendertarget_clear();
4455 agp_blendstate(BLEND_NONE);
4456 agp_draw_vobj(0, 0, d->dispw, d->disph, d->txcos, NULL);
4457 verbose_print("(%d) draw, shader: %d, %zu*%zu",
4458 (int)d->id, (int)shid, (size_t)d->dispw, (size_t)d->disph);
4459 }
4460 /*
4461 * another rough corner case, if we have a store that is not world ID but
4462 * shared with different texture coordinates (to extend display), we need to
4463 * draw the cursor .. but if the texture coordinates indicate that we only draw
4464 * a subset, we need to check if the cursor is actually inside that area...
4465 * Seems more and more that accelerated cursors add to more state explosion
4466 * than they are worth ..
4467 */
4468 if (vobj->vstore == arcan_vint_world()){
4469 arcan_vint_drawcursor(false);
4470 }
4471
4472 agp_deactivate_vstore();
4473
4474 out:
4475 if (swap_display){
4476 verbose_print("(%d) pre-swap", (int)d->id);
4477 d->device->eglenv.swap_buffers(d->device->display, d->buffer.esurf);
4478 verbose_print("(%d) swapped", (int)d->id);
4479 return UPDATE_FLIP;
4480 }
4481
4482 verbose_print("(%d) direct- path selected");
4483 return UPDATE_DIRECT;
4484 }
4485
update_display(struct dispout * d)4486 static bool update_display(struct dispout* d)
4487 {
4488 if (d->display.dpms != ADPMS_ON)
4489 return false;
4490
4491 /* we want to know how multiple- displays drift against eachother */
4492 d->last_update = arcan_timemillis();
4493
4494 /*
4495 * Make sure we target the right GL context
4496 * Notice that the context being set is that of the display buffer,
4497 * not the shared outer "headless" context.
4498 *
4499 * MULTIGPU> will also need to set the agp- current rendertarget
4500 */
4501 set_display_context(d);
4502 egl_dri.last_display = d;
4503
4504 /* activated- rendertarget covers scissor regions etc. so we want to reset */
4505 agp_blendstate(BLEND_NONE);
4506 agp_activate_rendertarget(NULL);
4507
4508 /*
4509 * Currently we only do binary damage / update tracking in that there are EGL
4510 * versions for saying 'this region is damaged, update that' to cut down on
4511 * fillrate/bw. This should likely be added to the agp_ layer as a simple dirty
4512 * list that the draw_vobj calls append to. Some is prepared for (see
4513 * agp_rendertarget_dirty), but more is needed in the drawing logic itself.
4514 */
4515 enum display_update_state dstate = draw_display(d);
4516
4517 uint32_t next_fb = 0;
4518 switch(d->device->buftype){
4519 case BUF_STREAM:{
4520 EGLAttrib attr[] = {
4521 EGL_DRM_FLIP_EVENT_DATA_NV, (EGLAttrib) d,
4522 EGL_NONE,
4523 };
4524 if (d->device->vsynch_method == VSYNCH_FLIP){
4525 if (!d->device->eglenv.stream_consumer_acquire_attrib(
4526 d->device->display, d->buffer.stream, attr)){
4527 d->device->vsynch_method = VSYNCH_CLOCK;
4528 TRACE_MARK_ONESHOT("egl-dri", "eglstream-clock",
4529 TRACE_SYS_WARN, d->id, 0, "eglstream-vsynch fail");
4530 debug_print("(%d) - no acq-attr, revert to clock", (int)d->id);
4531 }
4532 }
4533 /* dumb buffer, will never change */
4534 next_fb = d->buffer.cur_fb;
4535 }
4536 break;
4537 case BUF_GBM:{
4538 int rv = -1;
4539 /* We use rendertarget_swap for implementing front/back buffering in the
4540 * case of rendertarget scanout. */
4541 if (dstate == UPDATE_DIRECT){
4542 if ((rv = get_gbm_fb(d, dstate, NULL, &next_fb)) == -1){
4543
4544 TRACE_MARK_ONESHOT("egl-dri", "gbm-scanout",
4545 TRACE_SYS_WARN, d->id, 0, "gbm-direct scanout fail, compose");
4546
4547 debug_print("(%d) direct-scanout buffer "
4548 "conversion failed, falling back to composition", true);
4549 d->force_compose = true;
4550 dstate = draw_display(d);
4551
4552 /* Free all textures/buffer objects, disable allocator for the rendertarget and
4553 * revert to normal 'gbm-flip + drawing to EGLSurface */
4554 drop_swapchain(d);
4555 }
4556 else {
4557 TRACE_MARK_ONESHOT("egl-dri", "gbm-scanout", TRACE_SYS_WARN, d->id, 0, "");
4558 }
4559 }
4560
4561 if (dstate == UPDATE_FLIP){
4562 if (!d->buffer.surface)
4563 goto out;
4564
4565 d->buffer.next_bo = gbm_surface_lock_front_buffer(d->buffer.surface);
4566 if (!d->buffer.next_bo){
4567 TRACE_MARK_ONESHOT("egl-dri", "gbm-buffer-lock-fail", TRACE_SYS_ERROR, d->id, 0, "");
4568 verbose_print("(%d) update, failed to lock front buffer", (int)d->id);
4569 goto out;
4570 }
4571 if ((rv = get_gbm_fb(d, dstate, d->buffer.next_bo, &next_fb)) == -1){
4572 TRACE_MARK_ONESHOT("egl-dri", "gbm-framebuffer-fail", TRACE_SYS_ERROR, d->id, 0, "");
4573 debug_print("(%d) - couldn't get framebuffer handle", (int)d->id);
4574 gbm_surface_release_buffer(d->buffer.surface, d->buffer.next_bo);
4575 goto out;
4576 }
4577
4578 if (rv == 0){
4579 TRACE_MARK_ONESHOT("egl-dri", "gbm-buffer-release", TRACE_SYS_DEFAULT, d->id, 0, "");
4580 gbm_surface_release_buffer(d->buffer.surface, d->buffer.next_bo);
4581 verbose_print("(%d) - no update for display", (int)d->id);
4582 goto out;
4583 }
4584 }
4585 }
4586 break;
4587 case BUF_HEADLESS:
4588 break;
4589 }
4590
4591 bool new_crtc = false;
4592 /* mode-switching is defered to the first frame that is ready as things
4593 * might've happened in the engine between _init and draw */
4594 if (d->display.reset_mode || !d->display.old_crtc){
4595 /* save a copy of the old_crtc so we know what to restore on shutdown */
4596 if (!d->display.old_crtc)
4597 d->display.old_crtc = drmModeGetCrtc(d->device->disp_fd, d->display.crtc);
4598 d->display.reset_mode = false;
4599
4600 /* do any deferred ioctl- device actions to switch from text to graphics */
4601 platform_device_open("TTYGRAPHICS", 0);
4602 new_crtc = true;
4603 }
4604
4605 if (new_crtc){
4606 debug_print("(%d) deferred modeset, switch now (%d*%d => %d*%d@%d)",
4607 (int) d->id, d->dispw, d->disph, d->display.mode.hdisplay,
4608 d->display.mode.vdisplay, d->display.mode.vrefresh
4609 );
4610 if (d->device->atomic){
4611 atomic_set_mode(d);
4612 }
4613 else {
4614 int rv = drmModeSetCrtc(d->device->disp_fd, d->display.crtc,
4615 next_fb, 0, 0, &d->display.con_id, 1, &d->display.mode);
4616 if (rv < 0){
4617 debug_print("(%d) error (%d) setting Crtc for %d:%d(con:%d)",
4618 (int)d->id, errno, d->device->disp_fd, d->display.crtc, d->display.con_id);
4619 }
4620 egl_dri.decay = 4;
4621 }
4622 arcan_conductor_register_display(
4623 d->device->card_id, d->id, SYNCH_STATIC, d->display.mode.vrefresh, d->vid);
4624 }
4625
4626 /* let DRM drive synch and wait for vsynch events on the file descriptor */
4627 if (d->device->vsynch_method == VSYNCH_FLIP){
4628 verbose_print("(%d) request flip (fd: %d, crtc: %"PRIxPTR", fb: %d)",
4629 (int)d->id, (uintptr_t) d->display.crtc, (int) next_fb);
4630
4631 /* for Atomic, there is also a
4632 * DRM_MODE_ATOMIC_NONBLOCK (poll fd), DRM_MODE_ATOMIC_ALLOW_MODESET */
4633 if (0 == drmModePageFlip(d->device->disp_fd,
4634 d->display.crtc, next_fb, DRM_MODE_PAGE_FLIP_EVENT, d)){
4635 TRACE_MARK_ONESHOT("egl-dri", "vsynch-req", TRACE_SYS_DEFAULT, d->id, next_fb, "flip");
4636 d->buffer.in_flip = 1;
4637 d->buffer.cur_fb = next_fb;
4638
4639 verbose_print("(%d) in flip", (int)d->id);
4640 }
4641 else {
4642 debug_print("(%d) error scheduling vsynch-flip (%"PRIxPTR":%"PRIxPTR")",
4643 (int)d->id, (uintptr_t) d->buffer.cur_fb, (uintptr_t)next_fb);
4644 }
4645 }
4646 set_device_context(d->device);
4647 return true;
4648
4649 out:
4650 set_device_context(d->device);
4651 return false;
4652 }
4653
platform_video_prepare_external()4654 void platform_video_prepare_external()
4655 {
4656 if (in_external)
4657 return;
4658
4659 int rc = 10;
4660 debug_print("preparing external");
4661 TRACE_MARK_ENTER("egl-dri", "external-handover", TRACE_SYS_DEFAULT, 0, 0, "");
4662
4663 do{
4664 for(size_t i = 0; i < MAX_DISPLAYS; i++)
4665 disable_display(&displays[i], false);
4666 if (egl_dri.destroy_pending)
4667 flush_display_events(30, false);
4668 } while(egl_dri.destroy_pending && rc-- > 0);
4669
4670 /* tell the privsep side that we no-longer need the GPU */
4671 char* pathref = nodes[0].pathref;
4672 nodes[0].pathref = NULL;
4673 if (pathref){
4674 platform_device_release(pathref, 0);
4675 close(nodes[0].disp_fd);
4676 }
4677
4678 /* this will actually kill the pathref, restore needs it */
4679 release_card(0);
4680 nodes[0].pathref = pathref;
4681
4682 agp_dropenv(agp_env());
4683
4684 in_external = true;
4685 }
4686
platform_video_restore_external()4687 void platform_video_restore_external()
4688 {
4689 debug_print("restoring external");
4690 if (!in_external)
4691 return;
4692
4693 TRACE_MARK_EXIT("egl-dri", "external-handover", TRACE_SYS_DEFAULT, 0, 0, "");
4694 arcan_event_maskall(arcan_event_defaultctx());
4695
4696 /* this is a special place in malbolge, it is possible that the GPU has
4697 * disappeared when we restore, or that it has been replaced with a different
4698 * one - the setup for that is left hanging until the multi-GPU bits are in
4699 * place */
4700 int lfd = -1;
4701 if (nodes[0].pathref){
4702 /* our options in this case if not getting the GPU back are currently slim,
4703 * with a fallback software or remote AGP layer, the rest of the engine can be
4704 * left going, we have enough state to release clients to migrate somewhere */
4705 lfd = platform_device_open(nodes[0].pathref, O_RDWR);
4706 if (-1 == lfd){
4707 debug_print("couldn't re-acquire GPU after suspend");
4708 goto give_up;
4709 }
4710
4711 /* and re-associate draw with disp if that was the case before */
4712 if (nodes[0].draw_fd != -1 && nodes[0].draw_fd == nodes[0].disp_fd){
4713 nodes[0].draw_fd = lfd;
4714 }
4715 nodes[0].disp_fd = lfd;
4716 }
4717
4718 /* rebuild the card itself now, if that fails, we are basically screwed,
4719 * go to crash recovery */
4720 if (!try_node(lfd, lfd,
4721 nodes[0].pathref, 0,nodes[0].buftype, -1, -1, -1, true)){
4722 debug_print("failed to rebuild display after external suspend");
4723 goto give_up;
4724 }
4725
4726 /* rebuild the mapped and known displays, extsusp is a marker that indicate
4727 * that the state of the engine is that the display is still alive, and should
4728 * be brought back to that state before we push 'removed' events */
4729 for (size_t i = 0; i < MAX_DISPLAYS; i++){
4730 if (displays[i].state == DISP_EXTSUSP){
4731 /* refc? */
4732 if (setup_kms(&displays[i],
4733 displays[i].display.con->connector_id, 0, 0) != 0){
4734 debug_print(
4735 "(%d) restore external failed on kms setup", (int)displays[i].id);
4736 disable_display(&displays[i], true);
4737 }
4738 else if (setup_buffers(&displays[i]) == -1){
4739 debug_print(
4740 "(%d) restore external failed on buffer alloc", (int)displays[i].id);
4741 disable_display(&displays[i], true);
4742 }
4743 debug_print("(%d) restore ok, flag reset", (int)displays[i].id);
4744 displays[i].state = DISP_MAPPED;
4745 displays[i].display.reset_mode = true;
4746 }
4747 }
4748
4749 set_device_context(&nodes[0]);
4750 agp_init();
4751 in_external = false;
4752 arcan_event_clearmask(arcan_event_defaultctx());
4753 return;
4754
4755 give_up:
4756 arcan_event_clearmask(arcan_event_defaultctx());
4757 arcan_event_enqueue(arcan_event_defaultctx(), &(struct arcan_event){
4758 .category = EVENT_SYSTEM,
4759 .sys.kind = EVENT_SYSTEM_EXIT,
4760 .sys.errcode = EXIT_FAILURE
4761 });
4762 }
4763