#include "../../platform/agp/glfun.h"

static void surf_destroy(struct wl_client* cl, struct wl_resource* res)
{
	trace(TRACE_ALLOC, "destroy:surf(%"PRIxPTR")", (uintptr_t) res);
	struct comp_surf* surf = wl_resource_get_user_data(res);
	if (!surf){
		trace(TRACE_ALLOC, "destroy:lost-surface");
		return;
	}

/* check pending subsurfaces? */
	destroy_comp_surf(surf, true);
}

static void buffer_destroy(struct wl_listener* list, void* data)
{
	struct comp_surf* surf = NULL;
	surf = wl_container_of(list, surf, l_bufrem);
	if (!surf)
		return;

	trace(TRACE_SURF, "(event) destroy:buffer(%"PRIxPTR")", (uintptr_t) data);

	if (surf->buf){
		surf->cbuf = (uintptr_t) NULL;
		surf->buf = NULL;
	}

	if (surf->l_bufrem_a){
		surf->l_bufrem_a = false;
		wl_list_remove(&surf->l_bufrem.link);
	}
}

/*
 * Buffer now belongs to the surface, but it is useless until there's a commit
 */
static void surf_attach(struct wl_client* cl,
	struct wl_resource* res, struct wl_resource* buf, int32_t x, int32_t y)
{
	struct comp_surf* surf = wl_resource_get_user_data(res);
	if (!surf){
		trace(TRACE_SURF, "attempted attach to missing surface");
		return;
	}

	if (surf->l_bufrem_a){
		surf->l_bufrem_a = false;
		wl_list_remove(&surf->l_bufrem.link);
	}

	trace(TRACE_SURF, "attach to: %s, @x,y: %d, %d - buf: %"
		PRIxPTR, surf->tracetag, (int)x, (int)y, (uintptr_t)buf);

	bool changed = false;
	if (surf->buf && !buf){
		trace(TRACE_SURF, "mark invisible: %s", surf->tracetag);
		surf->viewport.ext.viewport.invisible = true;
		arcan_shmif_enqueue(&surf->acon, &surf->viewport);
		changed = true;
	}

/* a note for subsurfaces here - these can have hierarchies that are mixed:
 *
 *  a (surf)
 *    b (subsurf, null buffer)
 *      c (subsurf, buffer)
 *
 *  some toolkits use this for intermediate positioning (guess OO + rel.
 *  anchors). in principle the code here is correct, i.e. the subsurface
 *  becomes invisible, but the idiomatic way of implementing this on the wm
 *  side is by linking the subsurfaces to the surfaces, and hiding them on
 *  viewport events marking them invisible.
 *
 *  the other option is to suck it up as we pay the allocation price anyhow to
 *  get the 'right' size, and commit fully-translucent non-decorated in the
 *  case of a non-buf on a subsurface (if we do it on a normal surface we get
 *  problems with clients that keep toplevels around and hide them ..)
 */
	else if (surf->viewport.ext.viewport.invisible){
		trace(TRACE_SURF, "mark visible: %s", surf->tracetag);
		surf->viewport.ext.viewport.invisible = false;
		arcan_shmif_enqueue(&surf->acon, &surf->viewport);
		changed = true;
	}

	if (buf){
		surf->l_bufrem_a = true;
		surf->l_bufrem.notify = buffer_destroy;
		wl_resource_add_destroy_listener(buf, &surf->l_bufrem);
	}
/* follow up on the explanation above, push a fully translucent buffer */
	else if (surf->is_subsurface && changed){
		surf->acon.hints |= SHMIF_RHINT_IGNORE_ALPHA;
		for (size_t y = 0; y < surf->acon.h; y++)
			memset(&surf->acon.vidb[y * surf->acon.stride], '\0', surf->acon.stride);
		arcan_shmif_signal(&surf->acon, SHMIF_SIGVID | SHMIF_SIGBLK_NONE);
	}

/* buf is stored XOR:ed with a cookie; surf_commit recovers the pointer and
 * verifies that it matches cbuf to catch stale or forged buffers */
	surf->cbuf = (uintptr_t) buf;
	surf->buf = (void*) ((uintptr_t) buf ^ ((uintptr_t) 0xfeedface));
}

/*
 * Similar to the X damage stuff, just grow the synch region for shm repacking,
 * but there's more to this (of course there is) as the buffer isn't
 * necessarily 1:1 with the surface.
 */
static void surf_damage(struct wl_client* cl,
	struct wl_resource* res, int32_t x, int32_t y, int32_t w, int32_t h)
{
	struct comp_surf* surf = wl_resource_get_user_data(res);

	x *= surf->scale;
	y *= surf->scale;
	w *= surf->scale;
	h *= surf->scale;

	trace(TRACE_SURF,"%s:(%"PRIxPTR") @x,y+w,h(%d+%d, %d+%d)",
		surf->tracetag, (uintptr_t)res, (int)x, (int)w, (int)y, (int)h);

	if (x < surf->acon.dirty.x1)
		surf->acon.dirty.x1 = x;
	if (x+w > surf->acon.dirty.x2)
		surf->acon.dirty.x2 = x+w;
	if (y < surf->acon.dirty.y1)
		surf->acon.dirty.y1 = y;
	if (y+h > surf->acon.dirty.y2)
		surf->acon.dirty.y2 = y+h;
}

/*
 * The client wants this object to be signalled when it is time to produce a
 * new frame. There are a few options:
 * - CLOCKREQ and attach it to frame (see the sketch after this comment)
 * - signal immediately, but defer if we're invisible and wait for DISPLAYHINT.
 * - set a FUTEX/KQUEUE to monitor the segment vready flag, and when
 *   that triggers, send the signal.
 * - enable the frame-feedback mode on shmif.
 */
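/*
 * for reference, the CLOCKREQ route would roughly look like the sketch below
 * (untested; assumes the CLOCKREQ external event and its clock.rate field as
 * declared in shmif_event.h, with arcan answering via STEPFRAME events on
 * the segment):
 *
 *   struct arcan_event req = {
 *       .ext.kind = ARCAN_EVENT(CLOCKREQ),
 *       .ext.clock.rate = 1
 *   };
 *   arcan_shmif_enqueue(&surf->acon, &req);
 */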
static void surf_frame(
	struct wl_client* cl, struct wl_resource* res, uint32_t cb)
{
	struct comp_surf* surf = wl_resource_get_user_data(res);
	trace(TRACE_SURF, "req-cb, %s(%"PRIu32")", surf->tracetag, cb);

	if (surf->frames_pending + surf->subsurf_pending > COUNT_OF(surf->scratch)){
		trace(TRACE_ALLOC, "too many pending surface ops");
		wl_resource_post_no_memory(res);
		return;
	}

	struct wl_resource* cbres =
		wl_resource_create(cl, &wl_callback_interface, 1, cb);

	if (!cbres){
		trace(TRACE_ALLOC, "frame callback allocation failed");
		wl_resource_post_no_memory(res);
		return;
	}

/* special case, if the surface has not yet been promoted to a usable type
 * and the client requests a callback, ack it immediately */
	if (!surf->shell_res){
		trace(TRACE_SURF, "preemptive-cb-ack");
		wl_callback_send_done(cbres, arcan_timemillis());
		wl_resource_destroy(cbres);
		return;
	}

/* should just bitmap this .. (see the sketch below) */
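/*
 * i.e. keep a free-slot mask next to scratch[] and pick slots with ffs()
 * (strings.h) instead of scanning the .type field - a rough sketch, where
 * 'scratch_used' is a hypothetical uint32_t field, not something comp_surf
 * actually has today:
 *
 *   int slot = ffs(~surf->scratch_used);
 *   if (slot && slot <= (int) COUNT_OF(surf->scratch)){
 *       surf->scratch_used |= 1 << (slot - 1);
 *       ...
 *   }
 */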
	for (size_t i = 0; i < COUNT_OF(surf->scratch); i++){
		if (surf->scratch[i].type == 1){
			wl_callback_send_done(surf->scratch[i].res, surf->scratch[i].id);
			wl_resource_destroy(surf->scratch[i].res);
			surf->frames_pending--;
			surf->scratch[i].res = NULL;
			surf->scratch[i].id = 0;
			surf->scratch[i].type = 0;
		}

		if (surf->scratch[i].type == 0){
			surf->frames_pending++;
			surf->scratch[i].res = cbres;
			surf->scratch[i].id = cb;
			surf->scratch[i].type = 1;
			break;
		}
	}
}

static bool shm_to_gl(
	struct arcan_shmif_cont* acon, struct comp_surf* surf,
	int w, int h, int fmt, void* data, int stride)
{
/* globally rejected or per-window rejected or no GL or it has failed before */
	if (!arcan_shmif_handle_permitted(&wl.control) ||
		!arcan_shmif_handle_permitted(acon) ||
		arcan_shmifext_isext(&wl.control) != 1 ||
		surf->shm_gl_fail)
		return false;

	int gl_fmt = -1;
	int px_fmt = GL_UNSIGNED_BYTE;
	int gl_int_fmt = -1;
	int pitch = 0;

	switch(fmt){
	case WL_SHM_FORMAT_ARGB8888:
	case WL_SHM_FORMAT_XRGB8888:
		gl_fmt = GL_BGRA_EXT;
/* only gles/gles2 supports int_fmt as GL_BGRA */
		gl_int_fmt = GL_RGBA8;
//		pitch = stride ? stride / 4 : 0;
		pitch = w;
	break;
	case WL_SHM_FORMAT_ABGR8888:
	case WL_SHM_FORMAT_XBGR8888:
		gl_fmt = GL_RGBA;
		gl_int_fmt = GL_RGBA8;
		pitch = stride ? stride / 4 : 0;
	break;
	case WL_SHM_FORMAT_RGB565:
		gl_fmt = GL_RGB;
		gl_int_fmt = GL_RGBA8;
		px_fmt = GL_UNSIGNED_SHORT_5_6_5;
		pitch = stride ? stride / 2 : 0;
	break;
/* for WL_SHM_FORMAT_YUV***, NV12, we need a really complicated dance here with
 * multiple planes, GL_UNSIGNED_BYTE, ... as well as custom unpack shaders and
 * a conversion pass through an FBO */
	default:
		return false;
	break;
	}

/* use the shared primary context for allocation, this is also where we can do
 * our color conversion, currently just use the teximage- format */
	struct agp_fenv* fenv = arcan_shmifext_getfenv(&wl.control);
	GLuint glid;
	fenv->gen_textures(1, &glid);
	fenv->bind_texture(GL_TEXTURE_2D, glid);
	fenv->pixel_storei(GL_UNPACK_ROW_LENGTH, pitch);
	fenv->tex_image_2d(GL_TEXTURE_2D,
		0, gl_int_fmt, w, h, 0, gl_fmt, px_fmt, data);
	fenv->pixel_storei(GL_UNPACK_ROW_LENGTH, 0);
	fenv->bind_texture(GL_TEXTURE_2D, 0);

/* this seems to be needed still or the texture contents will be invalid,
 * better still is to have an explicit fence and queue the release of the
 * buffer until the upload is finished - a sketch follows below */
	fenv->flush();
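/*
 * the explicit-fence variant would roughly be (plain GL 3.2 / GLES3 sync
 * objects, assuming the context exposes them; wl_buffer release would then
 * be deferred until the wait completes):
 *
 *   GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 *   ...
 *   glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, UINT64_MAX);
 *   glDeleteSync(fence);
 */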

/* build descriptors */
	int fd;
	size_t stride_out;
	int out_fmt;

	uintptr_t gl_display;
	arcan_shmifext_egl_meta(&wl.control, &gl_display, NULL, NULL);

/* can still fail for mysterious reasons, force-fallback to normal shm */
	if (!arcan_shmifext_gltex_handle(&wl.control,
		gl_display, glid, &fd, &stride_out, &out_fmt)){
		trace(TRACE_SURF, "shm->glhandle failed");
		fenv->delete_textures(1, &glid);
		surf->shm_gl_fail = true;
		return false;
	}

	trace(TRACE_SURF, "shm->gl(%d, %d)", glid, fd);
	arcan_shmif_signalhandle(acon,
		SHMIF_SIGVID | SHMIF_SIGBLK_NONE,
		fd, stride_out, out_fmt
	);

	fenv->delete_textures(1, &glid);
	return true;
}

/*
 * IGNORE, shmif doesn't split up into regions like this, though
 * we can forward it as messages and let the script-side decide.
 */
static void surf_opaque(struct wl_client* cl,
	struct wl_resource* res, struct wl_resource* reg)
{
	trace(TRACE_REGION, "opaque_region");
}

static void surf_inputreg(struct wl_client* cl,
	struct wl_resource* res, struct wl_resource* reg)
{
	trace(TRACE_REGION, "input_region");
/*
 * INCOMPLETE:
 * Should either send this onward for the wm scripts to mask/forward
 * events that fall outside the region, or annotate the surface resource
 * and route the input in the bridge. This becomes important with complex
 * hierarchies (from popups and subsurfaces). A sketch of the forwarding
 * approach follows below.
 */
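/*
 * a minimal sketch of the forwarding approach, reusing the MESSAGE pattern
 * from surf_transform/surf_scale further down; extracting the region
 * rectangle from reg is omitted (x/y/w/h are placeholders, no region
 * bookkeeping exists here yet):
 *
 *   struct comp_surf* surf = wl_resource_get_user_data(res);
 *   struct arcan_event ev = {.ext.kind = ARCAN_EVENT(MESSAGE)};
 *   snprintf((char*)ev.ext.message.data, COUNT_OF(ev.ext.message.data),
 *       "inputreg:%d:%d:%d:%d", x, y, w, h);
 *   arcan_shmif_enqueue(&surf->acon, &ev);
 */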
}

static bool fmt_has_alpha(int fmt, struct comp_surf* surf)
{
/* should possibly check for the special case where the entire region is
 * marked as opaque as well, or if there are translucent portions - note that
 * 'true' from this function means the alpha bits are padding (the X- formats)
 * and should be ignored */
	return
		fmt == WL_SHM_FORMAT_XRGB8888 ||
		fmt == WL_DRM_FORMAT_XRGB4444 ||
		fmt == WL_DRM_FORMAT_XBGR4444 ||
		fmt == WL_DRM_FORMAT_RGBX4444 ||
		fmt == WL_DRM_FORMAT_BGRX4444 ||
		fmt == WL_DRM_FORMAT_XRGB1555 ||
		fmt == WL_DRM_FORMAT_XBGR1555 ||
		fmt == WL_DRM_FORMAT_RGBX5551 ||
		fmt == WL_DRM_FORMAT_BGRX5551 ||
		fmt == WL_DRM_FORMAT_XRGB8888 ||
		fmt == WL_DRM_FORMAT_XBGR8888 ||
		fmt == WL_DRM_FORMAT_RGBX8888 ||
		fmt == WL_DRM_FORMAT_BGRX8888 ||
		fmt == WL_DRM_FORMAT_XRGB2101010 ||
		fmt == WL_DRM_FORMAT_XBGR2101010 ||
		fmt == WL_DRM_FORMAT_RGBX1010102 ||
		fmt == WL_DRM_FORMAT_BGRX1010102;
}

static void synch_acon_alpha(struct arcan_shmif_cont* acon, bool has_alpha)
{
	if (has_alpha)
		acon->hints |= SHMIF_RHINT_IGNORE_ALPHA;
	else
		acon->hints &= ~SHMIF_RHINT_IGNORE_ALPHA;
}

static bool push_drm(struct wl_client* cl,
	struct arcan_shmif_cont* acon, struct wl_resource* buf, struct comp_surf* surf)
{
	struct wl_drm_buffer* drm_buf = wayland_drm_buffer_get(wl.drm, buf);
	if (!drm_buf)
		return false;

	trace(TRACE_SURF, "surf_commit(egl:%s)", surf->tracetag);
	synch_acon_alpha(acon,
		fmt_has_alpha(wayland_drm_buffer_get_format(drm_buf), surf));
	wayland_drm_commit(surf, drm_buf, acon);
	return true;
}

static bool push_dma(struct wl_client* cl,
	struct arcan_shmif_cont* acon, struct wl_resource* buf, struct comp_surf* surf)
{
	struct dma_buf* dmabuf = dmabuf_buffer_get(buf);
	if (!dmabuf)
		return false;

	if (dmabuf->w != acon->w || dmabuf->h != acon->h){
		arcan_shmif_resize(acon, dmabuf->w, dmabuf->h);
	}

/* same dance as in wayland_drm, if the receiving side doesn't want dma bufs,
 * attach them to the context (and extend to accelerated) then force a CPU
 * readback - could be leveraged to perform other transforms at the same time,
 * one candidate being subsurface composition and colorspace conversion */
	if (!arcan_shmif_handle_permitted(acon) ||
			!arcan_shmif_handle_permitted(&wl.control)){
		if (!arcan_shmifext_isext(acon)){
			struct arcan_shmifext_setup defs = arcan_shmifext_defaults(acon);
			defs.no_context = true;
			arcan_shmifext_setup(acon, defs);
		}

		int n_planes = 0;
		struct shmifext_buffer_plane planes[4];
		for (size_t i = 0; i < 4; i++){
			planes[i] = dmabuf->planes[i];
			if (planes[i].fd <= 0)
				break;
			planes[i].fd = arcan_shmif_dupfd(planes[i].fd, -1, false);
			n_planes++;
		}

/* on import failure, the duplicated descriptors are ours to clean up */
		if (!arcan_shmifext_import_buffer(acon,
			SHMIFEXT_BUFFER_GBM, planes, n_planes, sizeof(planes[0]))){
			for (size_t i = 0; i < n_planes; i++)
				if (planes[i].fd > 0)
					close(planes[i].fd);
		}

		return true;
	}

/* right now this only supports a single transferred buffer, the real support
 * is close by in another branch, but for the sake of bringup just block those
 * for now */
	for (size_t i = 0; i < COUNT_OF(dmabuf->planes); i++){
		if (i == 0){
			arcan_shmif_signalhandle(acon, SHMIF_SIGVID | SHMIF_SIGBLK_NONE,
				dmabuf->planes[i].fd, dmabuf->planes[i].gbm.stride, dmabuf->fmt);
		}
	}

	synch_acon_alpha(acon, fmt_has_alpha(dmabuf->fmt, surf));

	trace(TRACE_SURF, "surf_commit(dmabuf:%s)", surf->tracetag);
	return true;
}

/*
 * since we have GL already going if the .egl toggle is set, we can pull
 * in agp and use those functions raw
 */
#include "../../platform/video_platform.h"
static bool push_shm(struct wl_client* cl,
	struct arcan_shmif_cont* acon, struct wl_resource* buf, struct comp_surf* surf)
{
	struct wl_shm_buffer* shm_buf = wl_shm_buffer_get(buf);
	if (!shm_buf)
		return false;

	trace(TRACE_SURF, "surf_commit(shm:%s)", surf->tracetag);

	uint32_t w = wl_shm_buffer_get_width(shm_buf);
	uint32_t h = wl_shm_buffer_get_height(shm_buf);
	int fmt = wl_shm_buffer_get_format(shm_buf);
	void* data = wl_shm_buffer_get_data(shm_buf);
	int stride = wl_shm_buffer_get_stride(shm_buf);

	if (acon->w != w || acon->h != h){
		trace(TRACE_SURF,
			"surf_commit(shm, resize to: %zu, %zu)", (size_t)w, (size_t)h);
		arcan_shmif_resize(acon, w, h);
	}

/* resize failed, this will only happen when growing, thus we can crop */
	if (acon->w != w || acon->h != h){
		w = acon->w;
		h = acon->h;
	}

/* alpha state changed? only changing this flag does not require a resynch
 * as the hint is checked on each frame */
	synch_acon_alpha(acon, fmt_has_alpha(fmt, surf));
	wl_shm_buffer_begin_access(shm_buf);
	if (shm_to_gl(acon, surf, w, h, fmt, data, stride))
		goto out;

/* two other options to avoid repacking, one is to actually use this signal-
 * handle facility to send a descriptor, and mark the type as the WL shared
 * buffer with the metadata in vidp[] in order for offset and other bits to
 * make sense.
 * This is currently not supported in arcan/shmif.
 *
 * The other is to actually allow the shmif server to ptrace into us (wut)
 * and use a rare linuxism known as process_vm_writev and process_vm_readv
 * and send the pointers that way. One might call that one exotic.
 */
	if (stride != acon->stride){
		trace(TRACE_SURF,"surf_commit(stride-mismatch)");
		for (size_t row = 0; row < h; row++){
			memcpy(&acon->vidp[row * acon->pitch],
				&((uint8_t*)data)[row * stride],
				w * sizeof(shmif_pixel)
			);
		}
	}
	else
		memcpy(acon->vidp, data, w * h * sizeof(shmif_pixel));

	arcan_shmif_signal(acon, SHMIF_SIGVID | SHMIF_SIGBLK_NONE);

out:
	wl_shm_buffer_end_access(shm_buf);
	return true;
}

/*
 * Practically there is another thing to consider here and that is the trash
 * fire of subsurfaces. Mapping each to a shmif segment is costly, and
 * snowballs into a bunch of extra work in the WM, making otherwise trivial
 * features nightmarish. The other possible option here would be to do the
 * composition ourselves into one shmif segment, masking that the thing
 * exists at all.
 */
static void surf_commit(struct wl_client* cl, struct wl_resource* res)
{
	struct comp_surf* surf = wl_resource_get_user_data(res);
	if (!surf){
		trace(TRACE_SURF, "no surface in resource (severe)");
		return;
	}

	trace(TRACE_SURF,
		"%s (@%"PRIxPTR")->commit", surf->tracetag, (uintptr_t)surf->cbuf);
	struct arcan_shmif_cont* acon = &surf->acon;

/* xwayland surface that is unpaired? */
	if (surf->cookie != 0xfeedface && res != surf->client->cursor){
		if (!xwl_pair_surface(cl, surf, res)){
			trace(TRACE_SURF, "defer commit until paired");
			return;
		}
	}

	if (!surf->cbuf){
		trace(TRACE_SURF, "no buffer");
		if (surf->internal){
			surf->internal(surf, CMD_RECONFIGURE);
			surf->internal(surf, CMD_FLUSH_CALLBACKS);
		}

		return;
	}

	if (!surf->client){
		trace(TRACE_SURF, "no bridge");
		return;
	}

	struct wl_resource* buf = (struct wl_resource*)(
		(uintptr_t) surf->buf ^ ((uintptr_t) 0xfeedface));
	if ((uintptr_t) buf != surf->cbuf){
		trace(TRACE_SURF, "corrupted or unknown buf "
			"(%"PRIxPTR" vs %"PRIxPTR") (severe)", (uintptr_t) buf, surf->cbuf);
		return;
	}

/*
 * special case, if the surface we should synch is the currently set
 * pointer resource, then draw that to the special segment.
 */
	if (surf->cookie != 0xfeedface){
		if (res == surf->client->cursor){
			acon = &surf->client->acursor;
/* synch hot-spot changes at this stage */
			if (surf->client->dirty_hot){
				struct arcan_event ev = {
					.ext.kind = ARCAN_EVENT(MESSAGE)
				};
				snprintf((char*)ev.ext.message.data,
					COUNT_OF(ev.ext.message.data), "hot:%d:%d",
					(int)(surf->client->hot_x * surf->scale),
					(int)(surf->client->hot_y * surf->scale)
				);
				arcan_shmif_enqueue(&surf->client->acursor, &ev);
				surf->client->dirty_hot = false;
			}
			trace(TRACE_SURF, "cursor updated");
		}
/* In general, a surface without a role means that the client is in the wrong
 * OR that there is a rootless Xwayland surface going - for the latter, we'd
 * like to figure out if this is correct or not - so wrap around a query
 * function. Since there can be stalls etc. in the corresponding wm, we need to
 * tag this surface as pending on failure */
	}

	if (!acon || !acon->addr){
		trace(TRACE_SURF, "couldn't map to arcan connection");
		return;
	}

/*
 * Safeguard due to the SIGBLK_NONE, used for signalling, below.
 */
	while (arcan_shmif_signalstatus(acon) > 0){}

/*
 * It seems that the buffer- protocol doesn't actually give us the type of
 * the buffer, so the canonical way is to just try them in order
 * shm -> drm -> dma-buf.
 */

	if (
		!push_shm(cl, acon, buf, surf) &&
		!push_drm(cl, acon, buf, surf) &&
		!push_dma(cl, acon, buf, surf)){
		trace(TRACE_SURF, "surf_commit(unknown:%s)", surf->tracetag);
	}

/* might be that this should be moved to the buffer types as well,
 * since we might need double-triple buffering, uncertain how mesa
 * actually handles this */
	wl_buffer_send_release(buf);

	trace(TRACE_SURF,
		"surf_commit(%zu,%zu-%zu,%zu)accel_fail=%d",
			(size_t)acon->dirty.x1, (size_t)acon->dirty.y1,
			(size_t)acon->dirty.x2, (size_t)acon->dirty.y2,
			(int)surf->shm_gl_fail);

/* reset the dirty rectangle */
	acon->dirty.x1 = acon->w;
	acon->dirty.x2 = 0;
	acon->dirty.y1 = acon->h;
	acon->dirty.y2 = 0;
}

static void surf_transform(struct wl_client* cl,
	struct wl_resource* res, int32_t transform)
{
	trace(TRACE_SURF, "surf_transform(%d)", (int) transform);
	struct comp_surf* surf = wl_resource_get_user_data(res);
	if (!surf || !surf->acon.addr)
		return;

	struct arcan_event ev = {
		.ext.kind = ARCAN_EVENT(MESSAGE),
	};
	snprintf((char*)ev.ext.message.data,
		COUNT_OF(ev.ext.message.data), "transform:%"PRId32, transform);

	arcan_shmif_enqueue(&surf->acon, &ev);
}

static void surf_scale(struct wl_client* cl,
	struct wl_resource* res, int32_t scale)
{
	trace(TRACE_SURF, "surf_scale(%d)", (int) scale);
	struct comp_surf* surf = wl_resource_get_user_data(res);
	if (!surf || !surf->acon.addr)
		return;

	surf->scale = scale > 0 ? scale : 1;
	struct arcan_event ev = {
		.ext.kind = ARCAN_EVENT(MESSAGE)
	};
	snprintf((char*)ev.ext.message.data,
		COUNT_OF(ev.ext.message.data), "scale:%"PRId32, scale);
	arcan_shmif_enqueue(&surf->acon, &ev);
}