/*
 * Copyright © 2019 Manuel Stoeckl
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
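
/* Test for the shadow-fd mirroring machinery: replicates shared-memory
 * files and (when available) DMABUFs between a source and a destination
 * fd_translation_map, under every compiled-in compression mode and a
 * range of thread counts, and verifies the copies stay byte-identical. */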

#include "common.h"
#include "shadow.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>

#include <sys/mman.h>

struct compression_settings {
	enum compression_mode mode;
	int level;
};

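/* The compression mode/level pairs which the tests cycle through; only
 * modes that were compiled in are listed. (The specific levels are
 * arbitrary test choices, not library defaults.) */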
static const struct compression_settings comp_modes[] = {
		{COMP_NONE, 0},
#ifdef HAS_LZ4
		{COMP_LZ4, 1},
#endif
#ifdef HAS_ZSTD
		{COMP_ZSTD, 5},
#endif
};

#ifdef HAS_DMABUF
#include <gbm.h>
#define TEST_2CPP_FORMAT GBM_FORMAT_GR88
#else
#define TEST_2CPP_FORMAT 0
#endif

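/* Simulate a client update to a shared-memory file: overwrite a random
 * contiguous byte range with the sequence number `seqno`. Returns the
 * number of bytes changed, 0 when no change is made (about 1 call in
 * 11), or -1 if mmap fails. */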
static int update_file(int file_fd, struct gbm_bo *bo, size_t sz, int seqno)
{
	(void)bo;
	if (rand() % 11 == 0) {
		/* no change */
		return 0;
	}

	void *data = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, file_fd,
			0);
	if (data == MAP_FAILED) {
		return -1;
	}

	size_t start = (size_t)rand() % sz;
	size_t end = (size_t)rand() % sz;
	if (start > end) {
		size_t tmp = start;
		start = end;
		end = tmp;
	}
	memset((char *)data + start, seqno, end - start);

	munmap(data, sz);
	return (int)(end - start);
}

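/* DMABUF counterpart of update_file: map the buffer object and
 * overwrite a random contiguous byte range with `seqno`. Returns the
 * number of bytes changed, 0 for no change, or -1 if mapping fails. */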
static int update_dmabuf(int file_fd, struct gbm_bo *bo, size_t sz, int seqno)
{
	(void)file_fd;
	if (rand() % 11 == 0) {
		/* no change */
		return 0;
	}

	void *map_handle = NULL;
	void *data = map_dmabuf(bo, true, &map_handle, NULL, NULL);
	if (data == NULL) {
		/* map_dmabuf signals failure with NULL, not MAP_FAILED,
		 * matching the checks in check_match below */
		return -1;
	}

	size_t start = (size_t)rand() % sz;
	size_t end = (size_t)rand() % sz;
	if (start > end) {
		size_t tmp = start;
		start = end;
		end = tmp;
	}
	memset((char *)data + start, seqno, end - start);

	unmap_dmabuf(bo, map_handle);
	return (int)(end - start);
}

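/* Concatenate all queued transfer blocks into one freshly allocated
 * buffer; the caller must free the returned data. */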
static struct bytebuf combine_transfer_blocks(struct transfer_queue *td)
{
	size_t net_size = 0;
	for (int i = td->start; i < td->end; i++) {
		net_size += td->vecs[i].iov_len;
	}

	struct bytebuf ret_block;
	ret_block.size = net_size;
	ret_block.data = malloc(net_size);
	size_t pos = 0;
	for (int i = td->start; i < td->end; i++) {
		memcpy(ret_block.data + pos, td->vecs[i].iov_base,
				td->vecs[i].iov_len);
		pos += td->vecs[i].iov_len;
	}
	return ret_block;
}

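/* Check that the mirrored file descriptor matches the original: same
 * type (shm file or DMABUF), same size, and byte-identical contents. */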
static bool check_match(int orig_fd, int copy_fd, struct gbm_bo *orig_bo,
		struct gbm_bo *copy_bo)
{
	size_t csz = 0, osz = 0;
	enum fdcat ctype = get_fd_type(copy_fd, &csz);
	enum fdcat otype = get_fd_type(orig_fd, &osz);
	if (ctype != otype || csz != osz) {
		wp_error("Mirrored file descriptor has different type or size: ot=%d ct=%d | os=%d cs=%d",
				otype, ctype, (int)osz, (int)csz);
		return false;
	}

	void *ohandle = NULL, *chandle = NULL;
	void *cdata = NULL, *odata = NULL;
	if (otype == FDC_FILE) {
		cdata = mmap(NULL, csz, PROT_READ, MAP_SHARED, copy_fd, 0);
		if (cdata == MAP_FAILED) {
			return false;
		}
		odata = mmap(NULL, osz, PROT_READ, MAP_SHARED, orig_fd, 0);
		if (odata == MAP_FAILED) {
			munmap(cdata, csz);
			return false;
		}
	} else if (otype == FDC_DMABUF) {
		cdata = map_dmabuf(copy_bo, false, &chandle, NULL, NULL);
		if (cdata == NULL) {
			return false;
		}
		odata = map_dmabuf(orig_bo, false, &ohandle, NULL, NULL);
		if (odata == NULL) {
			unmap_dmabuf(copy_bo, chandle);
			return false;
		}
	} else {
		return false;
	}

	bool pass = memcmp(cdata, odata, csz) == 0;

	if (otype == FDC_FILE) {
		munmap(odata, osz);
		munmap(cdata, csz);
	} else if (otype == FDC_DMABUF) {
		unmap_dmabuf(orig_bo, ohandle);
		unmap_dmabuf(copy_bo, chandle);
	}

	if (!pass) {
		wp_error("Mirrored file descriptor contents differ");
	}

	return pass;
}

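/* Drain the pool's self-pipe and run queued tasks on the current
 * thread until all outstanding work has completed. */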
static void wait_for_thread_pool(struct thread_pool *pool)
{
	bool done = false;
	while (!done) {
		uint8_t flush[64];
		(void)read(pool->selfpipe_r, flush, sizeof(flush));

		/* Also run tasks on main thread, just like the real version */
		// TODO: create a 'threadpool.c'
		struct task_data task;
		bool has_task = request_work_task(pool, &task, &done);

		if (has_task) {
			run_task(&task, &pool->threads[0]);

			pthread_mutex_lock(&pool->work_mutex);
			pool->tasks_in_progress--;
			pthread_mutex_unlock(&pool->work_mutex);
			/* To skip the next poll */
		} else {
			/* Wait a short amount */
			struct timespec waitspec;
			waitspec.tv_sec = 0;
			waitspec.tv_nsec = 100000;
			nanosleep(&waitspec, NULL);
		}
	}
}

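/* Run one replication step for the shadow with remote id `rid`:
 * collect its pending update from src_map, apply the resulting
 * transfer blocks to dst_map, and verify that both copies then match.
 * `ndiff` is the number of bytes the caller changed; when it is 0 an
 * empty transfer is expected, although a redundant transfer is merely
 * logged, not treated as a failure. */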
static bool test_transfer(struct fd_translation_map *src_map,
		struct fd_translation_map *dst_map,
		struct thread_pool *src_pool, struct thread_pool *dst_pool,
		int rid, int ndiff, struct render_data *render_data)
{
	struct transfer_queue transfer_data;
	memset(&transfer_data, 0, sizeof(struct transfer_queue));
	pthread_mutex_init(&transfer_data.async_recv_queue.lock, NULL);

	struct shadow_fd *src_shadow = get_shadow_for_rid(src_map, rid);
	collect_update(src_pool, src_shadow, &transfer_data, false);
	start_parallel_work(src_pool, &transfer_data.async_recv_queue);
	wait_for_thread_pool(src_pool);
	finish_update(src_shadow);
	transfer_load_async(&transfer_data);

	if (ndiff == 0) {
		size_t ns = 0;
		for (int i = transfer_data.start; i < transfer_data.end; i++) {
			ns += transfer_data.vecs[i].iov_len;
		}
		if (transfer_data.end == transfer_data.start) {
			/* nothing sent */
			cleanup_transfer_queue(&transfer_data);
			return true;
		}
		/* Redundant transfers are acceptable, if inefficient */
		wp_error("Collecting updates gave a transfer (%zu bytes, %d blocks) when none was expected",
				ns, transfer_data.end - transfer_data.start);
	}
	if (transfer_data.end == transfer_data.start) {
		wp_error("Collecting updates gave an unexpected number (%d) of transfers",
				transfer_data.end - transfer_data.start);
		cleanup_transfer_queue(&transfer_data);
		return false;
	}
	struct bytebuf res = combine_transfer_blocks(&transfer_data);
	cleanup_transfer_queue(&transfer_data);

	size_t start = 0;
	while (start < res.size) {
		struct bytebuf tmp;
		tmp.data = &res.data[start];
		uint32_t hb = ((uint32_t *)tmp.data)[0];
		int32_t xid = ((int32_t *)tmp.data)[1];
		tmp.size = transfer_size(hb);
		apply_update(dst_map, dst_pool, render_data, transfer_type(hb),
				xid, &tmp);
		start += alignz(tmp.size, 4);
	}
	free(res.data);

	/* on the first round, the destination shadow only exists after the
	 * transfer has been applied */
	struct shadow_fd *dst_shadow = get_shadow_for_rid(dst_map, rid);

	return check_match(src_shadow->fd_local, dst_shadow->fd_local,
			src_shadow->dmabuf_bo, dst_shadow->dmabuf_bo);
}

/* This test closes the provided file fd */
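/* It mirrors the descriptor between a source and a destination
 * translation map: round 0 replicates the initial contents, later
 * rounds apply randomized updates in alternating directions, and the
 * final round additionally grows shared-memory files (via ftruncate
 * and extend_shm_shadow) to exercise the resize path. */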
static bool test_mirror(int new_file_fd, size_t sz,
		int (*update)(int fd, struct gbm_bo *bo, size_t sz, int seqno),
		struct compression_settings comp_mode, int n_src_threads,
		int n_dst_threads, struct render_data *rd,
		const struct dmabuf_slice_data *slice_data)
{
	struct fd_translation_map src_map;
	setup_translation_map(&src_map, false);

	struct thread_pool src_pool;
	setup_thread_pool(&src_pool, comp_mode.mode, comp_mode.level,
			n_src_threads);

	struct fd_translation_map dst_map;
	setup_translation_map(&dst_map, true);

	struct thread_pool dst_pool;
	setup_thread_pool(&dst_pool, comp_mode.mode, comp_mode.level,
			n_dst_threads);

	size_t fdsz = 0;
	enum fdcat fdtype = get_fd_type(new_file_fd, &fdsz);
	struct shadow_fd *src_shadow = translate_fd(&src_map, rd, new_file_fd,
			fdtype, fdsz, slice_data, false, false);
	struct shadow_fd *dst_shadow = NULL;
	int rid = src_shadow->remote_id;

	bool pass = true;
	for (int i = 0; i < 6; i++) {
		bool fwd = i == 0 || i % 2;

		int target_fd = fwd ? src_shadow->fd_local
				    : dst_shadow->fd_local;
		struct gbm_bo *target_bo = fwd ? src_shadow->dmabuf_bo
					       : dst_shadow->dmabuf_bo;
		if (i == 5 && src_shadow->type == FDC_FILE) {
			sz = (sz * 7) / 5;
			if (ftruncate(target_fd, (off_t)sz) == -1) {
				wp_error("failed to resize file");
				break;
			}
			extend_shm_shadow(fwd ? &src_map : &dst_map,
					fwd ? &src_pool : &dst_pool,
					fwd ? src_shadow : dst_shadow, sz);
		}

		int ndiff = i > 0 ? (*update)(target_fd, target_bo, sz, i)
				  : (int)sz;
		if (ndiff == -1) {
			pass = false;
			break;
		}
		bool subpass;
		if (fwd) {
			src_shadow->is_dirty = true;
			damage_everything(&src_shadow->damage);
			subpass = test_transfer(&src_map, &dst_map, &src_pool,
					&dst_pool, rid, ndiff, rd);
		} else {
			dst_shadow->is_dirty = true;
			damage_everything(&dst_shadow->damage);
			/* reversed direction: the dst side collects and the
			 * src side applies, so the pools are swapped too */
			subpass = test_transfer(&dst_map, &src_map, &dst_pool,
					&src_pool, rid, ndiff, rd);
		}
		pass &= subpass;
		if (!pass) {
			break;
		}

		dst_shadow = get_shadow_for_rid(&dst_map, rid);
	}

	cleanup_translation_map(&src_map);
	cleanup_translation_map(&dst_map);
	cleanup_thread_pool(&src_pool);
	cleanup_thread_pool(&dst_pool);
	return pass;
}

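/* Log handlers indexed by severity; presumably debug output is
 * suppressed (NULL) while errors go through the test harness's atomic
 * handler. */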
log_handler_func_t log_funcs[2] = {NULL, test_atomic_log_handler};
int main(int argc, char **argv)
{
	(void)argc;
	(void)argv;

	if (mkdir("run", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) == -1 &&
			errno != EEXIST) {
		wp_error("Not allowed to create test directory, cannot run tests.");
		return EXIT_FAILURE;
	}

	/* to avoid warnings when the driver's dmabuf size constraints require
	 * significant alignment, the width and height are already multiples
	 * of 64 */
	const size_t test_width = 1024;
	const size_t test_height = 1280;
	const size_t test_cpp = 2;
	const size_t test_size = test_width * test_height * test_cpp;
	const struct dmabuf_slice_data slice_data = {
			.width = (uint32_t)test_width,
			.height = (uint32_t)test_height,
			.format = TEST_2CPP_FORMAT,
			.num_planes = 1,
			.modifier = 0,
			.offsets = {0, 0, 0, 0},
			.strides = {(uint32_t)(test_width * test_cpp), 0, 0, 0},
			.using_planes = {true, false, false, false},
	};

	uint8_t *test_pattern = malloc(test_size);
	for (size_t i = 0; i < test_size; i++) {
		test_pattern[i] = (uint8_t)i;
	}

	struct render_data *rd = calloc(1, sizeof(struct render_data));
	rd->drm_fd = -1;
	rd->av_disabled = true;

	bool has_dmabuf = TEST_2CPP_FORMAT != 0;
	if (has_dmabuf && init_render_data(rd) == -1) {
		has_dmabuf = false;
	}

	bool all_success = true;
	srand(0);
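	/* Test matrix: every compiled-in compression mode, crossed with 1-5
	 * source threads and 1-5 destination threads; each combination is
	 * exercised with a shared-memory file and, when supported, with a
	 * DMABUF initialized to the same pattern. */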
	for (size_t c = 0; c < sizeof(comp_modes) / sizeof(comp_modes[0]);
			c++) {
		for (int gt = 1; gt <= 5; gt++) {
			for (int rt = 1; rt <= 5; rt++) {
				int file_fd = create_anon_file();
				if (file_fd == -1) {
					wp_error("Failed to create test file: %s",
							strerror(errno));
					continue;
				}
				if (write(file_fd, test_pattern, test_size) !=
						(ssize_t)test_size) {
					wp_error("Failed to write to test file: %s",
							strerror(errno));
					checked_close(file_fd);
					continue;
				}

				bool pass = test_mirror(file_fd, test_size,
						update_file, comp_modes[c], gt,
						rt, rd, &slice_data);

				printf("  FILE comp=%d src_thread=%d dst_thread=%d, %s\n",
						(int)c, gt, rt,
						pass ? "pass" : "FAIL");
				all_success &= pass;

				if (has_dmabuf) {
					struct gbm_bo *bo = make_dmabuf(rd,
							test_size, &slice_data);
					if (!bo) {
						has_dmabuf = false;
						continue;
					}

					void *map_handle = NULL;
					void *data = map_dmabuf(bo, true,
							&map_handle, NULL,
							NULL);
					if (!data) {
						destroy_dmabuf(bo);
						has_dmabuf = false;
						continue;
					}
					memcpy(data, test_pattern, test_size);
					unmap_dmabuf(bo, map_handle);

					int dmafd = export_dmabuf(bo);
					if (dmafd == -1) {
						/* avoid leaking the bo on the
						 * export failure path */
						destroy_dmabuf(bo);
						has_dmabuf = false;
						continue;
					}
					destroy_dmabuf(bo);

					bool dpass = test_mirror(dmafd,
							test_size,
							update_dmabuf,
							comp_modes[c], gt, rt,
							rd, &slice_data);

					printf("DMABUF comp=%d src_thread=%d dst_thread=%d, %s\n",
							(int)c, gt, rt,
							dpass ? "pass"
							      : "FAIL");
					all_success &= dpass;
				}
			}
		}
	}

	cleanup_render_data(rd);
	free(rd);
	free(test_pattern);

	printf("All pass: %c\n", all_success ? 'Y' : 'n');
	return all_success ? EXIT_SUCCESS : EXIT_FAILURE;
}