// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

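/*
 * Unpin and release every object on @objects, then drain the freed-object
 * worker so that one test cannot pollute the memory region for the next.
 */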
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* Avoid polluting the memory region between tests */
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

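/*
 * Fill the region with objects of increasing prime-numbered page counts,
 * checking that allocation only fails with -ENXIO once there is genuinely
 * not enough space left for the next object.
 */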
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = mem->mm.chunk_size;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

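/*
 * Create an object in @mem, pin its pages and track it on @objects so that
 * it can later be released via igt_object_release() or close_objects().
 */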
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

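/* Undo igt_object_create(): unpin the pages and drop our reference. */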
static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

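/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS: minimum, maximum and randomly sized
 * objects must each occupy a single sg entry, and once the region has been
 * fragmented into SZ_64K blocks, contiguous allocations bigger than the
 * largest free block must fail while non-contiguous ones still succeed.
 */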
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s min object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s max object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (obj->mm.pages->nents != 1) {
		pr_err("%s object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

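/* Write @value to the selected dword within each page backing @vma. */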
static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

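/*
 * Read back the selected dword of every page through an atomic lmem io
 * mapping and check that it contains the expected @val.
 */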
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 __iomem *base;
		u32 read_val;

		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);

		read_val = ioread32(base + dword);
		io_mapping_unmap_atomic(base);
		if (read_val != val) {
			pr_err("n=%lu base[%u]=%u, val=%u\n",
			       n, dword, read_val, val);
			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_pages(obj);
	return err;
}

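/*
 * Pick engines in a random order and, until the timeout expires, write a
 * random dword from the GPU and immediately verify it from the CPU.
 */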
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		err = igt_cpu_check(obj, dword, rng);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

	/* Balance the earlier i915_vma_pin() before dropping our refs */
	i915_vma_unpin(vma);
out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

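/* Smoke test: create a minimal LMEM object and pin its backing store. */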
static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

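/*
 * Allocate a randomly sized LMEM object (up to 32M) and hammer it with GPU
 * writes on every dword-capable engine, verifying each write from the CPU.
 */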
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	mock_file_free(i915, file);
	return err;
}

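/*
 * Pick a uniformly random engine instance from the requested uabi @class,
 * or return NULL if the class has no engines.
 */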
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

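/*
 * Fill an LMEM object from the blitter, then scribble over it from the CPU
 * through a WC mapping using random sizes and alignments, sampling a random
 * dword of each write to check that it landed.
 */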
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
	if (err)
		goto out_unpin;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* We want to throw in a random width/align */
	bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
				     sizeof(u32));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	/* Release the randomised order now that the loop is done with it */
	kfree(order);
out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

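/* Run the allocator tests against a 2G mock region on a mock device. */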
int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	drm_dev_put(&i915->drm);
	return err;
}

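/* The live tests need real device memory; skip if there is no LMEM. */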
int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}