1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #include <linux/highmem.h>
8 #include <linux/prime_numbers.h>
9 
10 #include "gem/i915_gem_internal.h"
11 #include "gem/i915_gem_region.h"
12 #include "gem/i915_gem_ttm.h"
13 #include "gt/intel_engine_pm.h"
14 #include "gt/intel_gpu_commands.h"
15 #include "gt/intel_gt.h"
16 #include "gt/intel_gt_pm.h"
17 #include "gt/intel_migrate.h"
18 #include "i915_ttm_buddy_manager.h"
19 
20 #include "huge_gem_object.h"
21 #include "i915_selftest.h"
22 #include "selftests/i915_random.h"
23 #include "selftests/igt_flush_test.h"
24 #include "selftests/igt_mmap.h"
25 
26 struct tile {
27 	unsigned int width;
28 	unsigned int height;
29 	unsigned int stride;
30 	unsigned int size;
31 	unsigned int tiling;
32 	unsigned int swizzle;
33 };
34 
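/*
 * On platforms that swizzle, the memory controller XORs address bit 6 with
 * one or more higher address bits (9/10/11) when accessing tiled surfaces.
 * swizzle_bit() extracts the requested bit and shifts it down into the bit-6
 * position so it can be XORed into an offset by tiled_offset() below.
 */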
35 static u64 swizzle_bit(unsigned int bit, u64 offset)
36 {
37 	return (offset & BIT_ULL(bit)) >> (bit - 6);
38 }
39 
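/*
 * Convert a linear byte offset into the object into the byte offset at which
 * the same data lands in the backing store once the X/Y tile layout and
 * bit-6 swizzle described by @tile are applied. This mirrors the detiling
 * performed by a fenced GTT access, letting us locate a GGTT write with a
 * plain CPU read.
 */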
40 static u64 tiled_offset(const struct tile *tile, u64 v)
41 {
42 	u64 x, y;
43 
44 	if (tile->tiling == I915_TILING_NONE)
45 		return v;
46 
47 	y = div64_u64_rem(v, tile->stride, &x);
48 	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
49 
50 	if (tile->tiling == I915_TILING_X) {
51 		v += y * tile->width;
52 		v += div64_u64_rem(x, tile->width, &x) << tile->size;
53 		v += x;
54 	} else if (tile->width == 128) {
55 		const unsigned int ytile_span = 16;
56 		const unsigned int ytile_height = 512;
57 
58 		v += y * ytile_span;
59 		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
60 		v += x;
61 	} else {
62 		const unsigned int ytile_span = 32;
63 		const unsigned int ytile_height = 256;
64 
65 		v += y * ytile_span;
66 		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
67 		v += x;
68 	}
69 
70 	switch (tile->swizzle) {
71 	case I915_BIT_6_SWIZZLE_9:
72 		v ^= swizzle_bit(9, v);
73 		break;
74 	case I915_BIT_6_SWIZZLE_9_10:
75 		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
76 		break;
77 	case I915_BIT_6_SWIZZLE_9_11:
78 		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
79 		break;
80 	case I915_BIT_6_SWIZZLE_9_10_11:
81 		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
82 		break;
83 	}
84 
85 	return v;
86 }
87 
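/*
 * Pick a single random page of the object, pin the smallest partial GGTT
 * view that covers it, write a marker through the iomap, and then verify via
 * a CPU kmap of the manually detiled backing page that the write landed
 * where the tiling maths says it should have.
 */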
88 static int check_partial_mapping(struct drm_i915_gem_object *obj,
89 				 const struct tile *tile,
90 				 struct rnd_state *prng)
91 {
92 	const unsigned long npages = obj->base.size / PAGE_SIZE;
93 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
94 	struct i915_ggtt_view view;
95 	struct i915_vma *vma;
96 	unsigned long page;
97 	u32 __iomem *io;
98 	struct page *p;
99 	unsigned int n;
100 	u64 offset;
101 	u32 *cpu;
102 	int err;
103 
104 	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
105 	if (err) {
106 		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
107 		       tile->tiling, tile->stride, err);
108 		return err;
109 	}
110 
111 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
112 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
113 
114 	i915_gem_object_lock(obj, NULL);
115 	err = i915_gem_object_set_to_gtt_domain(obj, true);
116 	i915_gem_object_unlock(obj);
117 	if (err) {
118 		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
119 		return err;
120 	}
121 
122 	page = i915_prandom_u32_max_state(npages, prng);
123 	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
124 
125 	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
126 	if (IS_ERR(vma)) {
127 		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
128 		       page, (int)PTR_ERR(vma));
129 		return PTR_ERR(vma);
130 	}
131 
132 	n = page - view.partial.offset;
133 	GEM_BUG_ON(n >= view.partial.size);
134 
135 	io = i915_vma_pin_iomap(vma);
136 	i915_vma_unpin(vma);
137 	if (IS_ERR(io)) {
138 		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
139 		       page, (int)PTR_ERR(io));
140 		err = PTR_ERR(io);
141 		goto out;
142 	}
143 
144 	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
145 	i915_vma_unpin_iomap(vma);
146 
147 	offset = tiled_offset(tile, page << PAGE_SHIFT);
148 	if (offset >= obj->base.size)
149 		goto out;
150 
151 	intel_gt_flush_ggtt_writes(to_gt(i915));
152 
153 	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
154 	cpu = kmap(p) + offset_in_page(offset);
155 	drm_clflush_virt_range(cpu, sizeof(*cpu));
156 	if (*cpu != (u32)page) {
157 		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
158 		       page, n,
159 		       view.partial.offset,
160 		       view.partial.size,
161 		       vma->size >> PAGE_SHIFT,
162 		       tile->tiling ? tile_row_pages(obj) : 0,
163 		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
164 		       offset >> PAGE_SHIFT,
165 		       (unsigned int)offset_in_page(offset),
166 		       offset,
167 		       (u32)page, *cpu);
168 		err = -EINVAL;
169 	}
170 	*cpu = 0;
171 	drm_clflush_virt_range(cpu, sizeof(*cpu));
172 	kunmap(p);
173 
174 out:
175 	i915_gem_object_lock(obj, NULL);
176 	i915_vma_destroy(vma);
177 	i915_gem_object_unlock(obj);
178 	return err;
179 }
180 
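/*
 * As check_partial_mapping(), but walk the object at prime-numbered page
 * offsets so that successive partial views start at varied positions,
 * stopping when the timeout expires.
 */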
181 static int check_partial_mappings(struct drm_i915_gem_object *obj,
182 				  const struct tile *tile,
183 				  unsigned long end_time)
184 {
185 	const unsigned int nreal = obj->scratch / PAGE_SIZE;
186 	const unsigned long npages = obj->base.size / PAGE_SIZE;
187 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
188 	struct i915_vma *vma;
189 	unsigned long page;
190 	int err;
191 
192 	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
193 	if (err) {
194 		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
195 		       tile->tiling, tile->stride, err);
196 		return err;
197 	}
198 
199 	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
200 	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
201 
202 	i915_gem_object_lock(obj, NULL);
203 	err = i915_gem_object_set_to_gtt_domain(obj, true);
204 	i915_gem_object_unlock(obj);
205 	if (err) {
206 		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
207 		return err;
208 	}
209 
210 	for_each_prime_number_from(page, 1, npages) {
211 		struct i915_ggtt_view view =
212 			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
213 		u32 __iomem *io;
214 		struct page *p;
215 		unsigned int n;
216 		u64 offset;
217 		u32 *cpu;
218 
219 		GEM_BUG_ON(view.partial.size > nreal);
220 		cond_resched();
221 
222 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
223 		if (IS_ERR(vma)) {
224 			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
225 			       page, (int)PTR_ERR(vma));
226 			return PTR_ERR(vma);
227 		}
228 
229 		n = page - view.partial.offset;
230 		GEM_BUG_ON(n >= view.partial.size);
231 
232 		io = i915_vma_pin_iomap(vma);
233 		i915_vma_unpin(vma);
234 		if (IS_ERR(io)) {
235 			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
236 			       page, (int)PTR_ERR(io));
237 			return PTR_ERR(io);
238 		}
239 
240 		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
241 		i915_vma_unpin_iomap(vma);
242 
243 		offset = tiled_offset(tile, page << PAGE_SHIFT);
244 		if (offset >= obj->base.size)
245 			continue;
246 
247 		intel_gt_flush_ggtt_writes(to_gt(i915));
248 
249 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
250 		cpu = kmap(p) + offset_in_page(offset);
251 		drm_clflush_virt_range(cpu, sizeof(*cpu));
252 		if (*cpu != (u32)page) {
253 			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
254 			       page, n,
255 			       view.partial.offset,
256 			       view.partial.size,
257 			       vma->size >> PAGE_SHIFT,
258 			       tile->tiling ? tile_row_pages(obj) : 0,
259 			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
260 			       offset >> PAGE_SHIFT,
261 			       (unsigned int)offset_in_page(offset),
262 			       offset,
263 			       (u32)page, *cpu);
264 			err = -EINVAL;
265 		}
266 		*cpu = 0;
267 		drm_clflush_virt_range(cpu, sizeof(*cpu));
268 		kunmap(p);
269 		if (err)
270 			return err;
271 
272 		i915_gem_object_lock(obj, NULL);
273 		i915_vma_destroy(vma);
274 		i915_gem_object_unlock(obj);
275 
276 		if (igt_timeout(end_time,
277 				"%s: timed out after tiling=%d stride=%d\n",
278 				__func__, tile->tiling, tile->stride))
279 			return -EINTR;
280 	}
281 
282 	return 0;
283 }
284 
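/*
 * Fill in the tile geometry (width, height, size) appropriate for the GPU
 * generation and tiling mode, and return the maximum fence pitch expressed
 * in units of tile widths.
 */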
285 static unsigned int
286 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
287 {
288 	if (GRAPHICS_VER(i915) <= 2) {
289 		tile->height = 16;
290 		tile->width = 128;
291 		tile->size = 11;
292 	} else if (tile->tiling == I915_TILING_Y &&
293 		   HAS_128_BYTE_Y_TILING(i915)) {
294 		tile->height = 32;
295 		tile->width = 128;
296 		tile->size = 12;
297 	} else {
298 		tile->height = 8;
299 		tile->width = 512;
300 		tile->size = 12;
301 	}
302 
303 	if (GRAPHICS_VER(i915) < 4)
304 		return 8192 / tile->width;
305 	else if (GRAPHICS_VER(i915) < 7)
306 		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
307 	else
308 		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
309 }
310 
311 static int igt_partial_tiling(void *arg)
312 {
313 	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
314 	struct drm_i915_private *i915 = arg;
315 	struct drm_i915_gem_object *obj;
316 	intel_wakeref_t wakeref;
317 	int tiling;
318 	int err;
319 
320 	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
321 		return 0;
322 
323 	/* We want to check the page mapping and fencing of a large object
324 	 * mmapped through the GTT. The object we create is larger than can
325 	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
326 	 * We then check that a write through each partial GGTT vma ends up
327 	 * in the right set of pages within the object, and with the expected
328 	 * tiling, which we verify by manual swizzling.
329 	 */
330 
331 	obj = huge_gem_object(i915,
332 			      nreal << PAGE_SHIFT,
333 			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
334 	if (IS_ERR(obj))
335 		return PTR_ERR(obj);
336 
337 	err = i915_gem_object_pin_pages_unlocked(obj);
338 	if (err) {
339 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
340 		       nreal, obj->base.size / PAGE_SIZE, err);
341 		goto out;
342 	}
343 
344 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
345 
346 	if (1) {
347 		IGT_TIMEOUT(end);
348 		struct tile tile;
349 
350 		tile.height = 1;
351 		tile.width = 1;
352 		tile.size = 0;
353 		tile.stride = 0;
354 		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
355 		tile.tiling = I915_TILING_NONE;
356 
357 		err = check_partial_mappings(obj, &tile, end);
358 		if (err && err != -EINTR)
359 			goto out_unlock;
360 	}
361 
362 	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
363 		IGT_TIMEOUT(end);
364 		unsigned int max_pitch;
365 		unsigned int pitch;
366 		struct tile tile;
367 
368 		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
369 			/*
370 			 * The swizzling pattern is actually unknown as it
371 			 * varies based on the physical address of each page.
372 			 * See i915_gem_detect_bit_6_swizzle().
373 			 */
374 			break;
375 
376 		tile.tiling = tiling;
377 		switch (tiling) {
378 		case I915_TILING_X:
379 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
380 			break;
381 		case I915_TILING_Y:
382 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
383 			break;
384 		}
385 
386 		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
387 		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
388 		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
389 			continue;
390 
391 		max_pitch = setup_tile_size(&tile, i915);
392 
393 		for (pitch = max_pitch; pitch; pitch >>= 1) {
394 			tile.stride = tile.width * pitch;
395 			err = check_partial_mappings(obj, &tile, end);
396 			if (err == -EINTR)
397 				goto next_tiling;
398 			if (err)
399 				goto out_unlock;
400 
401 			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
402 				tile.stride = tile.width * (pitch - 1);
403 				err = check_partial_mappings(obj, &tile, end);
404 				if (err == -EINTR)
405 					goto next_tiling;
406 				if (err)
407 					goto out_unlock;
408 			}
409 
410 			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
411 				tile.stride = tile.width * (pitch + 1);
412 				err = check_partial_mappings(obj, &tile, end);
413 				if (err == -EINTR)
414 					goto next_tiling;
415 				if (err)
416 					goto out_unlock;
417 			}
418 		}
419 
420 		if (GRAPHICS_VER(i915) >= 4) {
421 			for_each_prime_number(pitch, max_pitch) {
422 				tile.stride = tile.width * pitch;
423 				err = check_partial_mappings(obj, &tile, end);
424 				if (err == -EINTR)
425 					goto next_tiling;
426 				if (err)
427 					goto out_unlock;
428 			}
429 		}
430 
431 next_tiling: ;
432 	}
433 
434 out_unlock:
435 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
436 	i915_gem_object_unpin_pages(obj);
437 out:
438 	i915_gem_object_put(obj);
439 	return err;
440 }
441 
442 static int igt_smoke_tiling(void *arg)
443 {
444 	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
445 	struct drm_i915_private *i915 = arg;
446 	struct drm_i915_gem_object *obj;
447 	intel_wakeref_t wakeref;
448 	I915_RND_STATE(prng);
449 	unsigned long count;
450 	IGT_TIMEOUT(end);
451 	int err;
452 
453 	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
454 		return 0;
455 
456 	/*
457 	 * igt_partial_tiling() does an exhaustive check of partial tiling
458 	 * chunking, but will undoubtedly run out of time. Here, we do a
459 	 * randomised search and hope that over many 1s runs with different
460 	 * seeds we will do a thorough check.
461 	 *
462 	 * Remember to look at the st_seed if we see a flip-flop in BAT!
463 	 */
464 
465 	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
466 		return 0;
467 
468 	obj = huge_gem_object(i915,
469 			      nreal << PAGE_SHIFT,
470 			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
471 	if (IS_ERR(obj))
472 		return PTR_ERR(obj);
473 
474 	err = i915_gem_object_pin_pages_unlocked(obj);
475 	if (err) {
476 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
477 		       nreal, obj->base.size / PAGE_SIZE, err);
478 		goto out;
479 	}
480 
481 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
482 
483 	count = 0;
484 	do {
485 		struct tile tile;
486 
487 		tile.tiling =
488 			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
489 		switch (tile.tiling) {
490 		case I915_TILING_NONE:
491 			tile.height = 1;
492 			tile.width = 1;
493 			tile.size = 0;
494 			tile.stride = 0;
495 			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
496 			break;
497 
498 		case I915_TILING_X:
499 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
500 			break;
501 		case I915_TILING_Y:
502 			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
503 			break;
504 		}
505 
506 		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
507 		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
508 			continue;
509 
510 		if (tile.tiling != I915_TILING_NONE) {
511 			unsigned int max_pitch = setup_tile_size(&tile, i915);
512 
513 			tile.stride =
514 				i915_prandom_u32_max_state(max_pitch, &prng);
515 			tile.stride = (1 + tile.stride) * tile.width;
516 			if (GRAPHICS_VER(i915) < 4)
517 				tile.stride = rounddown_pow_of_two(tile.stride);
518 		}
519 
520 		err = check_partial_mapping(obj, &tile, &prng);
521 		if (err)
522 			break;
523 
524 		count++;
525 	} while (!__igt_timeout(end, NULL));
526 
527 	pr_info("%s: Completed %lu trials\n", __func__, count);
528 
529 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
530 	i915_gem_object_unpin_pages(obj);
531 out:
532 	i915_gem_object_put(obj);
533 	return err;
534 }
535 
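/*
 * Keep the object busy by submitting a dummy request against it on every
 * uabi engine, then drop our reference so that it stays alive only through
 * its active reference until those requests retire.
 */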
536 static int make_obj_busy(struct drm_i915_gem_object *obj)
537 {
538 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
539 	struct intel_engine_cs *engine;
540 
541 	for_each_uabi_engine(engine, i915) {
542 		struct i915_request *rq;
543 		struct i915_vma *vma;
544 		struct i915_gem_ww_ctx ww;
545 		int err;
546 
547 		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
548 		if (IS_ERR(vma))
549 			return PTR_ERR(vma);
550 
551 		i915_gem_ww_ctx_init(&ww, false);
552 retry:
553 		err = i915_gem_object_lock(obj, &ww);
554 		if (!err)
555 			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
556 		if (err)
557 			goto err;
558 
559 		rq = intel_engine_create_kernel_request(engine);
560 		if (IS_ERR(rq)) {
561 			err = PTR_ERR(rq);
562 			goto err_unpin;
563 		}
564 
565 		err = i915_request_await_object(rq, vma->obj, true);
566 		if (err == 0)
567 			err = i915_vma_move_to_active(vma, rq,
568 						      EXEC_OBJECT_WRITE);
569 
570 		i915_request_add(rq);
571 err_unpin:
572 		i915_vma_unpin(vma);
573 err:
574 		if (err == -EDEADLK) {
575 			err = i915_gem_ww_ctx_backoff(&ww);
576 			if (!err)
577 				goto retry;
578 		}
579 		i915_gem_ww_ctx_fini(&ww);
580 		if (err)
581 			return err;
582 	}
583 
584 	i915_gem_object_put(obj); /* leave it alive only via its active ref */
585 	return 0;
586 }
587 
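/*
 * On platforms with device-local memory only the fixed (TTM) mmap type is
 * supported, so use that; otherwise default to the legacy GTT mmap.
 */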
588 static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
589 {
590 	if (HAS_LMEM(i915))
591 		return I915_MMAP_TYPE_FIXED;
592 
593 	return I915_MMAP_TYPE_GTT;
594 }
595 
596 static struct drm_i915_gem_object *
597 create_sys_or_internal(struct drm_i915_private *i915,
598 		       unsigned long size)
599 {
600 	if (HAS_LMEM(i915)) {
601 		struct intel_memory_region *sys_region =
602 			i915->mm.regions[INTEL_REGION_SMEM];
603 
604 		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
605 	}
606 
607 	return i915_gem_object_create_internal(i915, size);
608 }
609 
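/*
 * Create a throwaway object of the given size and try to reserve an mmap
 * offset for it, returning true iff the outcome matches the expected errno
 * (or 0 for success).
 */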
610 static bool assert_mmap_offset(struct drm_i915_private *i915,
611 			       unsigned long size,
612 			       int expected)
613 {
614 	struct drm_i915_gem_object *obj;
615 	u64 offset;
616 	int ret;
617 
618 	obj = create_sys_or_internal(i915, size);
619 	if (IS_ERR(obj))
620 		return expected && expected == PTR_ERR(obj);
621 
622 	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
623 	i915_gem_object_put(obj);
624 
625 	return ret == expected;
626 }
627 
628 static void disable_retire_worker(struct drm_i915_private *i915)
629 {
630 	i915_gem_driver_unregister__shrinker(i915);
631 	intel_gt_pm_get(to_gt(i915));
632 	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
633 }
634 
635 static void restore_retire_worker(struct drm_i915_private *i915)
636 {
637 	igt_flush_test(i915);
638 	intel_gt_pm_put(to_gt(i915));
639 	i915_gem_driver_register__shrinker(i915);
640 }
641 
642 static void mmap_offset_lock(struct drm_i915_private *i915)
643 	__acquires(&i915->drm.vma_offset_manager->vm_lock)
644 {
645 	write_lock(&i915->drm.vma_offset_manager->vm_lock);
646 }
647 
648 static void mmap_offset_unlock(struct drm_i915_private *i915)
649 	__releases(&i915->drm.vma_offset_manager->vm_lock)
650 {
651 	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
652 }
653 
654 static int igt_mmap_offset_exhaustion(void *arg)
655 {
656 	struct drm_i915_private *i915 = arg;
657 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
658 	struct drm_i915_gem_object *obj;
659 	struct drm_mm_node *hole, *next;
660 	int loop, err = 0;
661 	u64 offset;
662 	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
663 
664 	/* Disable background reaper */
665 	disable_retire_worker(i915);
666 	GEM_BUG_ON(!to_gt(i915)->awake);
667 	intel_gt_retire_requests(to_gt(i915));
668 	i915_gem_drain_freed_objects(i915);
669 
670 	/* Trim the device mmap space to only a page */
671 	mmap_offset_lock(i915);
672 	loop = 1; /* PAGE_SIZE units */
673 	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
674 		struct drm_mm_node *resv;
675 
676 		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
677 		if (!resv) {
678 			err = -ENOMEM;
679 			goto out_park;
680 		}
681 
682 		resv->start = drm_mm_hole_node_start(hole) + loop;
683 		resv->size = hole->hole_size - loop;
684 		resv->color = -1ul;
685 		loop = 0;
686 
687 		if (!resv->size) {
688 			kfree(resv);
689 			continue;
690 		}
691 
692 		pr_debug("Reserving hole [%llx + %llx]\n",
693 			 resv->start, resv->size);
694 
695 		err = drm_mm_reserve_node(mm, resv);
696 		if (err) {
697 			pr_err("Failed to trim VMA manager, err=%d\n", err);
698 			kfree(resv);
699 			goto out_park;
700 		}
701 	}
702 	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
703 	mmap_offset_unlock(i915);
704 
705 	/* Just fits! */
706 	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
707 		pr_err("Unable to insert object into single page hole\n");
708 		err = -EINVAL;
709 		goto out;
710 	}
711 
712 	/* Too large */
713 	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
714 		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
715 		err = -EINVAL;
716 		goto out;
717 	}
718 
719 	/* Fill the hole, further allocation attempts should then fail */
720 	obj = create_sys_or_internal(i915, PAGE_SIZE);
721 	if (IS_ERR(obj)) {
722 		err = PTR_ERR(obj);
723 		pr_err("Unable to create object for reclaimed hole\n");
724 		goto out;
725 	}
726 
727 	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
728 	if (err) {
729 		pr_err("Unable to insert object into reclaimed hole\n");
730 		goto err_obj;
731 	}
732 
733 	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
734 		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
735 		err = -EINVAL;
736 		goto err_obj;
737 	}
738 
739 	i915_gem_object_put(obj);
740 
741 	/* Now fill with busy dead objects that we expect to reap */
742 	for (loop = 0; loop < 3; loop++) {
743 		if (intel_gt_is_wedged(to_gt(i915)))
744 			break;
745 
746 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
747 		if (IS_ERR(obj)) {
748 			err = PTR_ERR(obj);
749 			goto out;
750 		}
751 
752 		err = make_obj_busy(obj);
753 		if (err) {
754 			pr_err("[loop %d] Failed to busy the object\n", loop);
755 			goto err_obj;
756 		}
757 	}
758 
759 out:
760 	mmap_offset_lock(i915);
761 out_park:
762 	drm_mm_for_each_node_safe(hole, next, mm) {
763 		if (hole->color != -1ul)
764 			continue;
765 
766 		drm_mm_remove_node(hole);
767 		kfree(hole);
768 	}
769 	mmap_offset_unlock(i915);
770 	restore_retire_worker(i915);
771 	return err;
772 err_obj:
773 	i915_gem_object_put(obj);
774 	goto out;
775 }
776 
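/*
 * gtt_set()/gtt_check() and wc_set()/wc_check() below fill and verify the
 * object's backing store through an independent path (a GGTT iomap or a WC
 * kernel map) so that writes made through the user mmap can be
 * cross-checked.
 */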
777 static int gtt_set(struct drm_i915_gem_object *obj)
778 {
779 	struct i915_vma *vma;
780 	void __iomem *map;
781 	int err = 0;
782 
783 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
784 	if (IS_ERR(vma))
785 		return PTR_ERR(vma);
786 
787 	intel_gt_pm_get(vma->vm->gt);
788 	map = i915_vma_pin_iomap(vma);
789 	i915_vma_unpin(vma);
790 	if (IS_ERR(map)) {
791 		err = PTR_ERR(map);
792 		goto out;
793 	}
794 
795 	memset_io(map, POISON_INUSE, obj->base.size);
796 	i915_vma_unpin_iomap(vma);
797 
798 out:
799 	intel_gt_pm_put(vma->vm->gt);
800 	return err;
801 }
802 
803 static int gtt_check(struct drm_i915_gem_object *obj)
804 {
805 	struct i915_vma *vma;
806 	void __iomem *map;
807 	int err = 0;
808 
809 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
810 	if (IS_ERR(vma))
811 		return PTR_ERR(vma);
812 
813 	intel_gt_pm_get(vma->vm->gt);
814 	map = i915_vma_pin_iomap(vma);
815 	i915_vma_unpin(vma);
816 	if (IS_ERR(map)) {
817 		err = PTR_ERR(map);
818 		goto out;
819 	}
820 
821 	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
822 		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
823 		       obj->mm.region->name);
824 		err = -EINVAL;
825 	}
826 	i915_vma_unpin_iomap(vma);
827 
828 out:
829 	intel_gt_pm_put(vma->vm->gt);
830 	return err;
831 }
832 
833 static int wc_set(struct drm_i915_gem_object *obj)
834 {
835 	void *vaddr;
836 
837 	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
838 	if (IS_ERR(vaddr))
839 		return PTR_ERR(vaddr);
840 
841 	memset(vaddr, POISON_INUSE, obj->base.size);
842 	i915_gem_object_flush_map(obj);
843 	i915_gem_object_unpin_map(obj);
844 
845 	return 0;
846 }
847 
848 static int wc_check(struct drm_i915_gem_object *obj)
849 {
850 	void *vaddr;
851 	int err = 0;
852 
853 	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
854 	if (IS_ERR(vaddr))
855 		return PTR_ERR(vaddr);
856 
857 	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
858 		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
859 		       obj->mm.region->name);
860 		err = -EINVAL;
861 	}
862 	i915_gem_object_unpin_map(obj);
863 
864 	return err;
865 }
866 
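/*
 * Not every combination of object and mmap type is supported: objects that
 * provide their own mmap_offset op (TTM-backed) only accept the fixed type,
 * GTT mmaps need a mappable aperture, and the CPU mmap types need struct
 * pages or iomem to fault in.
 */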
867 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
868 {
869 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
870 	bool no_map;
871 
872 	if (obj->ops->mmap_offset)
873 		return type == I915_MMAP_TYPE_FIXED;
874 	else if (type == I915_MMAP_TYPE_FIXED)
875 		return false;
876 
877 	if (type == I915_MMAP_TYPE_GTT &&
878 	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
879 		return false;
880 
881 	i915_gem_object_lock(obj, NULL);
882 	no_map = (type != I915_MMAP_TYPE_GTT &&
883 		  !i915_gem_object_has_struct_page(obj) &&
884 		  !i915_gem_object_has_iomem(obj));
885 	i915_gem_object_unlock(obj);
886 
887 	return !no_map;
888 }
889 
890 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
891 static int __igt_mmap(struct drm_i915_private *i915,
892 		      struct drm_i915_gem_object *obj,
893 		      enum i915_mmap_type type)
894 {
895 	struct vm_area_struct *area;
896 	unsigned long addr;
897 	int err, i;
898 	u64 offset;
899 
900 	if (!can_mmap(obj, type))
901 		return 0;
902 
903 	err = wc_set(obj);
904 	if (err == -ENXIO)
905 		err = gtt_set(obj);
906 	if (err)
907 		return err;
908 
909 	err = __assign_mmap_offset(obj, type, &offset, NULL);
910 	if (err)
911 		return err;
912 
913 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
914 	if (IS_ERR_VALUE(addr))
915 		return addr;
916 
917 	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
918 
919 	mmap_read_lock(current->mm);
920 	area = vma_lookup(current->mm, addr);
921 	mmap_read_unlock(current->mm);
922 	if (!area) {
923 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
924 		       obj->mm.region->name);
925 		err = -EINVAL;
926 		goto out_unmap;
927 	}
928 
929 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
930 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
931 		u32 x;
932 
933 		if (get_user(x, ux)) {
934 			pr_err("%s: Unable to read from mmap, offset:%zd\n",
935 			       obj->mm.region->name, i * sizeof(x));
936 			err = -EFAULT;
937 			goto out_unmap;
938 		}
939 
940 		if (x != expand32(POISON_INUSE)) {
941 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
942 			       obj->mm.region->name,
943 			       i * sizeof(x), x, expand32(POISON_INUSE));
944 			err = -EINVAL;
945 			goto out_unmap;
946 		}
947 
948 		x = expand32(POISON_FREE);
949 		if (put_user(x, ux)) {
950 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
951 			       obj->mm.region->name, i * sizeof(x));
952 			err = -EFAULT;
953 			goto out_unmap;
954 		}
955 	}
956 
957 	if (type == I915_MMAP_TYPE_GTT)
958 		intel_gt_flush_ggtt_writes(to_gt(i915));
959 
960 	err = wc_check(obj);
961 	if (err == -ENXIO)
962 		err = gtt_check(obj);
963 out_unmap:
964 	vm_munmap(addr, obj->base.size);
965 	return err;
966 }
967 
968 static int igt_mmap(void *arg)
969 {
970 	struct drm_i915_private *i915 = arg;
971 	struct intel_memory_region *mr;
972 	enum intel_region_id id;
973 
974 	for_each_memory_region(mr, i915, id) {
975 		unsigned long sizes[] = {
976 			PAGE_SIZE,
977 			mr->min_page_size,
978 			SZ_4M,
979 		};
980 		int i;
981 
982 		if (mr->private)
983 			continue;
984 
985 		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
986 			struct drm_i915_gem_object *obj;
987 			int err;
988 
989 			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
990 			if (obj == ERR_PTR(-ENODEV))
991 				continue;
992 
993 			if (IS_ERR(obj))
994 				return PTR_ERR(obj);
995 
996 			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
997 			if (err == 0)
998 				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
999 			if (err == 0)
1000 				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
1001 
1002 			i915_gem_object_put(obj);
1003 			if (err)
1004 				return err;
1005 		}
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 static void igt_close_objects(struct drm_i915_private *i915,
1012 			      struct list_head *objects)
1013 {
1014 	struct drm_i915_gem_object *obj, *on;
1015 
1016 	list_for_each_entry_safe(obj, on, objects, st_link) {
1017 		i915_gem_object_lock(obj, NULL);
1018 		if (i915_gem_object_has_pinned_pages(obj))
1019 			i915_gem_object_unpin_pages(obj);
1020 		/* Don't pollute the memory region between tests */
1021 		__i915_gem_object_put_pages(obj);
1022 		i915_gem_object_unlock(obj);
1023 		list_del(&obj->st_link);
1024 		i915_gem_object_put(obj);
1025 	}
1026 
1027 	cond_resched();
1028 
1029 	i915_gem_drain_freed_objects(i915);
1030 }
1031 
1032 static void igt_make_evictable(struct list_head *objects)
1033 {
1034 	struct drm_i915_gem_object *obj;
1035 
1036 	list_for_each_entry(obj, objects, st_link) {
1037 		i915_gem_object_lock(obj, NULL);
1038 		if (i915_gem_object_has_pinned_pages(obj))
1039 			i915_gem_object_unpin_pages(obj);
1040 		i915_gem_object_unlock(obj);
1041 	}
1042 
1043 	cond_resched();
1044 }
1045 
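/*
 * Exhaust the CPU-visible (mappable) portion of the region by allocating and
 * pinning objects, halving the request size whenever an allocation fails,
 * until even a minimum-page-sized object no longer fits.
 */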
1046 static int igt_fill_mappable(struct intel_memory_region *mr,
1047 			     struct list_head *objects)
1048 {
1049 	u64 size, total;
1050 	int err;
1051 
1052 	total = 0;
1053 	size = mr->io_size;
1054 	do {
1055 		struct drm_i915_gem_object *obj;
1056 
1057 		obj = i915_gem_object_create_region(mr, size, 0, 0);
1058 		if (IS_ERR(obj)) {
1059 			err = PTR_ERR(obj);
1060 			goto err_close;
1061 		}
1062 
1063 		list_add(&obj->st_link, objects);
1064 
1065 		err = i915_gem_object_pin_pages_unlocked(obj);
1066 		if (err) {
1067 			if (err != -ENXIO && err != -ENOMEM)
1068 				goto err_close;
1069 
1070 			if (size == mr->min_page_size) {
1071 				err = 0;
1072 				break;
1073 			}
1074 
1075 			size >>= 1;
1076 			continue;
1077 		}
1078 
1079 		total += obj->base.size;
1080 	} while (1);
1081 
1082 	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
1083 	return 0;
1084 
1085 err_close:
1086 	igt_close_objects(mr->i915, objects);
1087 	return err;
1088 }
1089 
1090 static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1091 			       struct drm_i915_gem_object *obj,
1092 			       unsigned long addr,
1093 			       bool unfaultable)
1094 {
1095 	struct vm_area_struct *area;
1096 	int err = 0, i;
1097 
1098 	pr_info("igt_mmap(%s, %d) @ %lx\n",
1099 		obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
1100 
1101 	mmap_read_lock(current->mm);
1102 	area = vma_lookup(current->mm, addr);
1103 	mmap_read_unlock(current->mm);
1104 	if (!area) {
1105 		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1106 		       obj->mm.region->name);
1107 		err = -EINVAL;
1108 		goto out_unmap;
1109 	}
1110 
1111 	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
1112 		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
1113 		u32 x;
1114 
1115 		if (get_user(x, ux)) {
1116 			err = -EFAULT;
1117 			if (!unfaultable) {
1118 				pr_err("%s: Unable to read from mmap, offset:%zd\n",
1119 				       obj->mm.region->name, i * sizeof(x));
1120 				goto out_unmap;
1121 			}
1122 
1123 			continue;
1124 		}
1125 
1126 		if (unfaultable) {
1127 			pr_err("%s: Faulted unmappable memory\n",
1128 			       obj->mm.region->name);
1129 			err = -EINVAL;
1130 			goto out_unmap;
1131 		}
1132 
1133 		if (x != expand32(POISON_INUSE)) {
1134 			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1135 			       obj->mm.region->name,
1136 			       i * sizeof(x), x, expand32(POISON_INUSE));
1137 			err = -EINVAL;
1138 			goto out_unmap;
1139 		}
1140 
1141 		x = expand32(POISON_FREE);
1142 		if (put_user(x, ux)) {
1143 			pr_err("%s: Unable to write to mmap, offset:%zd\n",
1144 			       obj->mm.region->name, i * sizeof(x));
1145 			err = -EFAULT;
1146 			goto out_unmap;
1147 		}
1148 	}
1149 
1150 	if (unfaultable) {
1151 		if (err == -EFAULT)
1152 			err = 0;
1153 	} else {
1154 		obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
1155 		err = wc_check(obj);
1156 	}
1157 out_unmap:
1158 	vm_munmap(addr, obj->base.size);
1159 	return err;
1160 }
1161 
1162 #define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
1163 #define IGT_MMAP_MIGRATE_FILL        (1 << 1)
1164 #define IGT_MMAP_MIGRATE_EVICTABLE   (1 << 2)
1165 #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
1166 static int __igt_mmap_migrate(struct intel_memory_region **placements,
1167 			      int n_placements,
1168 			      struct intel_memory_region *expected_mr,
1169 			      unsigned int flags)
1170 {
1171 	struct drm_i915_private *i915 = placements[0]->i915;
1172 	struct drm_i915_gem_object *obj;
1173 	struct i915_request *rq = NULL;
1174 	unsigned long addr;
1175 	LIST_HEAD(objects);
1176 	u64 offset;
1177 	int err;
1178 
1179 	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1180 					    placements,
1181 					    n_placements);
1182 	if (IS_ERR(obj))
1183 		return PTR_ERR(obj);
1184 
1185 	if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
1186 		obj->flags |= I915_BO_ALLOC_GPU_ONLY;
1187 
1188 	err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1189 	if (err)
1190 		goto out_put;
1191 
1192 	/*
1193 	 * This will eventually create a GEM context, due to opening a dummy drm
1194 	 * file, which needs a tiny amount of mappable device memory for the top
1195 	 * level paging structures (and perhaps scratch), so make sure we
1196 	 * allocate early, to avoid tears.
1197 	 */
1198 	addr = igt_mmap_offset(i915, offset, obj->base.size,
1199 			       PROT_WRITE, MAP_SHARED);
1200 	if (IS_ERR_VALUE(addr)) {
1201 		err = addr;
1202 		goto out_put;
1203 	}
1204 
1205 	if (flags & IGT_MMAP_MIGRATE_FILL) {
1206 		err = igt_fill_mappable(placements[0], &objects);
1207 		if (err)
1208 			goto out_put;
1209 	}
1210 
1211 	err = i915_gem_object_lock(obj, NULL);
1212 	if (err)
1213 		goto out_put;
1214 
1215 	err = i915_gem_object_pin_pages(obj);
1216 	if (err) {
1217 		i915_gem_object_unlock(obj);
1218 		goto out_put;
1219 	}
1220 
1221 	err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
1222 					  obj->mm.pages->sgl, obj->cache_level,
1223 					  i915_gem_object_is_lmem(obj),
1224 					  expand32(POISON_INUSE), &rq);
1225 	i915_gem_object_unpin_pages(obj);
1226 	if (rq) {
1227 		err = dma_resv_reserve_fences(obj->base.resv, 1);
1228 		if (!err)
1229 			dma_resv_add_fence(obj->base.resv, &rq->fence,
1230 					   DMA_RESV_USAGE_KERNEL);
1231 		i915_request_put(rq);
1232 	}
1233 	i915_gem_object_unlock(obj);
1234 	if (err)
1235 		goto out_put;
1236 
1237 	if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
1238 		igt_make_evictable(&objects);
1239 
1240 	err = ___igt_mmap_migrate(i915, obj, addr,
1241 				  flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1242 	if (!err && obj->mm.region != expected_mr) {
1243 		pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
1244 		err = -EINVAL;
1245 	}
1246 
1247 out_put:
1248 	i915_gem_object_put(obj);
1249 	igt_close_objects(i915, &objects);
1250 	return err;
1251 }
1252 
1253 static int igt_mmap_migrate(void *arg)
1254 {
1255 	struct drm_i915_private *i915 = arg;
1256 	struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1257 	struct intel_memory_region *mr;
1258 	enum intel_region_id id;
1259 
1260 	for_each_memory_region(mr, i915, id) {
1261 		struct intel_memory_region *mixed[] = { mr, system };
1262 		struct intel_memory_region *single[] = { mr };
1263 		struct ttm_resource_manager *man = mr->region_private;
1264 		resource_size_t saved_io_size;
1265 		int err;
1266 
1267 		if (mr->private)
1268 			continue;
1269 
1270 		if (!mr->io_size)
1271 			continue;
1272 
1273 		/*
1274 		 * For testing purposes, force a small BAR if one is not already
1275 		 * present.
1276 		 */
1277 		saved_io_size = mr->io_size;
1278 		if (mr->io_size == mr->total) {
1279 			resource_size_t io_size = mr->io_size;
1280 
1281 			io_size = rounddown_pow_of_two(io_size >> 1);
1282 			if (io_size < PAGE_SIZE)
1283 				continue;
1284 
1285 			mr->io_size = io_size;
1286 			i915_ttm_buddy_man_force_visible_size(man,
1287 							      io_size >> PAGE_SHIFT);
1288 		}
1289 
1290 		/*
1291 		 * Allocate in the mappable portion, should be no surprises here.
1292 		 */
1293 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1294 		if (err)
1295 			goto out_io_size;
1296 
1297 		/*
1298 		 * Allocate in the non-mappable portion, but force migrating to
1299 		 * the mappable portion on fault (LMEM -> LMEM)
1300 		 */
1301 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1302 					 IGT_MMAP_MIGRATE_TOPDOWN |
1303 					 IGT_MMAP_MIGRATE_FILL |
1304 					 IGT_MMAP_MIGRATE_EVICTABLE);
1305 		if (err)
1306 			goto out_io_size;
1307 
1308 		/*
1309 		 * Allocate in the non-mappable portion, but force spilling into
1310 		 * system memory on fault (LMEM -> SMEM)
1311 		 */
1312 		err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1313 					 IGT_MMAP_MIGRATE_TOPDOWN |
1314 					 IGT_MMAP_MIGRATE_FILL);
1315 		if (err)
1316 			goto out_io_size;
1317 
1318 		/*
1319 		 * Allocate in the non-mappable portion, but since the mappable
1320 		 * portion is already full and we can't spill to system memory,
1321 		 * we should expect the fault to fail.
1322 		 */
1323 		err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1324 					 IGT_MMAP_MIGRATE_TOPDOWN |
1325 					 IGT_MMAP_MIGRATE_FILL |
1326 					 IGT_MMAP_MIGRATE_UNFAULTABLE);
1327 out_io_size:
1328 		mr->io_size = saved_io_size;
1329 		i915_ttm_buddy_man_force_visible_size(man,
1330 						      mr->io_size >> PAGE_SHIFT);
1331 		if (err)
1332 			return err;
1333 	}
1334 
1335 	return 0;
1336 }
1337 
1338 static const char *repr_mmap_type(enum i915_mmap_type type)
1339 {
1340 	switch (type) {
1341 	case I915_MMAP_TYPE_GTT: return "gtt";
1342 	case I915_MMAP_TYPE_WB: return "wb";
1343 	case I915_MMAP_TYPE_WC: return "wc";
1344 	case I915_MMAP_TYPE_UC: return "uc";
1345 	case I915_MMAP_TYPE_FIXED: return "fixed";
1346 	default: return "unknown";
1347 	}
1348 }
1349 
1350 static bool can_access(struct drm_i915_gem_object *obj)
1351 {
1352 	bool access;
1353 
1354 	i915_gem_object_lock(obj, NULL);
1355 	access = i915_gem_object_has_struct_page(obj) ||
1356 		i915_gem_object_has_iomem(obj);
1357 	i915_gem_object_unlock(obj);
1358 
1359 	return access;
1360 }
1361 
1362 static int __igt_mmap_access(struct drm_i915_private *i915,
1363 			     struct drm_i915_gem_object *obj,
1364 			     enum i915_mmap_type type)
1365 {
1366 	unsigned long __user *ptr;
1367 	unsigned long A, B;
1368 	unsigned long x, y;
1369 	unsigned long addr;
1370 	int err;
1371 	u64 offset;
1372 
1373 	memset(&A, 0xAA, sizeof(A));
1374 	memset(&B, 0xBB, sizeof(B));
1375 
1376 	if (!can_mmap(obj, type) || !can_access(obj))
1377 		return 0;
1378 
1379 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1380 	if (err)
1381 		return err;
1382 
1383 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1384 	if (IS_ERR_VALUE(addr))
1385 		return addr;
1386 	ptr = (unsigned long __user *)addr;
1387 
1388 	err = __put_user(A, ptr);
1389 	if (err) {
1390 		pr_err("%s(%s): failed to write into user mmap\n",
1391 		       obj->mm.region->name, repr_mmap_type(type));
1392 		goto out_unmap;
1393 	}
1394 
1395 	intel_gt_flush_ggtt_writes(to_gt(i915));
1396 
1397 	err = access_process_vm(current, addr, &x, sizeof(x), 0);
1398 	if (err != sizeof(x)) {
1399 		pr_err("%s(%s): access_process_vm() read failed\n",
1400 		       obj->mm.region->name, repr_mmap_type(type));
1401 		goto out_unmap;
1402 	}
1403 
1404 	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1405 	if (err != sizeof(B)) {
1406 		pr_err("%s(%s): access_process_vm() write failed\n",
1407 		       obj->mm.region->name, repr_mmap_type(type));
1408 		goto out_unmap;
1409 	}
1410 
1411 	intel_gt_flush_ggtt_writes(to_gt(i915));
1412 
1413 	err = __get_user(y, ptr);
1414 	if (err) {
1415 		pr_err("%s(%s): failed to read from user mmap\n",
1416 		       obj->mm.region->name, repr_mmap_type(type));
1417 		goto out_unmap;
1418 	}
1419 
1420 	if (x != A || y != B) {
1421 		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1422 		       obj->mm.region->name, repr_mmap_type(type),
1423 		       x, y);
1424 		err = -EINVAL;
1425 		goto out_unmap;
1426 	}
1427 
1428 out_unmap:
1429 	vm_munmap(addr, obj->base.size);
1430 	return err;
1431 }
1432 
1433 static int igt_mmap_access(void *arg)
1434 {
1435 	struct drm_i915_private *i915 = arg;
1436 	struct intel_memory_region *mr;
1437 	enum intel_region_id id;
1438 
1439 	for_each_memory_region(mr, i915, id) {
1440 		struct drm_i915_gem_object *obj;
1441 		int err;
1442 
1443 		if (mr->private)
1444 			continue;
1445 
1446 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1447 		if (obj == ERR_PTR(-ENODEV))
1448 			continue;
1449 
1450 		if (IS_ERR(obj))
1451 			return PTR_ERR(obj);
1452 
1453 		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1454 		if (err == 0)
1455 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1456 		if (err == 0)
1457 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1458 		if (err == 0)
1459 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1460 		if (err == 0)
1461 			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1462 
1463 		i915_gem_object_put(obj);
1464 		if (err)
1465 			return err;
1466 	}
1467 
1468 	return 0;
1469 }
1470 
1471 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1472 			  struct drm_i915_gem_object *obj,
1473 			  enum i915_mmap_type type)
1474 {
1475 	struct intel_engine_cs *engine;
1476 	unsigned long addr;
1477 	u32 __user *ux;
1478 	u32 bbe;
1479 	int err;
1480 	u64 offset;
1481 
1482 	/*
1483 	 * Verify that the mmap access into the backing store aligns with
1484 	 * that of the GPU, i.e. that mmap is indeed writing into the same
1485 	 * page as being read by the GPU.
1486 	 */
1487 
1488 	if (!can_mmap(obj, type))
1489 		return 0;
1490 
1491 	err = wc_set(obj);
1492 	if (err == -ENXIO)
1493 		err = gtt_set(obj);
1494 	if (err)
1495 		return err;
1496 
1497 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1498 	if (err)
1499 		return err;
1500 
1501 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1502 	if (IS_ERR_VALUE(addr))
1503 		return addr;
1504 
1505 	ux = u64_to_user_ptr((u64)addr);
1506 	bbe = MI_BATCH_BUFFER_END;
1507 	if (put_user(bbe, ux)) {
1508 		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1509 		err = -EFAULT;
1510 		goto out_unmap;
1511 	}
1512 
1513 	if (type == I915_MMAP_TYPE_GTT)
1514 		intel_gt_flush_ggtt_writes(to_gt(i915));
1515 
1516 	for_each_uabi_engine(engine, i915) {
1517 		struct i915_request *rq;
1518 		struct i915_vma *vma;
1519 		struct i915_gem_ww_ctx ww;
1520 
1521 		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1522 		if (IS_ERR(vma)) {
1523 			err = PTR_ERR(vma);
1524 			goto out_unmap;
1525 		}
1526 
1527 		i915_gem_ww_ctx_init(&ww, false);
1528 retry:
1529 		err = i915_gem_object_lock(obj, &ww);
1530 		if (!err)
1531 			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1532 		if (err)
1533 			goto out_ww;
1534 
1535 		rq = i915_request_create(engine->kernel_context);
1536 		if (IS_ERR(rq)) {
1537 			err = PTR_ERR(rq);
1538 			goto out_unpin;
1539 		}
1540 
1541 		err = i915_request_await_object(rq, vma->obj, false);
1542 		if (err == 0)
1543 			err = i915_vma_move_to_active(vma, rq, 0);
1544 
1545 		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
1546 		i915_request_get(rq);
1547 		i915_request_add(rq);
1548 
1549 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1550 			struct drm_printer p =
1551 				drm_info_printer(engine->i915->drm.dev);
1552 
1553 			pr_err("%s(%s, %s): Failed to execute batch\n",
1554 			       __func__, engine->name, obj->mm.region->name);
1555 			intel_engine_dump(engine, &p,
1556 					  "%s\n", engine->name);
1557 
1558 			intel_gt_set_wedged(engine->gt);
1559 			err = -EIO;
1560 		}
1561 		i915_request_put(rq);
1562 
1563 out_unpin:
1564 		i915_vma_unpin(vma);
1565 out_ww:
1566 		if (err == -EDEADLK) {
1567 			err = i915_gem_ww_ctx_backoff(&ww);
1568 			if (!err)
1569 				goto retry;
1570 		}
1571 		i915_gem_ww_ctx_fini(&ww);
1572 		if (err)
1573 			goto out_unmap;
1574 	}
1575 
1576 out_unmap:
1577 	vm_munmap(addr, obj->base.size);
1578 	return err;
1579 }
1580 
1581 static int igt_mmap_gpu(void *arg)
1582 {
1583 	struct drm_i915_private *i915 = arg;
1584 	struct intel_memory_region *mr;
1585 	enum intel_region_id id;
1586 
1587 	for_each_memory_region(mr, i915, id) {
1588 		struct drm_i915_gem_object *obj;
1589 		int err;
1590 
1591 		if (mr->private)
1592 			continue;
1593 
1594 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1595 		if (obj == ERR_PTR(-ENODEV))
1596 			continue;
1597 
1598 		if (IS_ERR(obj))
1599 			return PTR_ERR(obj);
1600 
1601 		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1602 		if (err == 0)
1603 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1604 		if (err == 0)
1605 			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1606 
1607 		i915_gem_object_put(obj);
1608 		if (err)
1609 			return err;
1610 	}
1611 
1612 	return 0;
1613 }
1614 
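/*
 * apply_to_page_range() callbacks: complain if a PTE within the user mmap is
 * missing (check_present) or is still populated after the mapping should
 * have been revoked (check_absent).
 */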
1615 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1616 {
1617 	if (!pte_present(*pte) || pte_none(*pte)) {
1618 		pr_err("missing PTE:%lx\n",
1619 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
1620 		return -EINVAL;
1621 	}
1622 
1623 	return 0;
1624 }
1625 
1626 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1627 {
1628 	if (pte_present(*pte) && !pte_none(*pte)) {
1629 		pr_err("present PTE:%lx; expected to be revoked\n",
1630 		       (addr - (unsigned long)data) >> PAGE_SHIFT);
1631 		return -EINVAL;
1632 	}
1633 
1634 	return 0;
1635 }
1636 
1637 static int check_present(unsigned long addr, unsigned long len)
1638 {
1639 	return apply_to_page_range(current->mm, addr, len,
1640 				   check_present_pte, (void *)addr);
1641 }
1642 
1643 static int check_absent(unsigned long addr, unsigned long len)
1644 {
1645 	return apply_to_page_range(current->mm, addr, len,
1646 				   check_absent_pte, (void *)addr);
1647 }
1648 
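/*
 * Touch one byte in every page of the range (and the very last byte) so that
 * all of its PTEs are populated before we inspect them.
 */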
1649 static int prefault_range(u64 start, u64 len)
1650 {
1651 	const char __user *addr, *end;
1652 	char __maybe_unused c;
1653 	int err;
1654 
1655 	addr = u64_to_user_ptr(start);
1656 	end = addr + len;
1657 
1658 	for (; addr < end; addr += PAGE_SIZE) {
1659 		err = __get_user(c, addr);
1660 		if (err)
1661 			return err;
1662 	}
1663 
1664 	return __get_user(c, end - 1);
1665 }
1666 
1667 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1668 			     struct drm_i915_gem_object *obj,
1669 			     enum i915_mmap_type type)
1670 {
1671 	unsigned long addr;
1672 	int err;
1673 	u64 offset;
1674 
1675 	if (!can_mmap(obj, type))
1676 		return 0;
1677 
1678 	err = __assign_mmap_offset(obj, type, &offset, NULL);
1679 	if (err)
1680 		return err;
1681 
1682 	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1683 	if (IS_ERR_VALUE(addr))
1684 		return addr;
1685 
1686 	err = prefault_range(addr, obj->base.size);
1687 	if (err)
1688 		goto out_unmap;
1689 
1690 	err = check_present(addr, obj->base.size);
1691 	if (err) {
1692 		pr_err("%s: was not present\n", obj->mm.region->name);
1693 		goto out_unmap;
1694 	}
1695 
1696 	/*
1697 	 * After unbinding the object from the GGTT, its address may be reused
1698 	 * for other objects. Ergo we have to revoke the previous mmap PTE
1699 	 * access as it no longer points to the same object.
1700 	 */
1701 	i915_gem_object_lock(obj, NULL);
1702 	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1703 	i915_gem_object_unlock(obj);
1704 	if (err) {
1705 		pr_err("Failed to unbind object!\n");
1706 		goto out_unmap;
1707 	}
1708 
1709 	if (type != I915_MMAP_TYPE_GTT) {
1710 		i915_gem_object_lock(obj, NULL);
1711 		__i915_gem_object_put_pages(obj);
1712 		i915_gem_object_unlock(obj);
1713 		if (i915_gem_object_has_pages(obj)) {
1714 			pr_err("Failed to put-pages object!\n");
1715 			err = -EINVAL;
1716 			goto out_unmap;
1717 		}
1718 	}
1719 
1720 	err = check_absent(addr, obj->base.size);
1721 	if (err) {
1722 		pr_err("%s: was not absent\n", obj->mm.region->name);
1723 		goto out_unmap;
1724 	}
1725 
1726 out_unmap:
1727 	vm_munmap(addr, obj->base.size);
1728 	return err;
1729 }
1730 
1731 static int igt_mmap_revoke(void *arg)
1732 {
1733 	struct drm_i915_private *i915 = arg;
1734 	struct intel_memory_region *mr;
1735 	enum intel_region_id id;
1736 
1737 	for_each_memory_region(mr, i915, id) {
1738 		struct drm_i915_gem_object *obj;
1739 		int err;
1740 
1741 		if (mr->private)
1742 			continue;
1743 
1744 		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1745 		if (obj == ERR_PTR(-ENODEV))
1746 			continue;
1747 
1748 		if (IS_ERR(obj))
1749 			return PTR_ERR(obj);
1750 
1751 		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1752 		if (err == 0)
1753 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1754 		if (err == 0)
1755 			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1756 
1757 		i915_gem_object_put(obj);
1758 		if (err)
1759 			return err;
1760 	}
1761 
1762 	return 0;
1763 }
1764 
1765 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1766 {
1767 	static const struct i915_subtest tests[] = {
1768 		SUBTEST(igt_partial_tiling),
1769 		SUBTEST(igt_smoke_tiling),
1770 		SUBTEST(igt_mmap_offset_exhaustion),
1771 		SUBTEST(igt_mmap),
1772 		SUBTEST(igt_mmap_migrate),
1773 		SUBTEST(igt_mmap_access),
1774 		SUBTEST(igt_mmap_revoke),
1775 		SUBTEST(igt_mmap_gpu),
1776 	};
1777 
1778 	return i915_subtests(tests, i915);
1779 }
1780