/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"

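/*
 * A software description of one tiling/fence configuration: the tile
 * geometry (row width in bytes, height in rows, log2 tile size), the fence
 * stride, and the tiling mode plus bit-6 swizzle reported by the hardware.
 */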
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

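/* Fold the selected address bit down onto bit 6, as the HW swizzle does. */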
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

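/*
 * Translate a linear offset as seen through the fenced GTT mapping into the
 * offset within the object's backing pages, applying the X/Y tile layout and
 * then the bit-6 swizzle. This is the software model that the GTT writes
 * below are checked against.
 */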
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

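/*
 * Write the index of a randomly chosen page through a partial GGTT mmap of
 * the object, then read it back via the CPU and check that the value landed
 * at the location predicted by tiled_offset().
 */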
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_vma_destroy(vma);
	return err;
}

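/*
 * As check_partial_mapping(), but instead of a single random page, walk a
 * prime-spaced selection of pages for the given tiling parameters until we
 * either cover the object or run out of time.
 */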
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_vma_destroy(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

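/*
 * Fill in the tile geometry for the current generation and tiling mode, and
 * return the maximum pitch (in tile widths) supported by the fences.
 */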
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (INTEL_GEN(i915) < 4)
		return 8192 / tile->width;
	else if (INTEL_GEN(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT
	 * vmas. We then check that a write through each partial GGTT vma
	 * ends up in the right set of pages within the object, with the
	 * expected tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search instead and hope that, over many 1s runs with
	 * different seeds, we do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (INTEL_GEN(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

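/*
 * Submit a write to the object from every uabi engine and then drop our
 * reference, leaving the object alive only via its active requests.
 */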
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			i915_vma_unpin(vma);
			return PTR_ERR(rq);
		}

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(vma);

		i915_request_add(rq);
		i915_vma_unpin(vma);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

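/*
 * Check that creating a mmap offset for an object of the given size either
 * succeeds or fails with the expected error code.
 */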
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return false;

	err = create_mmap_offset(obj);
	i915_gem_object_put(obj);

	return err == expected;
}

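/*
 * Park the shrinker and the background retire worker so that nothing retires
 * or reclaims behind our back while the test owns the mmap offset space;
 * restore_retire_worker() reverses this after flushing outstanding work.
 */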
static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

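/*
 * Trim the mmap offset space down to a single page and check behaviour on
 * exhaustion: an object that just fits gets an offset, an oversized object
 * is refused with -ENOSPC, and once the hole is occupied further insertions
 * also fail with -ENOSPC.
 */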
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node resv, *hole;
	u64 hole_start, hole_end;
	int loop, err;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);

	/* Trim the device mmap space to only a page */
	memset(&resv, 0, sizeof(resv));
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		resv.start = hole_start;
		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
		mmap_offset_lock(i915);
		err = drm_mm_reserve_node(mm, &resv);
		mmap_offset_unlock(i915);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			goto out_park;
		}
		break;
	}

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out;
	}

	err = create_mmap_offset(obj);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
	drm_mm_remove_node(&resv);
	mmap_offset_unlock(i915);
out_park:
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

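/* Entry point for the live selftests covering the GTT mmap paths. */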
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
	};

	return i915_subtests(tests, i915);
}