/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * As we may hold onto the struct_mutex for inordinate lengths of
	 * time, the NMI khungtaskd detector may fire for the free objects
	 * worker.
	 */
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

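/*
 * Fake backing storage: every scatterlist segment points at the same
 * bogus page (PFN_BIAS) and is marked DONTNEED, so we can construct
 * "objects" far larger than available RAM purely to exercise the GTT
 * routines. The backing store is never meant to be touched by the CPU.
 */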
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last, limit;
	int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!HAS_PPGTT(dev_priv))
		return 0;

	ppgtt = __hw_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (!ppgtt->vm.allocate_va_range)
		goto err_ppgtt_cleanup;

	/*
	 * While we only allocate the page tables here and so we could
	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the amount of physical pages in the
	 * system. This should ensure that we do not run into the oomkiller
	 * during the test and take down the machine wilfully.
	 */
	limit = totalram_pages() << PAGE_SHIFT;
	limit = min(ppgtt->vm.total, limit);

	/* Check we can allocate the entire range */
	for (size = 4096; size <= limit; size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		cond_resched();

		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
						  last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		cond_resched();
	}

err_ppgtt_cleanup:
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_ppgtt_put(ppgtt);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}

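/*
 * lowlevel_hole() drives the vm callbacks (allocate_va_range,
 * insert_entries, clear_range) directly through a stack-local mock vma,
 * bypassing the higher-level i915_vma API, so that the raw page-table
 * management is exercised across the whole hole.
 */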
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			wakeref = intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915, wakeref);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);

		cleanup_freed_objects(i915);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

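/*
 * fill_hole() packs many differently sized objects into the hole, both
 * top-down and bottom-up and in forward/reverse list order: each pass
 * first pins every vma at its expected offset, then re-walks the list
 * to check nothing has moved before unbinding everything again.
 */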
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
		cleanup_freed_objects(i915);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

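/*
 * walk_hole() marches a single object of each (prime-numbered page)
 * size across the hole, binding, verifying and unbinding at every step.
 */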
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

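/*
 * pot_hole() straddles a two-page object across every power-of-two
 * (pot) boundary inside the hole, checking fixed-offset pinning at each
 * alignment.
 */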
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

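/*
 * drunk_hole() binds a single object at pseudo-random offsets scattered
 * throughout the hole, repeating for progressively larger object sizes.
 */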
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

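/*
 * __shrink_hole() fills the hole with progressively larger objects and
 * leaves them bound so that the page-table allocations accumulate; it
 * is driven by shrink_hole() below with fault injection enabled.
 */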
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	cleanup_freed_objects(i915);
	return err;
}

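/*
 * shrink_hole() reruns __shrink_hole() with the vm's fault injection
 * armed at varying intervals, provoking allocation failures at (almost)
 * every step of the page-table construction.
 */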
static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int shrink_boom(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(i915);
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!HAS_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	GEM_BUG_ON(ppgtt->vm.closed);

	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

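/*
 * exercise_ggtt() runs the given hole exerciser over every hole
 * currently in the GGTT. As each exerciser may itself modify the
 * drm_mm, the hole list is re-sorted and the walk restarted after every
 * invocation, with 'last' tracking how far we have already got.
 */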
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

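/*
 * igt_ggtt_page() reserves a mappable slot in the GGTT, points every
 * page of that slot at the same backing page with insert_page(), then
 * writes a distinct dword through each mapping and reads the values
 * back in a different random order to check the PTE programming.
 */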
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	wakeref = intel_runtime_pm_get(i915);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset, I915_CACHE_NONE, 0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	i915_gem_flush_ggtt_writes(i915);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(i915, wakeref);
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

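/*
 * Nothing is actually bound by the mock reserve/insert tests, so
 * track_vma_bind() performs the bookkeeping a real bind would have
 * done: take a page reference, bump the bind count and move the vma
 * onto the bound list so that eviction can find it.
 */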
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	const u64 limit = totalram_pages() << PAGE_SHIFT;
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, ggtt->vm.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
			0, ggtt->vm.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}
1671 
1672 int i915_gem_gtt_mock_selftests(void)
1673 {
1674 	static const struct i915_subtest tests[] = {
1675 		SUBTEST(igt_mock_drunk),
1676 		SUBTEST(igt_mock_walk),
1677 		SUBTEST(igt_mock_pot),
1678 		SUBTEST(igt_mock_fill),
1679 		SUBTEST(igt_gtt_reserve),
1680 		SUBTEST(igt_gtt_insert),
1681 	};
1682 	struct drm_i915_private *i915;
1683 	struct i915_ggtt *ggtt;
1684 	int err;
1685 
1686 	i915 = mock_gem_device();
1687 	if (!i915)
1688 		return -ENOMEM;
1689 
1690 	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1691 	if (!ggtt) {
1692 		err = -ENOMEM;
1693 		goto out_put;
1694 	}
1695 	mock_init_ggtt(i915, ggtt);
1696 
1697 	mutex_lock(&i915->drm.struct_mutex);
1698 	err = i915_subtests(tests, ggtt);
1699 	mock_device_flush(i915);
1700 	mutex_unlock(&i915->drm.struct_mutex);
1701 
1702 	i915_gem_drain_freed_objects(i915);
1703 
1704 	mock_fini_ggtt(ggtt);
1705 	kfree(ggtt);
1706 out_put:
1707 	drm_dev_put(&i915->drm);
1708 	return err;
1709 }
1710 
1711 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1712 {
1713 	static const struct i915_subtest tests[] = {
1714 		SUBTEST(igt_ppgtt_alloc),
1715 		SUBTEST(igt_ppgtt_lowlevel),
1716 		SUBTEST(igt_ppgtt_drunk),
1717 		SUBTEST(igt_ppgtt_walk),
1718 		SUBTEST(igt_ppgtt_pot),
1719 		SUBTEST(igt_ppgtt_fill),
1720 		SUBTEST(igt_ppgtt_shrink),
1721 		SUBTEST(igt_ppgtt_shrink_boom),
1722 		SUBTEST(igt_ggtt_lowlevel),
1723 		SUBTEST(igt_ggtt_drunk),
1724 		SUBTEST(igt_ggtt_walk),
1725 		SUBTEST(igt_ggtt_pot),
1726 		SUBTEST(igt_ggtt_fill),
1727 		SUBTEST(igt_ggtt_page),
1728 	};
1729 
1730 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
1731 
1732 	return i915_subtests(tests, i915);
1733 }
1734