xref: /openbsd/sys/dev/pci/drm/i915/i915_gem.c (revision 9ea232b5)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/dma-fence-array.h>
29 #include <linux/kthread.h>
30 #include <linux/dma-resv.h>
31 #include <linux/shmem_fs.h>
32 #include <linux/slab.h>
33 #include <linux/stop_machine.h>
34 #include <linux/swap.h>
35 #include <linux/pci.h>
36 #include <linux/dma-buf.h>
37 #include <linux/mman.h>
38 
39 #include <drm/drm_cache.h>
40 #include <drm/drm_vma_manager.h>
41 
42 #include <dev/pci/agpvar.h>
43 
44 #include "display/intel_display.h"
45 #include "display/intel_frontbuffer.h"
46 
47 #include "gem/i915_gem_clflush.h"
48 #include "gem/i915_gem_context.h"
49 #include "gem/i915_gem_ioctls.h"
50 #include "gem/i915_gem_mman.h"
51 #include "gem/i915_gem_pm.h"
52 #include "gem/i915_gem_region.h"
53 #include "gem/i915_gem_userptr.h"
54 #include "gt/intel_engine_user.h"
55 #include "gt/intel_gt.h"
56 #include "gt/intel_gt_pm.h"
57 #include "gt/intel_workarounds.h"
58 
59 #include "i915_drv.h"
60 #include "i915_file_private.h"
61 #include "i915_trace.h"
62 #include "i915_vgpu.h"
63 #include "intel_clock_gating.h"
64 
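/*
 * Reserve a node in the mappable region of the GGTT, taking the vm mutex;
 * remove_mappable_node() below drops it again.  Used by the GTT pread/pwrite
 * paths when the object itself cannot be pinned into the aperture.
 */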
65 static int
66 insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
67 {
68 	int err;
69 
70 	err = mutex_lock_interruptible(&ggtt->vm.mutex);
71 	if (err)
72 		return err;
73 
74 	memset(node, 0, sizeof(*node));
75 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
76 					  size, 0, I915_COLOR_UNEVICTABLE,
77 					  0, ggtt->mappable_end,
78 					  DRM_MM_INSERT_LOW);
79 
80 	mutex_unlock(&ggtt->vm.mutex);
81 
82 	return err;
83 }
84 
85 static void
86 remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
87 {
88 	mutex_lock(&ggtt->vm.mutex);
89 	drm_mm_remove_node(node);
90 	mutex_unlock(&ggtt->vm.mutex);
91 }
92 
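/**
 * i915_gem_get_aperture_ioctl - Reports the total and available size of the GGTT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * The available size is the total aperture less any space that is reserved
 * or currently pinned in the GGTT.
 */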
93 int
94 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
95 			    struct drm_file *file)
96 {
97 	struct drm_i915_private *i915 = to_i915(dev);
98 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
99 	struct drm_i915_gem_get_aperture *args = data;
100 	struct i915_vma *vma;
101 	u64 pinned;
102 
103 	if (mutex_lock_interruptible(&ggtt->vm.mutex))
104 		return -EINTR;
105 
106 	pinned = ggtt->vm.reserved;
107 	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
108 		if (i915_vma_is_pinned(vma))
109 			pinned += vma->node.size;
110 
111 	mutex_unlock(&ggtt->vm.mutex);
112 
113 	args->aper_size = ggtt->vm.total;
114 	args->aper_available_size = args->aper_size - pinned;
115 
116 	return 0;
117 }
118 
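/*
 * Unbind every vma of the object from its address space.  The caller must
 * hold the object lock.  UNBIND_TEST merely reports -EBUSY if any vma is
 * bound, UNBIND_ACTIVE also unbinds vmas that are still active,
 * UNBIND_ASYNC attempts an asynchronous unbind first, and UNBIND_BARRIER
 * retries after an RCU barrier if a vm was in the middle of being released.
 */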
119 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
120 			   unsigned long flags)
121 {
122 	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
123 	bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
124 	DRM_LIST_HEAD(still_in_list);
125 	intel_wakeref_t wakeref;
126 	struct i915_vma *vma;
127 	int ret;
128 
129 	assert_object_held(obj);
130 
131 	if (list_empty(&obj->vma.list))
132 		return 0;
133 
134 	/*
135 	 * As some machines use ACPI to handle runtime-resume callbacks, and
136 	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
137 	 * as they are required by the shrinker. Ergo, we wake the device up
138 	 * first just in case.
139 	 */
140 	wakeref = intel_runtime_pm_get(rpm);
141 
142 try_again:
143 	ret = 0;
144 	spin_lock(&obj->vma.lock);
145 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
146 						       struct i915_vma,
147 						       obj_link))) {
148 		list_move_tail(&vma->obj_link, &still_in_list);
149 		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
150 			continue;
151 
152 		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
153 			ret = -EBUSY;
154 			break;
155 		}
156 
157 		/*
158 		 * Requiring the vm destructor to take the object lock
159 		 * before destroying a vma would help us eliminate the
160 		 * i915_vm_tryget() here, AND thus also the barrier stuff
161 		 * at the end. That's an easy fix, but sleeping locks in
162 		 * a kthread should generally be avoided.
163 		 */
164 		ret = -EAGAIN;
165 		if (!i915_vm_tryget(vma->vm))
166 			break;
167 
168 		spin_unlock(&obj->vma.lock);
169 
170 		/*
171 		 * Since i915_vma_parked() takes the object lock
172 		 * before vma destruction, it won't race us here,
173 		 * and destroy the vma from under us.
174 		 */
175 
176 		ret = -EBUSY;
177 		if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
178 			assert_object_held(vma->obj);
179 			ret = i915_vma_unbind_async(vma, vm_trylock);
180 		}
181 
182 		if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
183 				      !i915_vma_is_active(vma))) {
184 			if (vm_trylock) {
185 				if (mutex_trylock(&vma->vm->mutex)) {
186 					ret = __i915_vma_unbind(vma);
187 					mutex_unlock(&vma->vm->mutex);
188 				}
189 			} else {
190 				ret = i915_vma_unbind(vma);
191 			}
192 		}
193 
194 		i915_vm_put(vma->vm);
195 		spin_lock(&obj->vma.lock);
196 	}
197 	list_splice_init(&still_in_list, &obj->vma.list);
198 	spin_unlock(&obj->vma.lock);
199 
200 	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
201 		rcu_barrier(); /* flush the i915_vm_release() */
202 		goto try_again;
203 	}
204 
205 	intel_runtime_pm_put(rpm, wakeref);
206 
207 	return ret;
208 }
209 
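/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading from the page if
 * needs_clflush is set.
 */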
210 static int
211 shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
212 	    bool needs_clflush)
213 {
214 	char *vaddr;
215 	int ret;
216 
217 	vaddr = kmap(page);
218 
219 	if (needs_clflush)
220 		drm_clflush_virt_range(vaddr + offset, len);
221 
222 	ret = __copy_to_user(user_data, vaddr + offset, len);
223 
224 	kunmap_va(vaddr);
225 
226 	return ret ? -EFAULT : 0;
227 }
228 
229 static int
230 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
231 		     struct drm_i915_gem_pread *args)
232 {
233 	unsigned int needs_clflush;
234 	char __user *user_data;
235 	unsigned long offset;
236 	pgoff_t idx;
237 	u64 remain;
238 	int ret;
239 
240 	ret = i915_gem_object_lock_interruptible(obj, NULL);
241 	if (ret)
242 		return ret;
243 
244 	ret = i915_gem_object_pin_pages(obj);
245 	if (ret)
246 		goto err_unlock;
247 
248 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
249 	if (ret)
250 		goto err_unpin;
251 
252 	i915_gem_object_finish_access(obj);
253 	i915_gem_object_unlock(obj);
254 
255 	remain = args->size;
256 	user_data = u64_to_user_ptr(args->data_ptr);
257 	offset = offset_in_page(args->offset);
258 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
259 		struct vm_page *page = i915_gem_object_get_page(obj, idx);
260 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
261 
262 		ret = shmem_pread(page, offset, length, user_data,
263 				  needs_clflush);
264 		if (ret)
265 			break;
266 
267 		remain -= length;
268 		user_data += length;
269 		offset = 0;
270 	}
271 
272 	i915_gem_object_unpin_pages(obj);
273 	return ret;
274 
275 err_unpin:
276 	i915_gem_object_unpin_pages(obj);
277 err_unlock:
278 	i915_gem_object_unlock(obj);
279 	return ret;
280 }
281 
282 #ifdef __linux__
283 static inline bool
284 gtt_user_read(struct io_mapping *mapping,
285 	      loff_t base, int offset,
286 	      char __user *user_data, int length)
287 {
288 	void __iomem *vaddr;
289 	unsigned long unwritten;
290 
291 	/* We can use the cpu mem copy function because this is X86. */
292 	vaddr = io_mapping_map_atomic_wc(mapping, base);
293 	unwritten = __copy_to_user_inatomic(user_data,
294 					    (void __force *)vaddr + offset,
295 					    length);
296 	io_mapping_unmap_atomic(vaddr);
297 	if (unwritten) {
298 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
299 		unwritten = copy_to_user(user_data,
300 					 (void __force *)vaddr + offset,
301 					 length);
302 		io_mapping_unmap(vaddr);
303 	}
304 	return unwritten;
305 }
306 #else
307 static inline bool
308 gtt_user_read(struct drm_i915_private *dev_priv,
309 	      loff_t base, int offset,
310 	      char __user *user_data, int length)
311 {
312 	bus_space_handle_t bsh;
313 	void __iomem *vaddr;
314 	unsigned long unwritten;
315 
316 	/* We can use the cpu mem copy function because this is X86. */
317 	agp_map_atomic(dev_priv->agph, base, &bsh);
318 	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
319 	unwritten = __copy_to_user_inatomic(user_data,
320 					    (void __force *)vaddr + offset,
321 					    length);
322 	agp_unmap_atomic(dev_priv->agph, bsh);
323 	if (unwritten) {
324 		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
325 		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
326 		unwritten = copy_to_user(user_data,
327 					 (void __force *)vaddr + offset,
328 					 length);
329 		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
330 	}
331 	return unwritten;
332 }
333 #endif
334 
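/*
 * Prepare an object for access through the GGTT aperture: move it to the
 * GTT domain and either pin it into the mappable aperture or, failing that,
 * reserve a single scratch page in the aperture so that the object can be
 * copied page by page.  The backing pages are pinned on success.
 */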
335 static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
336 					     struct drm_mm_node *node,
337 					     bool write)
338 {
339 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
340 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
341 	struct i915_vma *vma;
342 	struct i915_gem_ww_ctx ww;
343 	int ret;
344 
345 	i915_gem_ww_ctx_init(&ww, true);
346 retry:
347 	vma = ERR_PTR(-ENODEV);
348 	ret = i915_gem_object_lock(obj, &ww);
349 	if (ret)
350 		goto err_ww;
351 
352 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
353 	if (ret)
354 		goto err_ww;
355 
356 	if (!i915_gem_object_is_tiled(obj))
357 		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
358 						  PIN_MAPPABLE |
359 						  PIN_NONBLOCK /* NOWARN */ |
360 						  PIN_NOEVICT);
361 	if (vma == ERR_PTR(-EDEADLK)) {
362 		ret = -EDEADLK;
363 		goto err_ww;
364 	} else if (!IS_ERR(vma)) {
365 		node->start = i915_ggtt_offset(vma);
366 		node->flags = 0;
367 	} else {
368 		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
369 		if (ret)
370 			goto err_ww;
371 		GEM_BUG_ON(!drm_mm_node_allocated(node));
372 		vma = NULL;
373 	}
374 
375 	ret = i915_gem_object_pin_pages(obj);
376 	if (ret) {
377 		if (drm_mm_node_allocated(node)) {
378 			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
379 			remove_mappable_node(ggtt, node);
380 		} else {
381 			i915_vma_unpin(vma);
382 		}
383 	}
384 
385 err_ww:
386 	if (ret == -EDEADLK) {
387 		ret = i915_gem_ww_ctx_backoff(&ww);
388 		if (!ret)
389 			goto retry;
390 	}
391 	i915_gem_ww_ctx_fini(&ww);
392 
393 	return ret ? ERR_PTR(ret) : vma;
394 }
395 
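/* Undo i915_gem_gtt_prepare(): unpin the pages and release the GGTT space. */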
396 static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
397 				 struct drm_mm_node *node,
398 				 struct i915_vma *vma)
399 {
400 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
401 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
402 
403 	i915_gem_object_unpin_pages(obj);
404 	if (drm_mm_node_allocated(node)) {
405 		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
406 		remove_mappable_node(ggtt, node);
407 	} else {
408 		i915_vma_unpin(vma);
409 	}
410 }
411 
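/*
 * Slow read path through the GGTT aperture, used when the shmem path faults
 * or the object has no struct pages.  Each page is mapped into the aperture
 * (via the scratch node if the object could not be pinned) and copied out
 * with the CPU.
 */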
412 static int
413 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
414 		   const struct drm_i915_gem_pread *args)
415 {
416 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
417 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
418 	unsigned long remain, offset;
419 	intel_wakeref_t wakeref;
420 	struct drm_mm_node node;
421 	void __user *user_data;
422 	struct i915_vma *vma;
423 	int ret = 0;
424 
425 	if (overflows_type(args->size, remain) ||
426 	    overflows_type(args->offset, offset))
427 		return -EINVAL;
428 
429 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
430 
431 	vma = i915_gem_gtt_prepare(obj, &node, false);
432 	if (IS_ERR(vma)) {
433 		ret = PTR_ERR(vma);
434 		goto out_rpm;
435 	}
436 
437 	user_data = u64_to_user_ptr(args->data_ptr);
438 	remain = args->size;
439 	offset = args->offset;
440 
441 	while (remain > 0) {
442 		/* Operation in this page
443 		 *
444 		 * page_base = page offset within aperture
445 		 * page_offset = offset within page
446 		 * page_length = bytes to copy for this page
447 		 */
448 		u32 page_base = node.start;
449 		unsigned page_offset = offset_in_page(offset);
450 		unsigned page_length = PAGE_SIZE - page_offset;
451 		page_length = remain < page_length ? remain : page_length;
452 		if (drm_mm_node_allocated(&node)) {
453 			ggtt->vm.insert_page(&ggtt->vm,
454 					     i915_gem_object_get_dma_address(obj,
455 									     offset >> PAGE_SHIFT),
456 					     node.start,
457 					     i915_gem_get_pat_index(i915,
458 								    I915_CACHE_NONE), 0);
459 		} else {
460 			page_base += offset & LINUX_PAGE_MASK;
461 		}
462 
463 		if (gtt_user_read(i915, page_base, page_offset,
464 				  user_data, page_length)) {
465 			ret = -EFAULT;
466 			break;
467 		}
468 
469 		remain -= page_length;
470 		user_data += page_length;
471 		offset += page_length;
472 	}
473 
474 	i915_gem_gtt_cleanup(obj, &node, vma);
475 out_rpm:
476 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
477 	return ret;
478 }
479 
480 /**
481  * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
482  * @dev: drm device pointer
483  * @data: ioctl data blob
484  * @file: drm file pointer
485  *
486  * On error, the contents of *data are undefined.
487  */
488 int
489 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
490 		     struct drm_file *file)
491 {
492 	struct drm_i915_private *i915 = to_i915(dev);
493 	struct drm_i915_gem_pread *args = data;
494 	struct drm_i915_gem_object *obj;
495 	int ret;
496 
497 	/* PREAD is disallowed for all platforms after TGL-LP.  This also
498 	 * covers all platforms with local memory.
499 	 */
500 	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
501 		return -EOPNOTSUPP;
502 
503 	if (args->size == 0)
504 		return 0;
505 
506 	if (!access_ok(u64_to_user_ptr(args->data_ptr),
507 		       args->size))
508 		return -EFAULT;
509 
510 	obj = i915_gem_object_lookup(file, args->handle);
511 	if (!obj)
512 		return -ENOENT;
513 
514 	/* Bounds check source.  */
515 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
516 		ret = -EINVAL;
517 		goto out;
518 	}
519 
520 	trace_i915_gem_object_pread(obj, args->offset, args->size);
521 	ret = -ENODEV;
522 	if (obj->ops->pread)
523 		ret = obj->ops->pread(obj, args);
524 	if (ret != -ENODEV)
525 		goto out;
526 
527 	ret = i915_gem_object_wait(obj,
528 				   I915_WAIT_INTERRUPTIBLE,
529 				   MAX_SCHEDULE_TIMEOUT);
530 	if (ret)
531 		goto out;
532 
533 	ret = i915_gem_shmem_pread(obj, args);
534 	if (ret == -EFAULT || ret == -ENODEV)
535 		ret = i915_gem_gtt_pread(obj, args);
536 
537 out:
538 	i915_gem_object_put(obj);
539 	return ret;
540 }
541 
542 /* This is the fast write path which cannot handle
543  * page faults in the source data
544  */
545 #ifdef __linux__
546 static inline bool
547 ggtt_write(struct io_mapping *mapping,
548 	   loff_t base, int offset,
549 	   char __user *user_data, int length)
550 {
551 	void __iomem *vaddr;
552 	unsigned long unwritten;
553 
554 	/* We can use the cpu mem copy function because this is X86. */
555 	vaddr = io_mapping_map_atomic_wc(mapping, base);
556 	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
557 						      user_data, length);
558 	io_mapping_unmap_atomic(vaddr);
559 	if (unwritten) {
560 		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
561 		unwritten = copy_from_user((void __force *)vaddr + offset,
562 					   user_data, length);
563 		io_mapping_unmap(vaddr);
564 	}
565 
566 	return unwritten;
567 }
568 #else
569 static inline bool
570 ggtt_write(struct drm_i915_private *dev_priv,
571 	   loff_t base, int offset,
572 	   char __user *user_data, int length)
573 {
574 	bus_space_handle_t bsh;
575 	void __iomem *vaddr;
576 	unsigned long unwritten;
577 
578 	/* We can use the cpu mem copy function because this is X86. */
579 	agp_map_atomic(dev_priv->agph, base, &bsh);
580 	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
581 	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
582 						      user_data, length);
583 	agp_unmap_atomic(dev_priv->agph, bsh);
584 	if (unwritten) {
585 		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
586 		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
587 		unwritten = copy_from_user((void __force *)vaddr + offset,
588 					   user_data, length);
589 		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
590 	}
591 
592 	return unwritten;
593 }
594 #endif
595 
596 /**
597  * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
598  * user into the GTT, uncached.
599  * @obj: i915 GEM object
600  * @args: pwrite arguments structure
601  */
602 static int
603 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
604 			 const struct drm_i915_gem_pwrite *args)
605 {
606 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
607 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
608 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
609 	unsigned long remain, offset;
610 	intel_wakeref_t wakeref;
611 	struct drm_mm_node node;
612 	struct i915_vma *vma;
613 	void __user *user_data;
614 	int ret = 0;
615 
616 	if (overflows_type(args->size, remain) ||
617 	    overflows_type(args->offset, offset))
618 		return -EINVAL;
619 
620 	if (i915_gem_object_has_struct_page(obj)) {
621 		/*
622 		 * Avoid waking the device up if we can fall back, as
623 		 * waking/resuming is very slow (worst-case 10-100 ms
624 		 * depending on PCI sleeps and our own resume time).
625 		 * This easily dwarfs any performance advantage from
626 		 * using the cache bypass of indirect GGTT access.
627 		 */
628 		wakeref = intel_runtime_pm_get_if_in_use(rpm);
629 		if (!wakeref)
630 			return -EFAULT;
631 	} else {
632 		/* No backing pages, no fallback, we must force GGTT access */
633 		wakeref = intel_runtime_pm_get(rpm);
634 	}
635 
636 	vma = i915_gem_gtt_prepare(obj, &node, true);
637 	if (IS_ERR(vma)) {
638 		ret = PTR_ERR(vma);
639 		goto out_rpm;
640 	}
641 
642 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
643 
644 	user_data = u64_to_user_ptr(args->data_ptr);
645 	offset = args->offset;
646 	remain = args->size;
647 	while (remain) {
648 		/* Operation in this page
649 		 *
650 		 * page_base = page offset within aperture
651 		 * page_offset = offset within page
652 		 * page_length = bytes to copy for this page
653 		 */
654 		u32 page_base = node.start;
655 		unsigned int page_offset = offset_in_page(offset);
656 		unsigned int page_length = PAGE_SIZE - page_offset;
657 		page_length = remain < page_length ? remain : page_length;
658 		if (drm_mm_node_allocated(&node)) {
659 			/* flush the write before we modify the GGTT */
660 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
661 			ggtt->vm.insert_page(&ggtt->vm,
662 					     i915_gem_object_get_dma_address(obj,
663 									     offset >> PAGE_SHIFT),
664 					     node.start,
665 					     i915_gem_get_pat_index(i915,
666 								    I915_CACHE_NONE), 0);
667 			wmb(); /* flush modifications to the GGTT (insert_page) */
668 		} else {
669 			page_base += offset & LINUX_PAGE_MASK;
670 		}
671 		/* If we get a fault while copying data, then (presumably) our
672 		 * source page isn't available.  Return the error and we'll
673 		 * retry in the slow path.
674 		 * If the object is non-shmem backed, we retry with the
675 		 * path that handles page faults.
676 		 */
677 		if (ggtt_write(i915, page_base, page_offset,
678 			       user_data, page_length)) {
679 			ret = -EFAULT;
680 			break;
681 		}
682 
683 		remain -= page_length;
684 		user_data += page_length;
685 		offset += page_length;
686 	}
687 
688 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
689 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
690 
691 	i915_gem_gtt_cleanup(obj, &node, vma);
692 out_rpm:
693 	intel_runtime_pm_put(rpm, wakeref);
694 	return ret;
695 }
696 
697 /* Per-page copy function for the shmem pwrite fastpath.
698  * Flushes invalid cachelines before writing to the target if
699  * needs_clflush_before is set and flushes out any written cachelines after
700  * writing if needs_clflush is set.
701  */
702 static int
703 shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
704 	     bool needs_clflush_before,
705 	     bool needs_clflush_after)
706 {
707 	char *vaddr;
708 	int ret;
709 
710 	vaddr = kmap(page);
711 
712 	if (needs_clflush_before)
713 		drm_clflush_virt_range(vaddr + offset, len);
714 
715 	ret = __copy_from_user(vaddr + offset, user_data, len);
716 	if (!ret && needs_clflush_after)
717 		drm_clflush_virt_range(vaddr + offset, len);
718 
719 	kunmap_va(vaddr);
720 
721 	return ret ? -EFAULT : 0;
722 }
723 
724 static int
725 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
726 		      const struct drm_i915_gem_pwrite *args)
727 {
728 	unsigned int partial_cacheline_write;
729 	unsigned int needs_clflush;
730 	void __user *user_data;
731 	unsigned long offset;
732 	pgoff_t idx;
733 	u64 remain;
734 	int ret;
735 
736 	ret = i915_gem_object_lock_interruptible(obj, NULL);
737 	if (ret)
738 		return ret;
739 
740 	ret = i915_gem_object_pin_pages(obj);
741 	if (ret)
742 		goto err_unlock;
743 
744 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
745 	if (ret)
746 		goto err_unpin;
747 
748 	i915_gem_object_finish_access(obj);
749 	i915_gem_object_unlock(obj);
750 
751 	/* If we don't overwrite a cacheline completely we need to be
752 	 * careful to have up-to-date data by first clflushing. Don't
753 	 * overcomplicate things and flush the entire patch.
754 	 */
755 	partial_cacheline_write = 0;
756 	if (needs_clflush & CLFLUSH_BEFORE)
757 		partial_cacheline_write = curcpu()->ci_cflushsz - 1;
758 
759 	user_data = u64_to_user_ptr(args->data_ptr);
760 	remain = args->size;
761 	offset = offset_in_page(args->offset);
762 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
763 		struct vm_page *page = i915_gem_object_get_page(obj, idx);
764 		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
765 
766 		ret = shmem_pwrite(page, offset, length, user_data,
767 				   (offset | length) & partial_cacheline_write,
768 				   needs_clflush & CLFLUSH_AFTER);
769 		if (ret)
770 			break;
771 
772 		remain -= length;
773 		user_data += length;
774 		offset = 0;
775 	}
776 
777 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
778 
779 	i915_gem_object_unpin_pages(obj);
780 	return ret;
781 
782 err_unpin:
783 	i915_gem_object_unpin_pages(obj);
784 err_unlock:
785 	i915_gem_object_unlock(obj);
786 	return ret;
787 }
788 
789 /**
790  * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
791  * @dev: drm device
792  * @data: ioctl data blob
793  * @file: drm file
794  *
795  * On error, the contents of the buffer that were to be modified are undefined.
796  */
797 int
798 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
799 		      struct drm_file *file)
800 {
801 	struct drm_i915_private *i915 = to_i915(dev);
802 	struct drm_i915_gem_pwrite *args = data;
803 	struct drm_i915_gem_object *obj;
804 	int ret;
805 
806 	/* PWRITE is disallowed for all platforms after TGL-LP.  This also
807 	 * covers all platforms with local memory.
808 	 */
809 	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
810 		return -EOPNOTSUPP;
811 
812 	if (args->size == 0)
813 		return 0;
814 
815 	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
816 		return -EFAULT;
817 
818 	obj = i915_gem_object_lookup(file, args->handle);
819 	if (!obj)
820 		return -ENOENT;
821 
822 	/* Bounds check destination. */
823 	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
824 		ret = -EINVAL;
825 		goto err;
826 	}
827 
828 	/* Writes not allowed into this read-only object */
829 	if (i915_gem_object_is_readonly(obj)) {
830 		ret = -EINVAL;
831 		goto err;
832 	}
833 
834 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
835 
836 	ret = -ENODEV;
837 	if (obj->ops->pwrite)
838 		ret = obj->ops->pwrite(obj, args);
839 	if (ret != -ENODEV)
840 		goto err;
841 
842 	ret = i915_gem_object_wait(obj,
843 				   I915_WAIT_INTERRUPTIBLE |
844 				   I915_WAIT_ALL,
845 				   MAX_SCHEDULE_TIMEOUT);
846 	if (ret)
847 		goto err;
848 
849 	ret = -EFAULT;
850 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
851 	 * it would end up going through the fenced access, and we'll get
852 	 * different detiling behavior between reading and writing.
853 	 * pread/pwrite currently are reading and writing from the CPU
854 	 * perspective, requiring manual detiling by the client.
855 	 */
856 	if (!i915_gem_object_has_struct_page(obj) ||
857 	    i915_gem_cpu_write_needs_clflush(obj))
858 		/* Note that the gtt paths might fail with non-page-backed user
859 		 * pointers (e.g. gtt mappings when moving data between
860 		 * textures). Fallback to the shmem path in that case.
861 		 */
862 		ret = i915_gem_gtt_pwrite_fast(obj, args);
863 
864 	if (ret == -EFAULT || ret == -ENOSPC) {
865 		if (i915_gem_object_has_struct_page(obj))
866 			ret = i915_gem_shmem_pwrite(obj, args);
867 	}
868 
869 err:
870 	i915_gem_object_put(obj);
871 	return ret;
872 }
873 
874 /**
875  * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
876  * @dev: drm device
877  * @data: ioctl data blob
878  * @file: drm file
879  */
880 int
881 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
882 			 struct drm_file *file)
883 {
884 	struct drm_i915_gem_sw_finish *args = data;
885 	struct drm_i915_gem_object *obj;
886 
887 	obj = i915_gem_object_lookup(file, args->handle);
888 	if (!obj)
889 		return -ENOENT;
890 
891 	/*
892 	 * Proxy objects are barred from CPU access, so there is no
893 	 * need to ban sw_finish as it is a nop.
894 	 */
895 
896 	/* Pinned buffers may be scanout, so flush the cache */
897 	i915_gem_object_flush_if_display(obj);
898 	i915_gem_object_put(obj);
899 
900 	return 0;
901 }
902 
903 void i915_gem_runtime_suspend(struct drm_i915_private *i915)
904 {
905 	struct drm_i915_gem_object *obj, *on;
906 	int i;
907 
908 	/*
909 	 * Only called during RPM suspend. All users of the userfault_list
910 	 * must be holding an RPM wakeref to ensure that this can not
911 	 * run concurrently with themselves (and use the struct_mutex for
912 	 * protection between themselves).
913 	 */
914 
915 	list_for_each_entry_safe(obj, on,
916 				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
917 		__i915_gem_object_release_mmap_gtt(obj);
918 
919 	list_for_each_entry_safe(obj, on,
920 				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
921 		i915_gem_object_runtime_pm_release_mmap_offset(obj);
922 
923 	/*
924 	 * The fences will be lost when the device powers down. If any were
925 	 * in use by hardware (i.e. they are pinned), we should not be powering
926 	 * down! All other fences will be reacquired by the user upon waking.
927 	 */
928 	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
929 		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];
930 
931 		/*
932 		 * Ideally we want to assert that the fence register is not
933 		 * live at this point (i.e. that no piece of code will be
934 		 * trying to write through fence + GTT, as that both violates
935 		 * our tracking of activity and associated locking/barriers,
936 		 * but also is illegal given that the hw is powered down).
937 		 *
938 		 * Previously we used reg->pin_count as a "liveness" indicator.
939 		 * That is not sufficient, and we need a more fine-grained
940 		 * tool if we want to have a sanity check here.
941 		 */
942 
943 		if (!reg->vma)
944 			continue;
945 
946 		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
947 		reg->dirty = true;
948 	}
949 }
950 
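/* Detach the vma from the object's vma tree so that a fresh one is created. */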
951 static void discard_ggtt_vma(struct i915_vma *vma)
952 {
953 	struct drm_i915_gem_object *obj = vma->obj;
954 
955 	spin_lock(&obj->vma.lock);
956 	if (!RB_EMPTY_NODE(&vma->obj_node)) {
957 		rb_erase(&vma->obj_node, &obj->vma.tree);
958 		RB_CLEAR_NODE(&vma->obj_node);
959 	}
960 	spin_unlock(&obj->vma.lock);
961 }
962 
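/*
 * Pin an object into the global GTT under an existing ww acquire context.
 * For mappable pins a couple of heuristics avoid thrashing the limited
 * aperture: objects larger than the aperture are rejected outright, and
 * with PIN_NONBLOCK anything larger than half the aperture is refused
 * rather than evicting other objects to make room.
 */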
963 struct i915_vma *
964 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
965 			    struct i915_gem_ww_ctx *ww,
966 			    const struct i915_gtt_view *view,
967 			    u64 size, u64 alignment, u64 flags)
968 {
969 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
970 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
971 	struct i915_vma *vma;
972 	int ret;
973 
974 	GEM_WARN_ON(!ww);
975 
976 	if (flags & PIN_MAPPABLE &&
977 	    (!view || view->type == I915_GTT_VIEW_NORMAL)) {
978 		/*
979 		 * If the required space is larger than the available
980 		 * aperture, we will not be able to find a slot for the
981 		 * object and unbinding the object now will be in
982 		 * vain. Worse, doing so may cause us to ping-pong
983 		 * the object in and out of the Global GTT and
984 		 * waste a lot of cycles under the mutex.
985 		 */
986 		if (obj->base.size > ggtt->mappable_end)
987 			return ERR_PTR(-E2BIG);
988 
989 		/*
990 		 * If NONBLOCK is set the caller is optimistically
991 		 * trying to cache the full object within the mappable
992 		 * aperture, and *must* have a fallback in place for
993 		 * situations where we cannot bind the object. We
994 		 * can be a little more lax here and use the fallback
995 		 * more often to avoid costly migrations of ourselves
996 		 * and other objects within the aperture.
997 		 *
998 		 * Half-the-aperture is used as a simple heuristic.
999 		 * More interesting would be to search for a free
1000 		 * block prior to making the commitment to unbind.
1001 		 * That caters for the self-harm case, and with a
1002 		 * little more heuristics (e.g. NOFAULT, NOEVICT)
1003 		 * we could try to minimise harm to others.
1004 		 */
1005 		if (flags & PIN_NONBLOCK &&
1006 		    obj->base.size > ggtt->mappable_end / 2)
1007 			return ERR_PTR(-ENOSPC);
1008 	}
1009 
1010 new_vma:
1011 	vma = i915_vma_instance(obj, &ggtt->vm, view);
1012 	if (IS_ERR(vma))
1013 		return vma;
1014 
1015 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
1016 		if (flags & PIN_NONBLOCK) {
1017 			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1018 				return ERR_PTR(-ENOSPC);
1019 
1020 			/*
1021 			 * If this misplaced vma is too big (i.e., at least
1022 			 * half the size of the aperture) or hasn't been pinned
1023 			 * mappable before, we ignore the misplacement when
1024 			 * PIN_NONBLOCK is set in order to avoid the ping-pong
1025 			 * issue described above. In other words, we try to
1026 			 * avoid the costly operation of unbinding this vma
1027 			 * from the GGTT and rebinding it back because there
1028 			 * may not be enough space for this vma in the aperture.
1029 			 */
1030 			if (flags & PIN_MAPPABLE &&
1031 			    (vma->fence_size > ggtt->mappable_end / 2 ||
1032 			    !i915_vma_is_map_and_fenceable(vma)))
1033 				return ERR_PTR(-ENOSPC);
1034 		}
1035 
1036 		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
1037 			discard_ggtt_vma(vma);
1038 			goto new_vma;
1039 		}
1040 
1041 		ret = i915_vma_unbind(vma);
1042 		if (ret)
1043 			return ERR_PTR(ret);
1044 	}
1045 
1046 	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
1047 
1048 	if (ret)
1049 		return ERR_PTR(ret);
1050 
1051 	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1052 		mutex_lock(&ggtt->vm.mutex);
1053 		i915_vma_revoke_fence(vma);
1054 		mutex_unlock(&ggtt->vm.mutex);
1055 	}
1056 
1057 	ret = i915_vma_wait_for_bind(vma);
1058 	if (ret) {
1059 		i915_vma_unpin(vma);
1060 		return ERR_PTR(ret);
1061 	}
1062 
1063 	return vma;
1064 }
1065 
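/*
 * Convenience wrapper around i915_gem_object_ggtt_pin_ww() that supplies
 * its own ww acquire context and handles the deadlock backoff/retry loop.
 */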
1066 struct i915_vma * __must_check
1067 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1068 			 const struct i915_gtt_view *view,
1069 			 u64 size, u64 alignment, u64 flags)
1070 {
1071 	struct i915_gem_ww_ctx ww;
1072 	struct i915_vma *ret;
1073 	int err;
1074 
1075 	for_i915_gem_ww(&ww, err, true) {
1076 		err = i915_gem_object_lock(obj, &ww);
1077 		if (err)
1078 			continue;
1079 
1080 		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
1081 						  alignment, flags);
1082 		if (IS_ERR(ret))
1083 			err = PTR_ERR(ret);
1084 	}
1085 
1086 	return err ? ERR_PTR(err) : ret;
1087 }
1088 
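/**
 * i915_gem_madvise_ioctl - Advises whether the object's backing store may be
 * discarded under memory pressure
 * @dev: drm device
 * @data: ioctl data blob
 * @file_priv: drm file
 *
 * Objects marked I915_MADV_DONTNEED may have their backing storage purged;
 * args->retained reports whether the backing store has not yet been purged.
 */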
1089 int
1090 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1091 		       struct drm_file *file_priv)
1092 {
1093 	struct drm_i915_private *i915 = to_i915(dev);
1094 	struct drm_i915_gem_madvise *args = data;
1095 	struct drm_i915_gem_object *obj;
1096 	int err;
1097 
1098 	switch (args->madv) {
1099 	case I915_MADV_DONTNEED:
1100 	case I915_MADV_WILLNEED:
1101 	    break;
1102 	default:
1103 	    return -EINVAL;
1104 	}
1105 
1106 	obj = i915_gem_object_lookup(file_priv, args->handle);
1107 	if (!obj)
1108 		return -ENOENT;
1109 
1110 	err = i915_gem_object_lock_interruptible(obj, NULL);
1111 	if (err)
1112 		goto out;
1113 
1114 	if (i915_gem_object_has_pages(obj) &&
1115 	    i915_gem_object_is_tiled(obj) &&
1116 	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
1117 		if (obj->mm.madv == I915_MADV_WILLNEED) {
1118 			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
1119 			i915_gem_object_clear_tiling_quirk(obj);
1120 			i915_gem_object_make_shrinkable(obj);
1121 		}
1122 		if (args->madv == I915_MADV_WILLNEED) {
1123 			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
1124 			i915_gem_object_make_unshrinkable(obj);
1125 			i915_gem_object_set_tiling_quirk(obj);
1126 		}
1127 	}
1128 
1129 	if (obj->mm.madv != __I915_MADV_PURGED) {
1130 		obj->mm.madv = args->madv;
1131 		if (obj->ops->adjust_lru)
1132 			obj->ops->adjust_lru(obj);
1133 	}
1134 
1135 	if (i915_gem_object_has_pages(obj) ||
1136 	    i915_gem_object_has_self_managed_shrink_list(obj)) {
1137 		unsigned long flags;
1138 
1139 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
1140 		if (!list_empty(&obj->mm.link)) {
1141 			struct list_head *list;
1142 
1143 			if (obj->mm.madv != I915_MADV_WILLNEED)
1144 				list = &i915->mm.purge_list;
1145 			else
1146 				list = &i915->mm.shrink_list;
1147 			list_move_tail(&obj->mm.link, list);
1148 
1149 		}
1150 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1151 	}
1152 
1153 	/* if the object is no longer attached, discard its backing storage */
1154 	if (obj->mm.madv == I915_MADV_DONTNEED &&
1155 	    !i915_gem_object_has_pages(obj))
1156 		i915_gem_object_truncate(obj);
1157 
1158 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
1159 
1160 	i915_gem_object_unlock(obj);
1161 out:
1162 	i915_gem_object_put(obj);
1163 	return err;
1164 }
1165 
1166 /*
1167  * A single pass should suffice to release all the freed objects (along most
1168  * call paths), but be a little more paranoid in that freeing the objects does
1169  * take a little amount of time, during which the rcu callbacks could have added
1170  * take a small amount of time, during which the rcu callbacks could have added
1171  */
1172 void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
1173 {
1174 	while (atomic_read(&i915->mm.free_count)) {
1175 		flush_work(&i915->mm.free_work);
1176 		drain_workqueue(i915->bdev.wq);
1177 		rcu_barrier();
1178 	}
1179 }
1180 
1181 /*
1182  * Similar to objects above (see i915_gem_drain_freed_objects()), in general we
1183  * have workers that are armed by RCU and then rearm themselves in their
1184  * callbacks. To be paranoid, we need to drain the workqueue a second time after
1185  * waiting for the RCU grace period so that we catch work queued via RCU from
1186  * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
1187  * result, we assume that no more than 3 passes are required to catch all
1188  * _recursive_ RCU delayed work.
1189  */
1190 void i915_gem_drain_workqueue(struct drm_i915_private *i915)
1191 {
1192 	int i;
1193 
1194 	for (i = 0; i < 3; i++) {
1195 		flush_workqueue(i915->wq);
1196 		rcu_barrier();
1197 		i915_gem_drain_freed_objects(i915);
1198 	}
1199 
1200 	drain_workqueue(i915->wq);
1201 }
1202 
1203 int i915_gem_init(struct drm_i915_private *dev_priv)
1204 {
1205 	struct intel_gt *gt;
1206 	unsigned int i;
1207 	int ret;
1208 
1209 	/*
1210 	 * In the process of replacing cache_level with pat_index, a tricky
1211 	 * dependency is created on the definition of enum i915_cache_level:
1212 	 * if this enum is changed, the PTE encoding would be broken.
1213 	 * Add a warning here, and remove it once we completely quit using
1214 	 * this enum.
1215 	 */
1216 	BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
1217 		     I915_CACHE_LLC != 1 ||
1218 		     I915_CACHE_L3_LLC != 2 ||
1219 		     I915_CACHE_WT != 3 ||
1220 		     I915_MAX_CACHE_LEVEL != 4);
1221 
1222 	/* We need to fall back to 4K pages if the host doesn't support huge gtt. */
1223 	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1224 		RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
1225 
1226 	ret = i915_gem_init_userptr(dev_priv);
1227 	if (ret)
1228 		return ret;
1229 
1230 	for_each_gt(gt, dev_priv, i) {
1231 		intel_uc_fetch_firmwares(&gt->uc);
1232 		intel_wopcm_init(&gt->wopcm);
1233 		if (GRAPHICS_VER(dev_priv) >= 8)
1234 			setup_private_pat(gt);
1235 	}
1236 
1237 	ret = i915_init_ggtt(dev_priv);
1238 	if (ret) {
1239 		GEM_BUG_ON(ret == -EIO);
1240 		goto err_unlock;
1241 	}
1242 
1243 	/*
1244 	 * Despite its name, intel_clock_gating_init applies display
1245 	 * clock gating workarounds, GT mmio workarounds and the occasional
1246 	 * GT power context workaround. Worse, sometimes it includes a context
1247 	 * register workaround which we need to apply before we record the
1248 	 * default HW state for all contexts.
1249 	 *
1250 	 * FIXME: break up the workarounds and apply them at the right time!
1251 	 */
1252 	intel_clock_gating_init(dev_priv);
1253 
1254 	for_each_gt(gt, dev_priv, i) {
1255 		ret = intel_gt_init(gt);
1256 		if (ret)
1257 			goto err_unlock;
1258 	}
1259 
1260 	/*
1261 	 * Register engines early to ensure the engine list is in its final
1262 	 * rb-tree form, lowering the amount of code that has to deal with
1263 	 * the intermediate llist state.
1264 	 */
1265 	intel_engines_driver_register(dev_priv);
1266 
1267 	return 0;
1268 
1269 	/*
1270 	 * Unwinding is complicated by that we want to handle -EIO to mean
1271 	 * disable GPU submission but keep KMS alive. We want to mark the
1272 	 * HW as irreversibly wedged, but keep enough state around that the
1273 	 * driver doesn't explode during runtime.
1274 	 */
1275 err_unlock:
1276 	i915_gem_drain_workqueue(dev_priv);
1277 
1278 	if (ret != -EIO) {
1279 		for_each_gt(gt, dev_priv, i) {
1280 			intel_gt_driver_remove(gt);
1281 			intel_gt_driver_release(gt);
1282 			intel_uc_cleanup_firmwares(&gt->uc);
1283 		}
1284 	}
1285 
1286 	if (ret == -EIO) {
1287 		/*
1288 		 * Allow engines or uC initialisation to fail by marking the GPU
1289 		 * as wedged. But we only want to do this when the GPU is angry;
1290 		 * for all other failures, such as an allocation failure, bail.
1291 		 */
1292 		for_each_gt(gt, dev_priv, i) {
1293 			if (!intel_gt_is_wedged(gt)) {
1294 				i915_probe_error(dev_priv,
1295 						 "Failed to initialize GPU, declaring it wedged!\n");
1296 				intel_gt_set_wedged(gt);
1297 			}
1298 		}
1299 
1300 		/* Minimal basic recovery for KMS */
1301 		ret = i915_ggtt_enable_hw(dev_priv);
1302 		i915_ggtt_resume(to_gt(dev_priv)->ggtt);
1303 		intel_clock_gating_init(dev_priv);
1304 	}
1305 
1306 	i915_gem_drain_freed_objects(dev_priv);
1307 
1308 	return ret;
1309 }
1310 
1311 void i915_gem_driver_register(struct drm_i915_private *i915)
1312 {
1313 	i915_gem_driver_register__shrinker(i915);
1314 }
1315 
1316 void i915_gem_driver_unregister(struct drm_i915_private *i915)
1317 {
1318 	i915_gem_driver_unregister__shrinker(i915);
1319 }
1320 
1321 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
1322 {
1323 	struct intel_gt *gt;
1324 	unsigned int i;
1325 
1326 	i915_gem_suspend_late(dev_priv);
1327 	for_each_gt(gt, dev_priv, i)
1328 		intel_gt_driver_remove(gt);
1329 	dev_priv->uabi_engines = RB_ROOT;
1330 
1331 	/* Flush any outstanding unpin_work. */
1332 	i915_gem_drain_workqueue(dev_priv);
1333 }
1334 
1335 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
1336 {
1337 	struct intel_gt *gt;
1338 	unsigned int i;
1339 
1340 	for_each_gt(gt, dev_priv, i) {
1341 		intel_gt_driver_release(gt);
1342 		intel_uc_cleanup_firmwares(&gt->uc);
1343 	}
1344 
1345 	/* Flush any outstanding work, including i915_gem_context.release_work. */
1346 	i915_gem_drain_workqueue(dev_priv);
1347 
1348 	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
1349 }
1350 
1351 static void i915_gem_init__mm(struct drm_i915_private *i915)
1352 {
1353 	mtx_init(&i915->mm.obj_lock, IPL_TTY);
1354 
1355 	init_llist_head(&i915->mm.free_list);
1356 
1357 	INIT_LIST_HEAD(&i915->mm.purge_list);
1358 	INIT_LIST_HEAD(&i915->mm.shrink_list);
1359 
1360 	i915_gem_init__objects(i915);
1361 }
1362 
1363 void i915_gem_init_early(struct drm_i915_private *dev_priv)
1364 {
1365 	i915_gem_init__mm(dev_priv);
1366 	i915_gem_init__contexts(dev_priv);
1367 
1368 	mtx_init(&dev_priv->display.fb_tracking.lock, IPL_NONE);
1369 }
1370 
1371 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1372 {
1373 	i915_gem_drain_workqueue(dev_priv);
1374 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1375 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1376 	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
1377 }
1378 
1379 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1380 {
1381 	struct drm_i915_file_private *file_priv;
1382 	struct i915_drm_client *client;
1383 	int ret = -ENOMEM;
1384 
1385 	drm_dbg(&i915->drm, "\n");
1386 
1387 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1388 	if (!file_priv)
1389 		goto err_alloc;
1390 
1391 	client = i915_drm_client_alloc();
1392 	if (!client)
1393 		goto err_client;
1394 
1395 	file->driver_priv = file_priv;
1396 	file_priv->i915 = i915;
1397 	file_priv->file = file;
1398 	file_priv->client = client;
1399 
1400 	file_priv->bsd_engine = -1;
1401 	file_priv->hang_timestamp = jiffies;
1402 
1403 	ret = i915_gem_context_open(i915, file);
1404 	if (ret)
1405 		goto err_context;
1406 
1407 	return 0;
1408 
1409 err_context:
1410 	i915_drm_client_put(client);
1411 err_client:
1412 	kfree(file_priv);
1413 err_alloc:
1414 	return ret;
1415 }
1416 
1417 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1418 #include "selftests/mock_gem_device.c"
1419 #include "selftests/i915_gem.c"
1420 #endif
1421