1 /*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include <drm/drm_vma_manager.h>
29 #include <linux/dma-fence-array.h>
30 #include <linux/kthread.h>
31 #include <linux/dma-resv.h>
32 #include <linux/shmem_fs.h>
33 #include <linux/slab.h>
34 #include <linux/stop_machine.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/dma-buf.h>
38 #include <linux/mman.h>
39
40 #include "display/intel_display.h"
41 #include "display/intel_frontbuffer.h"
42
43 #include "gem/i915_gem_clflush.h"
44 #include "gem/i915_gem_context.h"
45 #include "gem/i915_gem_ioctls.h"
46 #include "gem/i915_gem_mman.h"
47 #include "gem/i915_gem_region.h"
48 #include "gt/intel_engine_user.h"
49 #include "gt/intel_gt.h"
50 #include "gt/intel_gt_pm.h"
51 #include "gt/intel_workarounds.h"
52
53 #include "i915_drv.h"
54 #include "i915_trace.h"
55 #include "i915_vgpu.h"
56
57 #include "intel_pm.h"
58
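/*
 * insert_mappable_node() and remove_mappable_node() below reserve and
 * release a scratch drm_mm_node in the CPU-mappable portion of the GGTT,
 * taking vm.mutex around the drm_mm manipulation. The pread/pwrite slow
 * paths in this file use such a node as a single-page window when the
 * whole object cannot be pinned into the mappable aperture.
 */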
59 static int
60 insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
61 {
62 int err;
63
64 err = mutex_lock_interruptible(&ggtt->vm.mutex);
65 if (err)
66 return err;
67
68 memset(node, 0, sizeof(*node));
69 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
70 size, 0, I915_COLOR_UNEVICTABLE,
71 0, ggtt->mappable_end,
72 DRM_MM_INSERT_LOW);
73
74 mutex_unlock(&ggtt->vm.mutex);
75
76 return err;
77 }
78
79 static void
80 remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
81 {
82 mutex_lock(&ggtt->vm.mutex);
83 drm_mm_remove_node(node);
84 mutex_unlock(&ggtt->vm.mutex);
85 }
86
87 int
88 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
89 struct drm_file *file)
90 {
91 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
92 struct drm_i915_gem_get_aperture *args = data;
93 struct i915_vma *vma;
94 u64 pinned;
95
96 if (mutex_lock_interruptible(&ggtt->vm.mutex))
97 return -EINTR;
98
99 pinned = ggtt->vm.reserved;
100 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
101 if (i915_vma_is_pinned(vma))
102 pinned += vma->node.size;
103
104 mutex_unlock(&ggtt->vm.mutex);
105
106 args->aper_size = ggtt->vm.total;
107 args->aper_available_size = args->aper_size - pinned;
108
109 return 0;
110 }
111
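/*
 * Unbind every VMA belonging to the object. Each vma is moved onto a
 * local list so obj->vma.lock can be dropped while unbinding; with
 * I915_GEM_OBJECT_UNBIND_BARRIER, an rcu_barrier() flushes any pending
 * i915_vm_release() before the walk is retried on -EAGAIN.
 */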
112 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
113 unsigned long flags)
114 {
115 struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
116 LIST_HEAD(still_in_list);
117 intel_wakeref_t wakeref;
118 struct i915_vma *vma;
119 int ret;
120
121 if (list_empty(&obj->vma.list))
122 return 0;
123
124 /*
125 * As some machines use ACPI to handle runtime-resume callbacks, and
126 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
127 * as they are required by the shrinker. Ergo, we wake the device up
128 * first just in case.
129 */
130 wakeref = intel_runtime_pm_get(rpm);
131
132 try_again:
133 ret = 0;
134 spin_lock(&obj->vma.lock);
135 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
136 struct i915_vma,
137 obj_link))) {
138 struct i915_address_space *vm = vma->vm;
139
140 list_move_tail(&vma->obj_link, &still_in_list);
141 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
142 continue;
143
144 if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
145 ret = -EBUSY;
146 break;
147 }
148
149 ret = -EAGAIN;
150 if (!i915_vm_tryopen(vm))
151 break;
152
153 /* Prevent vma being freed by i915_vma_parked as we unbind */
154 vma = __i915_vma_get(vma);
155 spin_unlock(&obj->vma.lock);
156
157 if (vma) {
158 ret = -EBUSY;
159 if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
160 !i915_vma_is_active(vma))
161 ret = i915_vma_unbind(vma);
162
163 __i915_vma_put(vma);
164 }
165
166 i915_vm_close(vm);
167 spin_lock(&obj->vma.lock);
168 }
169 list_splice_init(&still_in_list, &obj->vma.list);
170 spin_unlock(&obj->vma.lock);
171
172 if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
173 rcu_barrier(); /* flush the i915_vm_release() */
174 goto try_again;
175 }
176
177 intel_runtime_pm_put(rpm, wakeref);
178
179 return ret;
180 }
181
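/* Per-page copy function for the shmem pread fastpath.
 * kmaps the page and, when the object is not cache coherent, clflushes
 * the range before copying it out to userspace.
 */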
182 static int
183 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
184 bool needs_clflush)
185 {
186 char *vaddr;
187 int ret;
188
189 vaddr = kmap(page);
190
191 if (needs_clflush)
192 drm_clflush_virt_range(vaddr + offset, len);
193
194 ret = __copy_to_user(user_data, vaddr + offset, len);
195
196 kunmap(page);
197
198 return ret ? -EFAULT : 0;
199 }
200
201 static int
202 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
203 struct drm_i915_gem_pread *args)
204 {
205 unsigned int needs_clflush;
206 unsigned int idx, offset;
207 char __user *user_data;
208 u64 remain;
209 int ret;
210
211 ret = i915_gem_object_lock_interruptible(obj, NULL);
212 if (ret)
213 return ret;
214
215 ret = i915_gem_object_pin_pages(obj);
216 if (ret)
217 goto err_unlock;
218
219 ret = i915_gem_object_prepare_read(obj, &needs_clflush);
220 if (ret)
221 goto err_unpin;
222
223 i915_gem_object_finish_access(obj);
224 i915_gem_object_unlock(obj);
225
226 remain = args->size;
227 user_data = u64_to_user_ptr(args->data_ptr);
228 offset = offset_in_page(args->offset);
229 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
230 struct page *page = i915_gem_object_get_page(obj, idx);
231 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
232
233 ret = shmem_pread(page, offset, length, user_data,
234 needs_clflush);
235 if (ret)
236 break;
237
238 remain -= length;
239 user_data += length;
240 offset = 0;
241 }
242
243 i915_gem_object_unpin_pages(obj);
244 return ret;
245
246 err_unpin:
247 i915_gem_object_unpin_pages(obj);
248 err_unlock:
249 i915_gem_object_unlock(obj);
250 return ret;
251 }
252
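/*
 * Copy from the GGTT iomap to userspace. The atomic WC mapping is tried
 * first; if the user buffer faults, we drop to a regular mapping and
 * copy_to_user(), which may sleep.
 */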
253 static inline bool
254 gtt_user_read(struct io_mapping *mapping,
255 loff_t base, int offset,
256 char __user *user_data, int length)
257 {
258 void __iomem *vaddr;
259 unsigned long unwritten;
260
261 /* We can use the cpu mem copy function because this is X86. */
262 vaddr = io_mapping_map_atomic_wc(mapping, base);
263 unwritten = __copy_to_user_inatomic(user_data,
264 (void __force *)vaddr + offset,
265 length);
266 io_mapping_unmap_atomic(vaddr);
267 if (unwritten) {
268 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
269 unwritten = copy_to_user(user_data,
270 (void __force *)vaddr + offset,
271 length);
272 io_mapping_unmap(vaddr);
273 }
274 return unwritten;
275 }
276
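/*
 * Set up GGTT access for pread/pwrite: try to pin the whole object into
 * the mappable aperture; failing that, fall back to a single scratch
 * page that is remapped for every page copied. Runs under a ww context
 * so -EDEADLK is handled by backing off and retrying.
 */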
277 static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
278 struct drm_mm_node *node,
279 bool write)
280 {
281 struct drm_i915_private *i915 = to_i915(obj->base.dev);
282 struct i915_ggtt *ggtt = &i915->ggtt;
283 struct i915_vma *vma;
284 struct i915_gem_ww_ctx ww;
285 int ret;
286
287 i915_gem_ww_ctx_init(&ww, true);
288 retry:
289 vma = ERR_PTR(-ENODEV);
290 ret = i915_gem_object_lock(obj, &ww);
291 if (ret)
292 goto err_ww;
293
294 ret = i915_gem_object_set_to_gtt_domain(obj, write);
295 if (ret)
296 goto err_ww;
297
298 if (!i915_gem_object_is_tiled(obj))
299 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
300 PIN_MAPPABLE |
301 PIN_NONBLOCK /* NOWARN */ |
302 PIN_NOEVICT);
303 if (vma == ERR_PTR(-EDEADLK)) {
304 ret = -EDEADLK;
305 goto err_ww;
306 } else if (!IS_ERR(vma)) {
307 node->start = i915_ggtt_offset(vma);
308 node->flags = 0;
309 } else {
310 ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
311 if (ret)
312 goto err_ww;
313 GEM_BUG_ON(!drm_mm_node_allocated(node));
314 vma = NULL;
315 }
316
317 ret = i915_gem_object_pin_pages(obj);
318 if (ret) {
319 if (drm_mm_node_allocated(node)) {
320 ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
321 remove_mappable_node(ggtt, node);
322 } else {
323 i915_vma_unpin(vma);
324 }
325 }
326
327 err_ww:
328 if (ret == -EDEADLK) {
329 ret = i915_gem_ww_ctx_backoff(&ww);
330 if (!ret)
331 goto retry;
332 }
333 i915_gem_ww_ctx_fini(&ww);
334
335 return ret ? ERR_PTR(ret) : vma;
336 }
337
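/* Undo i915_gem_gtt_prepare(): unpin the pages and drop either the
 * scratch node or the pinned vma, whichever was used.
 */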
338 static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
339 struct drm_mm_node *node,
340 struct i915_vma *vma)
341 {
342 struct drm_i915_private *i915 = to_i915(obj->base.dev);
343 struct i915_ggtt *ggtt = &i915->ggtt;
344
345 i915_gem_object_unpin_pages(obj);
346 if (drm_mm_node_allocated(node)) {
347 ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
348 remove_mappable_node(ggtt, node);
349 } else {
350 i915_vma_unpin(vma);
351 }
352 }
353
354 static int
355 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
356 const struct drm_i915_gem_pread *args)
357 {
358 struct drm_i915_private *i915 = to_i915(obj->base.dev);
359 struct i915_ggtt *ggtt = &i915->ggtt;
360 intel_wakeref_t wakeref;
361 struct drm_mm_node node;
362 void __user *user_data;
363 struct i915_vma *vma;
364 u64 remain, offset;
365 int ret = 0;
366
367 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
368
369 vma = i915_gem_gtt_prepare(obj, &node, false);
370 if (IS_ERR(vma)) {
371 ret = PTR_ERR(vma);
372 goto out_rpm;
373 }
374
375 user_data = u64_to_user_ptr(args->data_ptr);
376 remain = args->size;
377 offset = args->offset;
378
379 while (remain > 0) {
380 /* Operation in this page
381 *
382 * page_base = page offset within aperture
383 * page_offset = offset within page
384 * page_length = bytes to copy for this page
385 */
386 u32 page_base = node.start;
387 unsigned page_offset = offset_in_page(offset);
388 unsigned page_length = PAGE_SIZE - page_offset;
389 page_length = remain < page_length ? remain : page_length;
390 if (drm_mm_node_allocated(&node)) {
391 ggtt->vm.insert_page(&ggtt->vm,
392 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
393 node.start, I915_CACHE_NONE, 0);
394 } else {
395 page_base += offset & PAGE_MASK;
396 }
397
398 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
399 user_data, page_length)) {
400 ret = -EFAULT;
401 break;
402 }
403
404 remain -= page_length;
405 user_data += page_length;
406 offset += page_length;
407 }
408
409 i915_gem_gtt_cleanup(obj, &node, vma);
410 out_rpm:
411 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
412 return ret;
413 }
414
415 /**
416 * Reads data from the object referenced by handle.
417 * @dev: drm device pointer
418 * @data: ioctl data blob
419 * @file: drm file pointer
420 *
421 * On error, the contents of *data are undefined.
422 */
423 int
424 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
425 struct drm_file *file)
426 {
427 struct drm_i915_private *i915 = to_i915(dev);
428 struct drm_i915_gem_pread *args = data;
429 struct drm_i915_gem_object *obj;
430 int ret;
431
432 /* PREAD is disallowed for all platforms after TGL-LP. This also
433 * covers all platforms with local memory.
434 */
435 if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
436 return -EOPNOTSUPP;
437
438 if (args->size == 0)
439 return 0;
440
441 if (!access_ok(u64_to_user_ptr(args->data_ptr),
442 args->size))
443 return -EFAULT;
444
445 obj = i915_gem_object_lookup(file, args->handle);
446 if (!obj)
447 return -ENOENT;
448
449 /* Bounds check source. */
450 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
451 ret = -EINVAL;
452 goto out;
453 }
454
455 trace_i915_gem_object_pread(obj, args->offset, args->size);
456 ret = -ENODEV;
457 if (obj->ops->pread)
458 ret = obj->ops->pread(obj, args);
459 if (ret != -ENODEV)
460 goto out;
461
468 ret = i915_gem_object_wait(obj,
469 I915_WAIT_INTERRUPTIBLE,
470 MAX_SCHEDULE_TIMEOUT);
471 if (ret)
472 goto out;
473
474 ret = i915_gem_shmem_pread(obj, args);
475 if (ret == -EFAULT || ret == -ENODEV)
476 ret = i915_gem_gtt_pread(obj, args);
477
478 out:
479 i915_gem_object_put(obj);
480 return ret;
481 }
482
483 /* This is the fast write path which cannot handle
484 * page faults in the source data
485 */
486
487 static inline bool
488 ggtt_write(struct io_mapping *mapping,
489 loff_t base, int offset,
490 char __user *user_data, int length)
491 {
492 void __iomem *vaddr;
493 unsigned long unwritten;
494
495 /* We can use the cpu mem copy function because this is X86. */
496 vaddr = io_mapping_map_atomic_wc(mapping, base);
497 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
498 user_data, length);
499 io_mapping_unmap_atomic(vaddr);
500 if (unwritten) {
501 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
502 unwritten = copy_from_user((void __force *)vaddr + offset,
503 user_data, length);
504 io_mapping_unmap(vaddr);
505 }
506
507 return unwritten;
508 }
509
510 /**
511 * This is the fast pwrite path, where we copy the data directly from the
512 * user into the GTT, uncached.
513 * @obj: i915 GEM object
514 * @args: pwrite arguments structure
515 */
516 static int
517 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
518 const struct drm_i915_gem_pwrite *args)
519 {
520 struct drm_i915_private *i915 = to_i915(obj->base.dev);
521 struct i915_ggtt *ggtt = &i915->ggtt;
522 struct intel_runtime_pm *rpm = &i915->runtime_pm;
523 intel_wakeref_t wakeref;
524 struct drm_mm_node node;
525 struct i915_vma *vma;
526 u64 remain, offset;
527 void __user *user_data;
528 int ret = 0;
529
530 if (i915_gem_object_has_struct_page(obj)) {
531 /*
532 * Avoid waking the device up if we can fall back, as
533 * waking/resuming is very slow (worst-case 10-100 ms
534 * depending on PCI sleeps and our own resume time).
535 * This easily dwarfs any performance advantage from
536 * using the cache bypass of indirect GGTT access.
537 */
538 wakeref = intel_runtime_pm_get_if_in_use(rpm);
539 if (!wakeref)
540 return -EFAULT;
541 } else {
542 /* No backing pages, no fallback, we must force GGTT access */
543 wakeref = intel_runtime_pm_get(rpm);
544 }
545
546 vma = i915_gem_gtt_prepare(obj, &node, true);
547 if (IS_ERR(vma)) {
548 ret = PTR_ERR(vma);
549 goto out_rpm;
550 }
551
552 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
553
554 user_data = u64_to_user_ptr(args->data_ptr);
555 offset = args->offset;
556 remain = args->size;
557 while (remain) {
558 /* Operation in this page
559 *
560 * page_base = page offset within aperture
561 * page_offset = offset within page
562 * page_length = bytes to copy for this page
563 */
564 u32 page_base = node.start;
565 unsigned int page_offset = offset_in_page(offset);
566 unsigned int page_length = PAGE_SIZE - page_offset;
567 page_length = remain < page_length ? remain : page_length;
568 if (drm_mm_node_allocated(&node)) {
569 /* flush the write before we modify the GGTT */
570 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
571 ggtt->vm.insert_page(&ggtt->vm,
572 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
573 node.start, I915_CACHE_NONE, 0);
574 wmb(); /* flush modifications to the GGTT (insert_page) */
575 } else {
576 page_base += offset & PAGE_MASK;
577 }
578 /* If we get a fault while copying data, then (presumably) our
579 * source page isn't available. Return the error and we'll
580 * retry in the slow path.
581 * If the object is non-shmem backed, we retry again with the
582 * path that handles page faults.
583 */
584 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
585 user_data, page_length)) {
586 ret = -EFAULT;
587 break;
588 }
589
590 remain -= page_length;
591 user_data += page_length;
592 offset += page_length;
593 }
594
595 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
596 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
597
598 i915_gem_gtt_cleanup(obj, &node, vma);
599 out_rpm:
600 intel_runtime_pm_put(rpm, wakeref);
601 return ret;
602 }
603
604 /* Per-page copy function for the shmem pwrite fastpath.
605 * Flushes invalid cachelines before writing to the target if
606 * needs_clflush_before is set and flushes out any written cachelines after
607 * writing if needs_clflush is set.
608 */
609 static int
610 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
611 bool needs_clflush_before,
612 bool needs_clflush_after)
613 {
614 char *vaddr;
615 int ret;
616
617 vaddr = kmap(page);
618
619 if (needs_clflush_before)
620 drm_clflush_virt_range(vaddr + offset, len);
621
622 ret = __copy_from_user(vaddr + offset, user_data, len);
623 if (!ret && needs_clflush_after)
624 drm_clflush_virt_range(vaddr + offset, len);
625
626 kunmap(page);
627
628 return ret ? -EFAULT : 0;
629 }
630
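/*
 * Slow-path pwrite through the shmem backing store: pin the pages, then
 * copy per page with shmem_pwrite(), clflushing before and/or after the
 * copy as dictated by the object's cache domain.
 */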
631 static int
632 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
633 const struct drm_i915_gem_pwrite *args)
634 {
635 unsigned int partial_cacheline_write;
636 unsigned int needs_clflush;
637 unsigned int offset, idx;
638 void __user *user_data;
639 u64 remain;
640 int ret;
641
642 ret = i915_gem_object_lock_interruptible(obj, NULL);
643 if (ret)
644 return ret;
645
646 ret = i915_gem_object_pin_pages(obj);
647 if (ret)
648 goto err_unlock;
649
650 ret = i915_gem_object_prepare_write(obj, &needs_clflush);
651 if (ret)
652 goto err_unpin;
653
654 i915_gem_object_finish_access(obj);
655 i915_gem_object_unlock(obj);
656
657 /* If we don't overwrite a cacheline completely we need to be
658 * careful to have up-to-date data by first clflushing. Don't
659 * overcomplicate things and flush the entire write.
660 */
661 partial_cacheline_write = 0;
662 if (needs_clflush & CLFLUSH_BEFORE)
663 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
664
665 user_data = u64_to_user_ptr(args->data_ptr);
666 remain = args->size;
667 offset = offset_in_page(args->offset);
668 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
669 struct page *page = i915_gem_object_get_page(obj, idx);
670 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
671
672 ret = shmem_pwrite(page, offset, length, user_data,
673 (offset | length) & partial_cacheline_write,
674 needs_clflush & CLFLUSH_AFTER);
675 if (ret)
676 break;
677
678 remain -= length;
679 user_data += length;
680 offset = 0;
681 }
682
683 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
684
685 i915_gem_object_unpin_pages(obj);
686 return ret;
687
688 err_unpin:
689 i915_gem_object_unpin_pages(obj);
690 err_unlock:
691 i915_gem_object_unlock(obj);
692 return ret;
693 }
694
695 /**
696 * Writes data to the object referenced by handle.
697 * @dev: drm device
698 * @data: ioctl data blob
699 * @file: drm file
700 *
701 * On error, the contents of the buffer that were to be modified are undefined.
702 */
703 int
704 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
705 struct drm_file *file)
706 {
707 struct drm_i915_private *i915 = to_i915(dev);
708 struct drm_i915_gem_pwrite *args = data;
709 struct drm_i915_gem_object *obj;
710 int ret;
711
712 /* PWRITE is disallowed for all platforms after TGL-LP. This also
713 * covers all platforms with local memory.
714 */
715 if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
716 return -EOPNOTSUPP;
717
718 if (args->size == 0)
719 return 0;
720
721 if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
722 return -EFAULT;
723
724 obj = i915_gem_object_lookup(file, args->handle);
725 if (!obj)
726 return -ENOENT;
727
728 /* Bounds check destination. */
729 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
730 ret = -EINVAL;
731 goto err;
732 }
733
734 /* Writes not allowed into this read-only object */
735 if (i915_gem_object_is_readonly(obj)) {
736 ret = -EINVAL;
737 goto err;
738 }
739
740 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
741
742 ret = -ENODEV;
743 if (obj->ops->pwrite)
744 ret = obj->ops->pwrite(obj, args);
745 if (ret != -ENODEV)
746 goto err;
747
748 ret = i915_gem_object_wait(obj,
749 I915_WAIT_INTERRUPTIBLE |
750 I915_WAIT_ALL,
751 MAX_SCHEDULE_TIMEOUT);
752 if (ret)
753 goto err;
754
755 ret = -EFAULT;
756 /* We can only do the GTT pwrite on untiled buffers, as otherwise
757 * it would end up going through the fenced access, and we'll get
758 * different detiling behavior between reading and writing.
759 * pread/pwrite currently are reading and writing from the CPU
760 * perspective, requiring manual detiling by the client.
761 */
762 if (!i915_gem_object_has_struct_page(obj) ||
763 cpu_write_needs_clflush(obj))
764 /* Note that the gtt paths might fail with non-page-backed user
765 * pointers (e.g. gtt mappings when moving data between
766 * textures). Fallback to the shmem path in that case.
767 */
768 ret = i915_gem_gtt_pwrite_fast(obj, args);
769
770 if (ret == -EFAULT || ret == -ENOSPC) {
771 if (i915_gem_object_has_struct_page(obj))
772 ret = i915_gem_shmem_pwrite(obj, args);
773 }
774
775 err:
776 i915_gem_object_put(obj);
777 return ret;
778 }
779
780 /**
781 * Called when user space has done writes to this buffer
782 * @dev: drm device
783 * @data: ioctl data blob
784 * @file: drm file
785 */
786 int
787 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
788 struct drm_file *file)
789 {
790 struct drm_i915_gem_sw_finish *args = data;
791 struct drm_i915_gem_object *obj;
792
793 obj = i915_gem_object_lookup(file, args->handle);
794 if (!obj)
795 return -ENOENT;
796
797 /*
798 * Proxy objects are barred from CPU access, so there is no
799 * need to ban sw_finish as it is a nop.
800 */
801
802 /* Pinned buffers may be scanout, so flush the cache */
803 i915_gem_object_flush_if_display(obj);
804 i915_gem_object_put(obj);
805
806 return 0;
807 }
808
809 void i915_gem_runtime_suspend(struct drm_i915_private *i915)
810 {
811 struct drm_i915_gem_object *obj, *on;
812 int i;
813
814 /*
815 * Only called during RPM suspend. All users of the userfault_list
816 * must be holding an RPM wakeref to ensure that this can not
817 * run concurrently with themselves (and use the struct_mutex for
818 * protection between themselves).
819 */
820
821 list_for_each_entry_safe(obj, on,
822 &i915->ggtt.userfault_list, userfault_link)
823 __i915_gem_object_release_mmap_gtt(obj);
824
825 /*
826 * The fences will be lost when the device powers down. If any were
827 * in use by hardware (i.e. they are pinned), we should not be powering
828 * down! All other fences will be reacquired by the user upon waking.
829 */
830 for (i = 0; i < i915->ggtt.num_fences; i++) {
831 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
832
833 /*
834 * Ideally we want to assert that the fence register is not
835 * live at this point (i.e. that no piece of code will be
836 * trying to write through fence + GTT, as that both violates
837 * our tracking of activity and associated locking/barriers,
838 * but also is illegal given that the hw is powered down).
839 *
840 * Previously we used reg->pin_count as a "liveness" indicator.
841 * That is not sufficient, and we need a more fine-grained
842 * tool if we want to have a sanity check here.
843 */
844
845 if (!reg->vma)
846 continue;
847
848 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
849 reg->dirty = true;
850 }
851 }
852
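/*
 * Drop a cached vma from the object's vma rbtree so that a fresh
 * instance can be created; used when a pinned or active vma turns out
 * to be misplaced for the requested GGTT pin.
 */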
853 static void discard_ggtt_vma(struct i915_vma *vma)
854 {
855 struct drm_i915_gem_object *obj = vma->obj;
856
857 spin_lock(&obj->vma.lock);
858 if (!RB_EMPTY_NODE(&vma->obj_node)) {
859 rb_erase(&vma->obj_node, &obj->vma.tree);
860 RB_CLEAR_NODE(&vma->obj_node);
861 }
862 spin_unlock(&obj->vma.lock);
863 }
864
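/*
 * Pin an object into the global GTT, optionally under a ww context.
 * PIN_MAPPABLE/PIN_NONBLOCK requests are rejected early when the object
 * obviously cannot (or should not) fit in the mappable aperture,
 * misplaced vmas are rebuilt or unbound, and any stale fence on an
 * untiled object is revoked before waiting for the bind to complete.
 */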
865 struct i915_vma *
866 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
867 struct i915_gem_ww_ctx *ww,
868 const struct i915_ggtt_view *view,
869 u64 size, u64 alignment, u64 flags)
870 {
871 struct drm_i915_private *i915 = to_i915(obj->base.dev);
872 struct i915_ggtt *ggtt = &i915->ggtt;
873 struct i915_vma *vma;
874 int ret;
875
876 if (flags & PIN_MAPPABLE &&
877 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
878 /*
879 * If the required space is larger than the available
880 * aperture, we will not be able to find a slot for the
881 * object and unbinding the object now will be in
882 * vain. Worse, doing so may cause us to ping-pong
883 * the object in and out of the Global GTT and
884 * waste a lot of cycles under the mutex.
885 */
886 if (obj->base.size > ggtt->mappable_end)
887 return ERR_PTR(-E2BIG);
888
889 /*
890 * If NONBLOCK is set the caller is optimistically
891 * trying to cache the full object within the mappable
892 * aperture, and *must* have a fallback in place for
893 * situations where we cannot bind the object. We
894 * can be a little more lax here and use the fallback
895 * more often to avoid costly migrations of ourselves
896 * and other objects within the aperture.
897 *
898 * Half-the-aperture is used as a simple heuristic.
899 * More interesting would be to do a search for a free
900 * block prior to making the commitment to unbind.
901 * That caters for the self-harm case, and with a
902 * little more heuristics (e.g. NOFAULT, NOEVICT)
903 * we could try to minimise harm to others.
904 */
905 if (flags & PIN_NONBLOCK &&
906 obj->base.size > ggtt->mappable_end / 2)
907 return ERR_PTR(-ENOSPC);
908 }
909
910 new_vma:
911 vma = i915_vma_instance(obj, &ggtt->vm, view);
912 if (IS_ERR(vma))
913 return vma;
914
915 if (i915_vma_misplaced(vma, size, alignment, flags)) {
916 if (flags & PIN_NONBLOCK) {
917 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
918 return ERR_PTR(-ENOSPC);
919
920 if (flags & PIN_MAPPABLE &&
921 vma->fence_size > ggtt->mappable_end / 2)
922 return ERR_PTR(-ENOSPC);
923 }
924
925 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
926 discard_ggtt_vma(vma);
927 goto new_vma;
928 }
929
930 ret = i915_vma_unbind(vma);
931 if (ret)
932 return ERR_PTR(ret);
933 }
934
935 if (ww)
936 ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
937 else
938 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
939
940 if (ret)
941 return ERR_PTR(ret);
942
943 if (vma->fence && !i915_gem_object_is_tiled(obj)) {
944 mutex_lock(&ggtt->vm.mutex);
945 i915_vma_revoke_fence(vma);
946 mutex_unlock(&ggtt->vm.mutex);
947 }
948
949 ret = i915_vma_wait_for_bind(vma);
950 if (ret) {
951 i915_vma_unpin(vma);
952 return ERR_PTR(ret);
953 }
954
955 return vma;
956 }
957
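/*
 * madvise ioctl: mark an object as WILLNEED or DONTNEED. Objects with
 * the pin-swizzled-pages quirk have their shrinker accounting fixed up,
 * shrinkable objects migrate between the shrink and purge lists, and an
 * unpinned DONTNEED object has its backing storage truncated right away.
 */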
958 int
959 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
960 struct drm_file *file_priv)
961 {
962 struct drm_i915_private *i915 = to_i915(dev);
963 struct drm_i915_gem_madvise *args = data;
964 struct drm_i915_gem_object *obj;
965 int err;
966
967 switch (args->madv) {
968 case I915_MADV_DONTNEED:
969 case I915_MADV_WILLNEED:
970 break;
971 default:
972 return -EINVAL;
973 }
974
975 obj = i915_gem_object_lookup(file_priv, args->handle);
976 if (!obj)
977 return -ENOENT;
978
979 err = i915_gem_object_lock_interruptible(obj, NULL);
980 if (err)
981 goto out;
982
983 if (i915_gem_object_has_pages(obj) &&
984 i915_gem_object_is_tiled(obj) &&
985 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
986 if (obj->mm.madv == I915_MADV_WILLNEED) {
987 GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
988 i915_gem_object_clear_tiling_quirk(obj);
989 i915_gem_object_make_shrinkable(obj);
990 }
991 if (args->madv == I915_MADV_WILLNEED) {
992 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
993 i915_gem_object_make_unshrinkable(obj);
994 i915_gem_object_set_tiling_quirk(obj);
995 }
996 }
997
998 if (obj->mm.madv != __I915_MADV_PURGED)
999 obj->mm.madv = args->madv;
1000
1001 if (i915_gem_object_has_pages(obj)) {
1002 struct list_head *list;
1003
1004 if (i915_gem_object_is_shrinkable(obj)) {
1005 unsigned long flags;
1006
1007 spin_lock_irqsave(&i915->mm.obj_lock, flags);
1008
1009 if (obj->mm.madv != I915_MADV_WILLNEED)
1010 list = &i915->mm.purge_list;
1011 else
1012 list = &i915->mm.shrink_list;
1013 list_move_tail(&obj->mm.link, list);
1014
1015 spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1016 }
1017 }
1018
1019 /* if the object is no longer attached, discard its backing storage */
1020 if (obj->mm.madv == I915_MADV_DONTNEED &&
1021 !i915_gem_object_has_pages(obj))
1022 i915_gem_object_truncate(obj);
1023
1024 args->retained = obj->mm.madv != __I915_MADV_PURGED;
1025
1026 i915_gem_object_unlock(obj);
1027 out:
1028 i915_gem_object_put(obj);
1029 return err;
1030 }
1031
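/*
 * One-shot GEM initialisation at driver load: set up userptr support,
 * fetch uC firmware, and initialise the GGTT and the GT. On -EIO the
 * GPU is declared wedged but KMS is kept alive with minimal GGTT state.
 */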
1032 int i915_gem_init(struct drm_i915_private *dev_priv)
1033 {
1034 int ret;
1035
1036 /* We need to fall back to 4K pages if the host doesn't support huge gtt. */
1037 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1038 mkwrite_device_info(dev_priv)->page_sizes =
1039 I915_GTT_PAGE_SIZE_4K;
1040
1041 ret = i915_gem_init_userptr(dev_priv);
1042 if (ret)
1043 return ret;
1044
1045 intel_uc_fetch_firmwares(&dev_priv->gt.uc);
1046 intel_wopcm_init(&dev_priv->wopcm);
1047
1048 ret = i915_init_ggtt(dev_priv);
1049 if (ret) {
1050 GEM_BUG_ON(ret == -EIO);
1051 goto err_unlock;
1052 }
1053
1054 /*
1055 * Despite its name, intel_init_clock_gating applies both display
1056 * clock gating workarounds and GT mmio workarounds, plus the occasional
1057 * GT power context workaround. Worse, sometimes it includes a context
1058 * register workaround which we need to apply before we record the
1059 * default HW state for all contexts.
1060 *
1061 * FIXME: break up the workarounds and apply them at the right time!
1062 */
1063 intel_init_clock_gating(dev_priv);
1064
1065 ret = intel_gt_init(&dev_priv->gt);
1066 if (ret)
1067 goto err_unlock;
1068
1069 return 0;
1070
1071 /*
1072 * Unwinding is complicated by that we want to handle -EIO to mean
1073 * disable GPU submission but keep KMS alive. We want to mark the
1074 * HW as irreversibly wedged, but keep enough state around that the
1075 * driver doesn't explode during runtime.
1076 */
1077 err_unlock:
1078 i915_gem_drain_workqueue(dev_priv);
1079
1080 if (ret != -EIO)
1081 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1082
1083 if (ret == -EIO) {
1084 /*
1085 * Allow engines or uC initialisation to fail by marking the GPU
1086 * as wedged. But we only want to do this when the GPU is angry,
1087 * for all other failures, such as an allocation failure, bail.
1088 */
1089 if (!intel_gt_is_wedged(&dev_priv->gt)) {
1090 i915_probe_error(dev_priv,
1091 "Failed to initialize GPU, declaring it wedged!\n");
1092 intel_gt_set_wedged(&dev_priv->gt);
1093 }
1094
1095 /* Minimal basic recovery for KMS */
1096 ret = i915_ggtt_enable_hw(dev_priv);
1097 i915_ggtt_resume(&dev_priv->ggtt);
1098 intel_init_clock_gating(dev_priv);
1099 }
1100
1101 i915_gem_drain_freed_objects(dev_priv);
1102 return ret;
1103 }
1104
1105 void i915_gem_driver_register(struct drm_i915_private *i915)
1106 {
1107 i915_gem_driver_register__shrinker(i915);
1108
1109 intel_engines_driver_register(i915);
1110 }
1111
1112 void i915_gem_driver_unregister(struct drm_i915_private *i915)
1113 {
1114 i915_gem_driver_unregister__shrinker(i915);
1115 }
1116
1117 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
1118 {
1119 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1120
1121 i915_gem_suspend_late(dev_priv);
1122 intel_gt_driver_remove(&dev_priv->gt);
1123 dev_priv->uabi_engines = RB_ROOT;
1124
1125 /* Flush any outstanding unpin_work. */
1126 i915_gem_drain_workqueue(dev_priv);
1127
1128 i915_gem_drain_freed_objects(dev_priv);
1129 }
1130
1131 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
1132 {
1133 intel_gt_driver_release(&dev_priv->gt);
1134
1135 intel_wa_list_free(&dev_priv->gt_wa_list);
1136
1137 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1138
1139 i915_gem_drain_freed_objects(dev_priv);
1140
1141 drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
1142 }
1143
1144 static void i915_gem_init__mm(struct drm_i915_private *i915)
1145 {
1146 spin_lock_init(&i915->mm.obj_lock);
1147
1148 init_llist_head(&i915->mm.free_list);
1149
1150 INIT_LIST_HEAD(&i915->mm.purge_list);
1151 INIT_LIST_HEAD(&i915->mm.shrink_list);
1152
1153 i915_gem_init__objects(i915);
1154 }
1155
1156 void i915_gem_init_early(struct drm_i915_private *dev_priv)
1157 {
1158 i915_gem_init__mm(dev_priv);
1159 i915_gem_init__contexts(dev_priv);
1160
1161 spin_lock_init(&dev_priv->fb_tracking.lock);
1162 }
1163
1164 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1165 {
1166 i915_gem_drain_freed_objects(dev_priv);
1167 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1168 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1169 drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
1170 }
1171
1172 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1173 {
1174 struct drm_i915_file_private *file_priv;
1175 int ret;
1176
1177 DRM_DEBUG("\n");
1178
1179 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1180 if (!file_priv)
1181 return -ENOMEM;
1182
1183 file->driver_priv = file_priv;
1184 file_priv->dev_priv = i915;
1185 file_priv->file = file;
1186
1187 file_priv->bsd_engine = -1;
1188 file_priv->hang_timestamp = jiffies;
1189
1190 ret = i915_gem_context_open(i915, file);
1191 if (ret)
1192 kfree(file_priv);
1193
1194 return ret;
1195 }
1196
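/*
 * The i915_gem_ww_ctx helpers below wrap reservation_ww_class for
 * locking multiple GEM objects: on contention, backoff unlocks every
 * object held by the context and (interruptibly, if requested) sleeps
 * on the contended object's dma-resv before the caller retries.
 */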
1197 void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
1198 {
1199 ww_acquire_init(&ww->ctx, &reservation_ww_class);
1200 INIT_LIST_HEAD(&ww->obj_list);
1201 ww->intr = intr;
1202 ww->contended = NULL;
1203 }
1204
1205 static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
1206 {
1207 struct drm_i915_gem_object *obj;
1208
1209 while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
1210 list_del(&obj->obj_link);
1211 i915_gem_object_unlock(obj);
1212 }
1213 }
1214
1215 void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
1216 {
1217 list_del(&obj->obj_link);
1218 i915_gem_object_unlock(obj);
1219 }
1220
1221 void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
1222 {
1223 i915_gem_ww_ctx_unlock_all(ww);
1224 WARN_ON(ww->contended);
1225 ww_acquire_fini(&ww->ctx);
1226 }
1227
1228 int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
1229 {
1230 int ret = 0;
1231
1232 if (WARN_ON(!ww->contended))
1233 return -EINVAL;
1234
1235 i915_gem_ww_ctx_unlock_all(ww);
1236 if (ww->intr)
1237 ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
1238 else
1239 dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
1240
1241 if (!ret)
1242 list_add_tail(&ww->contended->obj_link, &ww->obj_list);
1243
1244 ww->contended = NULL;
1245
1246 return ret;
1247 }
1248
1249 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1250 #include "selftests/mock_gem_device.c"
1251 #include "selftests/i915_gem.c"
1252 #endif
1253