/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
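	/*
	 * Worked example (illustrative): with 4K, 64K and 2M supported,
	 * phys = 2M | 64K yields sg = 2M | 64K | 4K, since smaller pages
	 * can always be carved out of a larger chunk, whereas
	 * phys = 64K | 4K drops the 2M bit as no chunk is big enough.
	 */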
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		shrinkable = false;
	}

	if (shrinkable) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

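		/*
		 * Make sure the freshly acquired pages are visible before
		 * the pin count advertises them; this presumably pairs
		 * with the lockless atomic_inc_not_zero() fast path used
		 * by e.g. i915_gem_object_pin_map().
		 */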
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

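/*
 * Wrapper for callers that do not already hold the object lock: take the
 * lock under a local ww transaction, retrying the pin on -EDEADLK via the
 * usual backoff.
 */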
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

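/*
 * Only vmap()/vmap_pfn() mappings need explicit teardown; a single low page
 * mapped via page_address() has no vmalloc address and needs nothing here.
 */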
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

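/*
 * Map an object without backing struct pages (e.g. device local memory) by
 * translating its DMA addresses into pfns relative to the memory region's
 * iomap; only a write-combined mapping is supported here.
 */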
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return ERR_PTR(-ENODEV);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
		return ERR_PTR(-ENXIO);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

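/*
 * Illustrative usage (a sketch, not lifted from a specific caller; assumes
 * the object lock is held): fill an object through a CPU mapping, then
 * flush the writes before handing the pages to the GPU.
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */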
void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

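/*
 * Flush CPU writes in [offset, offset + size) of the pinned mapping back to
 * memory, clflushing only when the object is neither coherent for writes
 * nor mapped write-combined.
 */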
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

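/*
 * Look up the scatterlist entry covering page (or DMA page) n. With
 * allow_alloc, the forward walk may populate the radixtree cache, taking
 * the iter mutex and allocating tree nodes; callers that must avoid such
 * allocations pass false and fall back to a plain linear walk.
 */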
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool allow_alloc)
{
	const bool dma = iter == &obj->mm.get_dma_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	if (!allow_alloc)
		goto manual_lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	goto manual_walk;

manual_lookup:
	idx = 0;
	sg = obj->mm.pages->sgl;
	count = __sg_page_count(sg);

manual_walk:
	/*
	 * In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset, true);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

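/*
 * Return the DMA address of page n and, optionally, the number of bytes
 * remaining in its scatterlist entry from that page onwards.
 */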
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}