/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
	GEM_BUG_ON(!obj->mm.page_sizes.phys);

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number
	 * of other factors.
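	 *
	 * As an illustrative worked example (numbers assumed, not taken from
	 * any particular platform): with supported = 4K | 64K | 2M and
	 * phys = 4K | 64K, the loop below yields sg = 4K | 64K, since
	 * phys & (~0u << 12) and phys & (~0u << 16) are non-zero while
	 * phys & (~0u << 21) is zero.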
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
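 *
 * A minimal usage sketch (illustrative only, error handling trimmed; it
 * assumes the caller takes the object lock itself):
 *
 *	i915_gem_object_lock(obj, NULL);
 *	err = i915_gem_object_pin_pages(obj);
 *	i915_gem_object_unlock(obj);
 *	if (err)
 *		return err;
 *	... access the backing store, e.g. via i915_gem_object_get_page() ...
 *	i915_gem_object_unpin_pages(obj);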
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr, obj->base.size);
}

static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_gt *gt;
	int id;

	for_each_gt(gt, i915, id) {
		if (!obj->mm.tlb[id])
			continue;

		intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
		obj->mm.tlb[id] = 0;
	}
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	flush_tlb_invalidate(obj);

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from
	 * gtt lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct vm_page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
#ifdef notyet
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
#endif
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
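/*
 * Illustrative usage sketch (not part of the driver; assumes the caller
 * already holds the object lock, and that `data' and `size' are the
 * caller's own buffer and length):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */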
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return ERR_PTR(-EINVAL);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete, our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 * - If the object can be placed in device local-memory, then the
	 *   pages should be allocated and mapped as write-combined only.
	 *
	 * - Everything else is always allocated and mapped as write-back,
	 *   with the guarantee that everything is also coherent with the
	 *   GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so
	 * no fudging is needed there.
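	 *
	 * Concretely (an illustrative reading of the code below): any object
	 * that can be placed in lmem ends up mapped I915_MAP_WC, a
	 * kernel-internal lmem object (no user placements) asking for
	 * anything other than WC fails with -ENODEV, and all remaining
	 * objects on discrete are forced to I915_MAP_WB.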
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset)
{
	const bool dma = iter == &obj->mm.get_dma_page ||
		iter == &obj->ttm.get_io_page;
	unsigned int idx, count;
	struct scatterlist *sg;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
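	 *
	 * Illustrative example: for an object backed by a single 2M sg entry,
	 * the first lookup of page 0 populates the radix tree with the sg
	 * pointer at index 0 plus value entries for indices 1..511; a later
	 * lookup of, say, page 3 hits the value entry, redirects to index 0
	 * and returns that sg with *offset = 3.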
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry, the
	 * radix tree will contain a value entry that points to the start
	 * of that range. We will return the pointer to the base page and
	 * the offset of this page within the sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct vm_page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct vm_page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct vm_page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

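/*
 * An illustrative worked example for the helper below (assuming 4K pages):
 * if page n lands one page into its DMA segment (offset == 1), the result
 * is sg_dma_address(sg) + 4096, and *len reports sg_dma_len(sg) - 4096,
 * i.e. the bytes remaining in that contiguous segment.
 */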
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				      pgoff_t n, unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}