// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
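 *
 * An example of wiring the helpers into a driver; a minimal sketch, assuming
 * a hypothetical driver named "foo" (DRM_GEM_SHMEM_DRIVER_OPS from
 * <drm/drm_gem_shmem_helper.h> fills in the dumb-buffer and PRIME callbacks)::
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};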
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
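 *
 * A typical call from a driver; a minimal sketch with error handling
 * shortened, where "dev" and "size" stand in for the driver's own values::
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);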
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
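 *
 * A short usage sketch with error handling shortened; on success, map.vaddr
 * points at system memory, so it can be accessed directly::
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *	memset(map.vaddr, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);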
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/**
 * drm_gem_shmem_madvise - Update the madvise status of a shmem GEM object
 * @shmem: shmem GEM object
 * @madv: New madvise value
 *
 * This function updates the madvise status unless the object has already been
 * purged.
 *
 * Returns:
 * True if the object has not been purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

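/**
 * drm_gem_shmem_purge_locked - Purge the backing storage of a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function releases the backing pages and scatter/gather table, removes
 * the mmap offset, unmaps the object from userspace and truncates its shmem
 * file so the memory is returned to the system. The caller must hold
 * shmem->pages_lock.
 */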
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible back
	 * to the system, as we are called from the OOM handler. To do this
	 * we must instruct shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

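/**
 * drm_gem_shmem_purge - Purge the backing storage of a shmem GEM object
 * @shmem: shmem GEM object
 *
 * Like drm_gem_shmem_purge_locked(), but takes shmem->pages_lock itself and
 * bails out if the lock is contended.
 *
 * A typical caller is a driver's shrinker; a minimal sketch, assuming the
 * driver keeps its purgeable objects on a hypothetical "shrink_list" via the
 * object's madv_list member::
 *
 *	list_for_each_entry(shmem, &foo->shrink_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 *
 * Returns:
 * True if the object was purged, false if the lock could not be taken.
 */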
bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
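 *
 * For instance, hardware that requires the pitch to be aligned to 64 bytes
 * could wrap this function as follows; a minimal sketch, with the "foo" names
 * as placeholders::
 *
 *	static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		return drm_gem_shmem_dumb_create(file, dev, args);
 *	}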
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * the differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
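 *
 * A typical use when setting up device access; a minimal sketch with error
 * handling shortened, after which the DMA addresses can be read from the
 * table with sg_dma_address() and sg_dma_len()::
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);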
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");