// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
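
/*
 * Illustrative sketch (not part of this file; the my_* names are
 * hypothetical): a driver that needs to override a single callback can
 * build its own &drm_gem_object_funcs from the drm_gem_shmem_object_*()
 * wrappers and keep the shmem defaults for everything else:
 *
 *	static const struct drm_gem_object_funcs my_gem_funcs = {
 *		.free = drm_gem_shmem_object_free,
 *		.print_info = drm_gem_shmem_object_print_info,
 *		.pin = drm_gem_shmem_object_pin,
 *		.unpin = drm_gem_shmem_object_unpin,
 *		.get_sg_table = drm_gem_shmem_object_get_sg_table,
 *		.vmap = my_vmap,	(hypothetical driver override)
 *		.vunmap = drm_gem_shmem_object_vunmap,
 *		.mmap = drm_gem_shmem_object_mmap,
 *		.vm_ops = &drm_gem_shmem_vm_ops,
 *	};
 */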

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See the comment above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
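
/*
 * Usage sketch (illustrative only; mirrors the pattern used by
 * drm_gem_shmem_create_with_handle() below): create an object, then hand
 * ownership to a userspace handle and drop the allocation reference.
 *
 *	struct drm_gem_shmem_object *shmem;
 *	u32 handle;
 *	int ret;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *	ret = drm_gem_handle_create(file_priv, &shmem->base, &handle);
 *	drm_gem_object_put(&shmem->base);	(the handle now owns the ref)
 */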

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
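
/*
 * Pinning sketch (illustrative; my_hw_use_pages() is a hypothetical driver
 * function): keep the backing pages resident across a hardware operation,
 * then release the pin.
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *	my_hw_use_pages(shmem);
 *	drm_gem_shmem_unpin(shmem);
 */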

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
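
/*
 * Mapping sketch (illustrative; assumes a natively allocated object whose
 * reservation lock is held around both calls, as the helpers assert):
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (!ret) {
 *		memcpy(map.vaddr, src, len);	(CPU access via the mapping)
 *		drm_gem_shmem_vunmap(shmem, &map);
 *	}
 *	dma_resv_unlock(shmem->base.resv);
 */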

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/*
 * Update madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much memory as possible back to the
	 * system, since we are called from the OOM path. To do this we must
	 * instruct shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
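
/*
 * Reclaim sketch (illustrative; loosely modeled on how driver shrinkers use
 * madvise/purge, not a drop-in implementation): userspace marks an object
 * purgeable, and a shrinker later frees the backing store under the
 * reservation lock.
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	drm_gem_shmem_madvise(shmem, 1);	(madv > 0: userspace doesn't need it)
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge(shmem);
 *	dma_resv_unlock(shmem->base.resv);
 */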

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
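
/*
 * Wiring sketch (the my_driver name is hypothetical): drivers without extra
 * pitch constraints can use this helper directly as their
 * &drm_driver.dumb_create callback; the DRM_GEM_SHMEM_DRIVER_OPS macro in
 * <drm/drm_gem_shmem_helper.h> sets it up together with the PRIME callbacks.
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */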

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
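
/*
 * mmap wiring sketch (my_fops/my_driver are hypothetical): because the
 * default drm_gem_shmem_funcs sets &drm_gem_object_funcs.mmap, the generic
 * GEM file operations from DEFINE_DRM_GEM_FOPS() in <drm/drm_gem.h> suffice:
 *
 *	DEFINE_DRM_GEM_FOPS(my_fops);
 *
 *	static const struct drm_driver my_driver = {
 *		.fops = &my_fops,
 *		...
 *	};
 */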

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
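
/*
 * DMA sketch (illustrative; my_hw_program_chunk() is hypothetical): fetch
 * the dma-mapped table and feed each DMA segment to the hardware. Repeated
 * calls return the cached table.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		my_hw_program_chunk(sg_dma_address(sg), sg_dma_len(sg));
 */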

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");