/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in the handle_to_fd ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is later
 * destroyed, we destroy the attachment and drop the reference to
 * the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the GEM object
 * from the dma-buf private. PRIME will do this automatically for drivers
 * that use the drm_gem_prime_{import,export} helpers.
 */

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

#if 0	/* dma-buf handle cache, not yet enabled in this port */
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}
#endif

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
					prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

#if 0
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}
#endif

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

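/*
 * Example (illustrative sketch, not part of this file): a driver that
 * supplies its own dma_buf_ops instead of drm_gem_prime_dmabuf_ops below
 * must still plug drm_gem_dmabuf_release into the .release slot, so the
 * GEM reference taken at export time is dropped. The "foo" hooks are
 * hypothetical placeholders.
 *
 *	static const struct dma_buf_ops foo_dmabuf_ops = {
 *		.map_dma_buf	= foo_map_dma_buf,
 *		.unmap_dma_buf	= foo_unmap_dma_buf,
 *		.release	= drm_gem_dmabuf_release,
 *		.mmap		= foo_dmabuf_mmap,
 *	};
 */
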
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{
}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{
}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import.  These functions implement dma-buf support in terms
 * of six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table: produce a GEM object from another
 *    driver's scatter/gather table
 */

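/*
 * Example (illustrative sketch): wiring the PRIME helpers into a driver.
 * The "foo" driver and its foo_gem_prime_* callbacks are hypothetical;
 * the drm_gem_prime_* helpers and the drm_driver fields are the ones
 * documented above.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_pin		   = foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = foo_gem_prime_vmap,
 *		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
 *		.gem_prime_mmap		   = foo_gem_prime_mmap,
 *	};
 */
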
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export callback for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &drm_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

#if 0
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the caller's ref is now used by the dma-buf */
	drm_gem_object_reference(obj);

	return dmabuf;
}
#endif

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The
 * actual exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
#if 0
	struct drm_gem_object *obj;
#endif
	int ret = 0;
#if 0
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
#endif
		ret = -ENOENT;
#if 0
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);
#endif

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

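/*
 * Example (illustrative sketch): this export path is reached from
 * userspace through DRM_IOCTL_PRIME_HANDLE_TO_FD, roughly as follows;
 * "drm_fd" and "handle" are assumed to be an open DRM device fd and a
 * valid GEM handle.
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == 0)
 *		dmabuf_fd = args.fd;	/* passable to another device/process */
 */
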
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import callback for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
#if 0
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dma-buf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
#endif
		return ERR_PTR(-EINVAL);

#if 0
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
#endif
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The
 * actual importing of the GEM object from the dma-buf is done through
 * the gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
#if 0
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

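/*
 * Example (illustrative sketch): the matching userspace import side uses
 * DRM_IOCTL_PRIME_FD_TO_HANDLE; "drm_fd" and "dmabuf_fd" are assumed to
 * be an open DRM device fd and a dma-buf fd received from an exporter.
 *
 *	struct drm_prime_handle args = {
 *		.fd = dmabuf_fd,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) == 0)
 *		handle = args.handle;	/* handle in this device's namespace */
 */
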
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
#if 0
	struct sg_table *sg = NULL;
#endif
	int ret;

#if 0
	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
#endif
		ret = -ENOMEM;
#if 0
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
#endif
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

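/*
 * Example (illustrative sketch): a shmem-backed driver's
 * gem_prime_get_sg_table callback would typically be little more than a
 * call to this helper; the "foo" wrapper and its page array are
 * hypothetical.
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
 *	}
 */
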
/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

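/*
 * Example (illustrative sketch): a TTM-based driver importing a dma-buf
 * might unpack the mapped sg table into its per-object page and DMA
 * address arrays like this; the "bo" fields are hypothetical.
 *
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
 *					       bo->dma_addrs, bo->num_pages);
 *	if (ret)
 *		return ret;
 */
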
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach = obj->import_attach;

	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
#if 0
	/* detach and drop the reference taken at import time */
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(attach->dmabuf);
#endif
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

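/*
 * Example (illustrative sketch): drivers call this from their free-object
 * path for imported objects; "foo_bo" and its cached sg table pointer are
 * hypothetical.
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */
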
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	lockinit(&prime_fpriv->lock, "drmpfpl", 0, LK_CANRECURSE);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}