xref: /dragonfly/sys/dev/drm/drm_prime.c (revision 0de61e28)
1 /*
2  * Copyright © 2012 Red Hat
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Dave Airlie <airlied@redhat.com>
25  *      Rob Clark <rob.clark@linaro.org>
26  *
27  */
28 
29 #include <linux/export.h>
30 #include <linux/dma-buf.h>
31 #include <linux/rbtree.h>
32 #include <drm/drmP.h>
33 #include <drm/drm_gem.h>
34 
35 #include "drm_internal.h"
36 
37 /*
38  * DMA-BUF/GEM Object references and lifetime overview:
39  *
40  * On export, the dma_buf holds a reference to the exporting GEM
41  * object. It takes this reference in handle_to_fd_ioctl, when it
42  * first calls .prime_export and stores the exporting GEM object in
43  * the dma_buf priv. This reference is released when the dma_buf
44  * object goes away in the driver .release function.
45  *
46  * On import, the importing GEM object holds a reference to the
47  * dma_buf (which in turn holds a ref to the exporting GEM object).
48  * It takes that reference in the fd_to_handle ioctl.
49  * It calls dma_buf_get, creates an attachment to it and stores the
50  * attachment in the GEM object. When the imported object is
51  * destroyed, we remove the attachment and drop the reference to
52  * the dma_buf.
53  *
54  * Thus the chain of references always flows in one direction
55  * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
56  *
57  * Self-importing: if userspace is using PRIME as a replacement for flink
58  * then it will get a fd->handle request for a GEM object that it created.
59  * Drivers should detect this situation and return the GEM object
60  * from the dma-buf private.  Prime will do this automatically for drivers that
61  * use the drm_gem_prime_{import,export} helpers.
62  */
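
/*
 * Self-importing in practice: a minimal sketch of the check done by the
 * drm_gem_prime_import() helper below for buffers that were exported with
 * drm_gem_prime_export() (and therefore use drm_gem_prime_dmabuf_ops):
 *
 *	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		if (obj->dev == dev) {
 *			drm_gem_object_reference(obj);
 *			return obj;
 *		}
 *	}
 */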
63 
64 struct drm_prime_member {
65 	struct dma_buf *dma_buf;
66 	uint32_t handle;
67 
68 	struct rb_node dmabuf_rb;
69 	struct rb_node handle_rb;
70 };
71 
72 struct drm_prime_attachment {
73 	struct sg_table *sgt;
74 	enum dma_data_direction dir;
75 };
76 
77 #if 0
78 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
79 				    struct dma_buf *dma_buf, uint32_t handle)
80 {
81 	struct drm_prime_member *member;
82 	struct rb_node **p, *rb;
83 
84 	member = kmalloc(sizeof(*member), GFP_KERNEL);
85 	if (!member)
86 		return -ENOMEM;
87 
88 	get_dma_buf(dma_buf);
89 	member->dma_buf = dma_buf;
90 	member->handle = handle;
91 
92 	rb = NULL;
93 	p = &prime_fpriv->dmabufs.rb_node;
94 	while (*p) {
95 		struct drm_prime_member *pos;
96 
97 		rb = *p;
98 		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
99 		if (dma_buf > pos->dma_buf)
100 			p = &rb->rb_right;
101 		else
102 			p = &rb->rb_left;
103 	}
104 	rb_link_node(&member->dmabuf_rb, rb, p);
105 	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
106 
107 	rb = NULL;
108 	p = &prime_fpriv->handles.rb_node;
109 	while (*p) {
110 		struct drm_prime_member *pos;
111 
112 		rb = *p;
113 		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
114 		if (handle > pos->handle)
115 			p = &rb->rb_right;
116 		else
117 			p = &rb->rb_left;
118 	}
119 	rb_link_node(&member->handle_rb, rb, p);
120 	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
121 
122 	return 0;
123 }
124 
125 static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
126 						      uint32_t handle)
127 {
128 	struct rb_node *rb;
129 
130 	rb = prime_fpriv->handles.rb_node;
131 	while (rb) {
132 		struct drm_prime_member *member;
133 
134 		member = rb_entry(rb, struct drm_prime_member, handle_rb);
135 		if (member->handle == handle)
136 			return member->dma_buf;
137 		else if (member->handle < handle)
138 			rb = rb->rb_right;
139 		else
140 			rb = rb->rb_left;
141 	}
142 
143 	return NULL;
144 }
145 
146 static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
147 				       struct dma_buf *dma_buf,
148 				       uint32_t *handle)
149 {
150 	struct rb_node *rb;
151 
152 	rb = prime_fpriv->dmabufs.rb_node;
153 	while (rb) {
154 		struct drm_prime_member *member;
155 
156 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
157 		if (member->dma_buf == dma_buf) {
158 			*handle = member->handle;
159 			return 0;
160 		} else if (member->dma_buf < dma_buf) {
161 			rb = rb->rb_right;
162 		} else {
163 			rb = rb->rb_left;
164 		}
165 	}
166 
167 	return -ENOENT;
168 }
169 #endif
170 
171 static int drm_gem_map_attach(struct dma_buf *dma_buf,
172 			      struct device *target_dev,
173 			      struct dma_buf_attachment *attach)
174 {
175 	struct drm_prime_attachment *prime_attach;
176 	struct drm_gem_object *obj = dma_buf->priv;
177 	struct drm_device *dev = obj->dev;
178 
179 	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
180 	if (!prime_attach)
181 		return -ENOMEM;
182 
183 	prime_attach->dir = DMA_NONE;
184 	attach->priv = prime_attach;
185 
186 	if (!dev->driver->gem_prime_pin)
187 		return 0;
188 
189 	return dev->driver->gem_prime_pin(obj);
190 }
191 
192 static void drm_gem_map_detach(struct dma_buf *dma_buf,
193 			       struct dma_buf_attachment *attach)
194 {
195 	struct drm_prime_attachment *prime_attach = attach->priv;
196 	struct drm_gem_object *obj = dma_buf->priv;
197 	struct drm_device *dev = obj->dev;
198 	struct sg_table *sgt;
199 
200 	if (dev->driver->gem_prime_unpin)
201 		dev->driver->gem_prime_unpin(obj);
202 
203 	if (!prime_attach)
204 		return;
205 
206 	sgt = prime_attach->sgt;
207 	if (sgt) {
208 		if (prime_attach->dir != DMA_NONE)
209 			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
210 					prime_attach->dir);
211 		sg_free_table(sgt);
212 	}
213 
214 	kfree(sgt);
215 	kfree(prime_attach);
216 	attach->priv = NULL;
217 }
218 
219 #if 0
220 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
221 					struct dma_buf *dma_buf)
222 {
223 	struct rb_node *rb;
224 
225 	rb = prime_fpriv->dmabufs.rb_node;
226 	while (rb) {
227 		struct drm_prime_member *member;
228 
229 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
230 		if (member->dma_buf == dma_buf) {
231 			rb_erase(&member->handle_rb, &prime_fpriv->handles);
232 			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
233 
234 			dma_buf_put(dma_buf);
235 			kfree(member);
236 			return;
237 		} else if (member->dma_buf < dma_buf) {
238 			rb = rb->rb_right;
239 		} else {
240 			rb = rb->rb_left;
241 		}
242 	}
243 }
244 #endif
245 
246 static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
247 					    enum dma_data_direction dir)
248 {
249 	struct drm_prime_attachment *prime_attach = attach->priv;
250 	struct drm_gem_object *obj = attach->dmabuf->priv;
251 	struct sg_table *sgt;
252 
253 	if (WARN_ON(dir == DMA_NONE || !prime_attach))
254 		return ERR_PTR(-EINVAL);
255 
256 	/* return the cached mapping when possible */
257 	if (prime_attach->dir == dir)
258 		return prime_attach->sgt;
259 
260 	/*
261 	 * two mappings with different directions for the same attachment are
262 	 * not allowed
263 	 */
264 	if (WARN_ON(prime_attach->dir != DMA_NONE))
265 		return ERR_PTR(-EBUSY);
266 
267 	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
268 
269 	if (!IS_ERR(sgt)) {
270 		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
271 			sg_free_table(sgt);
272 			kfree(sgt);
273 			sgt = ERR_PTR(-ENOMEM);
274 		} else {
275 			prime_attach->sgt = sgt;
276 			prime_attach->dir = dir;
277 		}
278 	}
279 
280 	return sgt;
281 }
282 
283 static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
284 				  struct sg_table *sgt,
285 				  enum dma_data_direction dir)
286 {
287 	/* nothing to be done here */
288 }
289 
290 /**
291  * drm_gem_dmabuf_export - dma_buf export implementation for GEM
292  * @dev: parent device for the exported dmabuf
293  * @exp_info: the export information used by dma_buf_export()
294  *
295  * This wraps dma_buf_export() for use by generic GEM drivers that are using
296  * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
297  * a reference to the drm_device which is released by drm_gem_dmabuf_release().
298  *
299  * Returns the new dmabuf.
300  */
301 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
302 				      struct dma_buf_export_info *exp_info)
303 {
304 	struct dma_buf *dma_buf;
305 
306 	dma_buf = dma_buf_export(exp_info);
307 	if (!IS_ERR(dma_buf))
308 		drm_dev_ref(dev);
309 
310 	return dma_buf;
311 }
312 EXPORT_SYMBOL(drm_gem_dmabuf_export);
313 
314 /**
315  * drm_gem_dmabuf_release - dma_buf release implementation for GEM
316  * @dma_buf: buffer to be released
317  *
318  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
319  * must use this in their dma_buf ops structure as the release callback.
320  * drm_gem_dmabuf_release() should be used in conjunction with
321  * drm_gem_dmabuf_export().
322  */
323 void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
324 {
325 	struct drm_gem_object *obj = dma_buf->priv;
326 	struct drm_device *dev = obj->dev;
327 
328 	/* drop the reference the export fd holds */
329 	drm_gem_object_unreference_unlocked(obj);
330 
331 	drm_dev_unref(dev);
332 }
333 EXPORT_SYMBOL(drm_gem_dmabuf_release);
334 
335 static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
336 {
337 	struct drm_gem_object *obj = dma_buf->priv;
338 	struct drm_device *dev = obj->dev;
339 
340 	return dev->driver->gem_prime_vmap(obj);
341 }
342 
343 static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
344 {
345 	struct drm_gem_object *obj = dma_buf->priv;
346 	struct drm_device *dev = obj->dev;
347 
348 	dev->driver->gem_prime_vunmap(obj, vaddr);
349 }
350 
351 static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
352 					unsigned long page_num)
353 {
354 	return NULL;
355 }
356 
357 static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
358 					 unsigned long page_num, void *addr)
359 {
360 
361 }

362 static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
363 				 unsigned long page_num)
364 {
365 	return NULL;
366 }
367 
368 static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
369 				  unsigned long page_num, void *addr)
370 {
371 
372 }
373 
374 static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
375 			       struct vm_area_struct *vma)
376 {
377 	struct drm_gem_object *obj = dma_buf->priv;
378 	struct drm_device *dev = obj->dev;
379 
380 	if (!dev->driver->gem_prime_mmap)
381 		return -ENOSYS;
382 
383 	return dev->driver->gem_prime_mmap(obj, vma);
384 }
385 
386 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
387 	.attach = drm_gem_map_attach,
388 	.detach = drm_gem_map_detach,
389 	.map_dma_buf = drm_gem_map_dma_buf,
390 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
391 	.release = drm_gem_dmabuf_release,
392 	.kmap = drm_gem_dmabuf_kmap,
393 	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
394 	.kunmap = drm_gem_dmabuf_kunmap,
395 	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
396 	.mmap = drm_gem_dmabuf_mmap,
397 	.vmap = drm_gem_dmabuf_vmap,
398 	.vunmap = drm_gem_dmabuf_vunmap,
399 };
400 
401 /**
402  * DOC: PRIME Helpers
403  *
404  * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
405  * simpler APIs by using the helper functions @drm_gem_prime_export and
406  * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
407  * six lower-level driver callbacks:
408  *
409  * Export callbacks:
410  *
411  *  * @gem_prime_pin (optional): prepare a GEM object for exporting
412  *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
413  *  * @gem_prime_vmap: vmap a buffer exported by your driver
414  *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
415  *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
416  *
417  * Import callback:
418  *
419  *  * @gem_prime_import_sg_table (import): produce a GEM object from another
420  *    driver's scatter/gather table
421  */
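
/*
 * As an illustration, a driver using these helpers typically wires them up
 * in its struct drm_driver roughly as follows (a sketch only; the foo_*
 * names are placeholders, not taken from any particular driver):
 *
 *	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *	.gem_prime_export	= drm_gem_prime_export,
 *	.gem_prime_import	= drm_gem_prime_import,
 *	.gem_prime_pin		= foo_gem_prime_pin,		(optional)
 *	.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
 *	.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *	.gem_prime_vmap		= foo_gem_prime_vmap,
 *	.gem_prime_vunmap	= foo_gem_prime_vunmap,
 *	.gem_prime_mmap		= foo_gem_prime_mmap,		(optional)
 */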
422 
423 /**
424  * drm_gem_prime_export - helper library implementation of the export callback
425  * @dev: drm_device to export from
426  * @obj: GEM object to export
427  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
428  *
429  * This is the implementation of the gem_prime_export functions for GEM drivers
430  * using the PRIME helpers.
431  */
432 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
433 				     struct drm_gem_object *obj,
434 				     int flags)
435 {
436 	struct dma_buf_export_info exp_info = {
437 #if 0
438 		.exp_name = KBUILD_MODNAME, /* white lie for debug */
439 		.owner = dev->driver->fops->owner,
440 #endif
441 		.ops = &drm_gem_prime_dmabuf_ops,
442 		.size = obj->size,
443 		.flags = flags,
444 		.priv = obj,
445 	};
446 
447 	if (dev->driver->gem_prime_res_obj)
448 		exp_info.resv = dev->driver->gem_prime_res_obj(obj);
449 
450 	return drm_gem_dmabuf_export(dev, &exp_info);
451 }
452 EXPORT_SYMBOL(drm_gem_prime_export);
453 
454 #if 0
455 static struct dma_buf *export_and_register_object(struct drm_device *dev,
456 						  struct drm_gem_object *obj,
457 						  uint32_t flags)
458 {
459 	struct dma_buf *dmabuf;
460 
461 	/* prevent races with concurrent gem_close. */
462 	if (obj->handle_count == 0) {
463 		dmabuf = ERR_PTR(-ENOENT);
464 		return dmabuf;
465 	}
466 
467 	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
468 	if (IS_ERR(dmabuf)) {
469 		/* normally the created dma-buf takes ownership of the ref,
470 		 * but if that fails then drop the ref
471 		 */
472 		return dmabuf;
473 	}
474 
475 	/*
476 	 * Note that callers do not need to clean up the export cache
477 	 * since the check for obj->handle_count guarantees that someone
478 	 * will clean it up.
479 	 */
480 	obj->dma_buf = dmabuf;
481 	get_dma_buf(obj->dma_buf);
482 	/* Grab a new ref since the caller's ref is now used by the dma-buf */
483 	drm_gem_object_reference(obj);
484 
485 	return dmabuf;
486 }
487 #endif
488 
489 /**
490  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
491  * @dev: dev to export the buffer from
492  * @file_priv: drm file-private structure
493  * @handle: buffer handle to export
494  * @flags: flags like DRM_CLOEXEC
495  * @prime_fd: pointer to storage for the fd id of the created dma-buf
496  *
497  * This is the PRIME export function, which GEM drivers must use to ensure
498  * correct lifetime management of the underlying GEM object.
499  * The actual exporting from a GEM object to a dma-buf is done through the
500  * gem_prime_export driver callback.
501  */
502 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
503 			       struct drm_file *file_priv, uint32_t handle,
504 			       uint32_t flags,
505 			       int *prime_fd)
506 {
507 #if 0
508 	struct drm_gem_object *obj;
509 #endif
510 	int ret = 0;
511 #if 0
512 	struct dma_buf *dmabuf;
513 
514 	mutex_lock(&file_priv->prime.lock);
515 	obj = drm_gem_object_lookup(file_priv, handle);
516 	if (!obj)  {
517 #endif
518 		ret = -ENOENT;
519 #if 0
520 		goto out_unlock;
521 	}
522 
523 	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
524 	if (dmabuf) {
525 		get_dma_buf(dmabuf);
526 		goto out_have_handle;
527 	}
528 
529 	mutex_lock(&dev->object_name_lock);
530 	/* re-export the original imported object */
531 	if (obj->import_attach) {
532 		dmabuf = obj->import_attach->dmabuf;
533 		get_dma_buf(dmabuf);
534 		goto out_have_obj;
535 	}
536 
537 	if (obj->dma_buf) {
538 		get_dma_buf(obj->dma_buf);
539 		dmabuf = obj->dma_buf;
540 		goto out_have_obj;
541 	}
542 
543 	dmabuf = export_and_register_object(dev, obj, flags);
544 	if (IS_ERR(dmabuf)) {
545 		/* normally the created dma-buf takes ownership of the ref,
546 		 * but if that fails then drop the ref
547 		 */
548 		ret = PTR_ERR(dmabuf);
549 		mutex_unlock(&dev->object_name_lock);
550 		goto out;
551 	}
552 
553 out_have_obj:
554 	/*
555 	 * If we've exported this buffer then cheat and add it to the import list
556 	 * so we get the correct handle back. We must do this under the
557 	 * protection of dev->object_name_lock to ensure that a racing gem close
558 	 * ioctl doesn't fail to remove this buffer handle from the cache.
559 	 */
560 	ret = drm_prime_add_buf_handle(&file_priv->prime,
561 				       dmabuf, handle);
562 	mutex_unlock(&dev->object_name_lock);
563 	if (ret)
564 		goto fail_put_dmabuf;
565 
566 out_have_handle:
567 	ret = dma_buf_fd(dmabuf, flags);
568 	/*
569 	 * We must _not_ remove the buffer from the handle cache since the newly
570 	 * created dma buf is already linked in the global obj->dma_buf pointer,
571 	 * and that is invariant as long as a userspace gem handle exists.
572 	 * Closing the handle will clean out the cache anyway, so we don't leak.
573 	 */
574 	if (ret < 0) {
575 		goto fail_put_dmabuf;
576 	} else {
577 		*prime_fd = ret;
578 		ret = 0;
579 	}
580 
581 	goto out;
582 
583 fail_put_dmabuf:
584 	dma_buf_put(dmabuf);
585 out:
586 	drm_gem_object_unreference_unlocked(obj);
587 out_unlock:
588 	mutex_unlock(&file_priv->prime.lock);
589 #endif
590 
591 	return ret;
592 }
593 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
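
/*
 * Userspace reaches this function through the DRM_IOCTL_PRIME_HANDLE_TO_FD
 * ioctl handled below; a rough sketch of the userspace side (error handling
 * omitted):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 * On success args.fd holds the file descriptor of the exported dma-buf.
 */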
594 
595 /**
596  * drm_gem_prime_import - helper library implementation of the import callback
597  * @dev: drm_device to import into
598  * @dma_buf: dma-buf object to import
599  *
600  * This is the implementation of the gem_prime_import functions for GEM drivers
601  * using the PRIME helpers.
602  */
603 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
604 					    struct dma_buf *dma_buf)
605 {
606 #if 0
607 	struct dma_buf_attachment *attach;
608 	struct sg_table *sgt;
609 	struct drm_gem_object *obj;
610 	int ret;
611 
612 	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
613 		obj = dma_buf->priv;
614 		if (obj->dev == dev) {
615 			/*
616 			 * Importing a dmabuf exported from our own GEM increases
617 			 * the refcount on the GEM object instead of the dmabuf's f_count.
618 			 */
619 			drm_gem_object_reference(obj);
620 			return obj;
621 		}
622 	}
623 
624 	if (!dev->driver->gem_prime_import_sg_table)
625 #endif
626 		return ERR_PTR(-EINVAL);
627 
628 #if 0
629 	attach = dma_buf_attach(dma_buf, dev->dev);
630 	if (IS_ERR(attach))
631 		return ERR_CAST(attach);
632 
633 	get_dma_buf(dma_buf);
634 
635 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
636 	if (IS_ERR(sgt)) {
637 		ret = PTR_ERR(sgt);
638 		goto fail_detach;
639 	}
640 
641 	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
642 	if (IS_ERR(obj)) {
643 		ret = PTR_ERR(obj);
644 		goto fail_unmap;
645 	}
646 
647 	obj->import_attach = attach;
648 
649 	return obj;
650 
651 fail_unmap:
652 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
653 fail_detach:
654 	dma_buf_detach(dma_buf, attach);
655 	dma_buf_put(dma_buf);
656 
657 	return ERR_PTR(ret);
658 #endif
659 }
660 EXPORT_SYMBOL(drm_gem_prime_import);
661 
662 /**
663  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
664  * @dev: dev to import the buffer into
665  * @file_priv: drm file-private structure
666  * @prime_fd: fd id of the dma-buf which should be imported
667  * @handle: pointer to storage for the handle of the imported buffer object
668  *
669  * This is the PRIME import function, which GEM drivers must use to ensure
670  * correct lifetime management of the underlying GEM object.
671  * The actual importing of a GEM object from the dma-buf is done through the
672  * gem_prime_import driver callback.
673  */
674 int drm_gem_prime_fd_to_handle(struct drm_device *dev,
675 			       struct drm_file *file_priv, int prime_fd,
676 			       uint32_t *handle)
677 {
678 #if 0
679 	struct dma_buf *dma_buf;
680 	struct drm_gem_object *obj;
681 	int ret;
682 
683 	dma_buf = dma_buf_get(prime_fd);
684 	if (IS_ERR(dma_buf))
685 		return PTR_ERR(dma_buf);
686 
687 	mutex_lock(&file_priv->prime.lock);
688 
689 	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
690 			dma_buf, handle);
691 	if (ret == 0)
692 		goto out_put;
693 
694 	/* never seen this one, need to import */
695 	mutex_lock(&dev->object_name_lock);
696 	obj = dev->driver->gem_prime_import(dev, dma_buf);
697 	if (IS_ERR(obj)) {
698 		ret = PTR_ERR(obj);
699 		goto out_unlock;
700 	}
701 
702 	if (obj->dma_buf) {
703 		WARN_ON(obj->dma_buf != dma_buf);
704 	} else {
705 		obj->dma_buf = dma_buf;
706 		get_dma_buf(dma_buf);
707 	}
708 
709 	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
710 	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
711 	drm_gem_object_unreference_unlocked(obj);
712 	if (ret)
713 		goto out_put;
714 
715 	ret = drm_prime_add_buf_handle(&file_priv->prime,
716 			dma_buf, *handle);
717 	mutex_unlock(&file_priv->prime.lock);
718 	if (ret)
719 		goto fail;
720 
721 	dma_buf_put(dma_buf);
722 
723 	return 0;
724 
725 fail:
726 	/* hmm, if driver attached, we are relying on the free-object path
727 	 * to detach.. which seems ok..
728 	 */
729 	drm_gem_handle_delete(file_priv, *handle);
730 	dma_buf_put(dma_buf);
731 	return ret;
732 
733 out_unlock:
734 	mutex_unlock(&dev->object_name_lock);
735 out_put:
736 	mutex_unlock(&file_priv->prime.lock);
737 	dma_buf_put(dma_buf);
738 	return ret;
739 #endif
740 	return -EINVAL;
741 }
742 EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
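
/*
 * The import direction is reached through DRM_IOCTL_PRIME_FD_TO_HANDLE (see
 * drm_prime_fd_to_handle_ioctl() below); a rough sketch of the userspace
 * side:
 *
 *	struct drm_prime_handle args = { .fd = dmabuf_fd };
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *
 * On success args.handle holds the GEM handle for the imported buffer.
 */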
743 
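/*
 * drm_prime_handle_to_fd_ioctl - handler for DRM_IOCTL_PRIME_HANDLE_TO_FD
 *
 * Checks that the driver advertises DRIVER_PRIME and implements
 * prime_handle_to_fd, validates the flags passed in from userspace (only
 * DRM_CLOEXEC and DRM_RDWR are allowed) and then dispatches to the driver
 * callback, which is usually drm_gem_prime_handle_to_fd().
 */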
744 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
745 				 struct drm_file *file_priv)
746 {
747 	struct drm_prime_handle *args = data;
748 
749 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
750 		return -EINVAL;
751 
752 	if (!dev->driver->prime_handle_to_fd)
753 		return -ENOSYS;
754 
755 	/* check flags are valid */
756 	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
757 		return -EINVAL;
758 
759 	return dev->driver->prime_handle_to_fd(dev, file_priv,
760 			args->handle, args->flags, &args->fd);
761 }
762 
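/*
 * drm_prime_fd_to_handle_ioctl - handler for DRM_IOCTL_PRIME_FD_TO_HANDLE
 *
 * Checks that the driver advertises DRIVER_PRIME and implements
 * prime_fd_to_handle, then dispatches to the driver callback, which is
 * usually drm_gem_prime_fd_to_handle().
 */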
763 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
764 				 struct drm_file *file_priv)
765 {
766 	struct drm_prime_handle *args = data;
767 
768 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
769 		return -EINVAL;
770 
771 	if (!dev->driver->prime_fd_to_handle)
772 		return -ENOSYS;
773 
774 	return dev->driver->prime_fd_to_handle(dev, file_priv,
775 			args->fd, &args->handle);
776 }
777 
778 /**
779  * drm_prime_pages_to_sg - converts a page array into an sg list
780  * @pages: pointer to the array of page pointers to convert
781  * @nr_pages: length of the page vector
782  *
783  * This helper creates an sg table object from a set of pages; the
784  * driver is responsible for mapping the pages into the importer's
785  * address space for use with dma_buf itself.
786  */
787 struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
788 {
789 #if 0
790 	struct sg_table *sg = NULL;
791 #endif
792 	int ret;
793 
794 #if 0
795 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
796 	if (!sg) {
797 #endif
798 		ret = -ENOMEM;
799 #if 0
800 		goto out;
801 	}
802 
803 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
804 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
805 	if (ret)
806 		goto out;
807 
808 	return sg;
809 out:
810 	kfree(sg);
811 #endif
812 	return ERR_PTR(ret);
813 }
814 EXPORT_SYMBOL(drm_prime_pages_to_sg);
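
/*
 * A rough sketch (hypothetical driver code; the foo_* names are placeholders)
 * of a gem_prime_get_sg_table callback built on this helper, assuming the
 * driver keeps a page array for each buffer object:
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = to_foo_bo(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
 *	}
 */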
815 
816 /**
817  * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
818  * @sgt: scatter-gather table to convert
819  * @pages: array of page pointers to store the page array in
820  * @addrs: optional array to store the dma bus address of each page
821  * @max_pages: size of both the passed-in arrays
822  *
823  * Exports an sg table into an array of pages and addresses. This is currently
824  * required by the TTM driver in order to do correct fault handling.
825  */
826 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
827 				     dma_addr_t *addrs, int max_pages)
828 {
829 	unsigned count;
830 	struct scatterlist *sg;
831 	struct page *page;
832 	u32 len;
833 	int pg_index;
834 	dma_addr_t addr;
835 
836 	pg_index = 0;
837 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
838 		len = sg->length;
839 		page = sg_page(sg);
840 		addr = sg_dma_address(sg);
841 
842 		while (len > 0) {
843 			if (WARN_ON(pg_index >= max_pages))
844 				return -1;
845 			pages[pg_index] = page;
846 			if (addrs)
847 				addrs[pg_index] = addr;
848 
849 			page++;
850 			addr += PAGE_SIZE;
851 			len -= PAGE_SIZE;
852 			pg_index++;
853 		}
854 	}
855 	return 0;
856 }
857 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
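
/*
 * A rough sketch of a caller (hypothetical names), sizing both arrays to the
 * number of pages in the object:
 *
 *	npages = obj->size >> PAGE_SHIFT;
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages, bo->dma_addrs,
 *					       npages);
 *	if (ret < 0)
 *		return ret;
 */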
858 
859 /**
860  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
861  * @obj: GEM object which was created from a dma-buf
862  * @sg: the sg-table which was pinned at import time
863  *
864  * This is the cleanup function which GEM drivers need to call when they use
865  * @drm_gem_prime_import to import dma-bufs.
866  */
867 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
868 {
869 	struct dma_buf_attachment *attach;
870 	struct dma_buf *dma_buf;
871 	attach = obj->import_attach;
872 	if (sg)
873 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
874 	dma_buf = attach->dmabuf;
875 #if 0
876 	dma_buf_detach(attach->dmabuf, attach);
877 	/* remove the reference */
878 	dma_buf_put(dma_buf);
879 #endif
880 }
881 EXPORT_SYMBOL(drm_prime_gem_destroy);
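
/*
 * A rough sketch of the free path in a driver that imported the buffer with
 * drm_gem_prime_import(); bo->sgt stands in for wherever the driver stored
 * the sg table obtained at import time:
 *
 *	if (obj->import_attach)
 *		drm_prime_gem_destroy(obj, bo->sgt);
 */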
882 
883 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
884 {
885 	lockinit(&prime_fpriv->lock, "drmpfpl", 0, LK_CANRECURSE);
886 	prime_fpriv->dmabufs = LINUX_RB_ROOT;
887 	prime_fpriv->handles = LINUX_RB_ROOT;
888 }
889 
890 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
891 {
892 	/* by now drm_gem_release should've made sure the list is empty */
893 	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
894 }
895