xref: /netbsd/sys/external/bsd/drm2/dist/drm/drm_prime.c (revision 813ac315)
1 /*	$NetBSD: drm_prime.c,v 1.20 2022/07/06 01:12:45 riastradh Exp $	*/
2 
3 /*
4  * Copyright © 2012 Red Hat
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23  * IN THE SOFTWARE.
24  *
25  * Authors:
26  *      Dave Airlie <airlied@redhat.com>
27  *      Rob Clark <rob.clark@linaro.org>
28  *
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.20 2022/07/06 01:12:45 riastradh Exp $");
33 
34 #include <linux/export.h>
35 #include <linux/dma-buf.h>
36 #include <linux/rbtree.h>
37 
38 #include <drm/drm.h>
39 #include <drm/drm_drv.h>
40 #include <drm/drm_file.h>
41 #include <drm/drm_framebuffer.h>
42 #include <drm/drm_gem.h>
43 #include <drm/drm_prime.h>
44 
45 #include "drm_internal.h"
46 
47 #ifdef __NetBSD__
48 
49 #include <sys/file.h>
50 
51 #include <drm/bus_dma_hacks.h>
52 
53 #include <linux/nbsd-namespace.h>
54 
55 #endif	/* __NetBSD__ */
56 
57 /**
58  * DOC: overview and lifetime rules
59  *
60  * Similar to GEM global names, PRIME file descriptors are also used to share
61  * buffer objects across processes. They offer additional security: as file
62  * descriptors must be explicitly sent over UNIX domain sockets to be shared
63  * between applications, they can't be guessed like the globally unique GEM
64  * names.
65  *
66  * Drivers that support the PRIME API implement the
67  * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
68  * GEM based drivers must use drm_gem_prime_handle_to_fd() and
69  * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
70  * actual driver interface is provided through the &drm_gem_object_funcs.export
71  * and &drm_driver.gem_prime_import hooks.
72  *
73  * &dma_buf_ops implementations for GEM drivers are all individually exported
74  * for drivers which need to override or reimplement some of them.
75  *
76  * Reference Counting for GEM Drivers
77  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
78  *
79  * On export, the &dma_buf holds a reference to the exported buffer object,
80  * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
81  * IOCTL, when it first calls &drm_gem_object_funcs.export
82  * and stores the exporting GEM object in the &dma_buf.priv field. This
83  * reference needs to be released when the final reference to the &dma_buf
84  * itself is dropped and its &dma_buf_ops.release function is called.  For
85  * GEM-based drivers, the &dma_buf should be exported using
86  * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
87  *
88  * Thus the chain of references always flows in one direction, avoiding loops:
89  * importing GEM object -> dma-buf -> exported GEM bo. Further complications
90  * are the lookup caches for import and export. These are required to guarantee
91  * that any given object will always have only one unique userspace handle. This
92  * is required to allow userspace to detect duplicated imports, since some GEM
93  * drivers do fail command submissions if a given buffer object is listed more
94  * than once. These import and export caches in &drm_prime_file_private only
95  * retain a weak reference, which is cleaned up when the corresponding object is
96  * released.
97  *
98  * Self-importing: If userspace is using PRIME as a replacement for flink then
99  * it will get a fd->handle request for a GEM object that it created.  Drivers
100  * should detect this situation and return back the underlying object from the
101  * dma-buf private. For GEM based drivers this is handled in
102  * drm_gem_prime_import() already.
103  */
104 
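/*
 * Illustrative sketch (not part of this file): a GEM-based driver would
 * typically wire up the PRIME entry points described above roughly as
 * follows.  The myco_* names are hypothetical stand-ins for driver-specific
 * code.
 *
 *	static struct drm_driver myco_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_import_sg_table = myco_gem_prime_import_sg_table,
 *		.gem_prime_mmap = drm_gem_prime_mmap,
 *	};
 */
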
105 struct drm_prime_member {
106 	struct dma_buf *dma_buf;
107 	uint32_t handle;
108 
109 	struct rb_node dmabuf_rb;
110 	struct rb_node handle_rb;
111 };
112 
113 #ifdef __NetBSD__
114 static int
115 compare_dmabufs(void *cookie, const void *va, const void *vb)
116 {
117 	const struct drm_prime_member *ma = va;
118 	const struct drm_prime_member *mb = vb;
119 
120 	if (ma->dma_buf < mb->dma_buf)
121 		return -1;
122 	if (ma->dma_buf > mb->dma_buf)
123 		return +1;
124 	return 0;
125 }
126 
127 static int
128 compare_dmabuf_key(void *cookie, const void *vm, const void *vk)
129 {
130 	const struct drm_prime_member *m = vm;
131 	const struct dma_buf *const *kp = vk;
132 
133 	if (m->dma_buf < *kp)
134 		return -1;
135 	if (m->dma_buf > *kp)
136 		return +1;
137 	return 0;
138 }
139 
140 static int
141 compare_handles(void *cookie, const void *va, const void *vb)
142 {
143 	const struct drm_prime_member *ma = va;
144 	const struct drm_prime_member *mb = vb;
145 
146 	if (ma->handle < mb->handle)
147 		return -1;
148 	if (ma->handle > mb->handle)
149 		return +1;
150 	return 0;
151 }
152 
153 static int
154 compare_handle_key(void *cookie, const void *vm, const void *vk)
155 {
156 	const struct drm_prime_member *m = vm;
157 	const uint32_t *kp = vk;
158 
159 	if (m->handle < *kp)
160 		return -1;
161 	if (m->handle > *kp)
162 		return +1;
163 	return 0;
164 }
165 
166 static const rb_tree_ops_t dmabuf_ops = {
167 	.rbto_compare_nodes = compare_dmabufs,
168 	.rbto_compare_key = compare_dmabuf_key,
169 	.rbto_node_offset = offsetof(struct drm_prime_member, dmabuf_rb),
170 };
171 
172 static const rb_tree_ops_t handle_ops = {
173 	.rbto_compare_nodes = compare_handles,
174 	.rbto_compare_key = compare_handle_key,
175 	.rbto_node_offset = offsetof(struct drm_prime_member, handle_rb),
176 };
177 #endif
178 
179 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
180 				    struct dma_buf *dma_buf, uint32_t handle)
181 {
182 	struct drm_prime_member *member;
183 #ifdef __NetBSD__
184 	struct drm_prime_member *collision __diagused;
185 #else
186 	struct rb_node **p, *rb;
187 #endif
188 
189 	member = kmalloc(sizeof(*member), GFP_KERNEL);
190 	if (!member)
191 		return -ENOMEM;
192 
193 	get_dma_buf(dma_buf);
194 	member->dma_buf = dma_buf;
195 	member->handle = handle;
196 
197 #ifdef __NetBSD__
198 	collision = rb_tree_insert_node(&prime_fpriv->dmabufs.rbr_tree,
199 	    member);
200 	KASSERT(collision == member);
201 #else
202 	rb = NULL;
203 	p = &prime_fpriv->dmabufs.rb_node;
204 	while (*p) {
205 		struct drm_prime_member *pos;
206 
207 		rb = *p;
208 		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
209 		if (dma_buf > pos->dma_buf)
210 			p = &rb->rb_right;
211 		else
212 			p = &rb->rb_left;
213 	}
214 	rb_link_node(&member->dmabuf_rb, rb, p);
215 	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
216 #endif
217 
218 #ifdef __NetBSD__
219 	collision = rb_tree_insert_node(&prime_fpriv->handles.rbr_tree,
220 	    member);
221 	KASSERT(collision == member);
222 #else
223 	rb = NULL;
224 	p = &prime_fpriv->handles.rb_node;
225 	while (*p) {
226 		struct drm_prime_member *pos;
227 
228 		rb = *p;
229 		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
230 		if (handle > pos->handle)
231 			p = &rb->rb_right;
232 		else
233 			p = &rb->rb_left;
234 	}
235 	rb_link_node(&member->handle_rb, rb, p);
236 	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
237 #endif
238 
239 	return 0;
240 }
241 
242 static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
243 						      uint32_t handle)
244 {
245 #ifdef __NetBSD__
246 	struct drm_prime_member *member;
247 
248 	member = rb_tree_find_node(&prime_fpriv->handles.rbr_tree, &handle);
249 	if (member == NULL)
250 		return NULL;
251 	return member->dma_buf;
252 #else
253 	struct rb_node *rb;
254 
255 	rb = prime_fpriv->handles.rb_node;
256 	while (rb) {
257 		struct drm_prime_member *member;
258 
259 		member = rb_entry(rb, struct drm_prime_member, handle_rb);
260 		if (member->handle == handle)
261 			return member->dma_buf;
262 		else if (member->handle < handle)
263 			rb = rb->rb_right;
264 		else
265 			rb = rb->rb_left;
266 	}
267 
268 	return NULL;
269 #endif
270 }
271 
272 static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
273 				       struct dma_buf *dma_buf,
274 				       uint32_t *handle)
275 {
276 #ifdef __NetBSD__
277 	struct drm_prime_member *member;
278 
279 	member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
280 	if (member == NULL)
281 		return -ENOENT;
282 	*handle = member->handle;
283 	return 0;
284 #else
285 	struct rb_node *rb;
286 
287 	rb = prime_fpriv->dmabufs.rb_node;
288 	while (rb) {
289 		struct drm_prime_member *member;
290 
291 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
292 		if (member->dma_buf == dma_buf) {
293 			*handle = member->handle;
294 			return 0;
295 		} else if (member->dma_buf < dma_buf) {
296 			rb = rb->rb_right;
297 		} else {
298 			rb = rb->rb_left;
299 		}
300 	}
301 
302 	return -ENOENT;
303 #endif
304 }
305 
306 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
307 					struct dma_buf *dma_buf)
308 {
309 #ifdef __NetBSD__
310 	struct drm_prime_member *member;
311 
312 	member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
313 	if (member != NULL) {
314 		rb_tree_remove_node(&prime_fpriv->handles.rbr_tree, member);
315 		rb_tree_remove_node(&prime_fpriv->dmabufs.rbr_tree, member);
316 		dma_buf_put(dma_buf);
317 		kfree(member);
318 	}
319 #else
320 	struct rb_node *rb;
321 
322 	rb = prime_fpriv->dmabufs.rb_node;
323 	while (rb) {
324 		struct drm_prime_member *member;
325 
326 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
327 		if (member->dma_buf == dma_buf) {
328 			rb_erase(&member->handle_rb, &prime_fpriv->handles);
329 			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
330 
331 			dma_buf_put(dma_buf);
332 			kfree(member);
333 			return;
334 		} else if (member->dma_buf < dma_buf) {
335 			rb = rb->rb_right;
336 		} else {
337 			rb = rb->rb_left;
338 		}
339 	}
340 #endif
341 }
342 
343 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
344 {
345 	mutex_init(&prime_fpriv->lock);
346 #ifdef __NetBSD__
347 	rb_tree_init(&prime_fpriv->dmabufs.rbr_tree, &dmabuf_ops);
348 	rb_tree_init(&prime_fpriv->handles.rbr_tree, &handle_ops);
349 #else
350 	prime_fpriv->dmabufs = RB_ROOT;
351 	prime_fpriv->handles = RB_ROOT;
352 #endif
353 }
354 
355 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
356 {
357 	mutex_destroy(&prime_fpriv->lock);
358 	/* by now drm_gem_release should've made sure the list is empty */
359 	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
360 	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->handles));
361 }
362 
363 /**
364  * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
365  * @dev: parent device for the exported dmabuf
366  * @exp_info: the export information used by dma_buf_export()
367  *
368  * This wraps dma_buf_export() for use by generic GEM drivers that are using
369  * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
370  * a reference to the &drm_device and the exported &drm_gem_object (stored in
371  * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
372  *
373  * Returns the new dmabuf.
374  */
375 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
376 				      struct dma_buf_export_info *exp_info)
377 {
378 	struct drm_gem_object *obj = exp_info->priv;
379 	struct dma_buf *dma_buf;
380 
381 	dma_buf = dma_buf_export(exp_info);
382 	if (IS_ERR(dma_buf))
383 		return dma_buf;
384 
385 	drm_dev_get(dev);
386 	drm_gem_object_get(obj);
387 #ifndef __NetBSD__		/* XXX dmabuf share */
388 	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
389 #endif
390 
391 	return dma_buf;
392 }
393 EXPORT_SYMBOL(drm_gem_dmabuf_export);
394 
395 /**
396  * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
397  * @dma_buf: buffer to be released
398  *
399  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
400  * must use this in their &dma_buf_ops structure as the release callback.
401  * drm_gem_dmabuf_release() should be used in conjunction with
402  * drm_gem_dmabuf_export().
403  */
404 void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
405 {
406 	struct drm_gem_object *obj = dma_buf->priv;
407 	struct drm_device *dev = obj->dev;
408 
409 	/* drop the reference on the export fd holds */
410 	drm_gem_object_put_unlocked(obj);
411 
412 	drm_dev_put(dev);
413 }
414 EXPORT_SYMBOL(drm_gem_dmabuf_release);
415 
416 /**
417  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
418  * @dev: dev to export the buffer from
419  * @file_priv: drm file-private structure
420  * @prime_fd: fd id of the dma-buf which should be imported
421  * @handle: pointer to storage for the handle of the imported buffer object
422  *
423  * This is the PRIME import function which GEM drivers must use to ensure
424  * correct lifetime management of the underlying GEM object.
425  * The actual importing of GEM object from the dma-buf is done through the
426  * &drm_driver.gem_prime_import driver callback.
427  *
428  * Returns 0 on success or a negative error code on failure.
429  */
430 int drm_gem_prime_fd_to_handle(struct drm_device *dev,
431 			       struct drm_file *file_priv, int prime_fd,
432 			       uint32_t *handle)
433 {
434 	struct dma_buf *dma_buf;
435 	struct drm_gem_object *obj;
436 	int ret;
437 
438 	dma_buf = dma_buf_get(prime_fd);
439 	if (IS_ERR(dma_buf))
440 		return PTR_ERR(dma_buf);
441 
442 	mutex_lock(&file_priv->prime.lock);
443 
444 	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
445 			dma_buf, handle);
446 	if (ret == 0)
447 		goto out_put;
448 
449 	/* never seen this one, need to import */
450 	mutex_lock(&dev->object_name_lock);
451 	if (dev->driver->gem_prime_import)
452 		obj = dev->driver->gem_prime_import(dev, dma_buf);
453 	else
454 		obj = drm_gem_prime_import(dev, dma_buf);
455 	if (IS_ERR(obj)) {
456 		ret = PTR_ERR(obj);
457 		goto out_unlock;
458 	}
459 
460 	if (obj->dma_buf) {
461 		WARN_ON(obj->dma_buf != dma_buf);
462 	} else {
463 		obj->dma_buf = dma_buf;
464 		get_dma_buf(dma_buf);
465 	}
466 
467 	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
468 	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
469 	drm_gem_object_put_unlocked(obj);
470 	if (ret)
471 		goto out_put;
472 
473 	ret = drm_prime_add_buf_handle(&file_priv->prime,
474 			dma_buf, *handle);
475 	mutex_unlock(&file_priv->prime.lock);
476 	if (ret)
477 		goto fail;
478 
479 	dma_buf_put(dma_buf);
480 
481 	return 0;
482 
483 fail:
484 	/* hmm, if driver attached, we are relying on the free-object path
485 	 * to detach.. which seems ok..
486 	 */
487 	drm_gem_handle_delete(file_priv, *handle);
488 	dma_buf_put(dma_buf);
489 	return ret;
490 
491 out_unlock:
492 	mutex_unlock(&dev->object_name_lock);
493 out_put:
494 	mutex_unlock(&file_priv->prime.lock);
495 	dma_buf_put(dma_buf);
496 	return ret;
497 }
498 EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
499 
500 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
501 				 struct drm_file *file_priv)
502 {
503 	struct drm_prime_handle *args = data;
504 
505 	if (!dev->driver->prime_fd_to_handle)
506 		return -ENOSYS;
507 
508 	return dev->driver->prime_fd_to_handle(dev, file_priv,
509 			args->fd, &args->handle);
510 }
511 
512 static struct dma_buf *export_and_register_object(struct drm_device *dev,
513 						  struct drm_gem_object *obj,
514 						  uint32_t flags)
515 {
516 	struct dma_buf *dmabuf;
517 
518 	/* prevent races with concurrent gem_close. */
519 	if (obj->handle_count == 0) {
520 		dmabuf = ERR_PTR(-ENOENT);
521 		return dmabuf;
522 	}
523 
524 	if (obj->funcs && obj->funcs->export)
525 		dmabuf = obj->funcs->export(obj, flags);
526 	else if (dev->driver->gem_prime_export)
527 		dmabuf = dev->driver->gem_prime_export(obj, flags);
528 	else
529 		dmabuf = drm_gem_prime_export(obj, flags);
530 	if (IS_ERR(dmabuf)) {
531 		/* normally the created dma-buf takes ownership of the ref,
532 		 * but if that fails then drop the ref
533 		 */
534 		return dmabuf;
535 	}
536 
537 	/*
538 	 * Note that callers do not need to clean up the export cache
539 	 * since the check for obj->handle_count guarantees that someone
540 	 * will clean it up.
541 	 */
542 	obj->dma_buf = dmabuf;
543 	get_dma_buf(obj->dma_buf);
544 
545 	return dmabuf;
546 }
547 
548 /**
549  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
550  * @dev: dev to export the buffer from
551  * @file_priv: drm file-private structure
552  * @handle: buffer handle to export
553  * @flags: flags like DRM_CLOEXEC
554  * @prime_fd: pointer to storage for the fd id of the created dma-buf
555  *
556  * This is the PRIME export function which GEM drivers must use to ensure
557  * correct lifetime management of the underlying GEM object.
558  * The actual exporting from GEM object to a dma-buf is done through the
559  * &drm_driver.gem_prime_export driver callback.
560  */
561 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
562 			       struct drm_file *file_priv, uint32_t handle,
563 			       uint32_t flags,
564 			       int *prime_fd)
565 {
566 	struct drm_gem_object *obj;
567 	int ret = 0;
568 	struct dma_buf *dmabuf;
569 
570 	mutex_lock(&file_priv->prime.lock);
571 	obj = drm_gem_object_lookup(file_priv, handle);
572 	if (!obj)  {
573 		ret = -ENOENT;
574 		goto out_unlock;
575 	}
576 
577 	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
578 	if (dmabuf) {
579 		get_dma_buf(dmabuf);
580 		goto out_have_handle;
581 	}
582 
583 	mutex_lock(&dev->object_name_lock);
584 	/* re-export the original imported object */
585 	if (obj->import_attach) {
586 		dmabuf = obj->import_attach->dmabuf;
587 		get_dma_buf(dmabuf);
588 		goto out_have_obj;
589 	}
590 
591 	if (obj->dma_buf) {
592 		get_dma_buf(obj->dma_buf);
593 		dmabuf = obj->dma_buf;
594 		goto out_have_obj;
595 	}
596 
597 	dmabuf = export_and_register_object(dev, obj, flags);
598 	if (IS_ERR(dmabuf)) {
599 		/* normally the created dma-buf takes ownership of the ref,
600 		 * but if that fails then drop the ref
601 		 */
602 		ret = PTR_ERR(dmabuf);
603 		mutex_unlock(&dev->object_name_lock);
604 		goto out;
605 	}
606 
607 out_have_obj:
608 	/*
609 	 * If we've exported this buffer then cheat and add it to the import list
610 	 * so we get the correct handle back. We must do this under the
611 	 * protection of dev->object_name_lock to ensure that a racing gem close
612  * ioctl doesn't fail to remove this buffer handle from the cache.
613 	 */
614 	ret = drm_prime_add_buf_handle(&file_priv->prime,
615 				       dmabuf, handle);
616 	mutex_unlock(&dev->object_name_lock);
617 	if (ret)
618 		goto fail_put_dmabuf;
619 
620 out_have_handle:
621 	ret = dma_buf_fd(dmabuf, flags);
622 	/*
623 	 * We must _not_ remove the buffer from the handle cache since the newly
624 	 * created dma buf is already linked in the global obj->dma_buf pointer,
625 	 * and that is invariant as long as a userspace gem handle exists.
626 	 * Closing the handle will clean out the cache anyway, so we don't leak.
627 	 */
628 	if (ret < 0) {
629 		goto fail_put_dmabuf;
630 	} else {
631 		*prime_fd = ret;
632 		ret = 0;
633 	}
634 
635 	goto out;
636 
637 fail_put_dmabuf:
638 	dma_buf_put(dmabuf);
639 out:
640 	drm_gem_object_put_unlocked(obj);
641 out_unlock:
642 	mutex_unlock(&file_priv->prime.lock);
643 
644 	return ret;
645 }
646 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
647 
648 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
649 				 struct drm_file *file_priv)
650 {
651 	struct drm_prime_handle *args = data;
652 
653 	if (!dev->driver->prime_handle_to_fd)
654 		return -ENOSYS;
655 
656 	/* check flags are valid */
657 	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
658 		return -EINVAL;
659 
660 	return dev->driver->prime_handle_to_fd(dev, file_priv,
661 			args->handle, args->flags, &args->fd);
662 }
663 
664 /**
665  * DOC: PRIME Helpers
666  *
667  * Drivers can implement &drm_gem_object_funcs.export and
668  * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
669  * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
670  * implement dma-buf support in terms of some lower-level helpers, which are
671  * again exported for drivers to use individually:
672  *
673  * Exporting buffers
674  * ~~~~~~~~~~~~~~~~~
675  *
676  * Optional pinning of buffers is handled at dma-buf attach and detach time in
677  * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
678  * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
679  * &drm_gem_object_funcs.get_sg_table.
680  *
681  * For kernel-internal access there's drm_gem_dmabuf_vmap() and
682  * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
683  * drm_gem_dmabuf_mmap().
684  *
685  * Note that these export helpers can only be used if the underlying backing
686  * storage is fully coherent and either permanently pinned, or it is safe to pin
687  * it indefinitely.
688  *
689  * FIXME: The underlying helper functions are named rather inconsistently.
690  *
691  * Importing buffers
692  * ~~~~~~~~~~~~~~~~~
693  *
694  * Importing dma-bufs using drm_gem_prime_import() relies on
695  * &drm_driver.gem_prime_import_sg_table.
696  *
697  * Note that, similarly to the export helpers, this permanently pins the
698  * underlying backing storage, which is ok for scanout but is not the best
699  * option for sharing lots of buffers for rendering.
700  */
701 
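/*
 * Illustrative sketch (not part of this file): because the helpers below are
 * exported individually, a driver that only needs to override one hook can
 * assemble its own &dma_buf_ops from them; myco_dmabuf_mmap() is a
 * hypothetical driver-specific mmap implementation.
 *
 *	static const struct dma_buf_ops myco_dmabuf_ops = {
 *		.cache_sgt_mapping = true,
 *		.attach = drm_gem_map_attach,
 *		.detach = drm_gem_map_detach,
 *		.map_dma_buf = drm_gem_map_dma_buf,
 *		.unmap_dma_buf = drm_gem_unmap_dma_buf,
 *		.release = drm_gem_dmabuf_release,
 *		.mmap = myco_dmabuf_mmap,
 *		.vmap = drm_gem_dmabuf_vmap,
 *		.vunmap = drm_gem_dmabuf_vunmap,
 *	};
 */
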
702 /**
703  * drm_gem_map_attach - dma_buf attach implementation for GEM
704  * @dma_buf: buffer to attach device to
705  * @attach: buffer attachment data
706  *
707  * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
708  * used as the &dma_buf_ops.attach callback. Must be used together with
709  * drm_gem_map_detach().
710  *
711  * Returns 0 on success, negative error code on failure.
712  */
713 int drm_gem_map_attach(struct dma_buf *dma_buf,
714 		       struct dma_buf_attachment *attach)
715 {
716 	struct drm_gem_object *obj = dma_buf->priv;
717 
718 	return drm_gem_pin(obj);
719 }
720 EXPORT_SYMBOL(drm_gem_map_attach);
721 
722 /**
723  * drm_gem_map_detach - dma_buf detach implementation for GEM
724  * @dma_buf: buffer to detach from
725  * @attach: attachment to be detached
726  *
727  * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
728  * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
729  * &dma_buf_ops.detach callback.
730  */
731 void drm_gem_map_detach(struct dma_buf *dma_buf,
732 			struct dma_buf_attachment *attach)
733 {
734 	struct drm_gem_object *obj = dma_buf->priv;
735 
736 	drm_gem_unpin(obj);
737 }
738 EXPORT_SYMBOL(drm_gem_map_detach);
739 
740 /**
741  * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
742  * @attach: attachment whose scatterlist is to be returned
743  * @dir: direction of DMA transfer
744  *
745  * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
746  * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
747  * with drm_gem_unmap_dma_buf().
748  *
749  * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
750  * on error. May return -EINTR if it is interrupted by a signal.
751  */
752 struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
753 				     enum dma_data_direction dir)
754 {
755 	struct drm_gem_object *obj = attach->dmabuf->priv;
756 	struct sg_table *sgt;
757 
758 	if (WARN_ON(dir == DMA_NONE))
759 		return ERR_PTR(-EINVAL);
760 
761 	if (obj->funcs)
762 		sgt = obj->funcs->get_sg_table(obj);
763 	else
764 		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
765 
766 	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
767 			      DMA_ATTR_SKIP_CPU_SYNC)) {
768 		sg_free_table(sgt);
769 		kfree(sgt);
770 		sgt = ERR_PTR(-ENOMEM);
771 	}
772 
773 	return sgt;
774 }
775 EXPORT_SYMBOL(drm_gem_map_dma_buf);
776 
777 /**
778  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
779  * @attach: attachment to unmap buffer from
780  * @sgt: scatterlist info of the buffer to unmap
781  * @dir: direction of DMA transfer
782  *
783  * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
784  */
785 void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
786 			   struct sg_table *sgt,
787 			   enum dma_data_direction dir)
788 {
789 	if (!sgt)
790 		return;
791 
792 	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
793 			   DMA_ATTR_SKIP_CPU_SYNC);
794 	sg_free_table(sgt);
795 	kfree(sgt);
796 }
797 EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
798 
799 /**
800  * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
801  * @dma_buf: buffer to be mapped
802  *
803  * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
804  * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
805  *
806  * Returns the kernel virtual address or NULL on failure.
807  */
808 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
809 {
810 	struct drm_gem_object *obj = dma_buf->priv;
811 	void *vaddr;
812 
813 	vaddr = drm_gem_vmap(obj);
814 	if (IS_ERR(vaddr))
815 		vaddr = NULL;
816 
817 	return vaddr;
818 }
819 EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
820 
821 /**
822  * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
823  * @dma_buf: buffer to be unmapped
824  * @vaddr: the virtual address of the buffer
825  *
826  * Releases a kernel virtual mapping. This can be used as the
827  * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
828  */
829 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
830 {
831 	struct drm_gem_object *obj = dma_buf->priv;
832 
833 	drm_gem_vunmap(obj, vaddr);
834 }
835 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
836 
837 /**
838  * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
839  * @obj: GEM object
840  * @vma: Virtual address range
841  *
842  * This function sets up a userspace mapping for PRIME exported buffers using
843  * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
844  * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
845  * called to set up the mapping.
846  *
847  * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
848  */
849 #ifdef __NetBSD__
850 int drm_gem_prime_mmap(struct drm_gem_object *obj, off_t *offp, size_t size,
851     int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
852     int *maxprotp)
853 #else
854 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
855 #endif
856 {
857 	struct drm_file *priv;
858 	struct file *fil;
859 	int ret;
860 
861 	/* Add the fake offset */
862 #ifdef __NetBSD__
863 	*offp += drm_vma_node_start(&obj->vma_node);
864 #else
865 	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
866 #endif
867 
868 	if (obj->funcs && obj->funcs->mmap) {
869 #ifdef __NetBSD__
870 		ret = obj->funcs->mmap(obj, offp, size, prot, flagsp, advicep,
871 		    uobjp, maxprotp);
872 #else
873 		ret = obj->funcs->mmap(obj, vma);
874 #endif
875 		if (ret)
876 			return ret;
877 #ifndef __NetBSD__
878 		vma->vm_private_data = obj;
879 #endif
880 		drm_gem_object_get(obj);
881 		return 0;
882 	}
883 
884 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
885 	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
886 	if (!priv || !fil) {
887 		ret = -ENOMEM;
888 		goto out;
889 	}
890 
891 	/* Used by drm_gem_mmap() to lookup the GEM object */
892 	priv->minor = obj->dev->primary;
893 #ifdef __NetBSD__
894 	fil->f_data = priv;
895 #else
896 	fil->private_data = priv;
897 #endif
898 
899 	ret = drm_vma_node_allow(&obj->vma_node, priv);
900 	if (ret)
901 		goto out;
902 
903 #ifdef __NetBSD__
904 	KASSERT(size > 0);
905 	ret = obj->dev->driver->mmap_object(obj->dev, *offp, size, prot, uobjp,
906 	    offp, fil);
907 #else
908 	ret = obj->dev->driver->fops->mmap(fil, vma);
909 #endif
910 
911 	drm_vma_node_revoke(&obj->vma_node, priv);
912 out:
913 	kfree(priv);
914 	kfree(fil);
915 
916 	return ret;
917 }
918 EXPORT_SYMBOL(drm_gem_prime_mmap);
919 
920 /**
921  * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
922  * @dma_buf: buffer to be mapped
923  * @vma: virtual address range
924  *
925  * Provides memory mapping for the buffer. This can be used as the
926  * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
927  * which should be set to drm_gem_prime_mmap().
928  *
929  * FIXME: There's really no point to this wrapper, drivers which need anything
930  * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
931  *
932  * Returns 0 on success or a negative error code on failure.
933  */
934 #ifdef __NetBSD__
935 int
936 drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
937     int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
938     int *maxprotp)
939 #else
940 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
941 #endif
942 {
943 	struct drm_gem_object *obj = dma_buf->priv;
944 	struct drm_device *dev = obj->dev;
945 
946 	if (!dev->driver->gem_prime_mmap)
947 		return -ENOSYS;
948 
949 #ifdef __NetBSD__
950 	KASSERT(size > 0);
951 	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
952 	    advicep, uobjp, maxprotp);
953 #else
954 	return dev->driver->gem_prime_mmap(obj, vma);
955 #endif
956 }
957 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
958 
959 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
960 	.cache_sgt_mapping = true,
961 	.attach = drm_gem_map_attach,
962 	.detach = drm_gem_map_detach,
963 	.map_dma_buf = drm_gem_map_dma_buf,
964 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
965 	.release = drm_gem_dmabuf_release,
966 	.mmap = drm_gem_dmabuf_mmap,
967 	.vmap = drm_gem_dmabuf_vmap,
968 	.vunmap = drm_gem_dmabuf_vunmap,
969 };
970 
971 /**
972  * drm_prime_pages_to_sg - converts a page array into an sg list
973  * @pages: pointer to the array of page pointers to convert
974  * @nr_pages: length of the page vector
975  *
976  * This helper creates an sg table object from a set of pages; the driver
977  * is responsible for mapping the pages into the importer's address space
978  * for use with dma_buf itself.
979  *
980  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
981  */
982 struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
983 {
984 	struct sg_table *sg = NULL;
985 	int ret;
986 
987 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
988 	if (!sg) {
989 		ret = -ENOMEM;
990 		goto out;
991 	}
992 
993 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
994 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
995 	if (ret)
996 		goto out;
997 
998 	return sg;
999 out:
1000 	kfree(sg);
1001 	return ERR_PTR(ret);
1002 }
1003 EXPORT_SYMBOL(drm_prime_pages_to_sg);
1004 
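/*
 * Illustrative sketch (not part of this file): a driver whose buffer object
 * keeps a page array (the hypothetical struct myco_bo below) might implement
 * &drm_gem_object_funcs.get_sg_table on top of drm_prime_pages_to_sg()
 * roughly like this.
 *
 *	static struct sg_table *
 *	myco_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct myco_bo *bo = container_of(obj, struct myco_bo, base);
 *
 *		return drm_prime_pages_to_sg(bo->pages,
 *		    obj->size >> PAGE_SHIFT);
 *	}
 */
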
1005 /**
1006  * drm_gem_prime_export - helper library implementation of the export callback
1007  * @obj: GEM object to export
1008  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
1009  *
1010  * This is the implementation of the &drm_gem_object_funcs.export function for GEM drivers
1011  * using the PRIME helpers. It is used as the default in
1012  * drm_gem_prime_handle_to_fd().
1013  */
1014 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
1015 				     int flags)
1016 {
1017 	struct drm_device *dev = obj->dev;
1018 	struct dma_buf_export_info exp_info = {
1019 #ifndef __NetBSD__
1020 		.exp_name = KBUILD_MODNAME, /* white lie for debug */
1021 		.owner = dev->driver->fops->owner,
1022 #endif
1023 		.ops = &drm_gem_prime_dmabuf_ops,
1024 		.size = obj->size,
1025 		.flags = flags,
1026 		.priv = obj,
1027 		.resv = obj->resv,
1028 	};
1029 
1030 	return drm_gem_dmabuf_export(dev, &exp_info);
1031 }
1032 EXPORT_SYMBOL(drm_gem_prime_export);
1033 
1034 /**
1035  * drm_gem_prime_import_dev - core implementation of the import callback
1036  * @dev: drm_device to import into
1037  * @dma_buf: dma-buf object to import
1038  * @attach_dev: struct device to dma_buf attach
1039  *
1040  * This is the core of drm_gem_prime_import(). It's designed to be called by
1041  * drivers who want to use a different device structure than &drm_device.dev for
1042  * attaching via dma_buf. This function calls
1043  * &drm_driver.gem_prime_import_sg_table internally.
1044  *
1045  * Drivers must arrange to call drm_prime_gem_destroy() from their
1046  * &drm_gem_object_funcs.free hook when using this function.
1047  */
1048 #ifdef __NetBSD__
1049 struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
1050 					    struct dma_buf *dma_buf,
1051 					    bus_dma_tag_t attach_dev)
1052 #else
1053 struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
1054 					    struct dma_buf *dma_buf,
1055 					    struct device *attach_dev)
1056 #endif
1057 {
1058 	struct dma_buf_attachment *attach;
1059 	struct sg_table *sgt;
1060 	struct drm_gem_object *obj;
1061 	int ret;
1062 
1063 	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
1064 		obj = dma_buf->priv;
1065 		if (obj->dev == dev) {
1066 			/*
1067 			 * Importing dmabuf exported from our own gem increases
1068 			 * refcount on gem itself instead of f_count of dmabuf.
1069 			 */
1070 			drm_gem_object_get(obj);
1071 			return obj;
1072 		}
1073 	}
1074 
1075 	if (!dev->driver->gem_prime_import_sg_table)
1076 		return ERR_PTR(-EINVAL);
1077 
1078 	attach = dma_buf_attach(dma_buf, attach_dev);
1079 	if (IS_ERR(attach))
1080 		return ERR_CAST(attach);
1081 
1082 	get_dma_buf(dma_buf);
1083 
1084 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1085 	if (IS_ERR(sgt)) {
1086 		ret = PTR_ERR(sgt);
1087 		goto fail_detach;
1088 	}
1089 
1090 	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
1091 	if (IS_ERR(obj)) {
1092 		ret = PTR_ERR(obj);
1093 		goto fail_unmap;
1094 	}
1095 
1096 	obj->import_attach = attach;
1097 	obj->resv = dma_buf->resv;
1098 
1099 	return obj;
1100 
1101 fail_unmap:
1102 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
1103 fail_detach:
1104 	dma_buf_detach(dma_buf, attach);
1105 	dma_buf_put(dma_buf);
1106 
1107 	return ERR_PTR(ret);
1108 }
1109 EXPORT_SYMBOL(drm_gem_prime_import_dev);
1110 
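/*
 * Illustrative sketch (not part of this file, following the Linux signature
 * above): a driver that performs DMA through a device other than
 * &drm_device.dev could wrap drm_gem_prime_import_dev() as its
 * &drm_driver.gem_prime_import hook; to_myco_device() and its dma_dev
 * member are hypothetical.
 *
 *	static struct drm_gem_object *
 *	myco_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct myco_device *myco = to_myco_device(dev);
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf, myco->dma_dev);
 *	}
 */
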
1111 /**
1112  * drm_gem_prime_import - helper library implementation of the import callback
1113  * @dev: drm_device to import into
1114  * @dma_buf: dma-buf object to import
1115  *
1116  * This is the implementation of the gem_prime_import functions for GEM drivers
1117  * using the PRIME helpers. Drivers can use this as their
1118  * &drm_driver.gem_prime_import implementation. It is used as the default
1119  * implementation in drm_gem_prime_fd_to_handle().
1120  *
1121  * Drivers must arrange to call drm_prime_gem_destroy() from their
1122  * &drm_gem_object_funcs.free hook when using this function.
1123  */
1124 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1125 					    struct dma_buf *dma_buf)
1126 {
1127 #ifdef __NetBSD__
1128 	return drm_gem_prime_import_dev(dev, dma_buf, dev->dmat);
1129 #else
1130 	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
1131 #endif
1132 }
1133 EXPORT_SYMBOL(drm_gem_prime_import);
1134 
1135 #ifdef __NetBSD__
1136 
1137 struct sg_table *
1138 drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
1139     int nsegs)
1140 {
1141 	struct sg_table *sg;
1142 	int ret;
1143 
1144 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1145 	if (sg == NULL) {
1146 		ret = -ENOMEM;
1147 		goto out;
1148 	}
1149 
1150 	ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
1151 	    GFP_KERNEL);
1152 	if (ret)
1153 		goto out;
1154 
1155 	return sg;
1156 out:
1157 	kfree(sg);
1158 	return ERR_PTR(ret);
1159 }
1160 
1161 bus_size_t
1162 drm_prime_sg_size(struct sg_table *sg)
1163 {
1164 
1165 	return sg->sgl->sg_npgs << PAGE_SHIFT;
1166 }
1167 
1168 void
1169 drm_prime_sg_free(struct sg_table *sg)
1170 {
1171 
1172 	sg_free_table(sg);
1173 	kfree(sg);
1174 }
1175 
1176 int
1177 drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
1178     int nsegs, int *rsegs, const struct sg_table *sgt)
1179 {
1180 
1181 	/* XXX errno NetBSD->Linux */
1182 	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs,
1183 	    sgt->sgl->sg_pgs, sgt->sgl->sg_npgs);
1184 }
1185 
1186 int
1187 drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
1188     struct sg_table *sgt)
1189 {
1190 	bus_dma_segment_t *segs;
1191 	bus_size_t size = drm_prime_sg_size(sgt);
1192 	int nsegs = sgt->sgl->sg_npgs;
1193 	int ret;
1194 
1195 	segs = kcalloc(sgt->sgl->sg_npgs, sizeof(segs[0]), GFP_KERNEL);
1196 	if (segs == NULL) {
1197 		ret = -ENOMEM;
1198 		goto out0;
1199 	}
1200 
1201 	ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
1202 	if (ret)
1203 		goto out1;
1204 	KASSERT(nsegs <= sgt->sgl->sg_npgs);
1205 
1206 	/* XXX errno NetBSD->Linux */
1207 	ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
1208 	    BUS_DMA_NOWAIT);
1209 	if (ret)
1210 		goto out1;
1211 
1212 out1:	kfree(segs);
1213 out0:	return ret;
1214 }
1215 
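/*
 * Illustrative sketch (not part of this file): a NetBSD driver that has
 * mapped a dma-buf attachment to an sg_table might load it into a bus DMA
 * map roughly like this, assuming the buffer size is a multiple of
 * PAGE_SIZE; error handling beyond the return value is omitted.
 *
 *	bus_size_t size = drm_prime_sg_size(sgt);
 *	bus_dmamap_t map;
 *	int ret;
 *
 *	(XXX errno NetBSD->Linux)
 *	ret = -bus_dmamap_create(dmat, size, size >> PAGE_SHIFT, size, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	if (ret == 0)
 *		ret = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
 */
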
1216 bool
1217 drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
1218 {
1219 	unsigned i;
1220 
1221 	for (i = 0; i < sgt->sgl->sg_npgs; i++) {
1222 		if (bus_dmatag_bounces_paddr(dmat,
1223 			VM_PAGE_TO_PHYS(&sgt->sgl->sg_pgs[i]->p_vmp)))
1224 			return false;
1225 	}
1226 	return true;
1227 }
1228 
1229 #else  /* !__NetBSD__ */
1230 
1231 /**
1232  * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
1233  * @sgt: scatter-gather table to convert
1234  * @pages: optional array of page pointers to store the page array in
1235  * @addrs: optional array to store the dma bus address of each page
1236  * @max_entries: size of both the passed-in arrays
1237  *
1238  * Exports an sg table into an array of pages and addresses. This is currently
1239  * required by the TTM driver in order to do correct fault handling.
1240  *
1241  * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
1242  * implementation.
1243  */
1244 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
1245 				     dma_addr_t *addrs, int max_entries)
1246 {
1247 	unsigned count;
1248 	struct scatterlist *sg;
1249 	struct page *page;
1250 	u32 len, index;
1251 	dma_addr_t addr;
1252 
1253 	index = 0;
1254 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
1255 		len = sg->length;
1256 		page = sg_page(sg);
1257 		addr = sg_dma_address(sg);
1258 
1259 		while (len > 0) {
1260 			if (WARN_ON(index >= max_entries))
1261 				return -1;
1262 			if (pages)
1263 				pages[index] = page;
1264 			if (addrs)
1265 				addrs[index] = addr;
1266 
1267 			page++;
1268 			addr += PAGE_SIZE;
1269 			len -= PAGE_SIZE;
1270 			index++;
1271 		}
1272 	}
1273 	return 0;
1274 }
1275 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
1276 
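/*
 * Illustrative sketch (not part of this file, Linux-side only): a
 * hypothetical &drm_driver.gem_prime_import_sg_table implementation that
 * only needs the page array could use the helper above roughly like this;
 * myco_bo_create_from_pages() stands in for driver-specific object
 * construction.
 *
 *	static struct drm_gem_object *
 *	myco_gem_prime_import_sg_table(struct drm_device *dev,
 *	    struct dma_buf_attachment *attach, struct sg_table *sgt)
 *	{
 *		unsigned npages = attach->dmabuf->size >> PAGE_SHIFT;
 *		struct drm_gem_object *obj;
 *		struct page **pages;
 *
 *		pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
 *		if (!pages)
 *			return ERR_PTR(-ENOMEM);
 *		if (drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
 *			npages)) {
 *			kvfree(pages);
 *			return ERR_PTR(-EINVAL);
 *		}
 *		obj = myco_bo_create_from_pages(dev, pages, npages);
 *		kvfree(pages);
 *		return obj;
 *	}
 */
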
1277 #endif	/* __NetBSD__ */
1278 
1279 /**
1280  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
1281  * @obj: GEM object which was created from a dma-buf
1282  * @sg: the sg-table which was pinned at import time
1283  *
1284  * This is the cleanup functions which GEM drivers need to call when they use
1285  * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
1286  */
1287 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
1288 {
1289 	struct dma_buf_attachment *attach;
1290 	struct dma_buf *dma_buf;
1291 	attach = obj->import_attach;
1292 	if (sg)
1293 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
1294 	dma_buf = attach->dmabuf;
1295 	dma_buf_detach(attach->dmabuf, attach);
1296 	/* remove the reference */
1297 	dma_buf_put(dma_buf);
1298 }
1299 EXPORT_SYMBOL(drm_prime_gem_destroy);
1300
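/*
 * Illustrative sketch (not part of this file): a driver's
 * &drm_gem_object_funcs.free hook for objects that may have been imported
 * via drm_gem_prime_import() would call drm_prime_gem_destroy() roughly
 * like this; struct myco_bo and its sgt member are hypothetical.
 *
 *	static void myco_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct myco_bo *bo = container_of(obj, struct myco_bo, base);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */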