/*	$NetBSD: ttm_object.c,v 1.3 2022/02/17 01:21:02 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 *	Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_object.c
 *
 * Base- and reference-object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_object.c,v 1.3 2022/02/17 01:21:02 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include "ttm_object.h"

#include <linux/nbsd-namespace.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	spinlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	size_t dma_buf_size;
	struct idr idr;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}
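
/*
 * Note on the *_unref(&ptr) idiom used throughout this file: the caller's
 * pointer is cleared before the refcount is dropped, so a stale pointer
 * can never be used after the final put. A minimal sketch (hypothetical
 * caller, not part of this file):
 *
 *	struct ttm_object_file *tfile = ttm_object_file_ref(base->tfile);
 *	... use tfile ...
 *	ttm_object_file_unref(&tfile);	// tfile is NULL from here on
 */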


int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	idr_preload(GFP_KERNEL);
	spin_lock(&tdev->object_lock);
	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
	spin_unlock(&tdev->object_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	base->handle = ret;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
	return ret;
}
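
/*
 * Sketch of typical driver usage (the mydrv_* names are hypothetical, not
 * part of this file): a driver embeds a struct ttm_base_object in its own
 * object and initializes it here. On success the object is reachable via
 * base->handle; the reference that keeps it alive is the TTM_REF_USAGE ref
 * just added, since the initial kref was already dropped above:
 *
 *	struct mydrv_object {
 *		struct ttm_base_object base;
 *		...
 *	};
 *
 *	static void mydrv_release(struct ttm_base_object **p_base)
 *	{
 *		struct mydrv_object *obj =
 *			container_of(*p_base, struct mydrv_object, base);
 *
 *		*p_base = NULL;
 *		ttm_base_object_kfree(obj, base);	// RCU-deferred free
 *	}
 *
 *	ret = ttm_base_object_init(tfile, &obj->base, true,
 *				   mydrv_object_type, &mydrv_release, NULL);
 */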

static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}

/**
 * ttm_base_object_noref_lookup - look up a base object without reference
 * @tfile: The struct ttm_object_file the object is registered with.
 * @key: The object handle.
 *
 * This function looks up a ttm base object and returns a pointer to it
 * without refcounting the pointer. The returned pointer is only valid
 * until ttm_base_object_noref_release() is called, and the object
 * pointed to by the returned pointer may be doomed. Any persistent usage
 * of the object requires a refcount to be taken using kref_get_unless_zero().
 * Iff this function returns successfully, it must be paired with
 * ttm_base_object_noref_release(), and no sleeping or scheduling functions
 * may be called in between these two calls.
 *
 * Return: A pointer to the object if successful or NULL otherwise.
 */
struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);
	if (ret) {
		rcu_read_unlock();
		return NULL;
	}

	__release(RCU);
	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
}
EXPORT_SYMBOL(ttm_base_object_noref_lookup);
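
/*
 * Sketch of the required pairing (hypothetical caller; see the kerneldoc
 * above): a successful lookup leaves the RCU read-side critical section
 * open, so the caller must not sleep and must balance the lookup with
 * ttm_base_object_noref_release():
 *
 *	struct ttm_base_object *base;
 *
 *	base = ttm_base_object_noref_lookup(tfile, handle);
 *	if (base) {
 *		// Persistent usage would require kref_get_unless_zero()
 *		// on &base->refcount before the release below.
 *		... short, non-sleeping inspection of base ...
 *		ttm_base_object_noref_release();
 *	}
 */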

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
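
/*
 * Unlike the noref variant above, this lookup hands back a refcounted
 * pointer. A minimal usage sketch (hypothetical caller):
 *
 *	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
 *
 *	if (base == NULL)
 *		return -ENOENT;
 *	... use base; sleeping is fine here ...
 *	ttm_base_object_unref(&base);
 */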

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base;

	rcu_read_lock();
	base = idr_find(&tdev->idr, key);

	if (base && !kref_get_unless_zero(&base->refcount))
		base = NULL;
	rcu_read_unlock();

	return base;
}

/**
 * ttm_ref_object_exists - Check whether a caller holds a valid ref object
 * on (i.e. has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
			   struct ttm_base_object *base)
{
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	struct drm_hash_item *hash;
	struct ttm_ref_object *ref;

	rcu_read_lock();
	if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
		goto out_false;

	/*
	 * Verify that the ref object is really pointing to our base object.
	 * Our base object could actually be dead, and the ref object pointing
	 * to another base object with the same handle.
	 */
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	if (unlikely(base != ref->obj))
		goto out_false;

	/*
	 * Verify that the ref->obj pointer was actually valid!
	 */
	rmb();
	if (unlikely(kref_read(&ref->kref) == 0))
		goto out_false;

	rcu_read_unlock();
	return true;

out_false:
	rcu_read_unlock();
	return false;
}

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed,
		       bool require_existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret = -EINVAL;

	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = drm_ht_find_item_rcu(ht, base->handle, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   &ctx);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->handle;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		ret = drm_ht_insert_item_rcu(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			spin_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		spin_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
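
/*
 * Sketch of how a caller might use the @existed / @require_existed
 * parameters (hypothetical caller): @existed reports whether a ref object
 * of this type was already present, and @require_existed refuses to
 * create a new one, which is useful for merely validating prior access:
 *
 *	bool existed;
 *
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed, false);
 *	if (ret == 0 && !existed)
 *		... first USAGE reference from this file ...
 *
 *	// Succeeds only if the file already holds a USAGE ref:
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
 */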

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}
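
/*
 * Each successful ttm_ref_object_add() is balanced by one call here; when
 * the last ref of a type goes away, the ref object itself is released. A
 * typical driver unref ioctl reduces to this one call (hypothetical
 * handler):
 *
 *	static int mydrv_unref_ioctl(struct ttm_object_file *tfile, u32 handle)
 *	{
 *		return ttm_ref_object_base_unref(tfile, handle,
 *						 TTM_REF_USAGE);
 *	}
 */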

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	spin_unlock(&tfile->lock);
	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
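
/*
 * Lifecycle sketch (hypothetical driver open/close hooks): one
 * ttm_object_file per userspace client, created at open and torn down at
 * file close, which also drops every ref the client still held:
 *
 *	// at open, with a hash_order such as 10 (table size 1 << 10):
 *	priv->tfile = ttm_object_file_init(tdev, 10);
 *	if (priv->tfile == NULL)
 *		return -ENOMEM;
 *
 *	// at close:
 *	ttm_object_file_release(&priv->tfile);
 */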

struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	idr_init(&tdev->idr);
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
		ttm_round_pot(sizeof(struct file));
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}
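
/*
 * Note the interposition above: the device keeps a private copy of the
 * driver's dma_buf_ops, saves the driver's release callback, and installs
 * ttm_prime_dmabuf_release in its place so prime bookkeeping runs before
 * the driver's own release. A device-setup sketch (hypothetical driver
 * names and hash_order):
 *
 *	static const struct dma_buf_ops mydrv_dmabuf_ops = { ... };
 *
 *	tdev = ttm_object_device_init(mem_glob, 12, &mydrv_dmabuf_ops);
 *	if (tdev == NULL)
 *		return -ENOMEM;
 *	...
 *	ttm_object_device_release(&tdev);
 */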

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
	idr_destroy(&tdev->idr);
	drm_ht_remove(&tdev->object_hash);

	kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma_buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example the kref_get_unless_zero()
 * documentation. Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
#ifdef __NetBSD__
	/* XXX move this to linux_dma_buf.c */
	unsigned cnt;

	/*
	 * Bump the reference count with a compare-and-swap loop, unless
	 * it has already dropped to zero.
	 */
	do {
		cnt = atomic_load_relaxed(&dmabuf->db_refcnt);
		if (cnt == 0)
			return false;
	} while (atomic_cas_uint(&dmabuf->db_refcnt, cnt, cnt + 1) != cnt);
	return true;
#else
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
#endif
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops) {
		/* Not one of ours; drop the reference dma_buf_get() took. */
		dma_buf_put(dma_buf);
		return -ENOSYS;
	}

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->handle;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

	dma_buf_put(dma_buf);

	return ret;
}
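
/*
 * Sketch of a prime import ioctl built on this function (hypothetical
 * handler): only dma-bufs previously exported through this device can be
 * resolved, since true imports are not handled:
 *
 *	u32 handle;
 *	int ret = ttm_prime_fd_to_handle(tfile, args->fd, &handle);
 *
 *	if (ret == 0)
 *		args->handle = handle;
 *	return ret;
 */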

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false
		};
		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf, with memory accounting.
		 */
		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
					   &ctx);
		if (unlikely(ret != 0)) {
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			ttm_mem_global_free(tdev->mem_glob,
					    tdev->dma_buf_size);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}
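
/*
 * Sketch of the matching export ioctl (hypothetical handler): the cached
 * prime->dma_buf is reused when still alive, otherwise a fresh dma_buf is
 * exported; either way the caller just receives an fd:
 *
 *	int fd;
 *	int ret = ttm_prime_handle_to_fd(tfile, args->handle,
 *					 DRM_CLOEXEC | DRM_RDWR, &fd);
 *
 *	if (ret == 0)
 *		args->fd = fd;
 *	return ret;
 */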

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime, bool shareable,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **),
			  void (*ref_obj_release) (struct ttm_base_object *,
						   enum ttm_ref_type ref_type))
{
	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release,
				    ref_obj_release);
}
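
/*
 * Note the indirection above: the base object is registered as
 * ttm_prime_type with ttm_prime_refcount_release, while the driver's real
 * type and release callback are stashed in the prime object. A driver
 * wrapping its buffer objects might do (hypothetical mydrv_* names):
 *
 *	struct mydrv_buffer {
 *		struct ttm_prime_object prime;
 *		...
 *	};
 *
 *	ret = ttm_prime_object_init(tfile, bo_size, &buf->prime,
 *				    true, mydrv_buffer_type,
 *				    &mydrv_buffer_release, NULL);
 */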