// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include "tee_private.h"

static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		put_page(pages[n]);
}

static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
	size_t n;

	for (n = 0; n < page_count; n++)
		get_page(pages[n]);
}

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED)
			unpin_user_pages(shm->pages, shm->num_pages);
		else
			shm_put_kernel_pages(shm->pages, shm->num_pages);

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		teedev->pool->ops->free(teedev->pool, shm);
	} else if (shm->flags & TEE_SHM_DYNAMIC) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->id = id;
	/*
	 * We're assigning this as it is needed if the shm is to be
	 * registered. If this function returns OK then the caller is
	 * expected to call teedev_ctx_get() or clear shm->ctx in case
	 * it's not needed any longer.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

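	/*
	 * Reserve an id with a NULL pointer first and publish the shm with
	 * idr_replace() only once it's fully initialized, so a concurrent
	 * tee_shm_get_from_id() can never observe a half-initialized
	 * object.
	 */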
	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;

	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);

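/*
 * Example (illustrative sketch only, not compiled here): a kernel TEE
 * client is expected to pair the allocation with tee_shm_free() once the
 * invocation is done. The context is assumed to come from
 * tee_client_open_context(); data and data_len are the caller's.
 *
 *	struct tee_shm *shm;
 *	void *va;
 *
 *	shm = tee_shm_alloc_kernel_buf(ctx, 4096);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	va = tee_shm_get_va(shm, 0);
 *	if (!IS_ERR(va))
 *		memcpy(va, data, data_len);
 *	...
 *	tee_shm_free(shm);
 */
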
/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns shared memory similar to
 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
 * might not be registered in secure world if the driver supports passing
 * memory that has not been registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;

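	/*
	 * Private buffers don't need page alignment; double-word
	 * alignment is enough for the driver-internal structures
	 * typically carried in them.
	 */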
	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);

int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
			     int (*shm_register)(struct tee_context *ctx,
						 struct tee_shm *shm,
						 struct page **pages,
						 size_t num_pages,
						 unsigned long start))
{
	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	struct page **pages;
	unsigned int i;
	int rc = 0;

	/*
	 * Ignore alignment since this is already going to be page aligned
	 * and there's no need for any larger alignment.
	 */
	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
				       GFP_KERNEL | __GFP_ZERO);
	if (!shm->kaddr)
		return -ENOMEM;

	shm->paddr = virt_to_phys(shm->kaddr);
	shm->size = nr_pages * PAGE_SIZE;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < nr_pages; i++)
		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);

	shm->pages = pages;
	shm->num_pages = nr_pages;

	if (shm_register) {
		rc = shm_register(shm->ctx, shm, pages, nr_pages,
				  (unsigned long)shm->kaddr);
		if (rc)
			goto err;
	}

	return 0;
err:
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	return rc;
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);

void tee_dyn_shm_free_helper(struct tee_shm *shm,
			     int (*shm_unregister)(struct tee_context *ctx,
						   struct tee_shm *shm))
{
	if (shm_unregister)
		shm_unregister(shm->ctx, shm);
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	kfree(shm->pages);
	shm->pages = NULL;
}
EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
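
/*
 * Example (illustrative sketch only): a driver's tee_shm_pool_ops could
 * wire these two helpers up roughly like this, with my_shm_register() and
 * my_shm_unregister() standing in for the driver's own callbacks:
 *
 *	static int pool_op_alloc(struct tee_shm_pool *pool,
 *				 struct tee_shm *shm, size_t size,
 *				 size_t align)
 *	{
 *		return tee_dyn_shm_alloc_helper(shm, size, align,
 *						my_shm_register);
 *	}
 *
 *	static void pool_op_free(struct tee_shm_pool *pool,
 *				 struct tee_shm *shm)
 *	{
 *		tee_dyn_shm_free_helper(shm, my_shm_unregister);
 *	}
 */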

static struct tee_shm *
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
		    int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start, addr;
	size_t num_pages, off;
	ssize_t len;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	addr = untagged_addr((unsigned long)iter_iov_addr(iter));
	start = rounddown(addr, PAGE_SIZE);
	num_pages = iov_iter_npages(iter, INT_MAX);
	if (!num_pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
				     &off);
	if (unlikely(len <= 0)) {
		ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
		goto err_free_shm_pages;
	}

	/*
	 * iov_iter_extract_pages() does not take a reference on the pages
	 * when extracting from a kvec, so take a reference on them here.
	 */
	if (iov_iter_is_kvec(iter))
		shm_get_kernel_pages(shm->pages, num_pages);

	shm->offset = off;
	shm->size = len;
	shm->num_pages = num_pages;

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	if (!iov_iter_is_kvec(iter))
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
err_free_shm_pages:
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	struct iov_iter iter;
	void *ret;
	int id;

	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
	shm = register_shm_helper(ctx, &iter, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}

/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	u32 flags = TEE_SHM_DYNAMIC;
	struct kvec kvec;
	struct iov_iter iter;

	kvec.iov_base = addr;
	kvec.iov_len = length;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);

	return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
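
/*
 * Example (illustrative sketch only): sharing an existing kernel buffer
 * instead of allocating a new one. The buffer buf and its length buf_len
 * are assumed to be owned by the caller; the registration is dropped with
 * tee_shm_free().
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_register_kernel_buf(ctx, buf, buf_len);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_free(shm);
 */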

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse to map shared memory provided by the application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* Check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (shm->id < 0)
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!shm->kaddr)
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
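
/*
 * Example (illustrative sketch only): addressing into a shm by both
 * virtual and physical address, with shm and offs owned by the caller:
 *
 *	phys_addr_t pa;
 *	void *va;
 *
 *	va = tee_shm_get_va(shm, offs);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	if (tee_shm_get_pa(shm, offs, &pa))
 *		return -EINVAL;
 */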

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 *			   count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
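
/*
 * Example (illustrative sketch only): every successful lookup must be
 * balanced with a put once the reference is no longer needed:
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_get_from_id(ctx, id);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_put(shm);
 */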

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);