// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include "tee_private.h"

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED) {
			unpin_user_pages(shm->pages, shm->num_pages);
		} else {
			size_t n;

			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
		}

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;

	if (shm->flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}

	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.mmap = tee_shm_op_mmap,
};

struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);
		if (shm->id < 0) {
			ret = ERR_PTR(shm->id);
			goto err_pool_free;
		}

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}

	teedev_ctx_get(ctx);

	return shm;
err_rem:
	if (flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
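
/*
 * Usage sketch (illustrative only, not part of the driver): a caller with
 * an existing struct tee_context *ctx would typically allocate dma-buf
 * backed shared memory like this:
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_alloc(ctx, PAGE_SIZE,
 *			    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_free(shm);
 */
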
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_user_flags && flags != req_kernel_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	if (flags & TEE_SHM_USER_MAPPED) {
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	} else {
		struct kvec *kiov;
		int i;

		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
		if (!kiov) {
			ret = ERR_PTR(-ENOMEM);
			goto err;
		}

		for (i = 0; i < num_pages; i++) {
			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
			kiov[i].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
		kfree(kiov);
	}
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	return shm;
err:
	if (shm) {
		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		release_registered_pages(shm);
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
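
/*
 * Usage sketch (illustrative only): registering an existing user space
 * buffer, given a hypothetical address uaddr and size passed in from an
 * ioctl, uses the user flag combination checked for above:
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_register(ctx, uaddr, size,
 *			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 */
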
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm: Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}
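
/*
 * Illustrative sketch: the returned file descriptor carries its own
 * dma-buf reference, so once it has been handed to user space the shm
 * stays alive until that descriptor is closed:
 *
 *	int fd = tee_shm_get_fd(shm);
 *
 *	if (fd < 0)
 *		return fd;
 */
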
/**
 * tee_shm_free() - Free shared memory
 * @shm: Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
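
/*
 * Illustrative note: for dma-buf backed memory the call above is just a
 * reference drop, i.e. roughly equivalent to:
 *
 *	dma_buf_put(shm->dmabuf);
 *
 * so the backing memory is only released once every file descriptor and
 * kernel reference to the dmabuf is gone.
 */
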
/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm: Shared memory handle
 * @va: Virtual address to translate
 * @pa: Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);
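
/*
 * Usage sketch (illustrative only), assuming va points somewhere inside
 * the mapped range [shm->kaddr, shm->kaddr + shm->size):
 *
 *	phys_addr_t pa;
 *	int rc = tee_shm_va2pa(shm, va, &pa);
 *
 *	if (rc)
 *		return rc;
 */
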
/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm: Shared memory handle
 * @pa: Physical address to translate
 * @va: Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm: Shared memory handle
 * @offs: Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm: Shared memory handle
 * @offs: Offset from start of this shared memory
 * @pa: Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
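
/*
 * Usage sketch (illustrative only): the two offset based helpers above
 * are typically used together, e.g. when filling in message parameters
 * for the secure world, given an offset offs into the buffer:
 *
 *	phys_addr_t pa;
 *	void *va = tee_shm_get_va(shm, offs);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	if (tee_shm_get_pa(shm, offs, &pa))
 *		return -EINVAL;
 */
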
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx: Context owning the shared memory
 * @id: Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
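
/*
 * Usage sketch (illustrative only): a successful lookup of a dma-buf
 * backed shm takes a reference, so it must be balanced with a call to
 * tee_shm_put() once the caller is done with the object:
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_put(shm);
 */
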
/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm: Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);