/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

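/*
 * MMAP mode allocation: the bookkeeping struct comes from kzalloc() and the
 * payload from vmalloc_user(), which returns zeroed, page-aligned memory
 * suitable for mapping to userspace. The vmarea handler ties the buffer
 * refcount to mmap()ed ranges so the memory stays alive while it is mapped.
 */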
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

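/*
 * Drop one reference; the vmalloc area and the bookkeeping struct are only
 * freed once the last user (allocation, userspace mapping or exported
 * dmabuf) is gone.
 */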
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

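/*
 * USERPTR support is compiled out in this build: the block below is kept
 * for reference only and the memops fall back to NULL, so VB2_USERPTR is
 * not available to drivers using this allocator.
 */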
#if 0
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
#else
#define	vb2_vmalloc_get_userptr	NULL
#define	vb2_vmalloc_put_userptr	NULL
#endif

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

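/*
 * Map the whole buffer into a userspace VMA. remap_vmalloc_range() inserts
 * every page of the vmalloc area, VM_DONTEXPAND prevents the VMA from being
 * merged with a neighbouring buffer's mapping, and the common vm_ops
 * (opened here for the initial mapping) refcount the buffer per mapping.
 */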
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags		|= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

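/*
 * Build a per-attachment scatterlist for the exported buffer: vmalloc
 * memory is virtually contiguous but not physically contiguous, so every
 * page is looked up with vmalloc_to_page() and gets its own sg entry.
 * The table is only mapped later, in the map callback, hence DMA_NONE.
 */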
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

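/*
 * Map the attachment's scatterlist for the importing device. The mapping
 * is cached in the attachment and reused as long as the direction matches;
 * the dmabuf mutex serializes concurrent map/unmap calls.
 */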
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

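/*
 * The buffer already lives in the kernel's vmalloc space, so vmap simply
 * hands out the existing kernel virtual address.
 */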
static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

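/*
 * Export the buffer as a dmabuf. The dmabuf holds an extra reference on
 * the vb2 buffer, dropped again from the release callback, so the memory
 * stays valid for importers even after the originating queue is freed.
 */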
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

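/*
 * Importer side: obtain a kernel mapping of the attached dmabuf via
 * dma_buf_vmap(); the resulting address is what the vaddr callback above
 * returns for this buffer.
 */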
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, &map);

	kfree(buf);
}

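/*
 * Importing only records the dmabuf and the requested size; the kernel
 * mapping itself is deferred to map_dmabuf. A dmabuf smaller than the
 * requested plane size is rejected.
 */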
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

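/*
 * Example (hypothetical driver setup, not part of this file): a driver
 * selects this allocator by pointing its vb2_queue at these ops before
 * calling vb2_queue_init():
 *
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->mem_ops = &vb2_vmalloc_memops;
 */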
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");