/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mc.h>

#include <trace/events/vb2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("[%s] %s: " fmt, (q)->name, __func__,	\
				## arg);				\
	} while (0)
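
/*
 * Illustrative example: dprintk(q, 2, "qbuf of buffer %d\n", vb->index)
 * logs only when the 'debug' module parameter is 2 or higher, prefixing
 * the message with the queue name and the calling function.
 */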

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */
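
/*
 * For example, by the time a buffer is freed its cnt_mem_alloc and
 * cnt_mem_put counters must match; __vb2_queue_free() below dumps all
 * the counters and flags the queue as UNBALANCED if any pair disagrees.
 */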

#define log_memop(vb, op)						\
	dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(q, 2, "call_qop(%s)%s\n", #op,				\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif

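/*
 * The buf_ops below are optional callbacks provided by the higher-level
 * API glue (e.g. the V4L2 layer); call_bufop()/call_void_bufop() dispatch
 * to them, and the q && q->buf_ops checks make them safe no-ops when the
 * callbacks are not set.
 */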
#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})

static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);

static const char *vb2_state_name(enum vb2_buffer_state s)
{
	static const char * const state_names[] = {
		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
		[VB2_BUF_STATE_IN_REQUEST] = "in request",
		[VB2_BUF_STATE_PREPARING] = "preparing",
		[VB2_BUF_STATE_QUEUED] = "queued",
		[VB2_BUF_STATE_ACTIVE] = "active",
		[VB2_BUF_STATE_DONE] = "done",
		[VB2_BUF_STATE_ERROR] = "error",
	};

	if ((unsigned int)(s) < ARRAY_SIZE(state_names))
		return state_names[s];
	return "unknown";
}

/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Memops alloc requires size to be page aligned. */
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		/* Did it wrap around? */
		if (size < vb->planes[plane].length)
			goto free;

		mem_priv = call_ptr_memop(vb, alloc,
				q->alloc_devs[plane] ? : q->dev,
				q->dma_attrs, size, q->dma_dir, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}

/*
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
			plane, vb->index);
	}
}

/*
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/*
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
}

/*
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/*
 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (vb->synced)
		return;

	if (vb->need_cache_sync_on_prepare) {
		for (plane = 0; plane < vb->num_planes; ++plane)
			call_void_memop(vb, prepare,
					vb->planes[plane].mem_priv);
	}
	vb->synced = 1;
}

/*
 * __vb2_buf_mem_finish() - call ->finish() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (!vb->synced)
		return;

	if (vb->need_cache_sync_on_finish) {
		for (plane = 0; plane < vb->num_planes; ++plane)
			call_void_memop(vb, finish,
					vb->planes[plane].mem_priv);
	}
	vb->synced = 0;
}

/*
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 */
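/*
 * Illustrative example (assuming 4 KiB pages): if buffer 0 has two planes
 * of lengths 0x1000 and 0x1800, their offsets become 0x0 and 0x1000, and
 * buffer 1 starts at PAGE_ALIGN(0x1000 + 0x1800) = 0x3000. Userspace
 * passes these offsets back via mmap() to select which plane to map.
 */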
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long off = 0;

	if (vb->index) {
		struct vb2_buffer *prev = q->bufs[vb->index - 1];
		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];

		off = PAGE_ALIGN(p->m.offset + p->length);
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = off;

		dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
			vb->index, plane, off);

		off += vb->planes[plane].length;
		off = PAGE_ALIGN(off);
	}
}

/*
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP
 * type) video buffer memory for all buffers/planes on the queue and
 * initialize the queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned plane_sizes[VB2_MAX_PLANES])
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	int ret;

	/* Ensure that q->num_buffers + num_buffers does not exceed VB2_MAX_FRAME */
	num_buffers = min_t(unsigned int, num_buffers,
			    VB2_MAX_FRAME - q->num_buffers);

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(q, 1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->index = q->num_buffers + buffer;
		vb->type = q->type;
		vb->memory = memory;
		/*
		 * We need to set these flags here so that the videobuf2 core
		 * will call ->prepare()/->finish() cache sync/flush on vb2
		 * buffers when appropriate. However, we can avoid explicit
		 * ->prepare() and ->finish() cache sync for DMABUF buffers,
		 * because the DMA exporter takes care of it.
		 */
		if (q->memory != VB2_MEMORY_DMABUF) {
			vb->need_cache_sync_on_prepare = 1;
			vb->need_cache_sync_on_finish = 1;
		}
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}
		call_void_bufop(q, init_buffer, vb);

		q->bufs[vb->index] = vb;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(q, 1, "failed allocating memory for buffer %d\n",
					buffer);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(q, 1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
		}
	}

	dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}

/*
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == VB2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == VB2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/*
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information; if no buffers are left, return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(q, 1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the lifetime of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("  setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("  wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("   counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("     buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
				vb->cnt_buf_out_validate, vb->cnt_buf_queue,
				vb->cnt_buf_done, vb->cnt_buf_request_complete);
			pr_info("     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}

bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;
		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0; apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(vb2_buffer_in_use);

/*
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;

	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		if (vb2_buffer_in_use(q, q->bufs[buffer]))
			return true;
	}
	return false;
}

void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);

/*
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/*
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/*
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

int vb2_verify_memory_type(struct vb2_queue *q,
			   enum vb2_memory memory, unsigned int type)
{
	if (memory != VB2_MEMORY_MMAP && /* memory != VB2_MEMORY_USERPTR && */
	    memory != VB2_MEMORY_DMABUF) {
		dprintk(q, 1, "unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for given memory type
	 * are available.
	 */
	if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(q, 1, "MMAP for current setup unsupported\n");
		return -EINVAL;
	}
#if 0
	if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(q, 1, "USERPTR for current setup unsupported\n");
		return -EINVAL;
	}
#endif
	if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(q, 1, "DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(vb2_verify_memory_type);

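/*
 * vb2_core_reqbufs() implements the REQBUFS semantics: a count of 0 frees
 * all buffers, otherwise the request is negotiated with the driver via
 * queue_setup(), which may be called a second time if fewer buffers than
 * requested could be allocated.
 */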
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		     unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	unsigned int i;
	int ret;

	if (q->streaming) {
		dprintk(q, 1, "streaming active\n");
		return -EBUSY;
	}

	if (q->waiting_in_dqbuf && *count) {
		dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
		return -EBUSY;
	}

	if (*count == 0 || q->num_buffers != 0 ||
	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (debug && q->memory == VB2_MEMORY_MMAP &&
		    __buffers_in_use(q))
			dprintk(q, 1, "memory in use, orphaning buffers\n");

		/*
		 * Call queue_cancel to clean up any buffers in the
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	q->memory = memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Check that driver has set sane values */
	if (WARN_ON(!num_planes))
		return -EINVAL;

	for (i = 0; i < num_planes; i++)
		if (WARN_ON(!plane_sizes[i]))
			return -EINVAL;

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but since it
		 * signals to queue_setup() whether it is called from create_bufs()
		 * vs reqbufs() we zero it here to signal that queue_setup() is
		 * called for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->waiting_for_buffers = !q->is_output;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);

int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
			 unsigned int *count,
			 unsigned int requested_planes,
			 const unsigned int requested_sizes[])
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	if (q->num_buffers == VB2_MAX_FRAME) {
		dprintk(q, 1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		if (q->waiting_in_dqbuf && *count) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		q->memory = memory;
		q->waiting_for_buffers = !q->is_output;
	} else {
		if (q->memory != memory) {
			dprintk(q, 1, "memory model mismatch\n");
			return -EINVAL;
		}
	}

	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
					      num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);

void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);

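/*
 * vb2_buffer_done() is the completion entry point for drivers: typically
 * called from the device's completion interrupt handler (done_lock is
 * taken with spin_lock_irqsave()) to hand a finished buffer back to vb2
 * in DONE or ERROR state, or in QUEUED state to return it unprocessed
 * after a failed start_streaming().
 */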
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(q, 4, "done processing on buffer %d, state: %s\n",
		vb->index, vb2_state_name(state));

	if (state != VB2_BUF_STATE_QUEUED)
		__vb2_buf_mem_finish(vb);

	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);

	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}

	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);

void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

/*
 * __prepare_mmap() - prepare an MMAP buffer
 */
static int __prepare_mmap(struct vb2_buffer *vb)
{
	int ret = 0;

	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}

/*
 * __prepare_userptr() - prepare a USERPTR buffer
 */
static int __prepare_userptr(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
				planes[plane].length,
				vb->planes[plane].min_length,
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				vb->copied_timestamp = 0;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr,
					  q->alloc_devs[plane] ? : q->dev,
					  planes[plane].m.userptr,
					  planes[plane].length, q->dma_dir);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
					vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}

/*
 * __prepare_dmabuf() - prepare a DMABUF buffer
 */
static int __prepare_dmabuf(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
				planes[plane].length, plane,
				vb->planes[plane].min_length);
			dma_buf_put(dbuf);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->planes[plane].length == planes[plane].length) {
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(q, 3, "buffer for plane %d changed\n", plane);

		if (!reacquired) {
			reacquired = true;
			vb->copied_timestamp = 0;
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.fd = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, attach_dmabuf,
					  q->alloc_devs[plane] ? : q->dev,
					  dbuf, planes[plane].length, q->dma_dir);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
	 * here, while queueing the buffer(s), instead of just before the DMA,
	 * so userspace knows sooner rather than later if the dma-buf map
	 * fails.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].dbuf_mapped)
			continue;

		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(q, 1, "failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.fd = planes[plane].m.fd;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}

/*
 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	trace_vb2_buf_queue(q, vb);

	call_void_vb_qop(vb, buf_queue, vb);
}

static int __buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	enum vb2_buffer_state orig_state = vb->state;
	int ret;

	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	if (vb->prepared)
		return 0;
	WARN_ON(vb->synced);

	if (q->is_output) {
		ret = call_vb_qop(vb, buf_out_validate, vb);
		if (ret) {
			dprintk(q, 1, "buffer validation failed\n");
			return ret;
		}
	}

	vb->state = VB2_BUF_STATE_PREPARING;

	switch (q->memory) {
	case VB2_MEMORY_MMAP:
		ret = __prepare_mmap(vb);
		break;
#if 0
	case VB2_MEMORY_USERPTR:
		ret = __prepare_userptr(vb);
		break;
#endif
	case VB2_MEMORY_DMABUF:
		ret = __prepare_dmabuf(vb);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
		break;
	}

	if (ret) {
		dprintk(q, 1, "buffer preparation failed: %d\n", ret);
		vb->state = orig_state;
		return ret;
	}

	__vb2_buf_mem_prepare(vb);
	vb->prepared = 1;
	vb->state = orig_state;

	return 0;
}

static int vb2_req_prepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int ret;

	if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
		return -EINVAL;

	mutex_lock(vb->vb2_queue->lock);
	ret = __buf_prepare(vb);
	mutex_unlock(vb->vb2_queue->lock);
	return ret;
}

static void __vb2_dqbuf(struct vb2_buffer *vb);

static void vb2_req_unprepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	mutex_lock(vb->vb2_queue->lock);
	__vb2_dqbuf(vb);
	vb->state = VB2_BUF_STATE_IN_REQUEST;
	mutex_unlock(vb->vb2_queue->lock);
	WARN_ON(!vb->req_obj.req);
}

int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
		  struct media_request *req);

static void vb2_req_queue(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	mutex_lock(vb->vb2_queue->lock);
	vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
	mutex_unlock(vb->vb2_queue->lock);
}

static void vb2_req_unbind(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
		call_void_bufop(vb->vb2_queue, init_buffer, vb);
}

static void vb2_req_release(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
		vb->state = VB2_BUF_STATE_DEQUEUED;
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
	}
}

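/*
 * These ops tie vb2 buffers into the media request framework: the request
 * core invokes them as a request moves through its lifecycle, e.g.
 * .prepare/.unprepare around request validation and .queue when the
 * request is actually queued to the driver.
 */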
static const struct media_request_object_ops vb2_core_req_ops = {
	.prepare = vb2_req_prepare,
	.unprepare = vb2_req_unprepare,
	.queue = vb2_req_queue,
	.unbind = vb2_req_unbind,
	.release = vb2_req_release,
};

bool vb2_request_object_is_buffer(struct media_request_object *obj)
{
	return obj->ops == &vb2_core_req_ops;
}
EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);

unsigned int vb2_request_buffer_cnt(struct media_request *req)
{
	struct media_request_object *obj;
	unsigned long flags;
	unsigned int buffer_cnt = 0;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list)
		if (vb2_request_object_is_buffer(obj))
			buffer_cnt++;
	spin_unlock_irqrestore(&req->lock, flags);

	return buffer_cnt;
}
EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);

int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	vb = q->bufs[index];
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}
	if (vb->prepared) {
		dprintk(q, 1, "buffer already prepared\n");
		return -EINVAL;
	}

	ret = __buf_prepare(vb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
	call_void_bufop(q, fill_user_buffer, vb, pb);

	dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);

/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(q, 1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information on how
	 * buffers should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}

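/*
 * vb2_core_qbuf() queues a buffer either directly (qbuf mode) or through a
 * media request; in the direct path it also kicks vb2_start_streaming()
 * once q->min_buffers_needed buffers have been queued after STREAMON.
 */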
vb2_core_qbuf(struct vb2_queue * q,unsigned int index,void * pb,struct media_request * req)1574 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
1575 struct media_request *req)
1576 {
1577 struct vb2_buffer *vb;
1578 int ret;
1579
1580 if (q->error) {
1581 dprintk(q, 1, "fatal error occurred on queue\n");
1582 return -EIO;
1583 }
1584
1585 vb = q->bufs[index];
1586
1587 if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
1588 q->requires_requests) {
1589 dprintk(q, 1, "qbuf requires a request\n");
1590 return -EBADR;
1591 }
1592
1593 if ((req && q->uses_qbuf) ||
1594 (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
1595 q->uses_requests)) {
1596 dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
1597 return -EBUSY;
1598 }
1599
1600 if (req) {
1601 int ret;
1602
1603 q->uses_requests = 1;
1604 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1605 dprintk(q, 1, "buffer %d not in dequeued state\n",
1606 vb->index);
1607 return -EINVAL;
1608 }
1609
1610 if (q->is_output && !vb->prepared) {
1611 ret = call_vb_qop(vb, buf_out_validate, vb);
1612 if (ret) {
1613 dprintk(q, 1, "buffer validation failed\n");
1614 return ret;
1615 }
1616 }
1617
1618 media_request_object_init(&vb->req_obj);
1619
1620 /* Make sure the request is in a safe state for updating. */
1621 ret = media_request_lock_for_update(req);
1622 if (ret)
1623 return ret;
1624 ret = media_request_object_bind(req, &vb2_core_req_ops,
1625 q, true, &vb->req_obj);
1626 media_request_unlock_for_update(req);
1627 if (ret)
1628 return ret;
1629
1630 vb->state = VB2_BUF_STATE_IN_REQUEST;
1631
1632 /*
1633 * Increment the refcount and store the request.
1634 * The request refcount is decremented again when the
1635 * buffer is dequeued. This is to prevent vb2_buffer_done()
1636 * from freeing the request from interrupt context, which can
1637 * happen if the application closed the request fd after
1638 * queueing the request.
1639 */
1640 media_request_get(req);
1641 vb->request = req;
1642
1643 /* Fill buffer information for the userspace */
1644 if (pb) {
1645 call_void_bufop(q, copy_timestamp, vb, pb);
1646 call_void_bufop(q, fill_user_buffer, vb, pb);
1647 }
1648
1649 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
1650 return 0;
1651 }
1652
1653 if (vb->state != VB2_BUF_STATE_IN_REQUEST)
1654 q->uses_qbuf = 1;
1655
1656 switch (vb->state) {
1657 case VB2_BUF_STATE_DEQUEUED:
1658 case VB2_BUF_STATE_IN_REQUEST:
1659 if (!vb->prepared) {
1660 ret = __buf_prepare(vb);
1661 if (ret)
1662 return ret;
1663 }
1664 break;
1665 case VB2_BUF_STATE_PREPARING:
1666 dprintk(q, 1, "buffer still being prepared\n");
1667 return -EINVAL;
1668 default:
1669 dprintk(q, 1, "invalid buffer state %s\n",
1670 vb2_state_name(vb->state));
1671 return -EINVAL;
1672 }
1673
1674 /*
1675 * Add to the queued buffers list, a buffer will stay on it until
1676 * dequeued in dqbuf.
1677 */
1678 list_add_tail(&vb->queued_entry, &q->queued_list);
1679 q->queued_count++;
1680 q->waiting_for_buffers = false;
1681 vb->state = VB2_BUF_STATE_QUEUED;
1682
1683 if (pb)
1684 call_void_bufop(q, copy_timestamp, vb, pb);
1685
1686 trace_vb2_qbuf(q, vb);
1687
1688 /*
1689 * If already streaming, give the buffer to driver for processing.
1690 * If not, the buffer will be given to driver on next streamon.
1691 */
1692 if (q->start_streaming_called)
1693 __enqueue_in_driver(vb);
1694
1695 /* Fill buffer information for the userspace */
1696 if (pb)
1697 call_void_bufop(q, fill_user_buffer, vb, pb);
1698
1699 /*
1700 * If streamon has been called, and we haven't yet called
1701 * start_streaming() since not enough buffers were queued, and
1702 * we now have reached the minimum number of queued buffers,
1703 * then we can finally call start_streaming().
1704 */
1705 if (q->streaming && !q->start_streaming_called &&
1706 q->queued_count >= q->min_buffers_needed) {
1707 ret = vb2_start_streaming(q);
1708 if (ret)
1709 return ret;
1710 }
1711
1712 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
1713 return 0;
1714 }
1715 EXPORT_SYMBOL_GPL(vb2_core_qbuf);
1716
1717 /*
1718 * __vb2_wait_for_done_vb() - wait for a buffer to become available
1719 * for dequeuing
1720 *
1721 * Will sleep if required for nonblocking == false.
1722 */
__vb2_wait_for_done_vb(struct vb2_queue * q,int nonblocking)1723 static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1724 {
1725 /*
1726 * All operations on vb_done_list are performed under done_lock
1727 * spinlock protection. However, buffers may be removed from
1728 * it and returned to userspace only while holding both driver's
1729 * lock and the done_lock spinlock. Thus we can be sure that as
1730 * long as we hold the driver's lock, the list will remain not
1731 * empty if list_empty() check succeeds.
1732 */
1733
1734 for (;;) {
1735 int ret;
1736
1737 if (q->waiting_in_dqbuf) {
1738 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
1739 return -EBUSY;
1740 }
1741
1742 if (!q->streaming) {
1743 dprintk(q, 1, "streaming off, will not wait for buffers\n");
1744 return -EINVAL;
1745 }
1746
1747 if (q->error) {
1748 dprintk(q, 1, "Queue in error state, will not wait for buffers\n");
1749 return -EIO;
1750 }
1751
1752 if (q->last_buffer_dequeued) {
1753 dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
1754 return -EPIPE;
1755 }
1756
1757 if (!list_empty(&q->done_list)) {
1758 /*
1759 * Found a buffer that we were waiting for.
1760 */
1761 break;
1762 }
1763
1764 if (nonblocking) {
1765 dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
1766 return -EAGAIN;
1767 }
1768
1769 q->waiting_in_dqbuf = 1;
1770 /*
1771 * We are streaming and blocking, wait for another buffer to
1772 * become ready or for streamoff. Driver's lock is released to
1773 * allow streamoff or qbuf to be called while waiting.
1774 */
1775 call_void_qop(q, wait_prepare, q);
1776
1777 /*
1778 * All locks have been released, it is safe to sleep now.
1779 */
1780 dprintk(q, 3, "will sleep waiting for buffers\n");
1781 ret = wait_event_interruptible(q->done_wq,
1782 !list_empty(&q->done_list) || !q->streaming ||
1783 q->error);
1784
1785 /*
1786 * We need to reevaluate both conditions again after reacquiring
1787 * the locks or return an error if one occurred.
1788 */
1789 call_void_qop(q, wait_finish, q);
1790 q->waiting_in_dqbuf = 0;
1791 if (ret) {
1792 dprintk(q, 1, "sleep was interrupted\n");
1793 return ret;
1794 }
1795 }
1796 return 0;
1797 }
1798
1799 /*
1800 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1801 *
1802 * Will sleep if required for nonblocking == false.
1803 */
__vb2_get_done_vb(struct vb2_queue * q,struct vb2_buffer ** vb,void * pb,int nonblocking)1804 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1805 void *pb, int nonblocking)
1806 {
1807 unsigned long flags;
1808 int ret = 0;
1809
1810 /*
1811 * Wait for at least one buffer to become available on the done_list.
1812 */
1813 ret = __vb2_wait_for_done_vb(q, nonblocking);
1814 if (ret)
1815 return ret;
1816
1817 /*
1818 * Driver's lock has been held since we last verified that done_list
1819 * is not empty, so no need for another list_empty(done_list) check.
1820 */
1821 spin_lock_irqsave(&q->done_lock, flags);
1822 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
1823 /*
1824 * Only remove the buffer from done_list if all planes can be
1825 * handled. Some cases such as V4L2 file I/O and DVB have pb
1826 * == NULL; skip the check then as there's nothing to verify.
1827 */
1828 if (pb)
1829 ret = call_bufop(q, verify_planes_array, *vb, pb);
1830 if (!ret)
1831 list_del(&(*vb)->done_entry);
1832 spin_unlock_irqrestore(&q->done_lock, flags);
1833
1834 return ret;
1835 }
1836
vb2_wait_for_all_buffers(struct vb2_queue * q)1837 int vb2_wait_for_all_buffers(struct vb2_queue *q)
1838 {
1839 if (!q->streaming) {
1840 dprintk(q, 1, "streaming off, will not wait for buffers\n");
1841 return -EINVAL;
1842 }
1843
1844 if (q->start_streaming_called)
1845 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
1846 return 0;
1847 }
1848 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1849
1850 /*
1851 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1852 */
__vb2_dqbuf(struct vb2_buffer * vb)1853 static void __vb2_dqbuf(struct vb2_buffer *vb)
1854 {
1855 struct vb2_queue *q = vb->vb2_queue;
1856
1857 /* nothing to do if the buffer is already dequeued */
1858 if (vb->state == VB2_BUF_STATE_DEQUEUED)
1859 return;
1860
1861 vb->state = VB2_BUF_STATE_DEQUEUED;
1862
1863 call_void_bufop(q, init_buffer, vb);
1864 }
1865
vb2_core_dqbuf(struct vb2_queue * q,unsigned int * pindex,void * pb,bool nonblocking)1866 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
		   bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
	if (ret < 0)
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(q, 3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(q, 3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}

	call_void_vb_qop(vb, buf_finish, vb);
	vb->prepared = 0;

	if (pindex)
		*pindex = vb->index;

	/* Fill buffer information for userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	q->queued_count--;

	trace_vb2_dqbuf(q, vb);

	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	if (WARN_ON(vb->req_obj.req)) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}
	if (vb->request)
		media_request_put(vb->request);
	vb->request = NULL;

	dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
		vb->index, vb2_state_name(vb->state));

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
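
/*
 * Example (illustrative sketch, not part of this framework): a
 * nonblocking dequeue attempt, as a core-level wrapper would issue it.
 *
 *	unsigned int index;
 *	int ret = vb2_core_dqbuf(q, &index, NULL, true);
 *
 *	if (ret == -EAGAIN)
 *		// No buffer was on the done list; poll and retry later.
 *		return ret;
 *	if (!ret)
 *		// q->bufs[index] is now DEQUEUED and owned by userspace.
 *		process(q->bufs[index]);
 *
 * process() is a hypothetical consumer.
 */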

/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from the driver's queue and all buffers queued
 * by userspace from videobuf's queue, returning the queue to the state it
 * was in right after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information on how buffers should be
	 * returned to vb2 in stop_streaming(), and the sketch after this
	 * function for a compliant example.
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
				pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
					q->bufs[i]);
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
			}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;
	q->uses_requests = 0;
	q->uses_qbuf = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];
		struct media_request *req = vb->req_obj.req;

		/*
		 * If a request is associated with this buffer, then
		 * call buf_request_complete() so the driver can complete()
		 * the related request objects. Otherwise those objects would
		 * never complete.
		 */
		if (req) {
			enum media_request_state state;
			unsigned long flags;

			spin_lock_irqsave(&req->lock, flags);
			state = req->state;
			spin_unlock_irqrestore(&req->lock, flags);

			if (state == MEDIA_REQUEST_STATE_QUEUED)
				call_void_vb_qop(vb, buf_request_complete, vb);
		}

		__vb2_buf_mem_finish(vb);

		if (vb->prepared) {
			call_void_vb_qop(vb, buf_finish, vb);
			vb->prepared = 0;
		}
		__vb2_dqbuf(vb);

		if (vb->req_obj.req) {
			media_request_object_unbind(&vb->req_obj);
			media_request_object_put(&vb->req_obj);
		}
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
		vb->copied_timestamp = 0;
	}
}
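
/*
 * Example (illustrative sketch, not part of this framework): a compliant
 * stop_streaming() implementation returns every buffer it still owns to
 * vb2 before returning, so the WARN_ON above never triggers. The "foo"
 * names are hypothetical.
 *
 *	static void foo_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct foo_dev *dev = vb2_get_drv_priv(q);
 *		struct foo_buf *buf, *tmp;
 *
 *		foo_hw_stop(dev);
 *		list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
 *			list_del(&buf->list);
 *			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
 *		}
 *	}
 */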

int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
	int ret;

	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(q, 3, "already streaming\n");
		return 0;
	}

	if (!q->num_buffers) {
		dprintk(q, 1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q->num_buffers < q->min_buffers_needed) {
		dprintk(q, 1, "need at least %u allocated buffers\n",
			q->min_buffers_needed);
		return -EINVAL;
	}

	/*
	 * Tell driver to start streaming provided sufficient buffers
	 * are available.
	 */
	if (q->queued_count >= q->min_buffers_needed) {
		ret = v4l_vb2q_enable_media_source(q);
		if (ret)
			return ret;
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	q->streaming = 1;

	dprintk(q, 3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);

void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);
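
/*
 * Example (illustrative sketch, not part of this framework): a driver
 * that detects an unrecoverable fault, e.g. in its interrupt handler,
 * can flag the queue so blocked and future dequeues fail. The "foo"
 * names are hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct foo_dev *dev = priv;
 *
 *		if (foo_hw_fatal_error(dev)) {
 *			vb2_queue_error(&dev->queue);
 *			return IRQ_HANDLED;
 *		}
 *		...
 *	}
 */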

int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(q, 3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
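
/*
 * Example (illustrative sketch, not part of this framework): the
 * streamon/streamoff pair as a core-level state toggle. A wrapper such
 * as a V4L2 VIDIOC_STREAMON handler ends up doing little more than:
 *
 *	ret = vb2_core_streamon(q, q->type);
 *	...
 *	ret = vb2_core_streamoff(q, q->type);
 *
 * Note that streamoff always succeeds for a matching type: buffers are
 * returned to the dequeued state even if streamon was never called.
 */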

/*
 * __find_plane_by_offset() - find the plane associated with the given @off
 */
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
				  unsigned int *_buffer, unsigned int *_plane)
{
	struct vb2_buffer *vb;
	unsigned int buffer, plane;

	/*
	 * Go over all buffers and their planes, comparing the given offset
	 * with an offset assigned to each plane. If a match is found,
	 * return its buffer and plane numbers.
	 */
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		vb = q->bufs[buffer];

		for (plane = 0; plane < vb->num_planes; ++plane) {
			if (vb->planes[plane].m.offset == off) {
				*_buffer = buffer;
				*_plane = plane;
				return 0;
			}
		}
	}

	return -EINVAL;
}

int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
		    unsigned int index, unsigned int plane, unsigned int flags)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(q, 1, "queue supports only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[index];

	if (plane >= vb->num_planes) {
		dprintk(q, 1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

	vb_plane = &vb->planes[plane];

	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
			      flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
			index, plane);
		return -EINVAL;
	}

	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
			index, plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(q, 3, "buffer %d, plane %d exported as fd %d\n",
		index, plane, ret);
	*fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_expbuf);
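
/*
 * Example (illustrative sketch, not part of this framework): exporting
 * plane 0 of buffer 2 as a DMABUF fd that can be handed to another
 * subsystem for zero-copy sharing:
 *
 *	int fd;
 *	int ret = vb2_core_expbuf(q, &fd, q->type, 2, 0,
 *				  O_CLOEXEC | O_RDWR);
 *	if (!ret)
 *		// fd holds a reference to the underlying dma_buf and
 *		// stays valid even after the queue frees its buffers.
 *		share_with_consumer(fd);
 *
 * share_with_consumer() is a hypothetical consumer hand-off.
 */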

int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer = 0, plane = 0;
	int ret;
	unsigned long length;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (q->is_output) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	mutex_lock(&q->mmap_lock);

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "mmap: file io in progress\n");
		ret = -EBUSY;
		goto unlock;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		goto unlock;

	vb = q->bufs[buffer];

	/*
	 * MMAP requires page-aligned buffers.
	 * The buffer length was page-aligned at __vb2_buf_mem_alloc(),
	 * so we need to do the same here.
	 */
	length = PAGE_ALIGN(vb->planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(q, 1,
			"MMAP invalid, as it would overflow buffer length\n");
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
	 * not as an in-buffer offset. We always want to mmap a whole buffer
	 * from its beginning.
	 */
	vma->vm_pgoff = 0;

	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);

unlock:
	mutex_unlock(&q->mmap_lock);
	if (ret)
		return ret;

	dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
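
/*
 * Example (illustrative sketch, not part of this framework): userspace
 * mmap()s a buffer by passing the per-plane offset 'cookie' it queried
 * for the buffer, and a V4L2-style file-operations handler simply
 * forwards to vb2_mmap(). The "foo" names are hypothetical.
 *
 *	static int foo_fop_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		// vma->vm_pgoff carries the cookie that
 *		// __find_plane_by_offset() matches against.
 *		return vb2_mmap(&dev->queue, vma);
 *	}
 */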

#ifndef CONFIG_MMU
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	void *vaddr;
	int ret;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	vaddr = vb2_plane_vaddr(vb, plane);
	return vaddr ? (unsigned long)vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif

int vb2_core_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(!q->ops) ||
	    WARN_ON(!q->mem_ops) ||
	    WARN_ON(!q->type) ||
	    WARN_ON(!q->io_modes) ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue))
		return -EINVAL;

	if (WARN_ON(q->requires_requests && !q->supports_requests))
		return -EINVAL;

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	mutex_init(&q->mmap_lock);
	init_waitqueue_head(&q->done_wq);

	q->memory = VB2_MEMORY_UNKNOWN;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	if (q->bidirectional)
		q->dma_dir = DMA_BIDIRECTIONAL;
	else
		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (q->name[0] == '\0')
		snprintf(q->name, sizeof(q->name), "%s-%p",
			 q->is_output ? "out" : "cap", q);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_queue_init);
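
/*
 * Example (illustrative sketch, not part of this framework): the fields
 * a driver typically fills in before calling vb2_core_queue_init() (or
 * a wrapper such as V4L2's vb2_queue_init()). The foo_* names are
 * hypothetical.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = dev;
 *	q->ops = &foo_qops;            // must provide queue_setup + buf_queue
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->buf_struct_size = sizeof(struct foo_buf);
 *	ret = vb2_core_queue_init(q);
 */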

static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);

void vb2_core_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	mutex_lock(&q->mmap_lock);
	__vb2_queue_free(q, q->num_buffers);
	mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);

__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
		       poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in
	 * their events during this first call. Failure to do so will result
	 * in the queue's events being ignored, because the poll_table won't
	 * be capable of adding new wait queues thereafter.
	 */
	poll_wait(file, &q->done_wq, wait);

	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
		return 0;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
		    (req_events & (EPOLLIN | EPOLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return EPOLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
		    (req_events & (EPOLLOUT | EPOLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return EPOLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return EPOLLOUT | EPOLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return EPOLLERR;

	/*
	 * If this quirk is set and QBUF hasn't been called yet then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 * This quirk is set by V4L2 for backwards compatibility reasons.
	 */
	if (q->quirk_poll_must_check_waiting_for_buffers &&
	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
		return EPOLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return EPOLLOUT | EPOLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return EPOLLIN | EPOLLRDNORM;
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
				      done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				EPOLLOUT | EPOLLWRNORM :
				EPOLLIN | EPOLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);
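
/*
 * Example (illustrative sketch, not part of this framework): a driver's
 * poll file operation usually just delegates here, serialized against
 * the other queue operations. The "foo" names are hypothetical.
 *
 *	static __poll_t foo_fop_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *		__poll_t res;
 *
 *		mutex_lock(&dev->lock);
 *		res = vb2_core_poll(&dev->queue, file, wait);
 *		mutex_unlock(&dev->lock);
 *		return res;
 *	}
 */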

/*
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;
	unsigned int size;
	unsigned int pos;
	unsigned int queued:1;
};

/*
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to q->num_buffers then a new buffer
 *		must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		buffers. However, in the write() case no buffers are initially
 *		queued, instead whenever a buffer is full it is queued up by
 *		__vb2_perform_fileio(). Only once all available buffers have
 *		been queued up will __vb2_perform_fileio() start to dequeue
 *		buffers. This means that initially __vb2_perform_fileio()
 *		needs to know what buffer index to use when it is queuing up
 *		the buffers for the first time. That initial index is stored
 *		in this field. Once it is equal to q->num_buffers all
 *		available buffers have been queued and __vb2_perform_fileio()
 *		should start the normal dequeue/queue cycle.
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it requires
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	unsigned int count;
	unsigned int type;
	unsigned int memory;
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned read_once:1;
	unsigned write_immediately:1;
};
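
/*
 * Worked example (illustrative, derived from the description above):
 * write() emulation with 4 buffers. Initially initial_index == 0 and
 * cur_index == 0, so userspace fills buffers 0..3 in order; each time
 * one is queued, both indices advance. After the fourth queue both
 * equal q->num_buffers (4), so from then on every call first dequeues
 * a buffer, sets cur_index to the dequeued index, and requeues it when
 * full, while initial_index stays pinned at 4.
 */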

/*
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Bail out if the streaming API has already been activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
		(read) ? "read" : "write", count, q->fileio_read_once,
		q->fileio_write_immediately);

	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->read_once = q->fileio_read_once;
	fileio->write_immediately = q->fileio_write_immediately;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->count = count;
	fileio->memory = VB2_MEMORY_MMAP;
	fileio->type = q->type;
	q->fileio = fileio;
	ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			ret = vb2_core_qbuf(q, i, NULL, NULL);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to q->num_buffers
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_core_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, &fileio->count);

err_kfree:
	q->fileio = NULL;
	kfree(fileio);
	return ret;
}

/*
 * __vb2_cleanup_fileio() - free resources used by file io emulator
 * @q:		videobuf2 queue
 */
static int __vb2_cleanup_fileio(struct vb2_queue *q)
{
	struct vb2_fileio_data *fileio = q->fileio;

	if (fileio) {
		vb2_core_streamoff(q, q->type);
		q->fileio = NULL;
		fileio->count = 0;
		vb2_core_reqbufs(q, fileio->memory, &fileio->count);
		kfree(fileio);
		dprintk(q, 3, "file io emulator closed\n");
	}
	return 0;
}

/*
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointer to the target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means nonblocking calls, 0 means blocking)
 * @read:	access mode selector (1 means read, 0 means write)
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
				   loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool copy_timestamp = !read && q->copy_timestamp;
	unsigned index;
	int ret;

	dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	if (q->waiting_in_dqbuf) {
		dprintk(q, 3, "another dup()ped fd is %s\n",
			read ? "reading" : "writing");
		return -EBUSY;
	}

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		struct vb2_buffer *b;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
		dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index;
		buf = &fileio->bufs[index];
		b = q->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
		    b->planes[0].data_offset < buf->size) {
			buf->pos = b->planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(q, 5, "reducing transfer count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(q, 3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		struct vb2_buffer *b = q->bufs[index];

		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(q, 3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give buffer to the driver.
		 */
		b->planes[0].bytesused = buf->pos;

		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		ret = vb2_core_qbuf(q, index, NULL, NULL);
		dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to
		 * be queued for the first time (initial_index <
		 * q->num_buffers) or, once initial_index has reached
		 * q->num_buffers, a dequeued one, since all the 'first time'
		 * buffers have been queued up by then.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}

size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);

size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		 loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, (char __user *) data, count,
				    ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
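
/*
 * Example (illustrative sketch, not part of this framework): a capture
 * driver exposing the read() emulator through its file operations; the
 * first read transparently allocates buffers, maps them and starts
 * streaming. The "foo" names are hypothetical.
 *
 *	static ssize_t foo_fop_read(struct file *file, char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		struct foo_dev *dev = video_drvdata(file);
 *
 *		return vb2_read(&dev->queue, buf, count, ppos,
 *				file->f_flags & O_NONBLOCK);
 *	}
 */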

struct vb2_threadio_data {
	struct task_struct *thread;
	vb2_thread_fnc fnc;
	void *priv;
	bool stop;
};

static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	bool copy_timestamp = false;
	unsigned prequeue = 0;
	unsigned index = 0;
	int ret = 0;

	if (q->is_output) {
		prequeue = q->num_buffers;
		copy_timestamp = q->copy_timestamp;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		if (prequeue) {
			vb = q->bufs[index++];
			prequeue--;
		} else {
			call_void_qop(q, wait_finish, q);
			if (!threadio->stop)
				ret = vb2_core_dqbuf(q, &index, NULL, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
			if (!ret)
				vb = q->bufs[index];
		}
		if (ret || threadio->stop)
			break;
		try_to_freeze();

		if (vb->state != VB2_BUF_STATE_ERROR)
			if (threadio->fnc(vb, threadio->priv))
				break;
		call_void_qop(q, wait_finish, q);
		if (copy_timestamp)
			vb->timestamp = ktime_get_ns();
		if (!threadio->stop)
			ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
		call_void_qop(q, wait_prepare, q);
		if (ret || threadio->stop)
			break;
	}

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

/*
 * This function should not be used for anything else but the videobuf2-dvb
 * support. If you think you have another good use-case for this, then please
 * contact the linux-media mailinglist first.
 */
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
		     const char *thread_name)
{
	struct vb2_threadio_data *threadio;
	int ret = 0;

	if (q->threadio)
		return -EBUSY;
	if (vb2_is_busy(q))
		return -EBUSY;
	if (WARN_ON(q->fileio))
		return -EBUSY;

	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
	if (threadio == NULL)
		return -ENOMEM;
	threadio->fnc = fnc;
	threadio->priv = priv;

	ret = __vb2_init_fileio(q, !q->is_output);
	dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
	if (ret)
		goto nomem;
	q->threadio = threadio;
	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
	if (IS_ERR(threadio->thread)) {
		ret = PTR_ERR(threadio->thread);
		threadio->thread = NULL;
		goto nothread;
	}
	return 0;

nothread:
	__vb2_cleanup_fileio(q);
nomem:
	kfree(threadio);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_thread_start);
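
/*
 * Example (illustrative sketch, not part of this framework): the DVB
 * glue feeds each dequeued buffer to a callback; something equivalent
 * to the following runs when the first client starts streaming. The
 * "foo" names are hypothetical.
 *
 *	static int foo_feed(struct vb2_buffer *vb, void *priv)
 *	{
 *		struct foo_demux *dmx = priv;
 *
 *		// Return nonzero to stop the thread.
 *		return foo_push_payload(dmx, vb2_plane_vaddr(vb, 0),
 *					vb2_get_plane_payload(vb, 0));
 *	}
 *
 *	ret = vb2_thread_start(q, foo_feed, dmx, "foo-dvb");
 *	...
 *	vb2_thread_stop(q);
 */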

int vb2_thread_stop(struct vb2_queue *q)
{
	struct vb2_threadio_data *threadio = q->threadio;
	int err;

	if (threadio == NULL)
		return 0;
	threadio->stop = true;
	/* Wake up all pending sleeps in the thread */
	vb2_queue_error(q);
	err = kthread_stop(threadio->thread);
	__vb2_cleanup_fileio(q);
	threadio->thread = NULL;
	kfree(threadio);
	q->threadio = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(vb2_thread_stop);

MODULE_DESCRIPTION("Media buffer core framework");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");