1 /*
2  * Copyright (C) 2014 Collabora Ltd.
3  *     Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Library General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Library General Public License for more details.
14  *
15  * You should have received a copy of the GNU Library General Public
16  * License along with this library; if not, write to the
17  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18  * Boston, MA 02110-1301, USA.
19  *
20  */
21 
22 #include "config.h"
23 
24 #ifndef _GNU_SOURCE
25 # define _GNU_SOURCE            /* O_CLOEXEC */
26 #endif
27 
28 #include "ext/videodev2.h"
29 
30 #include "gstv4l2object.h"
31 #include "gstv4l2allocator.h"
32 
33 #include <gst/allocators/gstdmabuf.h>
34 
35 #include <fcntl.h>
36 #include <string.h>
37 #include <sys/stat.h>
38 #include <sys/types.h>
39 #include <sys/mman.h>
40 #include <unistd.h>
41 
42 #define GST_V4L2_MEMORY_TYPE "V4l2Memory"
43 
44 #define gst_v4l2_allocator_parent_class parent_class
45 G_DEFINE_TYPE (GstV4l2Allocator, gst_v4l2_allocator, GST_TYPE_ALLOCATOR);
46 
47 GST_DEBUG_CATEGORY_STATIC (v4l2allocator_debug);
48 #define GST_CAT_DEFAULT v4l2allocator_debug
49 
50 #define UNSET_QUEUED(buffer) \
51     ((buffer).flags &= ~(V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))
52 
53 #define SET_QUEUED(buffer) ((buffer).flags |= V4L2_BUF_FLAG_QUEUED)
54 
55 #define IS_QUEUED(buffer) \
56     ((buffer).flags & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))
57 
58 enum
59 {
60   GROUP_RELEASED,
61   LAST_SIGNAL
62 };
63 
64 static guint gst_v4l2_allocator_signals[LAST_SIGNAL] = { 0 };
65 
66 static void gst_v4l2_allocator_release (GstV4l2Allocator * allocator,
67     GstV4l2Memory * mem);
68 
69 static const gchar *
70 memory_type_to_str (guint32 memory)
71 {
72   switch (memory) {
73     case V4L2_MEMORY_MMAP:
74       return "mmap";
75     case V4L2_MEMORY_USERPTR:
76       return "userptr";
77     case V4L2_MEMORY_DMABUF:
78       return "dmabuf";
79     default:
80       return "unknown";
81   }
82 }
83 
84 /*************************************/
85 /* GstV4l2Memory implementation */
86 /*************************************/
87 
88 static gpointer
89 _v4l2mem_map (GstV4l2Memory * mem, gsize maxsize, GstMapFlags flags)
90 {
91   gpointer data = NULL;
92 
93   switch (mem->group->buffer.memory) {
94     case V4L2_MEMORY_MMAP:
95     case V4L2_MEMORY_USERPTR:
96       data = mem->data;
97       break;
98     case V4L2_MEMORY_DMABUF:
99       /* v4l2 dmabuf memory is not shared with downstream */
100       g_assert_not_reached ();
101       break;
102     default:
103       GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
104       break;
105   }
106   return data;
107 }
108 
109 static gboolean
110 _v4l2mem_unmap (GstV4l2Memory * mem)
111 {
112   gboolean ret = FALSE;
113 
114   switch (mem->group->buffer.memory) {
115     case V4L2_MEMORY_MMAP:
116     case V4L2_MEMORY_USERPTR:
117       ret = TRUE;
118       break;
119     case V4L2_MEMORY_DMABUF:
120       /* v4l2 dmabuf memory is not shared with downstream */
121       g_assert_not_reached ();
122       break;
123     default:
124       GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
125       break;
126   }
127   return ret;
128 }
129 
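/* Dispose hook for unparented GstV4l2Memory: when the group still has a slot
 * for this plane, the memory is revived (re-reffed), stored back into the
 * group and released to the allocator instead of being destroyed. */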
130 static gboolean
131 _v4l2mem_dispose (GstV4l2Memory * mem)
132 {
133   GstV4l2Allocator *allocator = (GstV4l2Allocator *) mem->mem.allocator;
134   GstV4l2MemoryGroup *group = mem->group;
135   gboolean ret;
136 
137   if (group->mem[mem->plane]) {
138     /* We may have a dmabuf here; replace it with the returned original memory */
139     group->mem[mem->plane] = gst_memory_ref ((GstMemory *) mem);
140     gst_v4l2_allocator_release (allocator, mem);
141     ret = FALSE;
142   } else {
143     gst_object_ref (allocator);
144     ret = TRUE;
145   }
146 
147   return ret;
148 }
149 
150 static inline GstV4l2Memory *
151 _v4l2mem_new (GstMemoryFlags flags, GstAllocator * allocator,
152     GstMemory * parent, gsize maxsize, gsize align, gsize offset, gsize size,
153     gint plane, gpointer data, int dmafd, GstV4l2MemoryGroup * group)
154 {
155   GstV4l2Memory *mem;
156 
157   mem = g_slice_new0 (GstV4l2Memory);
158   gst_memory_init (GST_MEMORY_CAST (mem),
159       flags, allocator, parent, maxsize, align, offset, size);
160 
161   if (parent == NULL)
162     mem->mem.mini_object.dispose =
163         (GstMiniObjectDisposeFunction) _v4l2mem_dispose;
164 
165   mem->plane = plane;
166   mem->data = data;
167   mem->dmafd = dmafd;
168   mem->group = group;
169 
170   return mem;
171 }
172 
173 static GstV4l2Memory *
174 _v4l2mem_share (GstV4l2Memory * mem, gssize offset, gsize size)
175 {
176   GstV4l2Memory *sub;
177   GstMemory *parent;
178 
179   /* find the real parent */
180   if ((parent = mem->mem.parent) == NULL)
181     parent = (GstMemory *) mem;
182 
183   if (size == -1)
184     size = mem->mem.size - offset;
185 
186   /* the shared memory is always readonly */
187   sub = _v4l2mem_new (GST_MINI_OBJECT_FLAGS (parent) |
188       GST_MINI_OBJECT_FLAG_LOCK_READONLY, mem->mem.allocator, parent,
189       mem->mem.maxsize, mem->mem.align, offset, size, mem->plane, mem->data,
190       -1, mem->group);
191 
192   return sub;
193 }
194 
195 static gboolean
196 _v4l2mem_is_span (GstV4l2Memory * mem1, GstV4l2Memory * mem2, gsize * offset)
197 {
198   if (offset)
199     *offset = mem1->mem.offset - mem1->mem.parent->offset;
200 
201   /* and memory is contiguous */
202   return mem1->mem.offset + mem1->mem.size == mem2->mem.offset;
203 }
204 
205 gboolean
206 gst_is_v4l2_memory (GstMemory * mem)
207 {
208   return gst_memory_is_type (mem, GST_V4L2_MEMORY_TYPE);
209 }
210 
211 GQuark
212 gst_v4l2_memory_quark (void)
213 {
214   static GQuark quark = 0;
215 
216   if (quark == 0)
217     quark = g_quark_from_string ("GstV4l2Memory");
218 
219   return quark;
220 }
221 
222 
223 /*************************************/
224 /* GstV4l2MemoryGroup implementation */
225 /*************************************/
226 
227 static void
228 gst_v4l2_memory_group_free (GstV4l2MemoryGroup * group)
229 {
230   gint i;
231 
232   for (i = 0; i < group->n_mem; i++) {
233     GstMemory *mem = group->mem[i];
234     group->mem[i] = NULL;
235     if (mem)
236       gst_memory_unref (mem);
237   }
238 
239   g_slice_free (GstV4l2MemoryGroup, group);
240 }
241 
242 static GstV4l2MemoryGroup *
243 gst_v4l2_memory_group_new (GstV4l2Allocator * allocator, guint32 index)
244 {
245   GstV4l2Object *obj = allocator->obj;
246   guint32 memory = allocator->memory;
247   struct v4l2_format *format = &obj->format;
248   GstV4l2MemoryGroup *group;
249   gsize img_size, buf_size;
250 
251   group = g_slice_new0 (GstV4l2MemoryGroup);
252 
253   group->buffer.type = format->type;
254   group->buffer.index = index;
255   group->buffer.memory = memory;
256 
257   if (V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
258     group->n_mem = group->buffer.length = format->fmt.pix_mp.num_planes;
259     group->buffer.m.planes = group->planes;
260   } else {
261     group->n_mem = 1;
262   }
263 
264   if (obj->ioctl (obj->video_fd, VIDIOC_QUERYBUF, &group->buffer) < 0)
265     goto querybuf_failed;
266 
267   if (group->buffer.index != index) {
268     GST_ERROR_OBJECT (allocator, "Buffer index returned by VIDIOC_QUERYBUF "
269         "didn't match, this indicate the presence of a bug in your driver or "
270         "libv4l2");
271     g_slice_free (GstV4l2MemoryGroup, group);
272     return NULL;
273   }
274 
275   /* Check that the provided size matches the format we have negotiated. Failing
276    * here usually means a bug in the driver or libv4l. */
277   if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
278     gint i;
279 
280     for (i = 0; i < group->n_mem; i++) {
281       img_size = obj->format.fmt.pix_mp.plane_fmt[i].sizeimage;
282       buf_size = group->planes[i].length;
283       if (buf_size < img_size)
284         goto buffer_too_short;
285     }
286   } else {
287     img_size = obj->format.fmt.pix.sizeimage;
288     buf_size = group->buffer.length;
289     if (buf_size < img_size)
290       goto buffer_too_short;
291   }
292 
293   /* We save non-planar buffer information into the multi-planar plane array
294    * to avoid duplicating the code later */
295   if (!V4L2_TYPE_IS_MULTIPLANAR (format->type)) {
296     group->planes[0].bytesused = group->buffer.bytesused;
297     group->planes[0].length = group->buffer.length;
298     group->planes[0].data_offset = 0;
299     g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
300     memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
301   }
302 
303   GST_LOG_OBJECT (allocator, "Got %s buffer", memory_type_to_str (memory));
304   GST_LOG_OBJECT (allocator, "  index:     %u", group->buffer.index);
305   GST_LOG_OBJECT (allocator, "  type:      %d", group->buffer.type);
306   GST_LOG_OBJECT (allocator, "  flags:     %08x", group->buffer.flags);
307   GST_LOG_OBJECT (allocator, "  field:     %d", group->buffer.field);
308   GST_LOG_OBJECT (allocator, "  memory:    %d", group->buffer.memory);
309   GST_LOG_OBJECT (allocator, "  planes:    %d", group->n_mem);
310 
311 #ifndef GST_DISABLE_GST_DEBUG
312   if (memory == V4L2_MEMORY_MMAP) {
313     gint i;
314     for (i = 0; i < group->n_mem; i++) {
315       GST_LOG_OBJECT (allocator,
316           "  [%u] bytesused: %u, length: %u, offset: %u", i,
317           group->planes[i].bytesused, group->planes[i].length,
318           group->planes[i].data_offset);
319       GST_LOG_OBJECT (allocator, "  [%u] MMAP offset:  %u", i,
320           group->planes[i].m.mem_offset);
321     }
322   }
323 #endif
324 
325   return group;
326 
327 querybuf_failed:
328   {
329     GST_ERROR ("error querying buffer %d: %s", index, g_strerror (errno));
330     goto failed;
331   }
332 buffer_too_short:
333   {
334     GST_ERROR ("buffer size %" G_GSIZE_FORMAT
335         " is smaller then negotiated size %" G_GSIZE_FORMAT
336         ", this is usually the result of a bug in the v4l2 driver or libv4l.",
337         buf_size, img_size);
338     goto failed;
339   }
340 failed:
341   gst_v4l2_memory_group_free (group);
342   return NULL;
343 }
344 
345 
346 /*************************************/
347 /* GstV4l2Allocator implementation   */
348 /*************************************/
349 
350 static void
351 gst_v4l2_allocator_release (GstV4l2Allocator * allocator, GstV4l2Memory * mem)
352 {
353   GstV4l2MemoryGroup *group = mem->group;
354 
355   GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
356       mem->plane, group->buffer.index);
357 
358   switch (allocator->memory) {
359     case V4L2_MEMORY_DMABUF:
360       close (mem->dmafd);
361       mem->dmafd = -1;
362       break;
363     case V4L2_MEMORY_USERPTR:
364       mem->data = NULL;
365       break;
366     default:
367       break;
368   }
369 
370   /* When all the memory is back, put the group back in the free queue */
371   if (g_atomic_int_dec_and_test (&group->mems_allocated)) {
372     GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
373     gst_atomic_queue_push (allocator->free_queue, group);
374     g_signal_emit (allocator, gst_v4l2_allocator_signals[GROUP_RELEASED], 0);
375   }
376 
377   /* Keep last, allocator may be freed after this call */
378   g_object_unref (allocator);
379 }
380 
381 static void
382 gst_v4l2_allocator_free (GstAllocator * gallocator, GstMemory * gmem)
383 {
384   GstV4l2Allocator *allocator = (GstV4l2Allocator *) gallocator;
385   GstV4l2Object *obj = allocator->obj;
386   GstV4l2Memory *mem = (GstV4l2Memory *) gmem;
387   GstV4l2MemoryGroup *group = mem->group;
388 
389   /* Only free unparented memory */
390   if (mem->mem.parent == NULL) {
391     GST_LOG_OBJECT (allocator, "freeing plane %i of buffer %u",
392         mem->plane, group->buffer.index);
393 
394     if (allocator->memory == V4L2_MEMORY_MMAP) {
395       if (mem->data)
396         obj->munmap (mem->data, group->planes[mem->plane].length);
397     }
398 
399     /* This applies to both mmap with expbuf and dmabuf-imported memory */
400     if (mem->dmafd >= 0)
401       close (mem->dmafd);
402   }
403 
404   g_slice_free (GstV4l2Memory, mem);
405 }
406 
407 static void
408 gst_v4l2_allocator_dispose (GObject * obj)
409 {
410   GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;
411   gint i;
412 
413   GST_LOG_OBJECT (obj, "called");
414 
415   for (i = 0; i < allocator->count; i++) {
416     GstV4l2MemoryGroup *group = allocator->groups[i];
417     allocator->groups[i] = NULL;
418     if (group)
419       gst_v4l2_memory_group_free (group);
420   }
421 
422   G_OBJECT_CLASS (parent_class)->dispose (obj);
423 }
424 
425 static void
426 gst_v4l2_allocator_finalize (GObject * obj)
427 {
428   GstV4l2Allocator *allocator = (GstV4l2Allocator *) obj;
429 
430   GST_LOG_OBJECT (obj, "called");
431 
432   gst_atomic_queue_unref (allocator->free_queue);
433   gst_object_unref (allocator->obj->element);
434 
435   G_OBJECT_CLASS (parent_class)->finalize (obj);
436 }
437 
438 static void
439 gst_v4l2_allocator_class_init (GstV4l2AllocatorClass * klass)
440 {
441   GObjectClass *object_class;
442   GstAllocatorClass *allocator_class;
443 
444   allocator_class = (GstAllocatorClass *) klass;
445   object_class = (GObjectClass *) klass;
446 
447   allocator_class->alloc = NULL;
448   allocator_class->free = gst_v4l2_allocator_free;
449 
450   object_class->dispose = gst_v4l2_allocator_dispose;
451   object_class->finalize = gst_v4l2_allocator_finalize;
452 
453   gst_v4l2_allocator_signals[GROUP_RELEASED] = g_signal_new ("group-released",
454       G_TYPE_FROM_CLASS (object_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, NULL,
455       G_TYPE_NONE, 0);
456 
457   GST_DEBUG_CATEGORY_INIT (v4l2allocator_debug, "v4l2allocator", 0,
458       "V4L2 Allocator");
459 }
460 
461 static void
462 gst_v4l2_allocator_init (GstV4l2Allocator * allocator)
463 {
464   GstAllocator *alloc = GST_ALLOCATOR_CAST (allocator);
465 
466   alloc->mem_type = GST_V4L2_MEMORY_TYPE;
467   alloc->mem_map = (GstMemoryMapFunction) _v4l2mem_map;
468   alloc->mem_unmap = (GstMemoryUnmapFunction) _v4l2mem_unmap;
469   alloc->mem_share = (GstMemoryShareFunction) _v4l2mem_share;
470   alloc->mem_is_span = (GstMemoryIsSpanFunction) _v4l2mem_is_span;
471   /* Use the default, fallback copy function */
472 
473   allocator->free_queue = gst_atomic_queue_new (VIDEO_MAX_FRAME);
474 
475   GST_OBJECT_FLAG_SET (allocator, GST_ALLOCATOR_FLAG_CUSTOM_ALLOC);
476 }
477 
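/* Probe whether the driver supports a given memory type by issuing a
 * zero-count VIDIOC_REQBUFS and, on success, a zero-count VIDIOC_CREATE_BUFS.
 * The result is reported as allocator capability flags. */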
478 #define GST_V4L2_ALLOCATOR_PROBE(obj,type) \
479     gst_v4l2_allocator_probe ((obj), V4L2_MEMORY_ ## type, \
480         GST_V4L2_ALLOCATOR_FLAG_ ## type ## _REQBUFS, \
481         GST_V4L2_ALLOCATOR_FLAG_ ## type ## _CREATE_BUFS)
482 static guint32
483 gst_v4l2_allocator_probe (GstV4l2Allocator * allocator, guint32 memory,
484     guint32 breq_flag, guint32 bcreate_flag)
485 {
486   GstV4l2Object *obj = allocator->obj;
487   struct v4l2_requestbuffers breq = { 0 };
488   guint32 flags = 0;
489 
490   breq.type = obj->type;
491   breq.count = 0;
492   breq.memory = memory;
493 
494   if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) == 0) {
495     struct v4l2_create_buffers bcreate = { 0 };
496 
497     flags |= breq_flag;
498 
499     bcreate.memory = memory;
500     bcreate.format = obj->format;
501 
502     if ((obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) == 0))
503       flags |= bcreate_flag;
504   }
505 
506   if (breq.capabilities & V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS)
507     flags |= GST_V4L2_ALLOCATOR_FLAG_SUPPORTS_ORPHANED_BUFS;
508 
509   return flags;
510 }
511 
512 static GstV4l2MemoryGroup *
513 gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
514 {
515   GstV4l2Object *obj = allocator->obj;
516   struct v4l2_create_buffers bcreate = { 0 };
517   GstV4l2MemoryGroup *group = NULL;
518 
519   GST_OBJECT_LOCK (allocator);
520 
521   if (!g_atomic_int_get (&allocator->active))
522     goto done;
523 
524   if (GST_V4L2_ALLOCATOR_IS_ORPHANED (allocator))
525     goto orphaned_bug;
526 
527   bcreate.memory = allocator->memory;
528   bcreate.format = obj->format;
529   bcreate.count = 1;
530 
531   if (!allocator->can_allocate)
532     goto done;
533 
534   if (obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
535     goto create_bufs_failed;
536 
537   if (allocator->groups[bcreate.index] != NULL)
538     goto create_bufs_bug;
539 
540   group = gst_v4l2_memory_group_new (allocator, bcreate.index);
541 
542   if (group) {
543     allocator->groups[bcreate.index] = group;
544     allocator->count++;
545   }
546 
547 done:
548   GST_OBJECT_UNLOCK (allocator);
549   return group;
550 
551 orphaned_bug:
552   {
553     GST_ERROR_OBJECT (allocator, "allocator was orphaned, "
554         "not creating new buffers");
555     goto done;
556   }
557 create_bufs_failed:
558   {
559     GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
560         g_strerror (errno));
561     goto done;
562   }
563 create_bufs_bug:
564   {
565     GST_ERROR_OBJECT (allocator, "created buffer has already used buffer "
566         "index %i, this means there is an bug in your driver or libv4l2",
567         bcreate.index);
568     goto done;
569   }
570 }
571 
572 static GstV4l2MemoryGroup *
573 gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
574 {
575   GstV4l2MemoryGroup *group;
576 
577   if (!g_atomic_int_get (&allocator->active))
578     return NULL;
579 
580   group = gst_atomic_queue_pop (allocator->free_queue);
581 
582   if (group == NULL) {
583     if (allocator->can_allocate) {
584       group = gst_v4l2_allocator_create_buf (allocator);
585 
586       /* Don't hammer on CREATE_BUFS */
587       if (group == NULL)
588         allocator->can_allocate = FALSE;
589     }
590   }
591 
592   return group;
593 }
594 
595 static void
596 gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
597     GstV4l2MemoryGroup * group)
598 {
599   gint i;
600   for (i = 0; i < group->n_mem; i++) {
601     group->mem[i]->maxsize = group->planes[i].length;
602     group->mem[i]->offset = 0;
603     group->mem[i]->size = group->planes[i].length;
604   }
605 }
606 
607 static void
608 _cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
609 {
610   if (group->mems_allocated > 0) {
611     gint i;
612     /* If one or more mmap worked, we need to unref the memory, otherwise
613      * they will keep a ref on the allocator and leak it. This will put back
614      * the group into the free_queue */
615     for (i = 0; i < group->n_mem; i++)
616       gst_memory_unref (group->mem[i]);
617   } else {
618     /* Otherwise, group has to be on free queue for _stop() to work */
619     gst_atomic_queue_push (allocator->free_queue, group);
620   }
621 }
622 
623 
624 
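/* Typical usage of this allocator (a sketch only; the callers in
 * gstv4l2bufferpool.c are the authoritative reference):
 *
 *   allocator = gst_v4l2_allocator_new (parent, v4l2object);
 *   count = gst_v4l2_allocator_start (allocator, count, V4L2_MEMORY_MMAP);
 *   group = gst_v4l2_allocator_alloc_mmap (allocator);
 *   gst_v4l2_allocator_qbuf (allocator, group);
 *   ...
 *   gst_v4l2_allocator_dqbuf (allocator, &group);
 *   gst_v4l2_allocator_stop (allocator);
 */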
625 GstV4l2Allocator *
626 gst_v4l2_allocator_new (GstObject * parent, GstV4l2Object * v4l2object)
627 {
628   GstV4l2Allocator *allocator;
629   guint32 flags = 0;
630   gchar *name, *parent_name;
631 
632   parent_name = gst_object_get_name (parent);
633   name = g_strconcat (parent_name, ":allocator", NULL);
634   g_free (parent_name);
635 
636   allocator = g_object_new (GST_TYPE_V4L2_ALLOCATOR, "name", name, NULL);
637   gst_object_ref_sink (allocator);
638   g_free (name);
639 
640   /* Save everything */
641   allocator->obj = v4l2object;
642 
643   /* Keep a ref on the element so obj does not disappear */
644   gst_object_ref (allocator->obj->element);
645 
646   flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, MMAP);
647   flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, USERPTR);
648   flags |= GST_V4L2_ALLOCATOR_PROBE (allocator, DMABUF);
649 
650 
651   if (flags == 0) {
652     /* Drivers not ported from videobuf to videobuf2 don't allow freeing buffers
653      * using REQBUFS(0). This is a workaround to still support these drivers,
654      * which are known to have MMAP support. */
655     GST_WARNING_OBJECT (allocator, "Could not probe supported memory type, "
656         "assuming MMAP is supported, this is expected for older drivers not "
657         " yet ported to videobuf2 framework");
658     flags = GST_V4L2_ALLOCATOR_FLAG_MMAP_REQBUFS;
659   }
660 
661   GST_OBJECT_FLAG_SET (allocator, flags);
662 
663   return allocator;
664 }
665 
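/* Request `count` buffers of the given memory type from the driver with
 * VIDIOC_REQBUFS, create a memory group per buffer and push each group on the
 * free queue. Returns the number of buffers actually granted, 0 on error. */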
666 guint
667 gst_v4l2_allocator_start (GstV4l2Allocator * allocator, guint32 count,
668     guint32 memory)
669 {
670   GstV4l2Object *obj = allocator->obj;
671   struct v4l2_requestbuffers breq = { count, obj->type, memory };
672   gboolean can_allocate;
673   gint i;
674 
675   g_return_val_if_fail (count != 0, 0);
676 
677   GST_OBJECT_LOCK (allocator);
678 
679   if (g_atomic_int_get (&allocator->active))
680     goto already_active;
681 
682   if (GST_V4L2_ALLOCATOR_IS_ORPHANED (allocator))
683     goto orphaned;
684 
685   if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
686     goto reqbufs_failed;
687 
688   if (breq.count < 1)
689     goto out_of_memory;
690 
691   switch (memory) {
692     case V4L2_MEMORY_MMAP:
693       can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, MMAP);
694       break;
695     case V4L2_MEMORY_USERPTR:
696       can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, USERPTR);
697       break;
698     case V4L2_MEMORY_DMABUF:
699       can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (allocator, DMABUF);
700       break;
701     default:
702       can_allocate = FALSE;
703       break;
704   }
705 
706   GST_DEBUG_OBJECT (allocator, "allocated %u %s buffers out of %u requested",
707       breq.count, memory_type_to_str (memory), count);
708 
709   allocator->can_allocate = can_allocate;
710   allocator->count = breq.count;
711   allocator->memory = memory;
712 
713   /* Create memory groups */
714   for (i = 0; i < allocator->count; i++) {
715     allocator->groups[i] = gst_v4l2_memory_group_new (allocator, i);
716     if (allocator->groups[i] == NULL)
717       goto error;
718 
719     gst_atomic_queue_push (allocator->free_queue, allocator->groups[i]);
720   }
721 
722   g_atomic_int_set (&allocator->active, TRUE);
723 
724 done:
725   GST_OBJECT_UNLOCK (allocator);
726   return breq.count;
727 
728 already_active:
729   {
730     GST_ERROR_OBJECT (allocator, "allocator already active");
731     goto error;
732   }
733 orphaned:
734   {
735     GST_ERROR_OBJECT (allocator, "allocator was orphaned");
736     goto error;
737   }
738 reqbufs_failed:
739   {
740     GST_ERROR_OBJECT (allocator,
741         "error requesting %d buffers: %s", count, g_strerror (errno));
742     goto error;
743   }
744 out_of_memory:
745   {
746     GST_ERROR_OBJECT (allocator, "Not enough memory to allocate buffers");
747     goto error;
748   }
749 error:
750   {
751     breq.count = 0;
752     goto done;
753   }
754 }
755 
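/* Release all memory groups and ask the driver to drop its buffers with a
 * zero-count VIDIOC_REQBUFS (unless the allocator was orphaned). Returns
 * GST_V4L2_BUSY while some buffers are still outstanding. */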
756 GstV4l2Return
757 gst_v4l2_allocator_stop (GstV4l2Allocator * allocator)
758 {
759   GstV4l2Object *obj = allocator->obj;
760   struct v4l2_requestbuffers breq = { 0, obj->type, allocator->memory };
761   gint i = 0;
762   GstV4l2Return ret = GST_V4L2_OK;
763 
764   GST_DEBUG_OBJECT (allocator, "stop allocator");
765 
766   GST_OBJECT_LOCK (allocator);
767 
768   if (!g_atomic_int_get (&allocator->active))
769     goto done;
770 
771   if (gst_atomic_queue_length (allocator->free_queue) != allocator->count) {
772     GST_DEBUG_OBJECT (allocator, "allocator is still in use");
773     ret = GST_V4L2_BUSY;
774     goto done;
775   }
776 
777   while (gst_atomic_queue_pop (allocator->free_queue)) {
778     /* nothing */
779   };
780 
781   for (i = 0; i < allocator->count; i++) {
782     GstV4l2MemoryGroup *group = allocator->groups[i];
783     allocator->groups[i] = NULL;
784     if (group)
785       gst_v4l2_memory_group_free (group);
786   }
787 
788   if (!GST_V4L2_ALLOCATOR_IS_ORPHANED (allocator)) {
789     /* Not all drivers support reqbufs(0), so warn only */
790     if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
791       GST_WARNING_OBJECT (allocator,
792           "error releasing buffers buffers: %s", g_strerror (errno));
793   }
794 
795   allocator->count = 0;
796 
797   g_atomic_int_set (&allocator->active, FALSE);
798 
799 done:
800   GST_OBJECT_UNLOCK (allocator);
801   return ret;
802 }
803 
804 gboolean
805 gst_v4l2_allocator_orphan (GstV4l2Allocator * allocator)
806 {
807   GstV4l2Object *obj = allocator->obj;
808   struct v4l2_requestbuffers breq = { 0, obj->type, allocator->memory };
809 
810   if (!GST_V4L2_ALLOCATOR_CAN_ORPHAN_BUFS (allocator))
811     return FALSE;
812 
813   GST_OBJECT_FLAG_SET (allocator, GST_V4L2_ALLOCATOR_FLAG_ORPHANED);
814 
815   if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0) {
816     GST_ERROR_OBJECT (allocator,
817         "error orphaning buffers buffers: %s", g_strerror (errno));
818     return FALSE;
819   }
820 
821   return TRUE;
822 }
823 
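/* Allocate (or recycle) a group and mmap() each plane of the underlying
 * V4L2_MEMORY_MMAP buffer into a GstV4l2Memory. */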
824 GstV4l2MemoryGroup *
825 gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
826 {
827   GstV4l2Object *obj = allocator->obj;
828   GstV4l2MemoryGroup *group;
829   gint i;
830 
831   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);
832 
833   group = gst_v4l2_allocator_alloc (allocator);
834 
835   if (group == NULL)
836     return NULL;
837 
838   for (i = 0; i < group->n_mem; i++) {
839     if (group->mem[i] == NULL) {
840       gpointer data;
841       data = obj->mmap (NULL, group->planes[i].length, PROT_READ | PROT_WRITE,
842           MAP_SHARED, obj->video_fd, group->planes[i].m.mem_offset);
843 
844       if (data == MAP_FAILED)
845         goto mmap_failed;
846 
847       GST_LOG_OBJECT (allocator,
848           "mmap buffer length %d, data offset %d, plane %d",
849           group->planes[i].length, group->planes[i].data_offset, i);
850 
851       group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
852           NULL, group->planes[i].length, 0, 0, group->planes[i].length, i, data,
853           -1, group);
854     } else {
855       /* Take back the allocator reference */
856       gst_object_ref (allocator);
857     }
858 
859     group->mems_allocated++;
860   }
861 
862   /* Ensure group size. Unlike GST, v4l2 has the size (bytesused) initially set
863    * to 0. As the length might be bigger than the expected size exposed in the
864    * format, we simply set bytesused initially and reset it here for
865    * simplicity */
866   gst_v4l2_allocator_reset_size (allocator, group);
867 
868   return group;
869 
870 mmap_failed:
871   {
872     GST_ERROR_OBJECT (allocator, "Failed to mmap buffer: %s",
873         g_strerror (errno));
874     _cleanup_failed_alloc (allocator, group);
875     return NULL;
876   }
877 }
878 
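/* Allocate an MMAP group and export every plane as a DMABUF fd with
 * VIDIOC_EXPBUF. Each fd is wrapped by the provided dmabuf allocator; the
 * original GstV4l2Memory stays attached to the wrapper as qdata so the group
 * can be recycled when the wrapper is disposed. */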
879 GstV4l2MemoryGroup *
880 gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator,
881     GstAllocator * dmabuf_allocator)
882 {
883   GstV4l2Object *obj = allocator->obj;
884   GstV4l2MemoryGroup *group;
885   gint i;
886 
887   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);
888 
889   group = gst_v4l2_allocator_alloc (allocator);
890 
891   if (group == NULL)
892     return NULL;
893 
894   for (i = 0; i < group->n_mem; i++) {
895     GstV4l2Memory *mem;
896     GstMemory *dma_mem;
897 
898     if (group->mem[i] == NULL) {
899       struct v4l2_exportbuffer expbuf = { 0 };
900 
901       expbuf.type = obj->type;
902       expbuf.index = group->buffer.index;
903       expbuf.plane = i;
904       expbuf.flags = O_CLOEXEC | O_RDWR;
905 
906       if (obj->ioctl (obj->video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
907         goto expbuf_failed;
908 
909       GST_LOG_OBJECT (allocator, "exported DMABUF as fd %i plane %d",
910           expbuf.fd, i);
911 
912       group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
913           NULL, group->planes[i].length, 0, group->planes[i].data_offset,
914           group->planes[i].length - group->planes[i].data_offset, i, NULL,
915           expbuf.fd, group);
916     } else {
917       /* Take back the allocator reference */
918       gst_object_ref (allocator);
919     }
920 
921     group->mems_allocated++;
922 
923     g_assert (gst_is_v4l2_memory (group->mem[i]));
924     mem = (GstV4l2Memory *) group->mem[i];
925 
926     dma_mem = gst_fd_allocator_alloc (dmabuf_allocator, mem->dmafd,
927         group->planes[i].length, GST_FD_MEMORY_FLAG_DONT_CLOSE);
928     gst_memory_resize (dma_mem, group->planes[i].data_offset,
929         group->planes[i].length - group->planes[i].data_offset);
930 
931     gst_mini_object_set_qdata (GST_MINI_OBJECT (dma_mem),
932         GST_V4L2_MEMORY_QUARK, mem, (GDestroyNotify) gst_memory_unref);
933 
934     group->mem[i] = dma_mem;
935   }
936 
937   gst_v4l2_allocator_reset_size (allocator, group);
938 
939   return group;
940 
941 expbuf_failed:
942   {
943     GST_ERROR_OBJECT (allocator, "Failed to export DMABUF: %s",
944         g_strerror (errno));
945     goto cleanup;
946   }
947 cleanup:
948   {
949     _cleanup_failed_alloc (allocator, group);
950     return NULL;
951   }
952 }
953 
954 static void
955 gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
956     GstV4l2MemoryGroup * group)
957 {
958   GstV4l2Object *obj = allocator->obj;
959   GstV4l2Memory *mem;
960   gint i;
961 
962   g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);
963 
964   for (i = 0; i < group->n_mem; i++) {
965 
966     mem = (GstV4l2Memory *) group->mem[i];
967 
968     GST_LOG_OBJECT (allocator, "[%i] clearing DMABUF import, fd %i plane %d",
969         group->buffer.index, mem->dmafd, i);
970 
971     /* Update memory */
972     mem->mem.maxsize = 0;
973     mem->mem.offset = 0;
974     mem->mem.size = 0;
975     mem->dmafd = -1;
976 
977     /* Update v4l2 structure */
978     group->planes[i].length = 0;
979     group->planes[i].bytesused = 0;
980     group->planes[i].m.fd = -1;
981     group->planes[i].data_offset = 0;
982   }
983 
984   if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
985     group->buffer.bytesused = 0;
986     group->buffer.length = 0;
987     group->buffer.m.fd = -1;
988   }
989 }
990 
991 GstV4l2MemoryGroup *
992 gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
993 {
994   GstV4l2MemoryGroup *group;
995   gint i;
996 
997   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, NULL);
998 
999   group = gst_v4l2_allocator_alloc (allocator);
1000 
1001   if (group == NULL)
1002     return NULL;
1003 
1004   GST_LOG_OBJECT (allocator, "allocating empty DMABUF import group");
1005 
1006   for (i = 0; i < group->n_mem; i++) {
1007     if (group->mem[i] == NULL) {
1008       group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
1009           NULL, 0, 0, 0, 0, i, NULL, -1, group);
1010     } else {
1011       /* Take back the allocator reference */
1012       gst_object_ref (allocator);
1013     }
1014 
1015     group->mems_allocated++;
1016   }
1017 
1018   gst_v4l2_allocator_clear_dmabufin (allocator, group);
1019 
1020   return group;
1021 }
1022 
1023 static void
1024 gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
1025     GstV4l2MemoryGroup * group)
1026 {
1027   GstV4l2Object *obj = allocator->obj;
1028   GstV4l2Memory *mem;
1029   gint i;
1030 
1031   g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);
1032 
1033   for (i = 0; i < group->n_mem; i++) {
1034     mem = (GstV4l2Memory *) group->mem[i];
1035 
1036     GST_LOG_OBJECT (allocator, "[%i] clearing USERPTR %p plane %d size %"
1037         G_GSIZE_FORMAT, group->buffer.index, mem->data, i, mem->mem.size);
1038 
1039     mem->mem.maxsize = 0;
1040     mem->mem.size = 0;
1041     mem->data = NULL;
1042 
1043     group->planes[i].length = 0;
1044     group->planes[i].bytesused = 0;
1045     group->planes[i].m.userptr = 0;
1046   }
1047 
1048   if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1049     group->buffer.bytesused = 0;
1050     group->buffer.length = 0;
1051     group->buffer.m.userptr = 0;
1052   }
1053 }
1054 
1055 GstV4l2MemoryGroup *
1056 gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator)
1057 {
1058   GstV4l2MemoryGroup *group;
1059   gint i;
1060 
1061   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, NULL);
1062 
1063   group = gst_v4l2_allocator_alloc (allocator);
1064 
1065   if (group == NULL)
1066     return NULL;
1067 
1068   GST_LOG_OBJECT (allocator, "allocating empty USERPTR group");
1069 
1070   for (i = 0; i < group->n_mem; i++) {
1071 
1072     if (group->mem[i] == NULL) {
1073       group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
1074           NULL, 0, 0, 0, 0, i, NULL, -1, group);
1075     } else {
1076       /* Take back the allocator reference */
1077       gst_object_ref (allocator);
1078     }
1079 
1080     group->mems_allocated++;
1081   }
1082 
1083   gst_v4l2_allocator_clear_userptr (allocator, group);
1084 
1085   return group;
1086 }
1087 
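/* Import downstream-provided DMABUF memories into a previously allocated
 * import group, one GstMemory per plane, updating both the GstMemory sizes
 * and the v4l2 plane descriptions. */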
1088 gboolean
1089 gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
1090     GstV4l2MemoryGroup * group, gint n_mem, GstMemory ** dma_mem)
1091 {
1092   GstV4l2Object *obj = allocator->obj;
1093   GstV4l2Memory *mem;
1094   gint i;
1095 
1096   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, FALSE);
1097 
1098   if (group->n_mem != n_mem)
1099     goto n_mem_missmatch;
1100 
1101   for (i = 0; i < group->n_mem; i++) {
1102     gint dmafd;
1103     gsize size, offset, maxsize;
1104 
1105     if (!gst_is_dmabuf_memory (dma_mem[i]))
1106       goto not_dmabuf;
1107 
1108     size = gst_memory_get_sizes (dma_mem[i], &offset, &maxsize);
1109 
1110     dmafd = gst_dmabuf_memory_get_fd (dma_mem[i]);
1111 
1112     GST_LOG_OBJECT (allocator, "[%i] imported DMABUF as fd %i plane %d",
1113         group->buffer.index, dmafd, i);
1114 
1115     mem = (GstV4l2Memory *) group->mem[i];
1116 
1117     /* Update memory */
1118     mem->mem.maxsize = maxsize;
1119     mem->mem.offset = offset;
1120     mem->mem.size = size;
1121     mem->dmafd = dmafd;
1122 
1123     /* Update v4l2 structure */
1124     group->planes[i].length = maxsize;
1125     group->planes[i].bytesused = size + offset;
1126     group->planes[i].m.fd = dmafd;
1127     group->planes[i].data_offset = offset;
1128   }
1129 
1130   /* Copy into buffer structure if not using planes */
1131   if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1132     group->buffer.bytesused = group->planes[0].bytesused;
1133     group->buffer.length = group->planes[0].length;
1134     group->buffer.m.fd = group->planes[0].m.fd;
1135 
1136     /* FIXME Check if data_offset > 0 and fail for non-multi-planar */
1137     g_assert (group->planes[0].data_offset == 0);
1138   } else {
1139     group->buffer.length = group->n_mem;
1140   }
1141 
1142   return TRUE;
1143 
1144 n_mem_missmatch:
1145   {
1146     GST_ERROR_OBJECT (allocator, "Got %i dmabuf but needed %i", n_mem,
1147         group->n_mem);
1148     return FALSE;
1149   }
1150 not_dmabuf:
1151   {
1152     GST_ERROR_OBJECT (allocator, "Memory %i is not of DMABUF", i);
1153     return FALSE;
1154   }
1155 }
1156 
1157 gboolean
1158 gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
1159     GstV4l2MemoryGroup * group, gsize img_size, int n_planes,
1160     gpointer * data, gsize * size)
1161 {
1162   GstV4l2Object *obj = allocator->obj;
1163   GstV4l2Memory *mem;
1164   gint i;
1165 
1166   g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, FALSE);
1167 
1168   /* TODO Support passing N planes from 1 memory to MPLANE v4l2 format */
1169   if (V4L2_TYPE_IS_MULTIPLANAR (obj->type) && n_planes != group->n_mem)
1170     goto n_mem_missmatch;
1171 
1172   for (i = 0; i < group->n_mem; i++) {
1173     gsize maxsize, psize;
1174 
1175     /* TODO request used size and maxsize separately */
1176     if (V4L2_TYPE_IS_MULTIPLANAR (obj->type))
1177       maxsize = psize = size[i];
1178     else
1179       maxsize = psize = img_size;
1180 
1181     g_assert (psize <= img_size);
1182 
1183     GST_LOG_OBJECT (allocator, "[%i] imported USERPTR %p plane %d size %"
1184         G_GSIZE_FORMAT, group->buffer.index, data[i], i, psize);
1185 
1186     mem = (GstV4l2Memory *) group->mem[i];
1187 
1188     mem->mem.maxsize = maxsize;
1189     mem->mem.size = psize;
1190     mem->data = data[i];
1191 
1192     group->planes[i].length = maxsize;
1193     group->planes[i].bytesused = psize;
1194     group->planes[i].m.userptr = (unsigned long) data[i];
1195     group->planes[i].data_offset = 0;
1196   }
1197 
1198   /* Copy into buffer structure if not using planes */
1199   if (!V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1200     group->buffer.bytesused = group->planes[0].bytesused;
1201     group->buffer.length = group->planes[0].length;
1202     group->buffer.m.userptr = group->planes[0].m.userptr;
1203   } else {
1204     group->buffer.length = group->n_mem;
1205   }
1206 
1207   return TRUE;
1208 
1209 n_mem_missmatch:
1210   {
1211     GST_ERROR_OBJECT (allocator, "Got %i userptr plane while driver need %i",
1212         n_planes, group->n_mem);
1213     return FALSE;
1214   }
1215 }
1216 
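/* Drop the QUEUED state of every group and give the associated memory back,
 * typically called once streaming has stopped and the driver no longer owns
 * the queued buffers. */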
1217 void
1218 gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)
1219 {
1220   gint i;
1221 
1222   GST_OBJECT_LOCK (allocator);
1223 
1224   if (!g_atomic_int_get (&allocator->active))
1225     goto done;
1226 
1227   for (i = 0; i < allocator->count; i++) {
1228     GstV4l2MemoryGroup *group = allocator->groups[i];
1229     gint n;
1230 
1231     if (IS_QUEUED (group->buffer)) {
1232       UNSET_QUEUED (group->buffer);
1233 
1234       gst_v4l2_allocator_reset_group (allocator, group);
1235 
1236       for (n = 0; n < group->n_mem; n++)
1237         gst_memory_unref (group->mem[n]);
1238     }
1239   }
1240 
1241 done:
1242   GST_OBJECT_UNLOCK (allocator);
1243 }
1244 
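/* Queue a group on the driver with VIDIOC_QBUF. The memories are reffed for
 * as long as the driver owns the buffer and unreffed again if queueing fails. */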
1245 gboolean
1246 gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
1247     GstV4l2MemoryGroup * group)
1248 {
1249   GstV4l2Object *obj = allocator->obj;
1250   gboolean ret = TRUE;
1251   gint i;
1252 
1253   g_return_val_if_fail (g_atomic_int_get (&allocator->active), FALSE);
1254 
1255   /* update sizes */
1256   if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1257     for (i = 0; i < group->n_mem; i++)
1258       group->planes[i].bytesused =
1259           gst_memory_get_sizes (group->mem[i], NULL, NULL);
1260   } else {
1261     group->buffer.bytesused = gst_memory_get_sizes (group->mem[0], NULL, NULL);
1262   }
1263 
1264   /* Ensure the memory will stay around and is RO */
1265   for (i = 0; i < group->n_mem; i++)
1266     gst_memory_ref (group->mem[i]);
1267 
1268   if (obj->ioctl (obj->video_fd, VIDIOC_QBUF, &group->buffer) < 0) {
1269     GST_ERROR_OBJECT (allocator, "failed queueing buffer %i: %s",
1270         group->buffer.index, g_strerror (errno));
1271 
1272     /* Release the memory, possibly making it RW again */
1273     for (i = 0; i < group->n_mem; i++)
1274       gst_memory_unref (group->mem[i]);
1275 
1276     ret = FALSE;
1277     if (IS_QUEUED (group->buffer)) {
1278       GST_DEBUG_OBJECT (allocator,
1279           "driver pretends buffer is queued even if queue failed");
1280       UNSET_QUEUED (group->buffer);
1281     }
1282     goto done;
1283   }
1284 
1285   GST_LOG_OBJECT (allocator, "queued buffer %i (flags 0x%X)",
1286       group->buffer.index, group->buffer.flags);
1287 
1288   if (!IS_QUEUED (group->buffer)) {
1289     GST_DEBUG_OBJECT (allocator,
1290         "driver pretends buffer is not queued even if queue succeeded");
1291     SET_QUEUED (group->buffer);
1292   }
1293 
1294 done:
1295   return ret;
1296 }
1297 
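/* Dequeue the next buffer with VIDIOC_DQBUF, resync the group's v4l2 plane
 * information and resize the GstMemory objects to the payload the driver
 * reports. Returns GST_FLOW_EOS when the driver signals the last buffer with
 * EPIPE. */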
1298 GstFlowReturn
1299 gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator,
1300     GstV4l2MemoryGroup ** group_out)
1301 {
1302   GstV4l2Object *obj = allocator->obj;
1303   struct v4l2_buffer buffer = { 0 };
1304   struct v4l2_plane planes[VIDEO_MAX_PLANES] = { {0} };
1305   gint i;
1306 
1307   GstV4l2MemoryGroup *group = NULL;
1308 
1309   g_return_val_if_fail (g_atomic_int_get (&allocator->active), GST_FLOW_ERROR);
1310 
1311   buffer.type = obj->type;
1312   buffer.memory = allocator->memory;
1313 
1314   if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1315     buffer.length = obj->format.fmt.pix_mp.num_planes;
1316     buffer.m.planes = planes;
1317   }
1318 
1319   if (obj->ioctl (obj->video_fd, VIDIOC_DQBUF, &buffer) < 0)
1320     goto error;
1321 
1322   group = allocator->groups[buffer.index];
1323 
1324   if (!IS_QUEUED (group->buffer)) {
1325     GST_ERROR_OBJECT (allocator,
1326         "buffer %i was not queued, this indicate a driver bug.", buffer.index);
1327     return GST_FLOW_ERROR;
1328   }
1329 
1330   group->buffer = buffer;
1331 
1332   GST_LOG_OBJECT (allocator, "dequeued buffer %i (flags 0x%X)", buffer.index,
1333       buffer.flags);
1334 
1335   if (IS_QUEUED (group->buffer)) {
1336     GST_DEBUG_OBJECT (allocator,
1337         "driver pretends buffer is queued even if dequeue succeeded");
1338     UNSET_QUEUED (group->buffer);
1339   }
1340 
1341   if (V4L2_TYPE_IS_MULTIPLANAR (obj->type)) {
1342     group->buffer.m.planes = group->planes;
1343     memcpy (group->planes, buffer.m.planes, sizeof (planes));
1344   } else {
1345     group->planes[0].bytesused = group->buffer.bytesused;
1346     group->planes[0].length = group->buffer.length;
1347     g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
1348     memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
1349   }
1350 
1351   /* And update memory size */
1352   if (V4L2_TYPE_IS_OUTPUT (obj->type)) {
1353     gst_v4l2_allocator_reset_size (allocator, group);
1354   } else {
1355     /* for capture, simply read the size */
1356     for (i = 0; i < group->n_mem; i++) {
1357       gsize size, offset;
1358 
1359       GST_LOG_OBJECT (allocator,
1360           "Dequeued capture buffer, length: %u bytesused: %u data_offset: %u",
1361           group->planes[i].length, group->planes[i].bytesused,
1362           group->planes[i].data_offset);
1363 
1364       offset = group->planes[i].data_offset;
1365 
1366       if (group->planes[i].bytesused > group->planes[i].data_offset) {
1367         size = group->planes[i].bytesused - group->planes[i].data_offset;
1368       } else {
1369         GST_WARNING_OBJECT (allocator, "V4L2 provided buffer has bytesused %"
1370             G_GUINT32_FORMAT " which is too small to include data_offset %"
1371             G_GUINT32_FORMAT, group->planes[i].bytesused,
1372             group->planes[i].data_offset);
1373         size = group->planes[i].bytesused;
1374       }
1375 
1376       if (G_LIKELY (size + offset <= group->mem[i]->maxsize))
1377         gst_memory_resize (group->mem[i], offset, size);
1378       else {
1379         GST_WARNING_OBJECT (allocator,
1380             "v4l2 provided buffer that is too big for the memory it was "
1381             "writing into.  v4l2 claims %" G_GSIZE_FORMAT " bytes used but "
1382             "memory is only %" G_GSIZE_FORMAT "B.  This is probably a driver "
1383             "bug.", size, group->mem[i]->maxsize);
1384         gst_memory_resize (group->mem[i], 0, group->mem[i]->maxsize);
1385       }
1386     }
1387   }
1388 
1389   /* Release the memory, possibly making it RW again */
1390   for (i = 0; i < group->n_mem; i++)
1391     gst_memory_unref (group->mem[i]);
1392 
1393   *group_out = group;
1394   return GST_FLOW_OK;
1395 
1396 error:
1397   if (errno == EPIPE) {
1398     GST_DEBUG_OBJECT (allocator, "broken pipe signals last buffer");
1399     return GST_FLOW_EOS;
1400   }
1401 
1402   GST_ERROR_OBJECT (allocator, "failed dequeuing a %s buffer: %s",
1403       memory_type_to_str (allocator->memory), g_strerror (errno));
1404 
1405   switch (errno) {
1406     case EAGAIN:
1407       GST_WARNING_OBJECT (allocator,
1408           "Non-blocking I/O has been selected using O_NONBLOCK and"
1409           " no buffer was in the outgoing queue.");
1410       break;
1411     case EINVAL:
1412       GST_ERROR_OBJECT (allocator,
1413           "The buffer type is not supported, or the index is out of bounds, "
1414           "or no buffers have been allocated yet, or the userptr "
1415           "or length are invalid.");
1416       break;
1417     case ENOMEM:
1418       GST_ERROR_OBJECT (allocator,
1419           "insufficient memory to enqueue a user pointer buffer");
1420       break;
1421     case EIO:
1422       GST_INFO_OBJECT (allocator,
1423           "VIDIOC_DQBUF failed due to an internal error."
1424           " Can also indicate temporary problems like signal loss."
1425           " Note the driver might dequeue an (empty) buffer despite"
1426           " returning an error, or even stop capturing.");
1427       /* have we de-queued a buffer ? */
1428       if (!IS_QUEUED (buffer)) {
1429         GST_DEBUG_OBJECT (allocator, "reenqueueing buffer");
1430         /* FIXME ... should we do something here? */
1431       }
1432       break;
1433     case EINTR:
1434       GST_WARNING_OBJECT (allocator, "could not sync on a buffer on device");
1435       break;
1436     default:
1437       GST_WARNING_OBJECT (allocator,
1438           "Grabbing frame got interrupted unexpectedly. %d: %s.", errno,
1439           g_strerror (errno));
1440       break;
1441   }
1442 
1443   return GST_FLOW_ERROR;
1444 }
1445 
1446 void
1447 gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
1448     GstV4l2MemoryGroup * group)
1449 {
1450   switch (allocator->memory) {
1451     case V4L2_MEMORY_USERPTR:
1452       gst_v4l2_allocator_clear_userptr (allocator, group);
1453       break;
1454     case V4L2_MEMORY_DMABUF:
1455       gst_v4l2_allocator_clear_dmabufin (allocator, group);
1456       break;
1457     case V4L2_MEMORY_MMAP:
1458       break;
1459     default:
1460       g_assert_not_reached ();
1461       break;
1462   }
1463 
1464   gst_v4l2_allocator_reset_size (allocator, group);
1465 }
1466