1 /*
2  * GStreamer
3  * Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Library General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * Library General Public License for more details.
14  *
15  * You should have received a copy of the GNU Library General Public
16  * License along with this library; if not, write to the
17  * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18  * Boston, MA 02110-1301, USA.
19  */
20 
21 
22 #ifdef HAVE_CONFIG_H
23 #  include <config.h>
24 #endif
25 
26 #include <gst/gst.h>
27 #include <gst/base/base.h>
28 #include <gst/video/video.h>
29 #include <string.h>
30 
31 #include "gstcccombiner.h"
32 
GST_DEBUG_CATEGORY_STATIC (gst_cc_combiner_debug);
#define GST_CAT_DEFAULT gst_cc_combiner_debug

/* Always-present video sink pad: the stream the captions get attached to.
 * Any caps are accepted; the element only adds metas. */
static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS_ANY);

/* Source pad: pushes the video buffers back out, now carrying
 * GstVideoCaptionMeta. */
static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS_ANY);

/* Optional request pad for the closed-caption stream, restricted to the
 * CEA-608/CEA-708 formats understood by gst_video_caption_type_from_caps(). */
static GstStaticPadTemplate captiontemplate =
    GST_STATIC_PAD_TEMPLATE ("caption",
    GST_PAD_SINK,
    GST_PAD_REQUEST,
    GST_STATIC_CAPS
    ("closedcaption/x-cea-608,format={ (string) raw, (string) s334-1a}; "
        "closedcaption/x-cea-708,format={ (string) cc_data, (string) cdp }"));

G_DEFINE_TYPE (GstCCCombiner, gst_cc_combiner, GST_TYPE_AGGREGATOR);
#define parent_class gst_cc_combiner_parent_class
56 
/* One collected caption buffer together with the caption type that was
 * current on the caption pad when it was collected. Entries live in
 * self->current_frame_captions and own a reference on @buffer, released
 * by caption_data_clear(). */
typedef struct
{
  GstVideoCaptionType caption_type;
  GstBuffer *buffer;
} CaptionData;
62 
63 static void
caption_data_clear(CaptionData * data)64 caption_data_clear (CaptionData * data)
65 {
66   gst_buffer_unref (data->buffer);
67 }
68 
69 static void
gst_cc_combiner_finalize(GObject * object)70 gst_cc_combiner_finalize (GObject * object)
71 {
72   GstCCCombiner *self = GST_CCCOMBINER (object);
73 
74   g_array_unref (self->current_frame_captions);
75   self->current_frame_captions = NULL;
76   gst_caps_replace (&self->video_caps, NULL);
77 
78   G_OBJECT_CLASS (parent_class)->finalize (object);
79 }
80 
/* Internal flow return used between collect_captions() and aggregate():
 * more caption data is required before the queued video buffer can be
 * finished. Never leaves the element. */
#define GST_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS
82 
/* Collect all caption buffers whose running time falls into the interval
 * [current_video_running_time, current_video_running_time_end) of the
 * queued video buffer, attach them as GstVideoCaptionMeta and push the
 * video buffer downstream.
 *
 * @timeout: TRUE when the aggregator timed out waiting for data; the frame
 * is then finished with whatever captions were collected so far.
 *
 * Returns GST_FLOW_NEED_DATA when more caption data is required (only when
 * !timeout), GST_FLOW_ERROR for a caption buffer without PTS, otherwise
 * the result of gst_aggregator_finish_buffer(). Except for the NEED_DATA
 * case, self->current_video_buffer is consumed and NULL afterwards. */
static GstFlowReturn
gst_cc_combiner_collect_captions (GstCCCombiner * self, gboolean timeout)
{
  GstAggregatorPad *src_pad =
      GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (self));
  GstAggregatorPad *caption_pad;
  GstBuffer *video_buf;

  g_assert (self->current_video_buffer != NULL);

  caption_pad =
      GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
          (self), "caption"));

  /* No caption pad, forward buffer directly */
  if (!caption_pad) {
    GST_LOG_OBJECT (self, "No caption pad, passing through video");
    video_buf = self->current_video_buffer;
    self->current_video_buffer = NULL;
    goto done;
  }

  GST_LOG_OBJECT (self, "Trying to collect captions for queued video buffer");
  do {
    GstBuffer *caption_buf;
    GstClockTime caption_time;
    CaptionData caption_data;

    /* peek returns a reference that each branch below must release */
    caption_buf = gst_aggregator_pad_peek_buffer (caption_pad);
    if (!caption_buf) {
      if (gst_aggregator_pad_is_eos (caption_pad)) {
        GST_DEBUG_OBJECT (self, "Caption pad is EOS, we're done");
        break;
      } else if (!timeout) {
        GST_DEBUG_OBJECT (self, "Need more caption data");
        gst_object_unref (caption_pad);
        return GST_FLOW_NEED_DATA;
      } else {
        GST_DEBUG_OBJECT (self, "No caption data on timeout");
        break;
      }
    }

    /* Captions are matched to the video frame by running time, so a valid
     * PTS is mandatory */
    caption_time = GST_BUFFER_PTS (caption_buf);
    if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
      GST_ERROR_OBJECT (self, "Caption buffer without PTS");

      gst_buffer_unref (caption_buf);
      gst_object_unref (caption_pad);

      return GST_FLOW_ERROR;
    }

    caption_time =
        gst_segment_to_running_time (&caption_pad->segment, GST_FORMAT_TIME,
        caption_time);

    if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
      GST_DEBUG_OBJECT (self, "Caption buffer outside segment, dropping");

      gst_aggregator_pad_drop_buffer (caption_pad);
      gst_buffer_unref (caption_buf);

      continue;
    }

    /* Collected all caption buffers for this video buffer */
    if (caption_time >= self->current_video_running_time_end) {
      /* Not dropped from the pad: it belongs to the next video frame */
      gst_buffer_unref (caption_buf);
      break;
    } else if (caption_time < self->current_video_running_time) {
      GST_DEBUG_OBJECT (self,
          "Caption buffer before current video frame, dropping");

      gst_aggregator_pad_drop_buffer (caption_pad);
      gst_buffer_unref (caption_buf);
      continue;
    }

    /* This caption buffer has to be collected */
    GST_LOG_OBJECT (self,
        "Collecting caption buffer %p %" GST_TIME_FORMAT " for video buffer %p",
        caption_buf, GST_TIME_ARGS (caption_time), self->current_video_buffer);
    caption_data.caption_type = self->current_caption_type;
    /* The array entry takes over the peek reference; it is released via
     * caption_data_clear() when the array is cleared below */
    caption_data.buffer = caption_buf;
    g_array_append_val (self->current_frame_captions, caption_data);
    gst_aggregator_pad_drop_buffer (caption_pad);
  } while (TRUE);

  if (self->current_frame_captions->len > 0) {
    guint i;

    GST_LOG_OBJECT (self, "Attaching %u captions to buffer %p",
        self->current_frame_captions->len, self->current_video_buffer);
    /* Adding metas requires a writable buffer */
    video_buf = gst_buffer_make_writable (self->current_video_buffer);
    self->current_video_buffer = NULL;

    for (i = 0; i < self->current_frame_captions->len; i++) {
      CaptionData *caption_data =
          &g_array_index (self->current_frame_captions, CaptionData, i);
      GstMapInfo map;

      gst_buffer_map (caption_data->buffer, &map, GST_MAP_READ);
      gst_buffer_add_video_caption_meta (video_buf, caption_data->caption_type,
          map.data, map.size);
      gst_buffer_unmap (caption_data->buffer, &map);
    }

    /* Releases the collected caption buffers via caption_data_clear() */
    g_array_set_size (self->current_frame_captions, 0);
  } else {
    GST_LOG_OBJECT (self, "No captions for buffer %p",
        self->current_video_buffer);
    video_buf = self->current_video_buffer;
    self->current_video_buffer = NULL;
  }

  gst_object_unref (caption_pad);

done:
  /* Track the output position for position reporting */
  src_pad->segment.position =
      GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);

  return gst_aggregator_finish_buffer (GST_AGGREGATOR_CAST (self), video_buf);
}
207 
/* GstAggregator::aggregate implementation.
 *
 * At most one video buffer is queued at a time (self->current_video_buffer).
 * Its end running time is derived from the buffer duration or the configured
 * framerate; if neither is available it is left invalid and filled in from
 * the start time of the *next* video buffer — which is why this function can
 * be entered with a queued buffer but an invalid end time. Once the interval
 * is known, caption collection and pushing happens in
 * gst_cc_combiner_collect_captions(). */
static GstFlowReturn
gst_cc_combiner_aggregate (GstAggregator * aggregator, gboolean timeout)
{
  GstCCCombiner *self = GST_CCCOMBINER (aggregator);
  GstFlowReturn flow_ret = GST_FLOW_OK;

  /* If we have no current video buffer, queue one. If we have one but
   * its end running time is not known yet, try to determine it from the
   * next video buffer */
  if (!self->current_video_buffer
      || !GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end)) {
    GstAggregatorPad *video_pad;
    GstClockTime video_start;
    GstBuffer *video_buf;

    video_pad =
        GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
            (aggregator), "sink"));
    video_buf = gst_aggregator_pad_peek_buffer (video_pad);
    if (!video_buf) {
      if (gst_aggregator_pad_is_eos (video_pad)) {
        GST_DEBUG_OBJECT (aggregator, "Video pad is EOS, we're done");

        /* Assume that this buffer ends where it started +50ms (25fps) and handle it */
        if (self->current_video_buffer) {
          self->current_video_running_time_end =
              self->current_video_running_time + 50 * GST_MSECOND;
          flow_ret = gst_cc_combiner_collect_captions (self, timeout);
        }

        /* If we collected all captions for the remaining video frame we're
         * done, otherwise get called another time and go directly into the
         * outer branch for finishing the current video frame */
        /* NOTE(review): any non-NEED_DATA result (including errors from
         * collect_captions()) is mapped to EOS here — confirm intended */
        if (flow_ret == GST_FLOW_NEED_DATA)
          flow_ret = GST_FLOW_OK;
        else
          flow_ret = GST_FLOW_EOS;
      } else {
        flow_ret = GST_FLOW_OK;
      }

      gst_object_unref (video_pad);
      return flow_ret;
    }

    video_start = GST_BUFFER_PTS (video_buf);
    if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
      gst_buffer_unref (video_buf);
      gst_object_unref (video_pad);

      GST_ERROR_OBJECT (aggregator, "Video buffer without PTS");

      return GST_FLOW_ERROR;
    }

    video_start =
        gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
        video_start);
    if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
      GST_DEBUG_OBJECT (aggregator, "Buffer outside segment, dropping");
      gst_aggregator_pad_drop_buffer (video_pad);
      gst_buffer_unref (video_buf);
      gst_object_unref (video_pad);
      return GST_FLOW_OK;
    }

    if (self->current_video_buffer) {
      /* If we already have a video buffer just update the current end running
       * time accordingly. That's what was missing and why we got here */
      self->current_video_running_time_end = video_start;
      gst_buffer_unref (video_buf);
      GST_LOG_OBJECT (self,
          "Determined end timestamp for video buffer: %p %" GST_TIME_FORMAT
          " - %" GST_TIME_FORMAT, self->current_video_buffer,
          GST_TIME_ARGS (self->current_video_running_time),
          GST_TIME_ARGS (self->current_video_running_time_end));
    } else {
      /* Otherwise we had no buffer queued currently. Let's do that now
       * so that we can collect captions for it */
      gst_buffer_replace (&self->current_video_buffer, video_buf);
      self->current_video_running_time = video_start;
      gst_aggregator_pad_drop_buffer (video_pad);
      /* Drops the peek reference only; video_buf stays valid through the
       * reference taken by gst_buffer_replace() above, so reading its
       * PTS/duration below is safe */
      gst_buffer_unref (video_buf);

      if (GST_BUFFER_DURATION_IS_VALID (video_buf)) {
        /* End time from the buffer's own duration, clipped to the segment */
        GstClockTime end_time =
            GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);
        if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
          end_time = video_pad->segment.stop;
        self->current_video_running_time_end =
            gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
            end_time);
      } else if (self->video_fps_n != 0 && self->video_fps_d != 0) {
        /* No duration: assume one frame duration from the caps framerate */
        GstClockTime end_time =
            GST_BUFFER_PTS (video_buf) + gst_util_uint64_scale_int (GST_SECOND,
            self->video_fps_d, self->video_fps_n);
        if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
          end_time = video_pad->segment.stop;
        self->current_video_running_time_end =
            gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
            end_time);
      } else {
        /* Unknown: will be determined from the next buffer's start time */
        self->current_video_running_time_end = GST_CLOCK_TIME_NONE;
      }

      GST_LOG_OBJECT (self,
          "Queued new video buffer: %p %" GST_TIME_FORMAT " - %"
          GST_TIME_FORMAT, self->current_video_buffer,
          GST_TIME_ARGS (self->current_video_running_time),
          GST_TIME_ARGS (self->current_video_running_time_end));
    }

    gst_object_unref (video_pad);
  }

  /* At this point we have a video buffer queued and can start collecting
   * caption buffers for it */
  g_assert (self->current_video_buffer != NULL);
  g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time));
  g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end));

  flow_ret = gst_cc_combiner_collect_captions (self, timeout);

  /* Only if we collected all captions we replace the current video buffer
   * with NULL and continue with the next one on the next call */
  if (flow_ret == GST_FLOW_NEED_DATA) {
    flow_ret = GST_FLOW_OK;
  } else {
    gst_buffer_replace (&self->current_video_buffer, NULL);
    self->current_video_running_time = self->current_video_running_time_end =
        GST_CLOCK_TIME_NONE;
  }

  return flow_ret;
}
343 
344 static gboolean
gst_cc_combiner_sink_event(GstAggregator * aggregator,GstAggregatorPad * agg_pad,GstEvent * event)345 gst_cc_combiner_sink_event (GstAggregator * aggregator,
346     GstAggregatorPad * agg_pad, GstEvent * event)
347 {
348   GstCCCombiner *self = GST_CCCOMBINER (aggregator);
349 
350   switch (GST_EVENT_TYPE (event)) {
351     case GST_EVENT_CAPS:{
352       GstCaps *caps;
353       GstStructure *s;
354 
355       gst_event_parse_caps (event, &caps);
356       s = gst_caps_get_structure (caps, 0);
357 
358       if (strcmp (GST_OBJECT_NAME (agg_pad), "caption") == 0) {
359         self->current_caption_type = gst_video_caption_type_from_caps (caps);
360       } else {
361         gint fps_n, fps_d;
362 
363         fps_n = fps_d = 0;
364 
365         gst_structure_get_fraction (s, "framerate", &fps_n, &fps_d);
366 
367         if (fps_n != self->video_fps_n || fps_d != self->video_fps_d) {
368           GstClockTime latency;
369 
370           latency = gst_util_uint64_scale (GST_SECOND, fps_d, fps_n);
371           gst_aggregator_set_latency (aggregator, latency, latency);
372         }
373 
374         self->video_fps_n = fps_n;
375         self->video_fps_d = fps_d;
376 
377         self->video_caps = gst_caps_ref (caps);
378       }
379 
380       break;
381     }
382     default:
383       break;
384   }
385 
386   return GST_AGGREGATOR_CLASS (parent_class)->sink_event (aggregator, agg_pad,
387       event);
388 }
389 
390 static gboolean
gst_cc_combiner_stop(GstAggregator * aggregator)391 gst_cc_combiner_stop (GstAggregator * aggregator)
392 {
393   GstCCCombiner *self = GST_CCCOMBINER (aggregator);
394 
395   self->video_fps_n = self->video_fps_d = 0;
396   self->current_video_running_time = self->current_video_running_time_end =
397       GST_CLOCK_TIME_NONE;
398   gst_buffer_replace (&self->current_video_buffer, NULL);
399   gst_caps_replace (&self->video_caps, NULL);
400 
401   g_array_set_size (self->current_frame_captions, 0);
402   self->current_caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
403 
404   return TRUE;
405 }
406 
407 static GstFlowReturn
gst_cc_combiner_flush(GstAggregator * aggregator)408 gst_cc_combiner_flush (GstAggregator * aggregator)
409 {
410   GstCCCombiner *self = GST_CCCOMBINER (aggregator);
411   GstAggregatorPad *src_pad =
412       GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (aggregator));
413 
414   self->current_video_running_time = self->current_video_running_time_end =
415       GST_CLOCK_TIME_NONE;
416   gst_buffer_replace (&self->current_video_buffer, NULL);
417 
418   g_array_set_size (self->current_frame_captions, 0);
419 
420   src_pad->segment.position = GST_CLOCK_TIME_NONE;
421 
422   return GST_FLOW_OK;
423 }
424 
425 static GstAggregatorPad *
gst_cc_combiner_create_new_pad(GstAggregator * aggregator,GstPadTemplate * templ,const gchar * req_name,const GstCaps * caps)426 gst_cc_combiner_create_new_pad (GstAggregator * aggregator,
427     GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
428 {
429   GstCCCombiner *self = GST_CCCOMBINER (aggregator);
430   GstAggregatorPad *agg_pad;
431 
432   if (templ->direction != GST_PAD_SINK)
433     return NULL;
434 
435   if (templ->presence != GST_PAD_REQUEST)
436     return NULL;
437 
438   if (strcmp (templ->name_template, "caption") != 0)
439     return NULL;
440 
441   GST_OBJECT_LOCK (self);
442   agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
443       "name", "caption", "direction", GST_PAD_SINK, "template", templ, NULL);
444   self->current_caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
445   GST_OBJECT_UNLOCK (self);
446 
447   return agg_pad;
448 }
449 
450 static GstFlowReturn
gst_cc_combiner_update_src_caps(GstAggregator * agg,GstCaps * caps,GstCaps ** ret)451 gst_cc_combiner_update_src_caps (GstAggregator * agg,
452     GstCaps * caps, GstCaps ** ret)
453 {
454   GstFlowReturn res = GST_AGGREGATOR_FLOW_NEED_DATA;
455   GstCCCombiner *self = GST_CCCOMBINER (agg);
456 
457   if (self->video_caps) {
458     *ret = gst_caps_intersect (caps, self->video_caps);
459     res = GST_FLOW_OK;
460   }
461 
462   return res;
463 }
464 
465 static void
gst_cc_combiner_class_init(GstCCCombinerClass * klass)466 gst_cc_combiner_class_init (GstCCCombinerClass * klass)
467 {
468   GObjectClass *gobject_class;
469   GstElementClass *gstelement_class;
470   GstAggregatorClass *aggregator_class;
471 
472   gobject_class = (GObjectClass *) klass;
473   gstelement_class = (GstElementClass *) klass;
474   aggregator_class = (GstAggregatorClass *) klass;
475 
476   gobject_class->finalize = gst_cc_combiner_finalize;
477 
478   gst_element_class_set_static_metadata (gstelement_class,
479       "Closed Caption Combiner",
480       "Filter",
481       "Combines GstVideoCaptionMeta with video input stream",
482       "Sebastian Dröge <sebastian@centricular.com>");
483 
484   gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
485       &sinktemplate, GST_TYPE_AGGREGATOR_PAD);
486   gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
487       &srctemplate, GST_TYPE_AGGREGATOR_PAD);
488   gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
489       &captiontemplate, GST_TYPE_AGGREGATOR_PAD);
490 
491   aggregator_class->aggregate = gst_cc_combiner_aggregate;
492   aggregator_class->stop = gst_cc_combiner_stop;
493   aggregator_class->flush = gst_cc_combiner_flush;
494   aggregator_class->create_new_pad = gst_cc_combiner_create_new_pad;
495   aggregator_class->sink_event = gst_cc_combiner_sink_event;
496   aggregator_class->update_src_caps = gst_cc_combiner_update_src_caps;
497   aggregator_class->get_next_time = gst_aggregator_simple_get_next_time;
498 
499   GST_DEBUG_CATEGORY_INIT (gst_cc_combiner_debug, "cccombiner",
500       0, "Closed Caption combiner");
501 }
502 
503 static void
gst_cc_combiner_init(GstCCCombiner * self)504 gst_cc_combiner_init (GstCCCombiner * self)
505 {
506   GstPadTemplate *templ;
507   GstAggregatorPad *agg_pad;
508 
509   templ = gst_static_pad_template_get (&sinktemplate);
510   agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
511       "name", "sink", "direction", GST_PAD_SINK, "template", templ, NULL);
512   gst_object_unref (templ);
513   gst_element_add_pad (GST_ELEMENT_CAST (self), GST_PAD_CAST (agg_pad));
514 
515   self->current_frame_captions =
516       g_array_new (FALSE, FALSE, sizeof (CaptionData));
517   g_array_set_clear_func (self->current_frame_captions,
518       (GDestroyNotify) caption_data_clear);
519 
520   self->current_video_running_time = self->current_video_running_time_end =
521       GST_CLOCK_TIME_NONE;
522 
523   self->current_caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
524 }
525