1 /* rendersub.c
2 
3    Copyright (c) 2003-2021 HandBrake Team
4    This file is part of the HandBrake source code
5    Homepage: <http://handbrake.fr/>.
6    It may be used under the terms of the GNU General Public License v2.
7    For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
8  */
9 
10 #include "handbrake/handbrake.h"
11 #include "handbrake/hbffmpeg.h"
12 #include <ass/ass.h>
13 
14 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
15 
// Per-instance state for the subtitle render filter.
struct hb_filter_private_s
{
    // Common
    int                 crop[4];      // top/bottom/left/right crop (same order used for job->crop)
    int                 type;         // subtitle source type; selects vobsub/ssa/textsub/pgssub paths
    struct SwsContext * sws;          // cached scaler used to resize bitmap subtitles
    int                 sws_width;    // dimensions pv->sws was created for; context is
    int                 sws_height;   // rebuilt when the target size changes

    // VOBSUB && PGSSUB
    hb_list_t         * sub_list; // List of active subs

    // SSA
    ASS_Library       * ssa;
    ASS_Renderer      * renderer;
    ASS_Track         * ssaTrack;
    uint8_t             script_initialized; // set once codec private data has been fed to libass

    // SRT
    int                 line;        // running sequence number substituted into repeated chunks
    hb_buffer_t       * current_sub; // active sub whose stop time is not yet known

    hb_filter_init_t    input;
    hb_filter_init_t    output;
};
41 
// Forward declarations, grouped by subtitle source type.

// VOBSUB
static int vobsub_post_init( hb_filter_object_t * filter, hb_job_t * job );

static int vobsub_work( hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out );

static void vobsub_close( hb_filter_object_t * filter );


// SSA
static int ssa_post_init( hb_filter_object_t * filter, hb_job_t * job );

static int ssa_work( hb_filter_object_t * filter,
                     hb_buffer_t ** buf_in,
                     hb_buffer_t ** buf_out );

static void ssa_close( hb_filter_object_t * filter );


// SRT
static int textsub_post_init( hb_filter_object_t * filter, hb_job_t * job );
static int cc608sub_post_init( hb_filter_object_t * filter, hb_job_t * job );

static int textsub_work( hb_filter_object_t * filter,
                     hb_buffer_t ** buf_in,
                     hb_buffer_t ** buf_out );

static void textsub_close( hb_filter_object_t * filter );


// PGS
static int pgssub_post_init( hb_filter_object_t * filter, hb_job_t * job );

static int pgssub_work ( hb_filter_object_t * filter,
                      hb_buffer_t ** buf_in,
                      hb_buffer_t ** buf_out );

static void pgssub_close( hb_filter_object_t * filter );


// Entry points
static int hb_rendersub_init( hb_filter_object_t * filter,
                                 hb_filter_init_t * init );

static int hb_rendersub_post_init( hb_filter_object_t * filter, hb_job_t *job );

static int hb_rendersub_work( hb_filter_object_t * filter,
                                 hb_buffer_t ** buf_in,
                                 hb_buffer_t ** buf_out );

static void hb_rendersub_close( hb_filter_object_t * filter );
94 
// Filter registration: the entry points HandBrake's filter pipeline calls.
hb_filter_object_t hb_filter_render_sub =
{
    .id            = HB_FILTER_RENDER_SUB,
    .enforce_order = 1,
    .name          = "Subtitle renderer",
    .settings      = NULL,
    .init          = hb_rendersub_init,
    .post_init     = hb_rendersub_post_init,
    .work          = hb_rendersub_work,
    .close         = hb_rendersub_close,
};
106 
107 // blends src YUVA420P buffer into dst
108 // dst is currently YUV420P, but in future will be other formats as well
// Alpha-blend an 8-bit YUVA420P subtitle bitmap (src) into an 8-bit
// planar YUV frame (dst) at position (left, top).  Parts of src that
// fall outside dst are clipped.  Per-pixel alpha comes from src plane 3.
static void blend( hb_buffer_t *dst, hb_buffer_t *src, int left, int top )
{
    int xx, yy;
    int ww, hh;
    int x0, y0;
    uint8_t *y_in, *y_out;
    uint8_t *u_in, *u_out;
    uint8_t *v_in, *v_out;
    uint8_t *a_in, alpha;

    // Clip any part of the overlay hanging off the left/top edges
    x0 = y0 = 0;
    if( left < 0 )
    {
        x0 = -left;
    }
    if( top < 0 )
    {
        y0 = -top;
    }

    // Clip any part of the overlay hanging off the right/bottom edges
    ww = src->f.width;
    if( src->f.width - x0 > dst->f.width - left )
    {
        ww = dst->f.width - left + x0;
    }
    hh = src->f.height;
    if( src->f.height - y0 > dst->f.height - top )
    {
        hh = dst->f.height - top + y0;
    }
    // Blend luma
    for( yy = y0; yy < hh; yy++ )
    {
        y_in   = src->plane[0].data + yy * src->plane[0].stride;
        y_out   = dst->plane[0].data + ( yy + top ) * dst->plane[0].stride;
        a_in = src->plane[3].data + yy * src->plane[3].stride;
        for( xx = x0; xx < ww; xx++ )
        {
            alpha = a_in[xx];
            /*
             * Merge the luminance and alpha with the picture
             */
            y_out[left + xx] =
                ( (uint16_t)y_out[left + xx] * ( 255 - alpha ) +
                     (uint16_t)y_in[xx] * alpha ) / 255;
        }
    }

    // Blend U & V
    // Assumes source and dest are the same PIX_FMT
    // (derive chroma subsampling shifts from the plane dimensions)
    int hshift = 0;
    int wshift = 0;
    if( dst->plane[1].height < dst->plane[0].height )
        hshift = 1;
    if( dst->plane[1].width < dst->plane[0].width )
        wshift = 1;

    for( yy = y0 >> hshift; yy < hh >> hshift; yy++ )
    {
        u_in = src->plane[1].data + yy * src->plane[1].stride;
        u_out = dst->plane[1].data + ( yy + ( top >> hshift ) ) * dst->plane[1].stride;
        v_in = src->plane[2].data + yy * src->plane[2].stride;
        v_out = dst->plane[2].data + ( yy + ( top >> hshift ) ) * dst->plane[2].stride;
        // Alpha plane is full resolution; step by the subsampling factor
        a_in = src->plane[3].data + ( yy << hshift ) * src->plane[3].stride;

        for( xx = x0 >> wshift; xx < ww >> wshift; xx++ )
        {
            alpha = a_in[xx << wshift];

            // Blend average U and alpha
            u_out[(left >> wshift) + xx] =
                ( (uint16_t)u_out[(left >> wshift) + xx] * ( 255 - alpha ) +
                  (uint16_t)u_in[xx] * alpha ) / 255;

            // Blend V and alpha
            v_out[(left >> wshift) + xx] =
                ( (uint16_t)v_out[(left >> wshift) + xx] * ( 255 - alpha ) +
                  (uint16_t)v_in[xx] * alpha ) / 255;
        }
    }
}
190 
// Alpha-blend an 8-bit YUVA420P subtitle bitmap (src) into a deeper
// planar YUV frame (dst) at (left, top).  'shift' is the extra bit
// depth of the destination (2 for 10-bit, 4 for 12-bit, per ApplySub);
// source samples and alpha are scaled up by 'shift' bits before blending.
static void blend8on1x( hb_buffer_t *dst, hb_buffer_t *src, int left, int top, int shift )
{
    int xx, yy;
    int ww, hh;
    int x0, y0;
    int max;

    uint8_t *y_in;
    uint8_t *u_in;
    uint8_t *v_in;
    uint8_t *a_in;

    uint16_t *y_out;
    uint16_t *u_out;
    uint16_t *v_out;
    uint16_t alpha;

    // Clip any part of the overlay hanging off the left/top edges
    x0 = y0 = 0;
    if( left < 0 )
    {
        x0 = -left;
    }
    if( top < 0 )
    {
        y0 = -top;
    }

    // Clip any part of the overlay hanging off the right/bottom edges
    ww = src->f.width;
    if( src->f.width - x0 > dst->f.width - left )
    {
        ww = dst->f.width - left + x0;
    }
    hh = src->f.height;
    if( src->f.height - y0 > dst->f.height - top )
    {
        hh = dst->f.height - top + y0;
    }

    // Maximum sample/alpha value at the destination bit depth
    // (e.g. shift == 2 -> (256 << 2) - 1 == 1023 for 10-bit)
    max = (256 << shift) -1;

    // Blend luma
    for( yy = y0; yy < hh; yy++ )
    {
        y_in   = src->plane[0].data + yy * src->plane[0].stride;
        y_out   = (uint16_t*)(dst->plane[0].data + ( yy + top ) * dst->plane[0].stride);
        a_in = src->plane[3].data + yy * src->plane[3].stride;
        for( xx = x0; xx < ww; xx++ )
        {
            alpha = a_in[xx] << shift;
            /*
             * Merge the luminance and alpha with the picture
             */
            y_out[left + xx] =
                ( (uint32_t)y_out[left + xx] * ( max - alpha ) +
                     ((uint32_t)y_in[xx] << shift) * alpha ) / max;
        }
    }

    // Blend U & V
    // (derive chroma subsampling shifts from the plane dimensions)
    int hshift = 0;
    int wshift = 0;
    if( dst->plane[1].height < dst->plane[0].height )
        hshift = 1;
    if( dst->plane[1].width < dst->plane[0].width )
        wshift = 1;

    for( yy = y0 >> hshift; yy < hh >> hshift; yy++ )
    {
        u_in = src->plane[1].data + yy * src->plane[1].stride;
        u_out = (uint16_t*)(dst->plane[1].data + ( yy + ( top >> hshift ) ) * dst->plane[1].stride);
        v_in = src->plane[2].data + yy * src->plane[2].stride;
        v_out = (uint16_t*)(dst->plane[2].data + ( yy + ( top >> hshift ) ) * dst->plane[2].stride);
        // Alpha plane is full resolution; step by the subsampling factor
        a_in = src->plane[3].data + ( yy << hshift ) * src->plane[3].stride;

        for( xx = x0 >> wshift; xx < ww >> wshift; xx++ )
        {
            alpha = a_in[xx << wshift] << shift;

            // Blend average U and alpha
            u_out[(left >> wshift) + xx] =
                ( (uint32_t)u_out[(left >> wshift) + xx] * ( max - alpha ) +
                  ((uint32_t)u_in[xx] << shift) * alpha ) / max;

            // Blend V and alpha
            v_out[(left >> wshift) + xx] =
                ( (uint32_t)v_out[(left >> wshift) + xx] * ( max - alpha ) +
                  ((uint32_t)v_in[xx] << shift) * alpha ) / max;
        }
    }
}
281 
282 // Assumes that the input destination buffer has the same dimensions
283 // as the original title dimensions
// Composite a subtitle overlay onto a frame at the position stored in
// sub->f.x / sub->f.y, choosing the blender that matches the output
// pixel format's bit depth.
static void ApplySub( hb_filter_private_t * pv, hb_buffer_t * buf, hb_buffer_t * sub )
{
    if (pv->output.pix_fmt == AV_PIX_FMT_YUV420P10)
    {
        // 10-bit output: promote 8-bit sub samples by 2 bits
        blend8on1x(buf, sub, sub->f.x, sub->f.y, 2);
    }
    else if (pv->output.pix_fmt == AV_PIX_FMT_YUV420P12)
    {
        // 12-bit output: promote 8-bit sub samples by 4 bits
        blend8on1x(buf, sub, sub->f.x, sub->f.y, 4);
    }
    else
    {
        // 8-bit output
        blend(buf, sub, sub->f.x, sub->f.y);
    }
}
298 
// Scale a subtitle bitmap to match the video dimensions (when the sub
// carries its own window size) and reposition it so it stays out of
// cropped regions and respects a small margin.  Returns a new buffer
// the caller must close, or NULL on allocation failure.
static hb_buffer_t * ScaleSubtitle(hb_filter_private_t *pv,
                                   hb_buffer_t *sub, hb_buffer_t *buf)
{
    hb_buffer_t * scaled;
    double xfactor = 1., yfactor = 1.;

    // Do we need to rescale subtitles?
    if (sub->f.window_width > 0 && sub->f.window_height > 0)
    {

        // TODO: Factor aspect ratio
        // For now, assume subtitle and video PAR is the same.
        xfactor     = (double)buf->f.width  / sub->f.window_width;
        yfactor     = (double)buf->f.height / sub->f.window_height;
        // The video may have been cropped.  This will make xfactor != yfactor
        // even though video and subtitles are the same PAR.  So use the
        // larger of as the scale factor.
        if (xfactor > yfactor)
        {
            yfactor = xfactor;
        }
        else
        {
            xfactor = yfactor;
        }
    }
    // Only rescale when the factor differs from 1 by more than 1%
    if (ABS(xfactor - 1) > 0.01 || ABS(yfactor - 1) > 0.01)
    {
        uint8_t * in_data[4], * out_data[4];
        int       in_stride[4], out_stride[4];
        int       width, height;

        width       = sub->f.width  * xfactor;
        height      = sub->f.height * yfactor;
        // Note that subtitle frame buffer is YUVA420P, not YUV420P, it has alpha
        scaled      = hb_frame_buffer_init(AV_PIX_FMT_YUVA420P, width, height);
        if (scaled == NULL)
            return NULL;

        scaled->f.x = sub->f.x * xfactor;
        scaled->f.y = sub->f.y * yfactor;

        hb_picture_fill(in_data,  in_stride,  sub);
        hb_picture_fill(out_data, out_stride, scaled);

        // Reuse the cached scaler when dimensions match; otherwise
        // rebuild it for the new target size.
        if (pv->sws        == NULL   ||
            pv->sws_width  != width  ||
            pv->sws_height != height)
        {
            if (pv->sws!= NULL)
                sws_freeContext(pv->sws);
            pv->sws = hb_sws_get_context(
                                sub->f.width, sub->f.height, sub->f.fmt, AVCOL_RANGE_MPEG,
                                scaled->f.width, scaled->f.height, sub->f.fmt, AVCOL_RANGE_MPEG,
                                SWS_LANCZOS|SWS_ACCURATE_RND, SWS_CS_DEFAULT);
            pv->sws_width   = width;
            pv->sws_height  = height;
        }
        sws_scale(pv->sws, (const uint8_t* const *)in_data, in_stride,
                  0, sub->f.height, out_data, out_stride);
    }
    else
    {
        // No scaling needed; work on a copy so positioning tweaks below
        // do not modify the caller's subtitle
        scaled = hb_buffer_dup(sub);
    }

    int top, left, margin_top, margin_percent;

    /*
     * Percent of height of picture that form a margin that subtitles
     * should not be displayed within.
     */
    margin_percent = 2;

    /*
     * If necessary, move the subtitle so it is not in a cropped zone.
     * When it won't fit, we center it so we lose as much on both ends.
     * Otherwise we try to leave a 20px or 2% margin around it.
     */
    margin_top = ( ( buf->f.height - pv->crop[0] - pv->crop[1] ) *
                   margin_percent ) / 100;

    if( margin_top > 20 )
    {
        /*
         * A maximum margin of 20px regardless of height of the picture.
         */
        margin_top = 20;
    }

    if( scaled->f.height > buf->f.height - pv->crop[0] - pv->crop[1] -
        ( margin_top * 2 ) )
    {
        /*
         * The subtitle won't fit in the cropped zone, so center
         * it vertically so we fit in as much as we can.
         */
        top = pv->crop[0] + ( buf->f.height - pv->crop[0] -
                                      pv->crop[1] - scaled->f.height ) / 2;
    }
    else if( scaled->f.y < pv->crop[0] + margin_top )
    {
        /*
         * The subtitle fits in the cropped zone, but is currently positioned
         * within our top margin, so move it outside of our margin.
         */
        top = pv->crop[0] + margin_top;
    }
    else if( scaled->f.y > buf->f.height - pv->crop[1] - margin_top - scaled->f.height )
    {
        /*
         * The subtitle fits in the cropped zone, and is not within the top
         * margin but is within the bottom margin, so move it to be above
         * the margin.
         */
        top = buf->f.height - pv->crop[1] - margin_top - scaled->f.height;
    }
    else
    {
        /*
         * The subtitle is fine where it is.
         */
        top = scaled->f.y;
    }

    // Same idea horizontally, with a fixed 20px margin on each side
    if( scaled->f.width > buf->f.width - pv->crop[2] - pv->crop[3] - 40 )
        left = pv->crop[2] + ( buf->f.width - pv->crop[2] -
                pv->crop[3] - scaled->f.width ) / 2;
    else if( scaled->f.x < pv->crop[2] + 20 )
        left = pv->crop[2] + 20;
    else if( scaled->f.x > buf->f.width - pv->crop[3] - 20 - scaled->f.width )
        left = buf->f.width - pv->crop[3] - 20 - scaled->f.width;
    else
        left = scaled->f.x;

    scaled->f.x = left;
    scaled->f.y = top;

    return scaled;
}
439 
440 // Assumes that the input buffer has the same dimensions
441 // as the original title dimensions
// Render all currently-active VOBSUBs onto a video frame, expiring subs
// whose display window has passed.  Assumes the frame has the original
// title dimensions and that pv->sub_list is kept in start-time order.
static void ApplyVOBSubs( hb_filter_private_t * pv, hb_buffer_t * buf )
{
    int ii;
    hb_buffer_t *sub, *next;

    // Note that VOBSUBs can overlap in time.
    // I.e. more than one may be rendered to the screen at once.
    for( ii = 0; ii < hb_list_count(pv->sub_list); )
    {
        sub = hb_list_item( pv->sub_list, ii );
        if (ii + 1 < hb_list_count(pv->sub_list))
            next = hb_list_item( pv->sub_list, ii + 1 );
        else
            next = NULL;

        // A sub with no explicit stop time is implicitly ended by the
        // start of the next sub.
        if ((sub->s.stop != AV_NOPTS_VALUE && sub->s.stop <= buf->s.start) ||
            (next != NULL && sub->s.stop == AV_NOPTS_VALUE && next->s.start <= buf->s.start))
        {
            // Subtitle stop is in the past, delete it.
            // Do not advance ii: the list shifts down by one.
            hb_list_rem( pv->sub_list, sub );
            hb_buffer_close( &sub );
        }
        else if( sub->s.start <= buf->s.start )
        {
            // The subtitle has started before this frame and ends
            // after it.  Render the subtitle into the frame.
            // A sub may be a chain of buffers; render each link.
            while ( sub )
            {
                hb_buffer_t *scaled = ScaleSubtitle(pv, sub, buf);
                // ScaleSubtitle returns NULL on allocation failure;
                // skip the overlay rather than dereferencing NULL.
                if (scaled != NULL)
                {
                    ApplySub( pv, buf, scaled );
                    hb_buffer_close(&scaled);
                }
                sub = sub->next;
            }
            ii++;
        }
        else
        {
            // The subtitle starts in the future.  No need to continue.
            break;
        }
    }
}
484 
// Allocate the list that tracks currently-displayed VOBSUBs.
static int vobsub_post_init( hb_filter_object_t * filter, hb_job_t * job )
{
    hb_filter_private_t * ctx = filter->private_data;
    ctx->sub_list = hb_list_init();
    return 0;
}
493 
// Release VOBSUB filter state: any pending subs, then the context itself.
static void vobsub_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * ctx = filter->private_data;

    if (ctx == NULL)
    {
        return;
    }

    if (ctx->sub_list != NULL)
    {
        hb_list_empty(&ctx->sub_list);
    }

    free(ctx);
    filter->private_data = NULL;
}
509 
// Per-frame VOBSUB work: drain newly-decoded subs into the active list,
// then composite every active sub onto the frame.  The frame is always
// passed through to the output.
static int vobsub_work( hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out )
{
    hb_filter_private_t * ctx = filter->private_data;
    hb_buffer_t * frame = *buf_in;

    // The frame is consumed either way; hand it straight to the output.
    *buf_in  = NULL;
    *buf_out = frame;

    if (frame->s.flags & HB_BUF_FLAG_EOF)
    {
        return HB_FILTER_DONE;
    }

    // Move any pending subtitles onto the active-subtitle list
    for (;;)
    {
        hb_buffer_t * sub = hb_fifo_get(filter->subtitle->fifo_out);
        if (sub == NULL)
        {
            break;
        }
        if (sub->s.flags & HB_BUF_FLAG_EOF)
        {
            hb_buffer_close(&sub);
            break;
        }
        hb_list_add(ctx->sub_list, sub);
    }

    ApplyVOBSubs(ctx, frame);
    return HB_FILTER_OK;
}
543 
ssaAlpha(ASS_Image * frame,int x,int y)544 static uint8_t ssaAlpha( ASS_Image *frame, int x, int y )
545 {
546     unsigned frameA = ( frame->color ) & 0xff;
547     unsigned gliphA = frame->bitmap[y*frame->stride + x];
548 
549     // Alpha for this pixel is the frame opacity (255 - frameA)
550     // multiplied by the gliph alfa (gliphA) for this pixel
551     unsigned alpha = (255 - frameA) * gliphA >> 8;
552 
553     return (uint8_t)alpha;
554 }
555 
556 // Returns a subtitle rendered to a YUVA420P frame
RenderSSAFrame(hb_filter_private_t * pv,ASS_Image * frame)557 static hb_buffer_t * RenderSSAFrame( hb_filter_private_t * pv, ASS_Image * frame )
558 {
559     hb_buffer_t *sub;
560     int xx, yy;
561 
562     unsigned r = ( frame->color >> 24 ) & 0xff;
563     unsigned g = ( frame->color >> 16 ) & 0xff;
564     unsigned b = ( frame->color >>  8 ) & 0xff;
565 
566     int yuv = hb_rgb2yuv_bt709((r << 16) | (g << 8) | b );
567 
568     unsigned frameY = (yuv >> 16) & 0xff;
569     unsigned frameV = (yuv >> 8 ) & 0xff;
570     unsigned frameU = (yuv >> 0 ) & 0xff;
571 
572     // Note that subtitle frame buffer is YUVA420P, not YUV420P, it has alpha
573     sub = hb_frame_buffer_init(AV_PIX_FMT_YUVA420P, frame->w, frame->h);
574     if (sub == NULL)
575         return NULL;
576 
577     uint8_t *y_out, *u_out, *v_out, *a_out;
578     y_out = sub->plane[0].data;
579     u_out = sub->plane[1].data;
580     v_out = sub->plane[2].data;
581     a_out = sub->plane[3].data;
582 
583     for( yy = 0; yy < frame->h; yy++ )
584     {
585         for( xx = 0; xx < frame->w; xx++ )
586         {
587             y_out[xx] = frameY;
588             if( ( yy & 1 ) == 0 )
589             {
590                 u_out[xx>>1] = frameU;
591                 v_out[xx>>1] = frameV;
592             }
593             a_out[xx] = ssaAlpha( frame, xx, yy );;
594         }
595         y_out += sub->plane[0].stride;
596         if( ( yy & 1 ) == 0 )
597         {
598             u_out += sub->plane[1].stride;
599             v_out += sub->plane[2].stride;
600         }
601         a_out += sub->plane[3].stride;
602     }
603     sub->f.width = frame->w;
604     sub->f.height = frame->h;
605     sub->f.x = frame->dst_x + pv->crop[2];
606     sub->f.y = frame->dst_y + pv->crop[0];
607 
608     return sub;
609 }
610 
// Render the SSA events active at this frame's timestamp and composite
// each resulting image layer onto the frame.
static void ApplySSASubs( hb_filter_private_t * pv, hb_buffer_t * buf )
{
    // libass takes times in ms; buffer timestamps are 90kHz ticks
    ASS_Image * img = ass_render_frame(pv->renderer, pv->ssaTrack,
                                       buf->s.start / 90, NULL);

    while (img != NULL)
    {
        hb_buffer_t * rendered = RenderSSAFrame(pv, img);
        if (rendered != NULL)
        {
            ApplySub(pv, buf, rendered);
            hb_buffer_close(&rendered);
        }
        img = img->next;
    }
}
631 
// libass message callback: forward messages below verbosity level 5
// (libass's default cutoff when no callback is set) to HandBrake's log.
static void ssa_log(int level, const char *fmt, va_list args, void *data)
{
    if (level >= 5)
    {
        return;
    }
    hb_valog(1, "[ass]", fmt, args);
}
639 
// Create and configure the libass library, renderer, and track used for
// SSA/ASS rendering.  Returns 0 on success, 1 on failure; partially
// initialized state left in pv is released later by ssa_close().
static int ssa_post_init( hb_filter_object_t * filter, hb_job_t * job )
{
    hb_filter_private_t * pv = filter->private_data;

    pv->ssa = ass_library_init();
    if ( !pv->ssa ) {
        hb_error( "decssasub: libass initialization failed\n" );
        return 1;
    }

    // Redirect libass output to hb_log
    ass_set_message_cb( pv->ssa, ssa_log, NULL );

    // Load embedded fonts
    hb_list_t * list_attachment = job->list_attachment;
    int i;
    for ( i = 0; i < hb_list_count(list_attachment); i++ )
    {
        hb_attachment_t * attachment = hb_list_item( list_attachment, i );

        if ( attachment->type == FONT_TTF_ATTACH ||
             attachment->type == FONT_OTF_ATTACH )
        {
            ass_add_font(
                pv->ssa,
                attachment->name,
                attachment->data,
                attachment->size );
        }
    }

    ass_set_extract_fonts( pv->ssa, 1 );
    ass_set_style_overrides( pv->ssa, NULL );

    pv->renderer = ass_renderer_init( pv->ssa );
    if ( !pv->renderer ) {
        hb_log( "decssasub: renderer initialization failed\n" );
        return 1;
    }

    ass_set_use_margins( pv->renderer, 0 );
    ass_set_hinting( pv->renderer, ASS_HINTING_NONE );
    ass_set_font_scale( pv->renderer, 1.0 );
    ass_set_line_spacing( pv->renderer, 1.0 );

    // Setup default font family
    //
    // SSA v4.00 requires that "Arial" be the default font
    const char *font = NULL;
    const char *family = "Arial";
    // NOTE: This can sometimes block for several *seconds*.
    //       It seems that process_fontdata() for some embedded fonts is slow.
    ass_set_fonts( pv->renderer, font, family, /*haveFontConfig=*/1, NULL, 1 );

    // Setup track state
    pv->ssaTrack = ass_new_track( pv->ssa );
    if ( !pv->ssaTrack ) {
        hb_log( "decssasub: ssa track initialization failed\n" );
        return 1;
    }

    // Render at the cropped frame size; RenderSSAFrame adds the crop
    // offsets back when positioning results on the full frame.
    int height = job->title->geometry.height - job->crop[0] - job->crop[1];
    int width = job->title->geometry.width - job->crop[2] - job->crop[3];
    ass_set_frame_size( pv->renderer, width, height);

    double par = (double)job->par.num / job->par.den;
    ass_set_pixel_aspect( pv->renderer, par );

    return 0;
}
710 
// Release all libass state and the filter context.
static void ssa_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * ctx = filter->private_data;

    if (ctx == NULL)
    {
        return;
    }

    // Tear down libass objects in reverse order of creation
    if (ctx->ssaTrack != NULL)
    {
        ass_free_track(ctx->ssaTrack);
    }
    if (ctx->renderer != NULL)
    {
        ass_renderer_done(ctx->renderer);
    }
    if (ctx->ssa != NULL)
    {
        ass_library_done(ctx->ssa);
    }

    free(ctx);
    filter->private_data = NULL;
}
730 
// Per-frame SSA work: feed pending subtitle packets to libass, then
// render the active events onto the frame.
static int ssa_work( hb_filter_object_t * filter,
                     hb_buffer_t ** buf_in,
                     hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * sub;

    if (!pv->script_initialized)
    {
        // NOTE: The codec extradata is expected to be in MKV format
        // I would like to initialize this in ssa_post_init, but when we are
        // transcoding text subtitles to SSA, the extradata does not
        // get initialized until the decoder is initialized.  Since
        // decoder initialization happens after filter initialization,
        // we need to postpone this.
        ass_process_codec_private(pv->ssaTrack,
                                  (char*)filter->subtitle->extradata,
                                  filter->subtitle->extradata_size);
        pv->script_initialized = 1;
    }
    if (in->s.flags & HB_BUF_FLAG_EOF)
    {
        // Pass the EOF buffer through and finish
        *buf_in = NULL;
        *buf_out = in;
        return HB_FILTER_DONE;
    }

    // Get any pending subtitles and add them to the active
    // subtitle list
    while( ( sub = hb_fifo_get( filter->subtitle->fifo_out ) ) )
    {
        if (sub->s.flags & HB_BUF_FLAG_EOF)
        {
            hb_buffer_close(&sub);
            break;
        }
        // Parse MKV-SSA packet
        // SSA subtitles always have an explicit stop time, so we
        // do not need to do special processing for stop == AV_NOPTS_VALUE
        // (libass wants start/duration in ms; timestamps are 90kHz ticks)
        ass_process_chunk( pv->ssaTrack, (char*)sub->data, sub->size,
                           sub->s.start / 90,
                           (sub->s.stop - sub->s.start) / 90 );
        hb_buffer_close(&sub);
    }

    ApplySSASubs( pv, in );
    *buf_in = NULL;
    *buf_out = in;

    return HB_FILTER_OK;
}
783 
// CC 608 captions get a generated ASS header; rewrite it with the real
// (cropped) frame dimensions and a fixed-width font, then continue with
// the common SSA initialization.
static int cc608sub_post_init( hb_filter_object_t * filter, hb_job_t * job )
{
    int w = job->title->geometry.width  - job->crop[2] - job->crop[3];
    int h = job->title->geometry.height - job->crop[0] - job->crop[1];
    // Size the font against a "safe area" of 80% of the full height
    int safe_height = 0.8 * job->title->geometry.height;

    // Use fixed width font for CC
    hb_subtitle_add_ssa_header(filter->subtitle, HB_FONT_MONO,
                               .08 * safe_height, w, h);
    return ssa_post_init(filter, job);
}
796 
// Text subtitles (e.g. SRT) get a generated ASS header; rewrite it with
// the real (cropped) frame dimensions, then continue with the common
// SSA initialization.
static int textsub_post_init( hb_filter_object_t * filter, hb_job_t * job )
{
    int w = job->title->geometry.width  - job->crop[2] - job->crop[3];
    int h = job->title->geometry.height - job->crop[0] - job->crop[1];

    hb_subtitle_add_ssa_header(filter->subtitle, HB_FONT_SANS,
                               .066 * job->title->geometry.height,
                               w, h);
    return ssa_post_init(filter, job);
}
808 
// Text subtitles share all SSA state, so closing is identical.
static void textsub_close( hb_filter_object_t * filter )
{
    // C forbids `return <expression>;` in a function returning void
    // (C11 6.8.6.4); call the shared close directly instead.
    ssa_close(filter);
}
813 
// Feed one complete subtitle event to libass.
static void process_sub(hb_filter_private_t *pv, hb_buffer_t *sub)
{
    int64_t start, dur;
    int size;
    char *ssa, *tmp;

    // libass expects every chunk to have a unique sequence number
    // since we are repeating subs in some cases, we need to replace
    // the sequence number.  The MKV-SSA payload starts with
    // "ReadOrder,..." — skip the original field up to the first comma
    // and substitute our running counter.
    tmp = strchr((char*)sub->data, ',');
    if (tmp == NULL)
        return;

    ssa = hb_strdup_printf("%d%s", ++pv->line, tmp);
    if (ssa == NULL)
    {
        // Allocation failed; drop this event rather than calling
        // strlen/ass_process_chunk on NULL.
        return;
    }

    // Parse MKV-SSA packet
    // SSA subtitles always have an explicit stop time, so we
    // do not need to do special processing for stop == AV_NOPTS_VALUE
    start = sub->s.start;
    dur = sub->s.stop - sub->s.start;
    size = strlen(ssa);
    ass_process_chunk(pv->ssaTrack, ssa, size, start, dur);
    free(ssa);
}
838 
// Per-frame text-subtitle work: feed pending events to libass, tracking
// subs whose stop time is unknown (e.g. CC "display until cleared"),
// then render the active events onto the frame.
static int textsub_work(hb_filter_object_t * filter,
                    hb_buffer_t ** buf_in,
                    hb_buffer_t ** buf_out)
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * sub;

    if (!pv->script_initialized)
    {
        // Extradata is only available after decoder init; see ssa_work
        // for why this cannot be done in post_init.
        ass_process_codec_private(pv->ssaTrack,
                                  (char*)filter->subtitle->extradata,
                                  filter->subtitle->extradata_size);
        pv->script_initialized = 1;
    }

    if (in->s.flags & HB_BUF_FLAG_EOF)
    {
        *buf_in = NULL;
        *buf_out = in;
        return HB_FILTER_DONE;
    }

    int in_start_ms = in->s.start / 90;

    // Get any pending subtitles and add them to the active
    // subtitle list
    while ((sub = hb_fifo_get(filter->subtitle->fifo_out)))
    {
        if (sub->s.flags & HB_BUF_FLAG_EOF)
        {
            hb_buffer_close(&sub);
            if (pv->current_sub != NULL)
            {
                // Make us some duration for final sub
                pv->current_sub->s.stop = pv->current_sub->s.start +
                                          90000LL * 10;
                process_sub(pv, pv->current_sub);
                hb_buffer_close(&pv->current_sub);
            }
            break;
        }

        // libass expects times in ms.  So to make the math easy,
        // convert to ms immediately.
        sub->s.start /= 90;
        if (sub->s.stop != AV_NOPTS_VALUE)
        {
            sub->s.stop /= 90;
        }

        // Subtitle formats such as CC can have stop times
        // that are not known until an "erase display" command
        // is encountered in the stream.  For these formats
        // current_sub is the currently active subtitle for which
        // we do not yet know the stop time.  We do not currently
        // support overlapping subtitles of this type.
        if (pv->current_sub != NULL)
        {
            // Next sub start time tells us the stop time of the
            // current sub when it is not known in advance.
            pv->current_sub->s.stop = sub->s.start;
            process_sub(pv, pv->current_sub);
            hb_buffer_close(&pv->current_sub);
        }
        if (sub->s.flags & HB_BUF_FLAG_EOS)
        {
            // marker used to "clear" previous sub that had
            // an unknown duration
            hb_buffer_close(&sub);
        }
        else if (sub->s.stop == AV_NOPTS_VALUE)
        {
            // We don't know the duration of this sub.  So we will
            // apply it to every video frame until we see a "clear" sub.
            pv->current_sub = sub;
            pv->current_sub->s.stop = pv->current_sub->s.start;
        }
        else
        {
            // Duration of this subtitle is known, so we can just
            // process it normally.
            process_sub(pv, sub);
            hb_buffer_close(&sub);
        }
    }
    if (pv->current_sub != NULL && pv->current_sub->s.start <= in_start_ms)
    {
        // We don't know the duration of this subtitle, but we know
        // that it started before the current video frame and that
        // it is still active.  So render it on this video frame.
        // Re-issue only the span not yet covered: [previous stop, now+1).
        pv->current_sub->s.start = pv->current_sub->s.stop;
        pv->current_sub->s.stop = in_start_ms + 1;
        process_sub(pv, pv->current_sub);
    }

    ApplySSASubs(pv, in);
    *buf_in = NULL;
    *buf_out = in;

    return HB_FILTER_OK;
}
941 
// Burn the currently active PGS subtitle (if any) into the video frame.
//
// PGS subtitles supersede one another: only the most recent subtitle
// whose start time has been reached should be displayed.  Prune the
// superseded entries, drop empty "clear screen" subtitles from the
// front of the list, then overlay whatever remains active onto buf.
static void ApplyPGSSubs( hb_filter_private_t * pv, hb_buffer_t * buf )
{
    hb_buffer_t * sub;
    int           active = -1;

    // Find the most recent subtitle that has already started.
    // Each PGS subtitle supersedes anything that preceded it.
    for (int ii = hb_list_count(pv->sub_list) - 1; ii > 0; ii--)
    {
        sub = hb_list_item(pv->sub_list, ii);
        if (sub->s.start <= buf->s.start)
        {
            active = ii;
            break;
        }
    }

    // Everything before the active subtitle is obsolete; delete it.
    for (int ii = 0; ii < active; ii++)
    {
        hb_buffer_t * stale = hb_list_item(pv->sub_list, 0);
        hb_list_rem(pv->sub_list, stale);
        hb_buffer_close(&stale);
    }

    // Some PGS subtitles have no content and only serve to clear the
    // screen.  Discard any such entries now at the front of the list.
    while (hb_list_count(pv->sub_list) > 0)
    {
        sub = hb_list_item(pv->sub_list, 0);
        if (sub->f.width != 0 && sub->f.height != 0)
        {
            break;
        }
        hb_list_rem(pv->sub_list, sub);
        hb_buffer_close(&sub);
    }

    // If the front subtitle has started, scale it to the output
    // geometry and composite it onto the frame.
    if (hb_list_count(pv->sub_list) > 0)
    {
        sub = hb_list_item(pv->sub_list, 0);
        if (sub->s.start <= buf->s.start)
        {
            hb_buffer_t * scaled = ScaleSubtitle(pv, sub, buf);
            ApplySub(pv, buf, scaled);
            hb_buffer_close(&scaled);
        }
    }
}
991 
// Per-job setup for PGS/DVB subtitle rendering: allocate the list that
// holds the currently active subtitles.  Always succeeds (returns 0).
static int pgssub_post_init( hb_filter_object_t * filter, hb_job_t * job )
{
    hb_filter_private_t * pv = filter->private_data;

    pv->sub_list = hb_list_init();
    return 0;
}
1000 
// Tear down the PGS/DVB renderer's private state.  Safe to call when
// the private data was never allocated.
static void pgssub_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;

    if (pv == NULL)
    {
        return;
    }

    if (pv->sub_list != NULL)
    {
        // Drop the active-subtitle list.
        hb_list_empty(&pv->sub_list);
    }

    free(pv);
    filter->private_data = NULL;
}
1016 
// Work function for PGS/DVB subtitles: drain pending subtitle buffers
// from the subtitle fifo into the active list, then burn the current
// subtitle into the video frame before passing it downstream.
static int pgssub_work( hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out)
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t         * in = *buf_in;
    hb_buffer_t         * sub;

    // End of stream: hand the EOF buffer through and signal completion.
    if (in->s.flags & HB_BUF_FLAG_EOF)
    {
        *buf_in  = NULL;
        *buf_out = in;
        return HB_FILTER_DONE;
    }

    // Move any pending subtitles onto the active subtitle list.
    for (;;)
    {
        sub = hb_fifo_get(filter->subtitle->fifo_out);
        if (sub == NULL)
        {
            break;
        }
        if (sub->s.flags & HB_BUF_FLAG_EOF)
        {
            hb_buffer_close(&sub);
            break;
        }
        hb_list_add(pv->sub_list, sub);
    }

    ApplyPGSSubs(pv, in);
    *buf_in  = NULL;
    *buf_out = in;

    return HB_FILTER_OK;
}
1050 
// Instantiate the rendersub filter: allocate the private state and
// locate the subtitle track marked for burn-in (dest == RENDERSUB).
//
// Returns 0 on success; 1 when allocation fails or when no subtitle
// is marked for rendering.
static int hb_rendersub_init( hb_filter_object_t * filter,
                              hb_filter_init_t * init )
{
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    if (filter->private_data == NULL)
    {
        // Check the allocation before pv->input below dereferences it.
        hb_error("rendersub: private data allocation failed");
        return 1;
    }
    hb_filter_private_t * pv = filter->private_data;
    hb_subtitle_t * subtitle = NULL;
    int ii;

    pv->input = *init;

    // Find the subtitle we need
    for( ii = 0; ii < hb_list_count(init->job->list_subtitle); ii++ )
    {
        subtitle = hb_list_item( init->job->list_subtitle, ii );
        if( subtitle != NULL && subtitle->config.dest == RENDERSUB )
        {
            // Found it
            filter->subtitle = subtitle;
            pv->type = subtitle->source;
            break;
        }
    }
    if( filter->subtitle == NULL )
    {
        hb_log("rendersub: no subtitle marked for burn");
        return 1;
    }
    pv->output = *init;

    return 0;
}
1082 
// Per-job initialization: record the job's crop values, then dispatch
// to the post-init routine matching the subtitle source format.
// Returns 0 on success, non-zero on failure or unsupported format.
static int hb_rendersub_post_init( hb_filter_object_t * filter, hb_job_t *job )
{
    hb_filter_private_t * pv = filter->private_data;

    pv->crop[0] = job->crop[0];
    pv->crop[1] = job->crop[1];
    pv->crop[2] = job->crop[2];
    pv->crop[3] = job->crop[3];

    switch (pv->type)
    {
        case VOBSUB:
            return vobsub_post_init(filter, job);

        case SSASUB:
            return ssa_post_init(filter, job);

        case IMPORTSRT:
        case IMPORTSSA:
        case UTF8SUB:
        case TX3GSUB:
            return textsub_post_init(filter, job);

        case CC608SUB:
            return cc608sub_post_init(filter, job);

        case DVBSUB:
        case PGSSUB:
            return pgssub_post_init(filter, job);

        default:
            hb_log("rendersub: unsupported subtitle format %d", pv->type);
            return 1;
    }
}
1130 
// Top-level work function: route each video frame to the work routine
// matching the subtitle source format.  Returns the per-format status,
// or 1 for an unsupported format.
static int hb_rendersub_work( hb_filter_object_t * filter,
                                 hb_buffer_t ** buf_in,
                                 hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;

    switch (pv->type)
    {
        case VOBSUB:
            return vobsub_work(filter, buf_in, buf_out);

        case SSASUB:
            return ssa_work(filter, buf_in, buf_out);

        case IMPORTSRT:
        case IMPORTSSA:
        case CC608SUB:
        case UTF8SUB:
        case TX3GSUB:
            return textsub_work(filter, buf_in, buf_out);

        case DVBSUB:
        case PGSSUB:
            return pgssub_work(filter, buf_in, buf_out);

        default:
            hb_error("rendersub: unsupported subtitle format %d", pv->type);
            return 1;
    }
}
1170 
// Tear down the rendersub filter: free the scaler context and dispatch
// to the close routine matching the subtitle source format (which frees
// the private data itself).
static void hb_rendersub_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;

    // Guard against close running when init failed or never ran;
    // keeps behavior consistent with the per-type close functions.
    if (pv == NULL)
    {
        return;
    }

    if (pv->sws != NULL)
    {
        sws_freeContext(pv->sws);
    }
    switch( pv->type )
    {
        case VOBSUB:
        {
            vobsub_close( filter );
        } break;

        case SSASUB:
        {
            ssa_close( filter );
        } break;

        case IMPORTSRT:
        case IMPORTSSA:
        case CC608SUB:
        case UTF8SUB:
        case TX3GSUB:
        {
            textsub_close( filter );
        } break;

        case DVBSUB:
        case PGSSUB:
        {
            pgssub_close( filter );
        } break;

        default:
        {
            hb_error("rendersub: unsupported subtitle format %d", pv->type );
            // No type-specific close ran above, so release the private
            // data here to avoid leaking it.
            free( pv );
            filter->private_data = NULL;
        } break;
    }
}
1212 
1213