1 /*
2 * filter_telecide.c -- Donald Graft's Inverse Telecine Filter
3 * Copyright (C) 2003 Donald A. Graft
4 * Copyright (C) 2008 Dan Dennedy <dan@dennedy.org>
5 * Author: Dan Dennedy <dan@dennedy.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 */
21
22 #include <framework/mlt.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26
27 #define MAX_CYCLE 6
28 #define BLKSIZE 24
29 #define BLKSIZE_TIMES2 (2 * BLKSIZE)
30 #define GUIDE_32 1
31 #define GUIDE_22 2
32 #define GUIDE_32322 3
33 #define AHEAD 0
34 #define BEHIND 1
35 #define POST_METRICS 1
36 #define POST_FULL 2
37 #define POST_FULL_MAP 3
38 #define POST_FULL_NOMATCH 4
39 #define POST_FULL_NOMATCH_MAP 5
40 #define CACHE_SIZE 100000
41 #define P 0
42 #define C 1
43 #define N 2
44 #define PBLOCK 3
45 #define CBLOCK 4
46 #define NO_BACK 0
47 #define BACK_ON_COMBED 1
48 #define ALWAYS_BACK 2
49
/* One slot of the per-frame metrics cache (indexed by frame % CACHE_SIZE). */
struct CACHE_ENTRY
{
	unsigned int frame;      // frame number this slot currently describes
	unsigned int metrics[5]; // indexed by P, C, N, PBLOCK, CBLOCK
	unsigned int chosen;     // match chosen for this frame (P/C/N); 0xff = not chosen yet
};
56
/* One candidate produced by soft pattern guidance (see PredictSoftYUY2).
   A metric of 0xffffffff marks the end-of-list sentinel. */
struct PREDICTION
{
	unsigned int metric;           // goodness of this candidate (lower is better)
	unsigned int phase;            // cycle phase at which the candidate was found
	unsigned int predicted;        // predicted match: C or N
	unsigned int predicted_metric; // field-match metric of the predicted match
};
64
/* All per-filter state. One instance is shared across frames via the
   filter's properties (key "context"). */
struct context_s {
	int is_configured;          // non-zero once the user properties have been read
	mlt_properties image_cache; // recent frame images, keyed by position string
	int out;                    // last valid frame position for cache range checks

	// User-settable options (read from filter properties in get_image).
	int tff, chroma, blend, hints, show, debug;
	float dthresh, gthresh, vthresh, vthresh_saved, bthresh;
	int y0, y1, nt, guide, post, back, back_saved;
	// Derived geometry; pitch is bytes per row of the packed YUY2 image.
	int pitch, dpitch, pitchover2, pitchtimes4;
	int w, h, wover2, hover2, hplus1over2, hminus2;
	int xblocks, yblocks;       // dimensions of the per-block metric grid
#ifdef WINDOWED_MATCH
	unsigned int *matchc, *matchp, highest_matchc, highest_matchp;
#endif
	unsigned int *sumc, *sump, highest_sumc, highest_sump; // per-block combing counts
	int vmetric;                // combing metric of the chosen match
	unsigned int *overrides, *overrides_p; // NOTE(review): not referenced in this chunk
	int film, override, inpattern, found;
	int force;

	// Used by field matching.
	unsigned char *fprp, *fcrp, *fcrp_saved, *fnrp; // previous/current(saved)/next frame images
	unsigned char *dstp, *finalp;                   // output and blend destination pointers
	int chosen;                 // best match for the current frame: P, C, or N
	unsigned int p, c, pblock, cblock, lowest, predicted, predicted_metric;
	unsigned int np, nc, npblock, ncblock, nframe;
	float mismatch;             // % difference between chosen and predicted metric
	int pframe, x, y;
	unsigned char *crp, *prp;   // scratch image pointers for metric pre-computation
	unsigned char *crpU, *prpU; // chroma plane pointers — NOTE(review): callers in this
	unsigned char *crpV, *prpV; //   chunk always pass NULL for these to CalculateMetrics
	int hard;                   // non-zero when hard pattern guidance succeeded
	char status[80];            // human-readable pattern status for Show()/Debug()

	// Metrics cache.
	struct CACHE_ENTRY *cache;

	// Pattern guidance data.
	int cycle;                  // telecine cycle length: 2, 5, or 6 (see get_image)
	struct PREDICTION pred[MAX_CYCLE+1]; // soft-guidance candidates, sentinel-terminated
};
typedef struct context_s *context;
107
108
/**
 * Copy a rectangle of bytes row by row between two buffers that may have
 * different pitches (bytes per row).
 *
 * @param dstp      destination of the first row
 * @param dst_pitch bytes between successive destination rows
 * @param srcp      source of the first row
 * @param src_pitch bytes between successive source rows
 * @param row_size  bytes to copy per row
 * @param height    number of rows; values <= 0 copy nothing
 */
static inline
void BitBlt(uint8_t* dstp, int dst_pitch, const uint8_t* srcp,
            int src_pitch, int row_size, int height)
{
	int y;
	// Use a signed counter: the original compared an unsigned counter with a
	// signed height, so a negative height was promoted to a huge unsigned
	// value and the loop ran off the end of both buffers.
	for (y = 0; y < height; y++)
	{
		memcpy(dstp, srcp, row_size);
		dstp += dst_pitch;
		srcp += src_pitch;
	}
}
121
/**
 * Compose the per-frame diagnostic text and attach it to the frame as the
 * "meta.attr.telecide.markup" property so it can be rendered on screen.
 *
 * @param cx         filter context holding the metrics to report
 * @param frame      frame position being reported
 * @param properties frame properties to receive the markup string
 */
static void Show(context cx, int frame, mlt_properties properties)
{
	char use;
	char buf[512];
	int len;

	if (cx->chosen == P) use = 'p';
	else if (cx->chosen == C) use = 'c';
	else use = 'n';

	// Build the message incrementally with a length cursor.  The original
	// code passed buf as both the destination and a "%s" source argument of
	// snprintf, which is undefined behavior (overlapping objects).  The
	// metrics are unsigned, so print them with %u rather than %d.
	len = snprintf(buf, sizeof(buf), "Telecide: frame %d: matches: %u %u %u\n",
	               frame, cx->p, cx->c, cx->np);
	if (len < 0 || len >= (int) sizeof(buf)) len = sizeof(buf) - 1;
	if ( cx->post )
	{
		len += snprintf(buf + len, sizeof(buf) - len,
		                "Telecide: frame %d: vmetrics: %u %u %u [chosen=%d]\n",
		                frame, cx->pblock, cx->cblock, cx->npblock, cx->vmetric);
		if (len >= (int) sizeof(buf)) len = sizeof(buf) - 1;
	}
	if ( cx->guide )
	{
		len += snprintf(buf + len, sizeof(buf) - len,
		                "pattern mismatch=%0.2f%%\n", cx->mismatch);
		if (len >= (int) sizeof(buf)) len = sizeof(buf) - 1;
	}
	snprintf(buf + len, sizeof(buf) - len, "Telecide: frame %d: [%s %c]%s %s\n",
	         frame, cx->found ? "forcing" : "using", use,
	         cx->post ? (cx->film ? " [progressive]" : " [interlaced]") : "",
	         cx->guide ? cx->status : "");
	mlt_properties_set( properties, "meta.attr.telecide.markup", buf );
}
140
/**
 * Print per-frame diagnostic information to stderr.
 * Mirrors Show(), but writes to the console instead of frame properties.
 *
 * @param cx    filter context holding the metrics to report
 * @param frame frame position being reported
 */
static void Debug(context cx, int frame)
{
	char use;

	if (cx->chosen == P) use = 'p';
	else if (cx->chosen == C) use = 'c';
	else use = 'n';
	// The match metrics are unsigned int; print them with %u — passing an
	// unsigned argument for %d is undefined for values above INT_MAX.
	fprintf(stderr, "Telecide: frame %d: matches: %u %u %u\n", frame, cx->p, cx->c, cx->np);
	if ( cx->post )
		fprintf(stderr, "Telecide: frame %d: vmetrics: %u %u %u [chosen=%d]\n",
		        frame, cx->pblock, cx->cblock, cx->npblock, cx->vmetric);
	if ( cx->guide )
		fprintf(stderr, "pattern mismatch=%0.2f%%\n", cx->mismatch);
	fprintf(stderr, "Telecide: frame %d: [%s %c]%s %s\n", frame,
	        cx->found ? "forcing" : "using", use,
	        cx->post ? (cx->film ? " [progressive]" : " [interlaced]") : "",
	        cx->guide ? cx->status : "");
}
157
/**
 * Publish the field-matching verdict on the frame so downstream filters
 * (e.g. a decimator) can consume it.
 *
 * @param film             non-zero when the reconstructed frame is progressive
 * @param inpattern        non-zero when a telecine pattern was recognized
 * @param frame_properties frame properties that receive the two hints
 */
static void WriteHints(int film, int inpattern, mlt_properties frame_properties)
{
	mlt_properties_set_int( frame_properties, "telecide.progressive", film);
	mlt_properties_set_int( frame_properties, "telecide.in_pattern", inpattern);
}
163
/**
 * Record which match (P/C/N) was actually delivered for a frame, so hard
 * pattern guidance can inspect the history later.  The update is silently
 * dropped when the frame is out of range or its cache slot has been reused
 * for a different frame.
 *
 * @param cx     filter context owning the metrics cache
 * @param frame  frame position whose choice is being recorded
 * @param chosen the delivered match code (P, C, or N)
 */
static void PutChosen(context cx, int frame, unsigned int chosen)
{
	const int slot = frame % CACHE_SIZE;

	// Range checks run first so a negative frame never indexes the cache.
	if (frame >= 0 && frame <= cx->out && cx->cache[slot].frame == (unsigned int) frame)
		cx->cache[slot].chosen = chosen;
}
171
/**
 * Store the field-match metrics for a frame in its cache slot, and back-fill
 * the previous slot's "next" (N) metric, which by construction equals this
 * frame's "previous" (p) metric.
 *
 * @param cx     filter context owning the metrics cache
 * @param frame  frame position being inserted
 * @param p      current-previous field match metric
 * @param pblock highest per-block combing count against the previous frame
 * @param c      current-current field match metric
 * @param cblock highest per-block combing count within the current frame
 */
static void CacheInsert(context cx, int frame, unsigned int p, unsigned int pblock,
                        unsigned int c, unsigned int cblock)
{
	int f = frame % CACHE_SIZE;
	if (frame < 0 || frame > cx->out)
	{
		// Bail out: the original fell through and indexed the cache with a
		// possibly negative slot, writing out of bounds.
		fprintf( stderr, "%s: internal error: invalid frame %d for CacheInsert\n", __FUNCTION__, frame);
		return;
	}
	cx->cache[f].frame = frame;
	cx->cache[f].metrics[P] = p;
	// This frame's p metric is the previous frame's n metric.
	if (f) cx->cache[f-1].metrics[N] = p;
	cx->cache[f].metrics[C] = c;
	cx->cache[f].metrics[PBLOCK] = pblock;
	cx->cache[f].metrics[CBLOCK] = cblock;
	cx->cache[f].chosen = 0xff; // no match delivered yet
}
186
/**
 * Look up the cached field-match metrics for a frame.
 *
 * @param cx     filter context owning the metrics cache
 * @param frame  frame position to query
 * @param p      receives the current-previous match metric
 * @param pblock receives the previous-frame combing count
 * @param c      receives the current-current match metric
 * @param cblock receives the current-frame combing count
 * @return 1 when the slot holds this frame's metrics, 0 on a miss
 */
static int CacheQuery(context cx, int frame, unsigned int *p, unsigned int *pblock,
                      unsigned int *c, unsigned int *cblock)
{
	int f;

	f = frame % CACHE_SIZE;
	if (frame < 0 || frame > cx->out)
	{
		// Treat an invalid frame as a cache miss: the original fell through
		// and read cx->cache[f] with a possibly negative index.
		fprintf( stderr, "%s: internal error: invalid frame %d for CacheQuery\n", __FUNCTION__, frame);
		return 0;
	}
	if (cx->cache[f].frame != frame)
	{
		return 0;
	}
	*p = cx->cache[f].metrics[P];
	*c = cx->cache[f].metrics[C];
	*pblock = cx->cache[f].metrics[PBLOCK];
	*cblock = cx->cache[f].metrics[CBLOCK];
	return 1;
}
205
/**
 * Hard pattern guidance: examine the matches actually delivered over the
 * previous cycle of frames and, if they form a recognized telecine pattern,
 * predict the current frame's match from that pattern.
 *
 * @param cx               filter context (cx->guide selects the pattern set)
 * @param frame            frame position being matched
 * @param predicted        receives the predicted match (C or N) on success
 * @param predicted_metric receives that match's cached metric on success
 * @return 1 when a pattern was recognized, 0 otherwise
 *
 * NOTE(review): the (frame - cx->cycle) % CACHE_SIZE indexing assumes
 * frame >= cx->cycle; the caller in get_image guards this — confirm before
 * reusing elsewhere.
 */
static int PredictHardYUY2(context cx, int frame, unsigned int *predicted, unsigned int *predicted_metric)
{
	// Look for pattern in the actual delivered matches of the previous cycle of frames.
	// If a pattern is found, use that to predict the current match.
	if ( cx->guide == GUIDE_22 )
	{
		// 2:2 pulldown: both chosen values of the previous 2-frame cycle must
		// be known (0xff marks "no match delivered yet", see CacheInsert).
		if (cx->cache[(frame- cx->cycle)%CACHE_SIZE ].chosen == 0xff ||
			cx->cache[(frame- cx->cycle+1)%CACHE_SIZE].chosen == 0xff)
			return 0;
		// Pack the chosen codes one nibble per frame and match the pattern.
		switch ((cx->cache[(frame- cx->cycle)%CACHE_SIZE ].chosen << 4) +
				(cx->cache[(frame- cx->cycle+1)%CACHE_SIZE].chosen))
		{
		case 0x11: // both frames matched current -> keep matching current
			*predicted = C;
			*predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C];
			break;
		case 0x22: // both frames matched next -> keep matching next
			*predicted = N;
			*predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N];
			break;
		default: return 0;
		}
	}
	else if ( cx->guide == GUIDE_32 )
	{
		// 3:2 pulldown (24fps -> 30fps): need all five chosen values of the
		// previous 5-frame cycle.
		if (cx->cache[(frame-cx->cycle)%CACHE_SIZE ].chosen == 0xff ||
			cx->cache[(frame-cx->cycle+1)%CACHE_SIZE].chosen == 0xff ||
			cx->cache[(frame-cx->cycle+2)%CACHE_SIZE].chosen == 0xff ||
			cx->cache[(frame-cx->cycle+3)%CACHE_SIZE].chosen == 0xff ||
			cx->cache[(frame-cx->cycle+4)%CACHE_SIZE].chosen == 0xff)
			return 0;

		// Five chosen codes packed one nibble per frame; the cases below are
		// the cyclic rotations of the C,C,C,N,N (1,1,1,2,2) pattern.
		switch ((cx->cache[(frame-cx->cycle)%CACHE_SIZE ].chosen << 16) +
				(cx->cache[(frame-cx->cycle+1)%CACHE_SIZE].chosen << 12) +
				(cx->cache[(frame-cx->cycle+2)%CACHE_SIZE].chosen << 8) +
				(cx->cache[(frame-cx->cycle+3)%CACHE_SIZE].chosen << 4) +
				(cx->cache[(frame-cx->cycle+4)%CACHE_SIZE].chosen))
		{
		case 0x11122:
		case 0x11221:
		case 0x12211:
		case 0x12221:
		case 0x21122:
		case 0x11222:
			*predicted = C;
			*predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C];
			break;
		case 0x22111:
		case 0x21112:
		case 0x22112:
		case 0x22211:
			*predicted = N;
			*predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N];
			break;
		default: return 0;
		}
	}
	else if ( cx->guide == GUIDE_32322 )
	{
		// 3:2:3:2:2 pulldown (25fps -> 30fps): need all six chosen values of
		// the previous 6-frame cycle.
		if (cx->cache[(frame- cx->cycle)%CACHE_SIZE ].chosen == 0xff ||
			cx->cache[(frame- cx->cycle +1)%CACHE_SIZE].chosen == 0xff ||
			cx->cache[(frame- cx->cycle +2)%CACHE_SIZE].chosen == 0xff ||
			cx->cache[(frame- cx->cycle +3)%CACHE_SIZE].chosen == 0xff ||
			cx->cache[(frame- cx->cycle +4)%CACHE_SIZE].chosen == 0xff ||
			cx->cache[(frame- cx->cycle +5)%CACHE_SIZE].chosen == 0xff)
			return 0;

		// Six chosen codes packed one nibble per frame.
		switch ((cx->cache[(frame- cx->cycle)%CACHE_SIZE ].chosen << 20) +
				(cx->cache[(frame- cx->cycle +1)%CACHE_SIZE].chosen << 16) +
				(cx->cache[(frame- cx->cycle +2)%CACHE_SIZE].chosen << 12) +
				(cx->cache[(frame- cx->cycle +3)%CACHE_SIZE].chosen << 8) +
				(cx->cache[(frame- cx->cycle +4)%CACHE_SIZE].chosen << 4) +
				(cx->cache[(frame- cx->cycle +5)%CACHE_SIZE].chosen))
		{
		case 0x111122:
		case 0x111221:
		case 0x112211:
		case 0x122111:
		case 0x111222:
		case 0x112221:
		case 0x122211:
		case 0x222111:
			*predicted = C;
			*predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C];
			break;
		case 0x221111:
		case 0x211112:

		case 0x221112:
		case 0x211122:
			*predicted = N;
			*predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N];
			break;
		default: return 0;
		}
	}
#ifdef DEBUG_PATTERN_GUIDANCE
	fprintf( stderr, "%s: pos=%d HARD: predicted=%d\n", __FUNCTION__, frame, *predicted);
#endif
	return 1;
}
307
/**
 * Soft pattern guidance: scan the next cycle of frames for positions where
 * the C and N match metrics are nearly equal (that should occur once per
 * telecine cycle) and derive phase-based predictions from them.
 *
 * @param cx    filter context (cx->guide selects the phase-offset table)
 * @param frame frame position being matched
 * @return cx->pred, a list of candidates sorted best-first and terminated by
 *         an entry whose metric is 0xffffffff; pred[0].metric == 0xffffffff
 *         means no candidate was found (or frame is out of range).
 *
 * NOTE(review): the list holds at most cx->cycle candidates plus the
 * sentinel, which fits pred[MAX_CYCLE+1] for all supported guide modes.
 */
static struct PREDICTION *PredictSoftYUY2(context cx, int frame )
{
	// Use heuristics to look forward for a match.
	int i, j, y, c, n, phase;
	unsigned int metric;

	// Reset the list to "empty" (sentinel in slot 0).
	cx->pred[0].metric = 0xffffffff;
	if (frame < 0 || frame > cx->out - cx->cycle) return cx->pred;

	// Look at the next cycle of frames.
	for (y = frame + 1; y <= frame + cx->cycle; y++)
	{
		// Look for a frame where the current and next match values are
		// very close. Those are candidates to predict the phase, because
		// that condition should occur only once per cycle. Store the candidate
		// phases and predictions in a list sorted by goodness. The list will
		// be used by the caller to try the phases in order.
		c = cx->cache[y%CACHE_SIZE].metrics[C];
		n = cx->cache[y%CACHE_SIZE].metrics[N];
		if (c == 0) c = 1; // avoid division by zero below
		// Relative C/N difference in percent; small means "phase marker".
		metric = (100 * abs (c - n)) / c;
		phase = y % cx->cycle;
		if (metric < 5)
		{
			// Place the new candidate phase in sorted order in the list.
			// Find the insertion point.
			i = 0;
			while (metric > cx->pred[i].metric) i++;
			// Find the end-of-list marker.
			j = 0;
			while (cx->pred[j].metric != 0xffffffff) j++;
			// Shift all items below the insertion point down by one to make
			// room for the insertion.
			j++; // include the sentinel in the shift so the list stays terminated
			for (; j > i; j--)
			{
				cx->pred[j].metric = cx->pred[j-1].metric;
				cx->pred[j].phase = cx->pred[j-1].phase;
				cx->pred[j].predicted = cx->pred[j-1].predicted;
				cx->pred[j].predicted_metric = cx->pred[j-1].predicted_metric;
			}
			// Insert the new candidate data.
			cx->pred[j].metric = metric;
			cx->pred[j].phase = phase;
			if ( cx->guide == GUIDE_32 )
			{
				// Map the offset between the current frame's phase and the
				// candidate phase to a C/N prediction for 3:2 pulldown.
				switch ((frame % cx->cycle) - phase)
				{
				case -4: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case -3: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case -2: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case -1: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case 0:  cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case +1: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case +2: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case +3: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case +4: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				}
			}
			else if ( cx->guide == GUIDE_32322 )
			{
				// Same idea for the 3:2:3:2:2 (25fps -> 30fps) pattern.
				switch ((frame % cx->cycle) - phase)
				{
				case -5: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case -4: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case -3: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case -2: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case -1: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case 0:  cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case +1: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case +2: cx->pred[j].predicted = N; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[N]; break;
				case +3: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case +4: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				case +5: cx->pred[j].predicted = C; cx->pred[j].predicted_metric = cx->cache[frame%CACHE_SIZE].metrics[C]; break;
				}
			}
		}
#ifdef DEBUG_PATTERN_GUIDANCE
		fprintf( stderr, "%s: pos=%d metric=%d phase=%d\n", __FUNCTION__, frame, metric, phase);
#endif
	}
	return cx->pred;
}
391
/**
 * Compute the field-match and combing metrics for a frame pair and insert
 * them into the metrics cache (see CacheInsert).
 *
 * Two families of metrics are produced over a subsampled grid:
 *  - p/c: how badly the candidate field pairings "comb" when woven — p pairs
 *    the current frame's field with the previous frame, c keeps both fields
 *    of the current frame;
 *  - sump/sumc: per-block counts of combed pixels used later to decide
 *    progressive vs. interlaced (post-processing).
 *
 * @param cx    filter context; reads geometry (pitch, w, h, blocks) and
 *              writes cx->sump/cx->sumc and the cache entry for @frame
 * @param frame frame position the metrics belong to
 * @param fcrp  current frame image (packed; luma-only sampling when
 *              cx->chroma is 0)
 * @param fprp  previous frame image
 *
 * NOTE(review): the chroma-plane parameters (fcrpU/fcrpV/fprpU/fprpV) are
 * unused here — every caller in this file passes NULL for them.
 */
static
void CalculateMetrics(context cx, int frame, unsigned char *fcrp, unsigned char *fcrpU, unsigned char *fcrpV,
                      unsigned char *fprp, unsigned char *fprpU, unsigned char *fprpV)
{
	int x, y, p, c, tmp1, tmp2, skip;
	int vc;
	unsigned char *currbot0, *currbot2, *prevbot0, *prevbot2;
	unsigned char *prevtop0, *prevtop2, *prevtop4, *currtop0, *currtop2, *currtop4;
	unsigned char *a0, *a2, *b0, *b2, *b4;
	unsigned int diff, index;
	// T is the per-pixel tolerance of the combing detector below.
# define T 4

	/* Clear the block sums. */
	for (y = 0; y < cx->yblocks; y++)
	{
		for (x = 0; x < cx->xblocks; x++)
		{
#ifdef WINDOWED_MATCH
			matchp[y*xblocks+x] = 0;
			matchc[y*xblocks+x] = 0;
#endif
			cx->sump[y * cx->xblocks + x] = 0;
			cx->sumc[y * cx->xblocks + x] = 0;
		}
	}

	/* Find the best field match. Subsample the frames for speed. */
	// Row pointers: "top" rows are even lines, "bot" rows are odd lines; the
	// numeric suffix is the line offset within the 4-line sampling window.
	currbot0 = fcrp + cx->pitch;
	currbot2 = fcrp + 3 * cx->pitch;
	currtop0 = fcrp;
	currtop2 = fcrp + 2 * cx->pitch;
	currtop4 = fcrp + 4 * cx->pitch;
	prevbot0 = fprp + cx->pitch;
	prevbot2 = fprp + 3 * cx->pitch;
	prevtop0 = fprp;
	prevtop2 = fprp + 2 * cx->pitch;
	prevtop4 = fprp + 4 * cx->pitch;
	// Select which frame contributes which field for the "previous" pairing,
	// depending on field order.
	if ( cx->tff )
	{
		a0 = prevbot0;
		a2 = prevbot2;
		b0 = currtop0;
		b2 = currtop2;
		b4 = currtop4;
	}
	else
	{
		a0 = currbot0;
		a2 = currbot2;
		b0 = prevtop0;
		b2 = prevtop2;
		b4 = prevtop4;
	}
	p = c = 0;

	// Calculate the field match and film/video metrics.
	// With chroma disabled, step 2 bytes at a time to sample only luma in
	// the packed YUY2 layout.
	skip = 1 + ( !cx->chroma );
	for (y = 0, index = 0; y < cx->h - 4; y+=4)
	{
		/* Exclusion band. Good for ignoring subtitles. */
		if (cx->y0 == cx->y1 || y < cx->y0 || y > cx->y1)
		{
			for (x = 0; x < cx->w;)
			{
				index = (y/BLKSIZE) * cx->xblocks + x/BLKSIZE_TIMES2;

				// Test combination with current frame.
				// diff measures how far the interpolated odd lines deviate
				// from 1.5x the surrounding even lines (a weave residual).
				tmp1 = ((long)currbot0[x] + (long)currbot2[x]);
				diff = labs((((long)currtop0[x] + (long)currtop2[x] + (long)currtop4[x])) - (tmp1 >> 1) - tmp1);
				if (diff > cx->nt) // ignore differences below the noise threshold
				{
					c += diff;
#ifdef WINDOWED_MATCH
					matchc[index] += diff;
#endif
				}

				// Combing test: the odd-line sample lies more than T outside
				// both neighboring even-line samples.
				tmp1 = currbot0[x] + T;
				tmp2 = currbot0[x] - T;
				vc = (tmp1 < currtop0[x] && tmp1 < currtop2[x]) ||
					 (tmp2 > currtop0[x] && tmp2 > currtop2[x]);
				if (vc)
				{
					cx->sumc[index]++;
				}

				// Test combination with previous frame.
				tmp1 = ((long)a0[x] + (long)a2[x]);
				diff = labs((((long)b0[x] + (long)b2[x] + (long)b4[x])) - (tmp1 >> 1) - tmp1);
				if (diff > cx->nt)
				{
					p += diff;
#ifdef WINDOWED_MATCH
					matchp[index] += diff;
#endif
				}

				tmp1 = a0[x] + T;
				tmp2 = a0[x] - T;
				vc = (tmp1 < b0[x] && tmp1 < b2[x]) ||
					 (tmp2 > b0[x] && tmp2 > b2[x]);
				if (vc)
				{
					cx->sump[index]++;
				}

				// Horizontal subsampling: examine a 4-byte group, then skip
				// the next 4 bytes.
				x += skip;
				if (!(x&3)) x += 4;
			}
		}
		// Advance all row pointers 4 lines down.
		currbot0 += cx->pitchtimes4;
		currbot2 += cx->pitchtimes4;
		currtop0 += cx->pitchtimes4;
		currtop2 += cx->pitchtimes4;
		currtop4 += cx->pitchtimes4;
		a0 += cx->pitchtimes4;
		a2 += cx->pitchtimes4;
		b0 += cx->pitchtimes4;
		b2 += cx->pitchtimes4;
		b4 += cx->pitchtimes4;
	}


	// For post-processing, only the worst (highest) per-block combing count
	// matters; find it for both pairings.
	if ( cx->post )
	{
		cx->highest_sump = 0;
		for (y = 0; y < cx->yblocks; y++)
		{
			for (x = 0; x < cx->xblocks; x++)
			{
				if (cx->sump[y * cx->xblocks + x] > cx->highest_sump)
				{
					cx->highest_sump = cx->sump[y * cx->xblocks + x];
				}
			}
		}
		cx->highest_sumc = 0;
		for (y = 0; y < cx->yblocks; y++)
		{
			for (x = 0; x < cx->xblocks; x++)
			{
				if (cx->sumc[y * cx->xblocks + x] > cx->highest_sumc)
				{
					cx->highest_sumc = cx->sumc[y * cx->xblocks + x];
				}
			}
		}
	}
#ifdef WINDOWED_MATCH
	CacheInsert(frame, highest_matchp, highest_sump, highest_matchc, highest_sumc);
#else
	CacheInsert( cx, frame, p, cx->highest_sump, c, cx->highest_sumc);
#endif
}
546
547 /** Process the image.
548 */
549
get_image(mlt_frame frame,uint8_t ** image,mlt_image_format * format,int * width,int * height,int writable)550 static int get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
551 {
552 // Get the filter service
553 mlt_filter filter = mlt_frame_pop_service( frame );
554 mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
555 mlt_properties frame_properties = mlt_frame_properties( frame );
556 context cx = mlt_properties_get_data( properties, "context", NULL );
557 mlt_service producer = mlt_service_producer( mlt_filter_service( filter ) );
558 cx->out = producer? mlt_producer_get_playtime( MLT_PRODUCER( producer ) ) : 999999;
559
560 if ( ! cx->is_configured )
561 {
562 cx->back = mlt_properties_get_int( properties, "back" );
563 cx->chroma = mlt_properties_get_int( properties, "chroma" );
564 cx->guide = mlt_properties_get_int( properties, "guide" );
565 cx->gthresh = mlt_properties_get_double( properties, "gthresh" );
566 cx->post = mlt_properties_get_int( properties, "post" );
567 cx->vthresh = mlt_properties_get_double( properties, "vthresh" );
568 cx->bthresh = mlt_properties_get_double( properties, "bthresh" );
569 cx->dthresh = mlt_properties_get_double( properties, "dthresh" );
570 cx->blend = mlt_properties_get_int( properties, "blend" );
571 cx->nt = mlt_properties_get_int( properties, "nt" );
572 cx->y0 = mlt_properties_get_int( properties, "y0" );
573 cx->y1 = mlt_properties_get_int( properties, "y1" );
574 cx->hints = mlt_properties_get_int( properties, "hints" );
575 cx->debug = mlt_properties_get_int( properties, "debug" );
576 cx->show = mlt_properties_get_int( properties, "show" );
577 }
578
579 // Get the image
580 int error = mlt_frame_get_image( frame, image, format, width, height, 1 );
581
582 if ( ! cx->sump )
583 {
584 int guide = mlt_properties_get_int( properties, "guide" );
585 cx->cycle = 0;
586 if ( guide == GUIDE_32 )
587 {
588 // 24fps to 30 fps telecine.
589 cx->cycle = 5;
590 }
591 else if ( guide == GUIDE_22 )
592 {
593 // PAL guidance (expect the current match to be continued).
594 cx->cycle = 2;
595 }
596 else if ( guide == GUIDE_32322 )
597 {
598 // 25fps to 30 fps telecine.
599 cx->cycle = 6;
600 }
601
602 cx->xblocks = (*width+BLKSIZE-1) / BLKSIZE;
603 cx->yblocks = (*height+BLKSIZE-1) / BLKSIZE;
604 cx->sump = (unsigned int *) mlt_pool_alloc( cx->xblocks * cx->yblocks * sizeof(unsigned int) );
605 cx->sumc = (unsigned int *) mlt_pool_alloc( cx->xblocks * cx->yblocks * sizeof(unsigned int) );
606 mlt_properties_set_data( properties, "sump", cx->sump, cx->xblocks * cx->yblocks * sizeof(unsigned int), (mlt_destructor)mlt_pool_release, NULL );
607 mlt_properties_set_data( properties, "sumc", cx->sumc, cx->xblocks * cx->yblocks * sizeof(unsigned int), (mlt_destructor)mlt_pool_release, NULL );
608 cx->tff = mlt_properties_get_int( frame_properties, "top_field_first" );
609 }
610
611 // Only process if we have no error and a valid colour space
612 if ( error == 0 && *format == mlt_image_yuv422 )
613 {
614 // Put the current image into the image cache, keyed on position
615 size_t image_size = (*width * *height) << 1;
616 mlt_position pos = mlt_filter_get_position( filter, frame );
617 uint8_t *image_copy = mlt_pool_alloc( image_size );
618 memcpy( image_copy, *image, image_size );
619 char key[20];
620 sprintf( key, MLT_POSITION_FMT, pos );
621 mlt_properties_set_data( cx->image_cache, key, image_copy, image_size, (mlt_destructor)mlt_pool_release, NULL );
622
623 // Only if we have enough frame images cached
624 if ( pos > 1 && pos > cx->cycle + 1 )
625 {
626 pos -= cx->cycle + 1;
627 // Get the current frame image
628 sprintf( key, MLT_POSITION_FMT, pos );
629 cx->fcrp = mlt_properties_get_data( cx->image_cache, key, NULL );
630 if (!cx->fcrp) return error;
631
632 // Get the previous frame image
633 cx->pframe = pos == 0 ? 0 : pos - 1;
634 sprintf( key, "%d", cx->pframe );
635 cx->fprp = mlt_properties_get_data( cx->image_cache, key, NULL );
636 if (!cx->fprp) return error;
637
638 // Get the next frame image
639 cx->nframe = pos > cx->out ? cx->out : pos + 1;
640 sprintf( key, "%d", cx->nframe );
641 cx->fnrp = mlt_properties_get_data( cx->image_cache, key, NULL );
642 if (!cx->fnrp) return error;
643
644 cx->pitch = *width << 1;
645 cx->pitchover2 = cx->pitch >> 1;
646 cx->pitchtimes4 = cx->pitch << 2;
647 cx->w = *width << 1;
648 cx->h = *height;
649 if ((cx->w/2) & 1)
650 fprintf( stderr, "%s: width must be a multiple of 2\n", __FUNCTION__ );
651 if (cx->h & 1)
652 fprintf( stderr, "%s: height must be a multiple of 2\n", __FUNCTION__ );
653 cx->wover2 = cx->w/2;
654 cx->hover2 = cx->h/2;
655 cx->hplus1over2 = (cx->h+1)/2;
656 cx->hminus2 = cx->h - 2;
657 cx->dpitch = cx->pitch;
658
659 // Ensure that the metrics for the frames
660 // after the current frame are in the cache. They will be used for
661 // pattern guidance.
662 if ( cx->guide )
663 {
664 for ( cx->y = pos + 1; (cx->y <= pos + cx->cycle + 1) && (cx->y <= cx->out); cx->y++ )
665 {
666 if ( ! CacheQuery( cx, cx->y, &cx->p, &cx->pblock, &cx->c, &cx->cblock ) )
667 {
668 sprintf( key, "%d", cx->y );
669 cx->crp = (unsigned char *) mlt_properties_get_data( cx->image_cache, key, NULL );
670 sprintf( key, "%d", cx->y ? cx->y - 1 : 1 );
671 cx->prp = (unsigned char *) mlt_properties_get_data( cx->image_cache, key, NULL );
672 CalculateMetrics( cx, cx->y, cx->crp, NULL, NULL, cx->prp, NULL, NULL );
673 }
674 }
675 }
676
677 // Check for manual overrides of the field matching.
678 cx->found = 0;
679 cx->film = 1;
680 cx->override = 0;
681 cx->inpattern = 0;
682 cx->back = cx->back_saved;
683
684 // Get the metrics for the current-previous (p), current-current (c), and current-next (n) match candidates.
685 if ( ! CacheQuery( cx, pos, &cx->p, &cx->pblock, &cx->c, &cx->cblock ) )
686 {
687 CalculateMetrics( cx, pos, cx->fcrp, NULL, NULL, cx->fprp, NULL, NULL );
688 CacheQuery( cx, pos, &cx->p, &cx->pblock, &cx->c, &cx->cblock );
689 }
690 if ( ! CacheQuery( cx, cx->nframe, &cx->np, &cx->npblock, &cx->nc, &cx->ncblock ) )
691 {
692 CalculateMetrics( cx, cx->nframe, cx->fnrp, NULL, NULL, cx->fcrp, NULL, NULL );
693 CacheQuery( cx, cx->nframe, &cx->np, &cx->npblock, &cx->nc, &cx->ncblock );
694 }
695
696 // Determine the best candidate match.
697 if ( !cx->found )
698 {
699 cx->lowest = cx->c;
700 cx->chosen = C;
701 if ( cx->back == ALWAYS_BACK && cx->p < cx->lowest )
702 {
703 cx->lowest = cx->p;
704 cx->chosen = P;
705 }
706 if ( cx->np < cx->lowest )
707 {
708 cx->lowest = cx->np;
709 cx->chosen = N;
710 }
711 }
712 if ((pos == 0 && cx->chosen == P) || (pos == cx->out && cx->chosen == N))
713 {
714 cx->chosen = C;
715 cx->lowest = cx->c;
716 }
717
718 // See if we can apply pattern guidance.
719 cx->mismatch = 100.0;
720 if ( cx->guide )
721 {
722 cx->hard = 0;
723 if ( pos >= cx->cycle && PredictHardYUY2( cx, pos, &cx->predicted, &cx->predicted_metric) )
724 {
725 cx->inpattern = 1;
726 cx->mismatch = 0.0;
727 cx->hard = 1;
728 if ( cx->chosen != cx->predicted )
729 {
730 // The chosen frame doesn't match the prediction.
731 if ( cx->predicted_metric == 0 )
732 cx->mismatch = 0.0;
733 else
734 cx->mismatch = (100.0 * ( cx->predicted_metric - cx->lowest ) ) / cx->predicted_metric;
735 if ( cx->mismatch < cx->gthresh )
736 {
737 // It's close enough, so use the predicted one.
738 if ( !cx->found )
739 {
740 cx->chosen = cx->predicted;
741 cx->override = 1;
742 }
743 }
744 else
745 {
746 cx->hard = 0;
747 cx->inpattern = 0;
748 }
749 }
750 }
751
752 if ( !cx->hard && cx->guide != GUIDE_22 )
753 {
754 int i;
755 struct PREDICTION *pred = PredictSoftYUY2( cx, pos );
756
757 if ( ( pos <= cx->out - cx->cycle) && ( pred[0].metric != 0xffffffff ) )
758 {
759 // Apply pattern guidance.
760 // If the predicted match metric is within defined percentage of the
761 // best calculated one, then override the calculated match with the
762 // predicted match.
763 i = 0;
764 while ( pred[i].metric != 0xffffffff )
765 {
766 cx->predicted = pred[i].predicted;
767 cx->predicted_metric = pred[i].predicted_metric;
768 #ifdef DEBUG_PATTERN_GUIDANCE
769 fprintf(stderr, "%s: pos=%d predicted=%d\n", __FUNCTION__, pos, cx->predicted);
770 #endif
771 if ( cx->chosen != cx->predicted )
772 {
773 // The chosen frame doesn't match the prediction.
774 if ( cx->predicted_metric == 0 )
775 cx->mismatch = 0.0;
776 else
777 cx->mismatch = (100.0 * ( cx->predicted_metric - cx->lowest )) / cx->predicted_metric;
778 if ( (int) cx->mismatch <= cx->gthresh )
779 {
780 // It's close enough, so use the predicted one.
781 if ( !cx->found )
782 {
783 cx->chosen = cx->predicted;
784 cx->override = 1;
785 }
786 cx->inpattern = 1;
787 break;
788 }
789 else
790 {
791 // Looks like we're not in a predictable pattern.
792 cx->inpattern = 0;
793 }
794 }
795 else
796 {
797 cx->inpattern = 1;
798 cx->mismatch = 0.0;
799 break;
800 }
801 i++;
802 }
803 }
804 }
805 }
806
807 // Check the match for progressive versus interlaced.
808 if ( cx->post )
809 {
810 if (cx->chosen == P) cx->vmetric = cx->pblock;
811 else if (cx->chosen == C) cx->vmetric = cx->cblock;
812 else if (cx->chosen == N) cx->vmetric = cx->npblock;
813
814 if ( !cx->found && cx->back == BACK_ON_COMBED && cx->vmetric > cx->bthresh && cx->p < cx->lowest )
815 {
816 // Backward match.
817 cx->vmetric = cx->pblock;
818 cx->chosen = P;
819 cx->inpattern = 0;
820 cx->mismatch = 100;
821 }
822 if ( cx->vmetric > cx->vthresh )
823 {
824 // After field matching and pattern guidance the frame is still combed.
825 cx->film = 0;
826 if ( !cx->found && ( cx->post == POST_FULL_NOMATCH || cx->post == POST_FULL_NOMATCH_MAP ) )
827 {
828 cx->chosen = C;
829 cx->vmetric = cx->cblock;
830 cx->inpattern = 0;
831 cx->mismatch = 100;
832 }
833 }
834 }
835 cx->vthresh = cx->vthresh_saved;
836
837 // Setup strings for debug info.
838 if ( cx->inpattern && !cx->override ) strcpy( cx->status, "[in-pattern]" );
839 else if ( cx->inpattern && cx->override ) strcpy( cx->status, "[in-pattern*]" );
840 else strcpy( cx->status, "[out-of-pattern]" );
841
842 // Assemble and output the reconstructed frame according to the final match.
843 cx->dstp = *image;
844 if ( cx->chosen == N )
845 {
846 // The best match was with the next frame.
847 if ( cx->tff )
848 {
849 BitBlt( cx->dstp, 2 * cx->dpitch, cx->fnrp, 2 * cx->pitch, cx->w, cx->hover2 );
850 BitBlt( cx->dstp + cx->dpitch, 2 * cx->dpitch, cx->fcrp + cx->pitch, 2 * cx->pitch, cx->w, cx->hover2 );
851 }
852 else
853 {
854 BitBlt( cx->dstp, 2 * cx->dpitch, cx->fcrp, 2 * cx->pitch, cx->w, cx->hplus1over2 );
855 BitBlt( cx->dstp + cx->dpitch, 2 * cx->dpitch, cx->fnrp + cx->pitch, 2 * cx->pitch, cx->w, cx->hover2 );
856 }
857 }
858 else if ( cx->chosen == C )
859 {
860 // The best match was with the current frame.
861 BitBlt( cx->dstp, 2 * cx->dpitch, cx->fcrp, 2 * cx->pitch, cx->w, cx->hplus1over2 );
862 BitBlt( cx->dstp + cx->dpitch, 2 * cx->dpitch, cx->fcrp + cx->pitch, 2 * cx->pitch, cx->w, cx->hover2 );
863 }
864 else if ( ! cx->tff )
865 {
866 // The best match was with the previous frame.
867 BitBlt( cx->dstp, 2 * cx->dpitch, cx->fprp, 2 * cx->pitch, cx->w, cx->hplus1over2 );
868 BitBlt( cx->dstp + cx->dpitch, 2 * cx->dpitch, cx->fcrp + cx->pitch, 2 * cx->pitch, cx->w, cx->hover2 );
869 }
870 else
871 {
872 // The best match was with the previous frame.
873 BitBlt( cx->dstp, 2 * cx->dpitch, cx->fcrp, 2 * cx->pitch, cx->w, cx->hplus1over2 );
874 BitBlt( cx->dstp + cx->dpitch, 2 * cx->dpitch, cx->fprp + cx->pitch, 2 * cx->pitch, cx->w, cx->hover2 );
875 }
876 if ( cx->guide )
877 PutChosen( cx, pos, cx->chosen );
878
879 if ( !cx->post || cx->post == POST_METRICS )
880 {
881 if ( cx->force == '+') cx->film = 0;
882 else if ( cx->force == '-' ) cx->film = 1;
883 }
884 else if ((cx->force == '+') ||
885 ((cx->post == POST_FULL || cx->post == POST_FULL_MAP || cx->post == POST_FULL_NOMATCH || cx->post == POST_FULL_NOMATCH_MAP)
886 && (cx->film == 0 && cx->force != '-')))
887 {
888 unsigned char *dstpp, *dstpn;
889 int v1, v2;
890
891 if ( cx->blend )
892 {
893 // Do first and last lines.
894 uint8_t *final = mlt_pool_alloc( image_size );
895 cx->finalp = final;
896 mlt_frame_set_image( frame, final, image_size, mlt_pool_release );
897 dstpn = cx->dstp + cx->dpitch;
898 for ( cx->x = 0; cx->x < cx->w; cx->x++ )
899 {
900 cx->finalp[cx->x] = (((int)cx->dstp[cx->x] + (int)dstpn[cx->x]) >> 1);
901 }
902 cx->finalp = final + (cx->h-1)*cx->dpitch;
903 cx->dstp = *image + (cx->h-1)*cx->dpitch;
904 dstpp = cx->dstp - cx->dpitch;
905 for ( cx->x = 0; cx->x < cx->w; cx->x++ )
906 {
907 cx->finalp[cx->x] = (((int)cx->dstp[cx->x] + (int)dstpp[cx->x]) >> 1);
908 }
909 // Now do the rest.
910 cx->dstp = *image + cx->dpitch;
911 dstpp = cx->dstp - cx->dpitch;
912 dstpn = cx->dstp + cx->dpitch;
913 cx->finalp = final + cx->dpitch;
914 for ( cx->y = 1; cx->y < cx->h - 1; cx->y++ )
915 {
916 for ( cx->x = 0; cx->x < cx->w; cx->x++ )
917 {
918 v1 = (int) cx->dstp[cx->x] - cx->dthresh;
919 if ( v1 < 0 )
920 v1 = 0;
921 v2 = (int) cx->dstp[cx->x] + cx->dthresh;
922 if (v2 > 235) v2 = 235;
923 if ((v1 > dstpp[cx->x] && v1 > dstpn[cx->x]) || (v2 < dstpp[cx->x] && v2 < dstpn[cx->x]))
924 {
925 if ( cx->post == POST_FULL_MAP || cx->post == POST_FULL_NOMATCH_MAP )
926 {
927 if (cx->x & 1) cx->finalp[cx->x] = 128;
928 else cx->finalp[cx->x] = 235;
929 }
930 else
931 cx->finalp[cx->x] = ((int)dstpp[cx->x] + (int)dstpn[cx->x] + (int)cx->dstp[cx->x] + (int)cx->dstp[cx->x]) >> 2;
932 }
933 else cx->finalp[cx->x] = cx->dstp[cx->x];
934 }
935 cx->finalp += cx->dpitch;
936 cx->dstp += cx->dpitch;
937 dstpp += cx->dpitch;
938 dstpn += cx->dpitch;
939 }
940
941
942 if (cx->show ) Show( cx, pos, frame_properties);
943 if (cx->debug) Debug(cx, pos);
944 if (cx->hints) WriteHints(cx->film, cx->inpattern, frame_properties);
945 goto final;
946 }
947
948 // Interpolate mode.
949 cx->dstp = *image + cx->dpitch;
950 dstpp = cx->dstp - cx->dpitch;
951 dstpn = cx->dstp + cx->dpitch;
952 for ( cx->y = 1; cx->y < cx->h - 1; cx->y+=2 )
953 {
954 for ( cx->x = 0; cx->x < cx->w; cx->x++ )
955 {
956 v1 = (int) cx->dstp[cx->x] - cx->dthresh;
957 if (v1 < 0) v1 = 0;
958 v2 = (int) cx->dstp[cx->x] + cx->dthresh;
959 if (v2 > 235) v2 = 235;
960 if ((v1 > dstpp[cx->x] && v1 > dstpn[cx->x]) || (v2 < dstpp[cx->x] && v2 < dstpn[cx->x]))
961 {
962 if ( cx->post == POST_FULL_MAP || cx->post == POST_FULL_NOMATCH_MAP )
963 {
964 if (cx->x & 1) cx->dstp[cx->x] = 128;
965 else cx->dstp[cx->x] = 235;
966 }
967 else
968 cx->dstp[cx->x] = (dstpp[cx->x] + dstpn[cx->x]) >> 1;
969 }
970 }
971 cx->dstp += 2 * cx->dpitch;
972 dstpp += 2 * cx->dpitch;
973 dstpn += 2 * cx->dpitch;
974 }
975 }
976 if (cx->show ) Show( cx, pos, frame_properties);
977 if (cx->debug) Debug(cx, pos);
978 if (cx->hints) WriteHints(cx->film, cx->inpattern, frame_properties);
979
980 final:
981 // Flush frame at tail of period from the cache
982 sprintf( key, MLT_POSITION_FMT, pos - 1 );
983 mlt_properties_set_data( cx->image_cache, key, NULL, 0, NULL, NULL );
984 }
985 else
986 {
987 // Signal the first {cycle} frames as invalid
988 mlt_properties_set_int( frame_properties, "garbage", 1 );
989 }
990 }
991 else if ( error == 0 && *format == mlt_image_yuv420p )
992 {
993 fprintf(stderr,"%s: %d pos " MLT_POSITION_FMT "\n", __FUNCTION__, *width * *height * 3/2, mlt_frame_get_position(frame) );
994 }
995
996 return error;
997 }
998
999 /** Process the frame object.
1000 */
1001
/** Attach this filter's image processing to the frame.
 *
 * Pushes the filter service and the get_image callback onto the frame's
 * stacks; the actual work happens later when the consumer pulls the image.
 */
static mlt_frame process( mlt_filter self, mlt_frame frame )
{
	// Stash the filter service so get_image can retrieve it from the stack.
	mlt_frame_push_service( frame, self );

	// Chain our get_image callback into the frame's image pipeline.
	mlt_frame_push_get_image( frame, get_image );

	return frame;
}
1012
1013 /** Constructor for the filter.
1014 */
1015
filter_telecide_init(mlt_profile profile,mlt_service_type type,const char * id,char * arg)1016 mlt_filter filter_telecide_init( mlt_profile profile, mlt_service_type type, const char *id, char *arg )
1017 {
1018 mlt_filter filter = mlt_filter_new( );
1019 if ( filter != NULL )
1020 {
1021 filter->process = process;
1022
1023 // Allocate the context and set up for garbage collection
1024 context cx = (context) mlt_pool_alloc( sizeof(struct context_s) );
1025 memset( cx, 0, sizeof( struct context_s ) );
1026 mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
1027 mlt_properties_set_data( properties, "context", cx, sizeof(struct context_s), (mlt_destructor)mlt_pool_release, NULL );
1028
1029 // Allocate the metrics cache and set up for garbage collection
1030 cx->cache = (struct CACHE_ENTRY *) mlt_pool_alloc(CACHE_SIZE * sizeof(struct CACHE_ENTRY ));
1031 mlt_properties_set_data( properties, "cache", cx->cache, CACHE_SIZE * sizeof(struct CACHE_ENTRY), (mlt_destructor)mlt_pool_release, NULL );
1032 int i;
1033 for (i = 0; i < CACHE_SIZE; i++)
1034 {
1035 cx->cache[i].frame = 0xffffffff;
1036 cx->cache[i].chosen = 0xff;
1037 }
1038
1039 // Allocate the image cache and set up for garbage collection
1040 cx->image_cache = mlt_properties_new();
1041 mlt_properties_set_data( properties, "image_cache", cx->image_cache, 0, (mlt_destructor)mlt_properties_close, NULL );
1042
1043 // Initialize the parameter defaults
1044 mlt_properties_set_int( properties, "guide", 0 );
1045 mlt_properties_set_int( properties, "back", 0 );
1046 mlt_properties_set_int( properties, "chroma", 0 );
1047 mlt_properties_set_int( properties, "post", POST_FULL );
1048 mlt_properties_set_double( properties, "gthresh", 10.0 );
1049 mlt_properties_set_double( properties, "vthresh", 50.0 );
1050 mlt_properties_set_double( properties, "bthresh", 50.0 );
1051 mlt_properties_set_double( properties, "dthresh", 7.0 );
1052 mlt_properties_set_int( properties, "blend", 0 );
1053 mlt_properties_set_int( properties, "nt", 10 );
1054 mlt_properties_set_int( properties, "y0", 0 );
1055 mlt_properties_set_int( properties, "y1", 0 );
1056 mlt_properties_set_int( properties, "hints", 1 );
1057 }
1058 return filter;
1059 }
1060
1061