1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * The Original Code is Copyright (C) 2011 Blender Foundation.
17 * All rights reserved.
18 */
19
20 /** \file
21 * \ingroup bke
22 *
23 * This file contains implementation of function which are used
24 * by multiple tracking files but which should not be public.
25 */
26
27 #include <stddef.h>
28
29 #include "MEM_guardedalloc.h"
30
31 #include "DNA_movieclip_types.h"
32
33 #include "BLI_ghash.h"
34 #include "BLI_listbase.h"
35 #include "BLI_math.h"
36 #include "BLI_string.h"
37 #include "BLI_string_utils.h"
38 #include "BLI_threads.h"
39 #include "BLI_utildefines.h"
40
41 #include "BLT_translation.h"
42
43 #include "BKE_movieclip.h"
44 #include "BKE_tracking.h"
45
46 #include "IMB_imbuf.h"
47 #include "IMB_imbuf_types.h"
48 #include "IMB_moviecache.h"
49
50 #include "tracking_private.h"
51
52 #include "libmv-capi.h"
53
54 /* Uncomment this to have caching-specific debug prints. */
55 // #define DEBUG_CACHE
56
57 #ifdef DEBUG_CACHE
58 # define CACHE_PRINTF(...) printf(__VA_ARGS__)
59 #else
60 # define CACHE_PRINTF(...)
61 #endif
62
63 /*********************** Tracks map *************************/
64
tracks_map_new(const char * object_name,bool is_camera,int num_tracks,int customdata_size)65 TracksMap *tracks_map_new(const char *object_name,
66 bool is_camera,
67 int num_tracks,
68 int customdata_size)
69 {
70 TracksMap *map = MEM_callocN(sizeof(TracksMap), "TrackingsMap");
71
72 BLI_strncpy(map->object_name, object_name, sizeof(map->object_name));
73 map->is_camera = is_camera;
74
75 map->num_tracks = num_tracks;
76 map->customdata_size = customdata_size;
77
78 map->tracks = MEM_callocN(sizeof(MovieTrackingTrack) * num_tracks, "TrackingsMap tracks");
79
80 if (customdata_size) {
81 map->customdata = MEM_callocN(customdata_size * num_tracks, "TracksMap customdata");
82 }
83
84 map->hash = BLI_ghash_ptr_new("TracksMap hash");
85
86 BLI_spin_init(&map->spin_lock);
87
88 return map;
89 }
90
/* Return the number of tracks the map was created for. */
int tracks_map_get_size(TracksMap *map)
{
  return map->num_tracks;
}
95
/* Get track (and its custom data, when the map stores any) at the given
 * index of the map.
 *
 * `*customdata` is set to NULL when the map was created without custom data,
 * so callers never see an uninitialized pointer. */
void tracks_map_get_indexed_element(TracksMap *map,
                                    int index,
                                    MovieTrackingTrack **track,
                                    void **customdata)
{
  *track = &map->tracks[index];

  if (map->customdata) {
    *customdata = &map->customdata[index * map->customdata_size];
  }
  else {
    /* Always initialize the output: previously this was left untouched when
     * there is no custom data, letting callers read an indeterminate value. */
    *customdata = NULL;
  }
}
107
/* Append a copy of `track` (and optionally its custom data) into the next
 * free slot of the map, remembering which original track the copy maps to. */
void tracks_map_insert(TracksMap *map, MovieTrackingTrack *track, void *customdata)
{
  /* Shallow copy of the track with its own markers allocation. */
  MovieTrackingTrack track_copy = *track;
  track_copy.markers = MEM_dupallocN(track_copy.markers);

  const int slot = map->ptr;
  map->tracks[slot] = track_copy;

  if (customdata != NULL) {
    memcpy(&map->customdata[slot * map->customdata_size], customdata, map->customdata_size);
  }

  /* Map the stored copy back to the original track for tracks_map_merge(). */
  BLI_ghash_insert(map->hash, &map->tracks[slot], track);

  map->ptr = slot + 1;
}
124
/* Merge tracks from the map back into the tracking context, preserving
 * selection-like flags of the existing tracks and keeping names unique. */
void tracks_map_merge(TracksMap *map, MovieTracking *tracking)
{
  MovieTrackingTrack *track;
  /* `tracks` temporarily holds the operating tracks; `new_tracks` becomes
   * the final merged list which replaces `old_tracks` at the end. */
  ListBase tracks = {NULL, NULL}, new_tracks = {NULL, NULL};
  ListBase *old_tracks;

  /* Pick the list the map corresponds to: camera tracks live directly in the
   * tracking structure, object tracks in their named tracking object. */
  if (map->is_camera) {
    old_tracks = &tracking->tracks;
  }
  else {
    MovieTrackingObject *object = BKE_tracking_object_get_named(tracking, map->object_name);

    if (!object) {
      /* object was deleted by user, create new one */
      object = BKE_tracking_object_add(tracking, map->object_name);
    }

    old_tracks = &object->tracks;
  }

  /* duplicate currently operating tracks to temporary list.
   * this is needed to keep names in unique state and it's faster to change names
   * of currently operating tracks (if needed)
   */
  for (int a = 0; a < map->num_tracks; a++) {
    MovieTrackingTrack *old_track;
    bool mapped_to_old = false;

    track = &map->tracks[a];

    /* find original of operating track in list of previously displayed tracks */
    old_track = BLI_ghash_lookup(map->hash, track);
    if (old_track) {
      if (BLI_findindex(old_tracks, old_track) != -1) {
        BLI_remlink(old_tracks, old_track);

        /* Guard against concurrent readers of the map (e.g. a tracking job). */
        BLI_spin_lock(&map->spin_lock);

        /* Copy flags like selection back to the track map. */
        track->flag = old_track->flag;
        track->pat_flag = old_track->pat_flag;
        track->search_flag = old_track->search_flag;

        /* Copy all the rest settings back from the map to the actual tracks. */
        MEM_freeN(old_track->markers);
        *old_track = *track;
        old_track->markers = MEM_dupallocN(old_track->markers);

        BLI_spin_unlock(&map->spin_lock);

        BLI_addtail(&tracks, old_track);

        mapped_to_old = true;
      }
    }

    if (mapped_to_old == false) {
      /* The original track is gone (e.g. deleted by the user while tracking
       * was running), so add a fresh duplicate instead. */
      MovieTrackingTrack *new_track = BKE_tracking_track_duplicate(track);

      /* Update old-new track mapping */
      BLI_ghash_reinsert(map->hash, track, new_track, NULL, NULL);

      BLI_addtail(&tracks, new_track);
    }
  }

  /* move all tracks, which aren't operating */
  track = old_tracks->first;
  while (track) {
    MovieTrackingTrack *next = track->next;
    BLI_addtail(&new_tracks, track);
    track = next;
  }

  /* now move all tracks which are currently operating and keep their names unique */
  track = tracks.first;
  while (track) {
    MovieTrackingTrack *next = track->next;

    BLI_remlink(&tracks, track);

    track->next = track->prev = NULL;
    BLI_addtail(&new_tracks, track);

    BLI_uniquename(&new_tracks,
                   track,
                   CTX_DATA_(BLT_I18NCONTEXT_ID_MOVIECLIP, "Track"),
                   '.',
                   offsetof(MovieTrackingTrack, name),
                   sizeof(track->name));

    track = next;
  }

  /* Publish the merged result as the new track list. */
  *old_tracks = new_tracks;
}
221
/* Free the map, its track copies and (via the optional callback) the
 * per-track custom data. */
void tracks_map_free(TracksMap *map, void (*customdata_free)(void *customdata))
{
  /* The hash only borrows pointers, so no per-entry free callbacks. */
  BLI_ghash_free(map->hash, NULL, NULL);

  const bool has_customdata = (map->customdata != NULL);
  for (int i = 0; i < map->num_tracks; i++) {
    if (has_customdata && customdata_free != NULL) {
      customdata_free(&map->customdata[i * map->customdata_size]);
    }
    BKE_tracking_track_free(&map->tracks[i]);
  }

  if (has_customdata) {
    MEM_freeN(map->customdata);
  }
  MEM_freeN(map->tracks);

  BLI_spin_end(&map->spin_lock);
  MEM_freeN(map);
}
244
245 /*********************** Space transformation functions *************************/
246
247 /* Three coordinate frames: Frame, Search, and Marker
248 * Two units: Pixels, Unified
249 * Notation: {coordinate frame}_{unit}; for example, "search_pixel" are search
250 * window relative coordinates in pixels, and "frame_unified" are unified 0..1
251 * coordinates relative to the entire frame.
252 */
/* Scale unified (0..1) frame coordinates into pixel coordinates. */
static void unified_to_pixel(int frame_width,
                             int frame_height,
                             const float unified_coords[2],
                             float pixel_coords[2])
{
  const float dimensions[2] = {(float)frame_width, (float)frame_height};

  pixel_coords[0] = unified_coords[0] * dimensions[0];
  pixel_coords[1] = unified_coords[1] * dimensions[1];
}
261
marker_to_frame_unified(const MovieTrackingMarker * marker,const float marker_unified_coords[2],float frame_unified_coords[2])262 static void marker_to_frame_unified(const MovieTrackingMarker *marker,
263 const float marker_unified_coords[2],
264 float frame_unified_coords[2])
265 {
266 frame_unified_coords[0] = marker_unified_coords[0] + marker->pos[0];
267 frame_unified_coords[1] = marker_unified_coords[1] + marker->pos[1];
268 }
269
/* Convert marker-relative unified coordinates into frame pixel coordinates. */
static void marker_unified_to_frame_pixel_coordinates(int frame_width,
                                                      int frame_height,
                                                      const MovieTrackingMarker *marker,
                                                      const float marker_unified_coords[2],
                                                      float frame_pixel_coords[2])
{
  /* The output array doubles as scratch space for the intermediate
   * frame-unified coordinates; the pixel conversion happens in-place. */
  marker_to_frame_unified(marker, marker_unified_coords, frame_pixel_coords);
  unified_to_pixel(frame_width, frame_height, frame_pixel_coords, frame_pixel_coords);
}
279
tracking_get_search_origin_frame_pixel(int frame_width,int frame_height,const MovieTrackingMarker * marker,float frame_pixel[2])280 void tracking_get_search_origin_frame_pixel(int frame_width,
281 int frame_height,
282 const MovieTrackingMarker *marker,
283 float frame_pixel[2])
284 {
285 /* Get the lower left coordinate of the search window and snap to pixel coordinates */
286 marker_unified_to_frame_pixel_coordinates(
287 frame_width, frame_height, marker, marker->search_min, frame_pixel);
288 frame_pixel[0] = (int)frame_pixel[0];
289 frame_pixel[1] = (int)frame_pixel[1];
290 }
291
/* Normalize pixel coordinates into unified (0..1) frame coordinates;
 * inverse of unified_to_pixel(). */
static void pixel_to_unified(int frame_width,
                             int frame_height,
                             const float pixel_coords[2],
                             float unified_coords[2])
{
  unified_coords[0] = pixel_coords[0] / (float)frame_width;
  unified_coords[1] = pixel_coords[1] / (float)frame_height;
}
300
/* Express a marker-relative unified coordinate in search-window pixel space. */
static void marker_unified_to_search_pixel(int frame_width,
                                           int frame_height,
                                           const MovieTrackingMarker *marker,
                                           const float marker_unified[2],
                                           float search_pixel[2])
{
  float position_frame_pixel[2];
  float search_origin[2];

  marker_unified_to_frame_pixel_coordinates(
      frame_width, frame_height, marker, marker_unified, position_frame_pixel);
  tracking_get_search_origin_frame_pixel(frame_width, frame_height, marker, search_origin);

  /* Search-pixel space is relative to the search window origin. */
  sub_v2_v2v2(search_pixel, position_frame_pixel, search_origin);
}
316
search_pixel_to_marker_unified(int frame_width,int frame_height,const MovieTrackingMarker * marker,const float search_pixel[2],float marker_unified[2])317 static void search_pixel_to_marker_unified(int frame_width,
318 int frame_height,
319 const MovieTrackingMarker *marker,
320 const float search_pixel[2],
321 float marker_unified[2])
322 {
323 float frame_unified[2];
324 float search_origin_frame_pixel[2];
325
326 tracking_get_search_origin_frame_pixel(
327 frame_width, frame_height, marker, search_origin_frame_pixel);
328 add_v2_v2v2(frame_unified, search_pixel, search_origin_frame_pixel);
329 pixel_to_unified(frame_width, frame_height, frame_unified, frame_unified);
330
331 /* marker pos is in frame unified */
332 sub_v2_v2v2(marker_unified, frame_unified, marker->pos);
333 }
334
335 /* Each marker has 5 coordinates associated with it that get warped with
336 * tracking: the four corners ("pattern_corners"), and the center ("pos").
337 * This function puts those 5 points into the appropriate frame for tracking
338 * (the "search" coordinate frame).
339 */
tracking_get_marker_coords_for_tracking(int frame_width,int frame_height,const MovieTrackingMarker * marker,double search_pixel_x[5],double search_pixel_y[5])340 void tracking_get_marker_coords_for_tracking(int frame_width,
341 int frame_height,
342 const MovieTrackingMarker *marker,
343 double search_pixel_x[5],
344 double search_pixel_y[5])
345 {
346 float unified_coords[2];
347 float pixel_coords[2];
348
349 /* Convert the corners into search space coordinates. */
350 for (int i = 0; i < 4; i++) {
351 marker_unified_to_search_pixel(
352 frame_width, frame_height, marker, marker->pattern_corners[i], pixel_coords);
353 search_pixel_x[i] = pixel_coords[0] - 0.5f;
354 search_pixel_y[i] = pixel_coords[1] - 0.5f;
355 }
356
357 /* Convert the center position (aka "pos"); this is the origin */
358 unified_coords[0] = 0.0f;
359 unified_coords[1] = 0.0f;
360 marker_unified_to_search_pixel(frame_width, frame_height, marker, unified_coords, pixel_coords);
361
362 search_pixel_x[4] = pixel_coords[0] - 0.5f;
363 search_pixel_y[4] = pixel_coords[1] - 0.5f;
364 }
365
366 /* Inverse of above. */
void tracking_set_marker_coords_from_tracking(int frame_width,
                                              int frame_height,
                                              MovieTrackingMarker *marker,
                                              const double search_pixel_x[5],
                                              const double search_pixel_y[5])
{
  float marker_unified[2];
  float search_pixel[2];

  /* Convert the corners into search space coordinates. */
  for (int i = 0; i < 4; i++) {
    /* Undo the half-pixel pixel-center offset applied when converting the
     * coordinates for tracking. */
    search_pixel[0] = search_pixel_x[i] + 0.5;
    search_pixel[1] = search_pixel_y[i] + 0.5;
    search_pixel_to_marker_unified(
        frame_width, frame_height, marker, search_pixel, marker->pattern_corners[i]);
  }

  /* Convert the center position (aka "pos"); this is the origin */
  search_pixel[0] = search_pixel_x[4] + 0.5;
  search_pixel[1] = search_pixel_y[4] + 0.5;
  search_pixel_to_marker_unified(frame_width, frame_height, marker, search_pixel, marker_unified);

  /* If the tracker tracked nothing, then "marker_unified" would be zero.
   * Otherwise, the entire patch shifted, and that delta should be applied to
   * all the coordinates.
   */
  for (int i = 0; i < 4; i++) {
    /* Corners are stored relative to "pos", so compensate them by the delta
     * which is about to be added to "pos" below. */
    marker->pattern_corners[i][0] -= marker_unified[0];
    marker->pattern_corners[i][1] -= marker_unified[1];
  }

  marker->pos[0] += marker_unified[0];
  marker->pos[1] += marker_unified[1];
}
401
402 /*********************** General purpose utility functions *************************/
403
404 /* Place a disabled marker before or after specified ref_marker.
405 *
406 * If before is truth, disabled marker is placed before reference
407 * one, and it's placed after it otherwise.
408 *
409 * If there's already a marker at the frame where disabled one
410 * is expected to be placed, nothing will happen if overwrite
411 * is false.
412 */
void tracking_marker_insert_disabled(MovieTrackingTrack *track,
                                     const MovieTrackingMarker *ref_marker,
                                     bool before,
                                     bool overwrite)
{
  /* Start from a copy of the reference marker, drop its tracked status and
   * mark it disabled at the neighboring frame. */
  MovieTrackingMarker marker_new = *ref_marker;
  marker_new.flag = (marker_new.flag & ~MARKER_TRACKED) | MARKER_DISABLED;
  marker_new.framenr += before ? -1 : 1;

  if (!overwrite && BKE_tracking_track_has_marker_at_frame(track, marker_new.framenr)) {
    /* Respect the already existing marker at the target frame. */
    return;
  }

  BKE_tracking_marker_insert(track, &marker_new);
}
435
/* Fill in the Libmv distortion model and its parameters from the tracking
 * camera settings. Falls back to a zero-distortion polynomial model when the
 * camera uses a model unknown to this Blender version. */
static void distortion_model_parameters_from_tracking(
    const MovieTrackingCamera *camera, libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
{
  switch (camera->distortion_model) {
    case TRACKING_DISTORTION_MODEL_POLYNOMIAL:
      camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
      camera_intrinsics_options->polynomial_k1 = camera->k1;
      camera_intrinsics_options->polynomial_k2 = camera->k2;
      camera_intrinsics_options->polynomial_k3 = camera->k3;
      /* Tangential coefficients are not exposed by Blender's polynomial
       * model, always pass zero. */
      camera_intrinsics_options->polynomial_p1 = 0.0;
      camera_intrinsics_options->polynomial_p2 = 0.0;
      return;

    case TRACKING_DISTORTION_MODEL_DIVISION:
      camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_DIVISION;
      camera_intrinsics_options->division_k1 = camera->division_k1;
      camera_intrinsics_options->division_k2 = camera->division_k2;
      return;

    case TRACKING_DISTORTION_MODEL_NUKE:
      camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_NUKE;
      camera_intrinsics_options->nuke_k1 = camera->nuke_k1;
      camera_intrinsics_options->nuke_k2 = camera->nuke_k2;
      return;
    case TRACKING_DISTORTION_MODEL_BROWN:
      camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_BROWN;
      camera_intrinsics_options->brown_k1 = camera->brown_k1;
      camera_intrinsics_options->brown_k2 = camera->brown_k2;
      camera_intrinsics_options->brown_k3 = camera->brown_k3;
      camera_intrinsics_options->brown_k4 = camera->brown_k4;
      camera_intrinsics_options->brown_p1 = camera->brown_p1;
      camera_intrinsics_options->brown_p2 = camera->brown_p2;
      return;
  }

  /* Unknown distortion model, which might be due to opening newer file in older Blender.
   * Fallback to a known and supported model with 0 distortion. */
  camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
  camera_intrinsics_options->polynomial_k1 = 0.0;
  camera_intrinsics_options->polynomial_k2 = 0.0;
  camera_intrinsics_options->polynomial_k3 = 0.0;
  camera_intrinsics_options->polynomial_p1 = 0.0;
  camera_intrinsics_options->polynomial_p2 = 0.0;
}
480
/* Inverse of distortion_model_parameters_from_tracking(): copy the Libmv
 * distortion model and parameters back into the tracking camera settings. */
static void distortion_model_parameters_from_options(
    const libmv_CameraIntrinsicsOptions *camera_intrinsics_options, MovieTrackingCamera *camera)
{
  switch (camera_intrinsics_options->distortion_model) {
    case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
      camera->distortion_model = TRACKING_DISTORTION_MODEL_POLYNOMIAL;
      camera->k1 = camera_intrinsics_options->polynomial_k1;
      camera->k2 = camera_intrinsics_options->polynomial_k2;
      camera->k3 = camera_intrinsics_options->polynomial_k3;
      return;

    case LIBMV_DISTORTION_MODEL_DIVISION:
      camera->distortion_model = TRACKING_DISTORTION_MODEL_DIVISION;
      camera->division_k1 = camera_intrinsics_options->division_k1;
      camera->division_k2 = camera_intrinsics_options->division_k2;
      return;

    case LIBMV_DISTORTION_MODEL_NUKE:
      camera->distortion_model = TRACKING_DISTORTION_MODEL_NUKE;
      camera->nuke_k1 = camera_intrinsics_options->nuke_k1;
      camera->nuke_k2 = camera_intrinsics_options->nuke_k2;
      return;
    case LIBMV_DISTORTION_MODEL_BROWN:
      camera->distortion_model = TRACKING_DISTORTION_MODEL_BROWN;
      camera->brown_k1 = camera_intrinsics_options->brown_k1;
      camera->brown_k2 = camera_intrinsics_options->brown_k2;
      camera->brown_k3 = camera_intrinsics_options->brown_k3;
      camera->brown_k4 = camera_intrinsics_options->brown_k4;
      camera->brown_p1 = camera_intrinsics_options->brown_p1;
      camera->brown_p2 = camera_intrinsics_options->brown_p2;
      return;
  }

  /* Libmv returned distortion model which is not known to Blender. This is a logical error in code
   * and Blender side is to be updated to match Libmv. */
  BLI_assert(!"Unknown distortion model");
}
518
519 /* Fill in Libmv C-API camera intrinsics options from tracking structure. */
/* Fill in Libmv C-API camera intrinsics options from tracking structure. */
void tracking_cameraIntrinscisOptionsFromTracking(
    MovieTracking *tracking,
    int calibration_width,
    int calibration_height,
    libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
{
  MovieTrackingCamera *camera = &tracking->camera;
  const float inv_aspy = 1.0f / camera->pixel_aspect;

  camera_intrinsics_options->num_threads = BLI_system_thread_count();
  camera_intrinsics_options->focal_length = camera->focal;

  /* Vertical principal point and image height are scaled to compensate for
   * non-square pixel aspect. */
  camera_intrinsics_options->principal_point_x = camera->principal[0];
  camera_intrinsics_options->principal_point_y = camera->principal[1] * inv_aspy;

  camera_intrinsics_options->image_width = calibration_width;
  camera_intrinsics_options->image_height = (int)(calibration_height * inv_aspy);

  distortion_model_parameters_from_tracking(camera, camera_intrinsics_options);
}
541
/* Copy camera intrinsics options from Libmv back into the tracking camera. */
void tracking_trackingCameraFromIntrinscisOptions(
    MovieTracking *tracking, const libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
{
  MovieTrackingCamera *camera = &tracking->camera;
  const float inv_aspy = 1.0f / camera->pixel_aspect;

  camera->focal = camera_intrinsics_options->focal_length;

  camera->principal[0] = camera_intrinsics_options->principal_point_x;
  /* Undo the pixel-aspect compensation applied when the options were built. */
  camera->principal[1] = camera_intrinsics_options->principal_point_y / (double)inv_aspy;

  distortion_model_parameters_from_options(camera_intrinsics_options, camera);
}
555
556 /* Get previous keyframed marker. */
MovieTrackingMarker *tracking_get_keyframed_marker(MovieTrackingTrack *track,
                                                   int current_frame,
                                                   bool backwards)
{
  MovieTrackingMarker *marker_keyed = NULL;
  /* Fallback: first marker of the current tracked segment, used when no real
   * keyframed (manually placed) marker is found. */
  MovieTrackingMarker *marker_keyed_fallback = NULL;
  /* Index of the marker at (or nearest to) the current frame. */
  int a = BKE_tracking_marker_get(track, current_frame) - track->markers;

  /* Walk markers away from the current frame until a keyframed marker is
   * found. NOTE: when searching "backwards" in time we move forward through
   * the marker array, and vice versa. */
  while (a >= 0 && a < track->markersnr) {
    int next = backwards ? a + 1 : a - 1;
    bool is_keyframed = false;
    MovieTrackingMarker *cur_marker = &track->markers[a];
    MovieTrackingMarker *next_marker = NULL;

    if (next >= 0 && next < track->markersnr) {
      next_marker = &track->markers[next];
    }

    if ((cur_marker->flag & MARKER_DISABLED) == 0) {
      /* If it'll happen so we didn't find a real keyframe marker,
       * fallback to the first marker in current tracked segment
       * as a keyframe.
       */
      if (next_marker == NULL) {
        /* Could happen when trying to get reference marker for the fist
         * one on the segment which isn't surrounded by disabled markers.
         *
         * There's no really good choice here, just use the reference
         * marker which looks correct..
         */
        if (marker_keyed_fallback == NULL) {
          marker_keyed_fallback = cur_marker;
        }
      }
      else if (next_marker->flag & MARKER_DISABLED) {
        /* Segment boundary: the neighboring marker is disabled. */
        if (marker_keyed_fallback == NULL) {
          marker_keyed_fallback = cur_marker;
        }
      }

      /* A marker which was not produced by the tracker was placed by hand,
       * i.e. it is a keyframe. */
      is_keyframed |= (cur_marker->flag & MARKER_TRACKED) == 0;
    }

    if (is_keyframed) {
      marker_keyed = cur_marker;

      break;
    }

    a = next;
  }

  if (marker_keyed == NULL) {
    marker_keyed = marker_keyed_fallback;
  }

  return marker_keyed;
}
615
616 /*********************** Frame accessr *************************/
617
/* Key identifying a fully pre-processed frame buffer in the accessor cache.
 * Two requests hit the same cache entry only when every field matches
 * (see accesscache_hashcmp). */
typedef struct AccessCacheKey {
  int clip_index;
  int frame;
  /* Power-of-two downscale level the buffer was scaled by. */
  int downscale;
  /* Mono or RGBA, as requested by Libmv. */
  libmv_InputMode input_mode;
  /* When false, region_min/region_max are unused and left uninitialized. */
  bool has_region;
  float region_min[2], region_max[2];
  /* Identifier of the Libmv frame transform which was applied, 0 if none. */
  int64_t transform_key;
} AccessCacheKey;
627
accesscache_hashhash(const void * key_v)628 static unsigned int accesscache_hashhash(const void *key_v)
629 {
630 const AccessCacheKey *key = (const AccessCacheKey *)key_v;
631 /* TODP(sergey): Need better hashing here for faster frame access. */
632 return key->clip_index << 16 | key->frame;
633 }
634
accesscache_hashcmp(const void * a_v,const void * b_v)635 static bool accesscache_hashcmp(const void *a_v, const void *b_v)
636 {
637 const AccessCacheKey *a = (const AccessCacheKey *)a_v;
638 const AccessCacheKey *b = (const AccessCacheKey *)b_v;
639 if (a->clip_index != b->clip_index || a->frame != b->frame || a->downscale != b->downscale ||
640 a->input_mode != b->input_mode || a->has_region != b->has_region ||
641 a->transform_key != b->transform_key) {
642 return true;
643 }
644 /* If there is region applied, compare it. */
645 if (a->has_region) {
646 if (!equals_v2v2(a->region_min, b->region_min) || !equals_v2v2(a->region_max, b->region_max)) {
647 return true;
648 }
649 }
650 return false;
651 }
652
/* Pack all parameters which uniquely identify a processed frame into a
 * cache key. Region bounds are only filled in when a region is given. */
static void accesscache_construct_key(AccessCacheKey *key,
                                      int clip_index,
                                      int frame,
                                      libmv_InputMode input_mode,
                                      int downscale,
                                      const libmv_Region *region,
                                      int64_t transform_key)
{
  key->clip_index = clip_index;
  key->frame = frame;
  key->downscale = downscale;
  key->input_mode = input_mode;
  key->transform_key = transform_key;

  key->has_region = (region != NULL);
  if (region != NULL) {
    copy_v2_v2(key->region_min, region->min);
    copy_v2_v2(key->region_max, region->max);
  }
}
672
/* Store a fully processed buffer in the accessor cache under a key which
 * describes exactly how it was constructed. */
static void accesscache_put(TrackingImageAccessor *accessor,
                            int clip_index,
                            int frame,
                            libmv_InputMode input_mode,
                            int downscale,
                            const libmv_Region *region,
                            int64_t transform_key,
                            ImBuf *ibuf)
{
  AccessCacheKey cache_key;

  accesscache_construct_key(
      &cache_key, clip_index, frame, input_mode, downscale, region, transform_key);

  IMB_moviecache_put(accessor->cache, &cache_key, ibuf);
}
686
accesscache_get(TrackingImageAccessor * accessor,int clip_index,int frame,libmv_InputMode input_mode,int downscale,const libmv_Region * region,int64_t transform_key)687 static ImBuf *accesscache_get(TrackingImageAccessor *accessor,
688 int clip_index,
689 int frame,
690 libmv_InputMode input_mode,
691 int downscale,
692 const libmv_Region *region,
693 int64_t transform_key)
694 {
695 AccessCacheKey key;
696 accesscache_construct_key(&key, clip_index, frame, input_mode, downscale, region, transform_key);
697 return IMB_moviecache_get(accessor->cache, &key);
698 }
699
accessor_get_preprocessed_ibuf(TrackingImageAccessor * accessor,int clip_index,int frame)700 static ImBuf *accessor_get_preprocessed_ibuf(TrackingImageAccessor *accessor,
701 int clip_index,
702 int frame)
703 {
704 MovieClip *clip;
705 MovieClipUser user;
706 ImBuf *ibuf;
707 int scene_frame;
708
709 BLI_assert(clip_index < accessor->num_clips);
710
711 clip = accessor->clips[clip_index];
712 scene_frame = BKE_movieclip_remap_clip_to_scene_frame(clip, frame);
713 BKE_movieclip_user_set_frame(&user, scene_frame);
714 user.render_size = MCLIP_PROXY_RENDER_SIZE_FULL;
715 user.render_flag = 0;
716 ibuf = BKE_movieclip_get_ibuf(clip, &user);
717
718 return ibuf;
719 }
720
/* Create a single-channel float copy of the given float image using
 * Rec.709 luma weights. The caller owns the returned buffer. */
static ImBuf *make_grayscale_ibuf_copy(ImBuf *ibuf)
{
  ImBuf *grayscale = IMB_allocImBuf(ibuf->x, ibuf->y, 32, 0);

  BLI_assert(ibuf->channels == 3 || ibuf->channels == 4);

  /* TODO(sergey): Bummer, currently IMB API only allows to create 4 channels
   * float buffer, so we do it manually here.
   *
   * Will generalize it later.
   */
  const size_t buffer_size = (size_t)grayscale->x * (size_t)grayscale->y * sizeof(float);
  grayscale->channels = 1;
  grayscale->rect_float = MEM_callocN(buffer_size, "tracking grayscale image");
  if (grayscale->rect_float != NULL) {
    grayscale->mall |= IB_rectfloat;
    grayscale->flags |= IB_rectfloat;

    const int num_source_channels = ibuf->channels;
    for (int i = 0; i < grayscale->x * grayscale->y; i++) {
      const float *rgb = ibuf->rect_float + num_source_channels * i;
      /* Rec.709 luma weights. */
      grayscale->rect_float[i] = 0.2126f * rgb[0] + 0.7152f * rgb[1] + 0.0722f * rgb[2];
    }
  }

  return grayscale;
}
747
/* Wrap an ImBuf's float pixels in a Libmv image descriptor. No copy is
 * made: the buffer is borrowed from the ImBuf. */
static void ibuf_to_float_image(const ImBuf *ibuf, libmv_FloatImage *float_image)
{
  BLI_assert(ibuf->rect_float != NULL);

  float_image->width = ibuf->x;
  float_image->height = ibuf->y;
  float_image->channels = ibuf->channels;
  float_image->buffer = ibuf->rect_float;
}
756
float_image_to_ibuf(libmv_FloatImage * float_image)757 static ImBuf *float_image_to_ibuf(libmv_FloatImage *float_image)
758 {
759 ImBuf *ibuf = IMB_allocImBuf(float_image->width, float_image->height, 32, 0);
760 size_t size = (size_t)ibuf->x * (size_t)ibuf->y * float_image->channels * sizeof(float);
761 ibuf->channels = float_image->channels;
762 if ((ibuf->rect_float = MEM_callocN(size, "tracking grayscale image")) != NULL) {
763 ibuf->mall |= IB_rectfloat;
764 ibuf->flags |= IB_rectfloat;
765
766 memcpy(ibuf->rect_float, float_image->buffer, size);
767 }
768 return ibuf;
769 }
770
accessor_get_ibuf(TrackingImageAccessor * accessor,int clip_index,int frame,libmv_InputMode input_mode,int downscale,const libmv_Region * region,const libmv_FrameTransform * transform)771 static ImBuf *accessor_get_ibuf(TrackingImageAccessor *accessor,
772 int clip_index,
773 int frame,
774 libmv_InputMode input_mode,
775 int downscale,
776 const libmv_Region *region,
777 const libmv_FrameTransform *transform)
778 {
779 ImBuf *ibuf, *orig_ibuf, *final_ibuf;
780 int64_t transform_key = 0;
781 if (transform != NULL) {
782 transform_key = libmv_frameAccessorgetTransformKey(transform);
783 }
784 /* First try to get fully processed image from the cache. */
785 BLI_spin_lock(&accessor->cache_lock);
786 ibuf = accesscache_get(
787 accessor, clip_index, frame, input_mode, downscale, region, transform_key);
788 BLI_spin_unlock(&accessor->cache_lock);
789 if (ibuf != NULL) {
790 CACHE_PRINTF("Used cached buffer for frame %d\n", frame);
791 /* This is a little heuristic here: if we re-used image once, this is
792 * a high probability of the image to be related to a keyframe matched
793 * reference image. Those images we don't want to be thrown away because
794 * if we toss them out we'll be re-calculating them at the next
795 * iteration.
796 */
797 ibuf->userflags |= IB_PERSISTENT;
798 return ibuf;
799 }
800 CACHE_PRINTF("Calculate new buffer for frame %d\n", frame);
801 /* And now we do postprocessing of the original frame. */
802 orig_ibuf = accessor_get_preprocessed_ibuf(accessor, clip_index, frame);
803 if (orig_ibuf == NULL) {
804 return NULL;
805 }
806 /* Cut a region if requested. */
807 if (region != NULL) {
808 int width = region->max[0] - region->min[0], height = region->max[1] - region->min[1];
809
810 /* If the requested region goes outside of the actual frame we still
811 * return the requested region size, but only fill it's partially with
812 * the data we can.
813 */
814 int clamped_origin_x = max_ii((int)region->min[0], 0),
815 clamped_origin_y = max_ii((int)region->min[1], 0);
816 int dst_offset_x = clamped_origin_x - (int)region->min[0],
817 dst_offset_y = clamped_origin_y - (int)region->min[1];
818 int clamped_width = width - dst_offset_x, clamped_height = height - dst_offset_y;
819 clamped_width = min_ii(clamped_width, orig_ibuf->x - clamped_origin_x);
820 clamped_height = min_ii(clamped_height, orig_ibuf->y - clamped_origin_y);
821
822 final_ibuf = IMB_allocImBuf(width, height, 32, IB_rectfloat);
823
824 if (orig_ibuf->rect_float != NULL) {
825 IMB_rectcpy(final_ibuf,
826 orig_ibuf,
827 dst_offset_x,
828 dst_offset_y,
829 clamped_origin_x,
830 clamped_origin_y,
831 clamped_width,
832 clamped_height);
833 }
834 else {
835 /* TODO(sergey): We don't do any color space or alpha conversion
836 * here. Probably Libmv is better to work in the linear space,
837 * but keep sRGB space here for compatibility for now.
838 */
839 for (int y = 0; y < clamped_height; y++) {
840 for (int x = 0; x < clamped_width; x++) {
841 int src_x = x + clamped_origin_x, src_y = y + clamped_origin_y;
842 int dst_x = x + dst_offset_x, dst_y = y + dst_offset_y;
843 int dst_index = (dst_y * width + dst_x) * 4,
844 src_index = (src_y * orig_ibuf->x + src_x) * 4;
845 rgba_uchar_to_float(final_ibuf->rect_float + dst_index,
846 (unsigned char *)orig_ibuf->rect + src_index);
847 }
848 }
849 }
850 }
851 else {
852 /* Libmv only works with float images,
853 *
854 * This would likely make it so loads of float buffers are being stored
855 * in the cache which is nice on the one hand (faster re-use of the
856 * frames) but on the other hand it bumps the memory usage up.
857 */
858 BLI_thread_lock(LOCK_MOVIECLIP);
859 IMB_float_from_rect(orig_ibuf);
860 BLI_thread_unlock(LOCK_MOVIECLIP);
861 final_ibuf = orig_ibuf;
862 }
863 /* Downscale if needed. */
864 if (downscale > 0) {
865 if (final_ibuf == orig_ibuf) {
866 final_ibuf = IMB_dupImBuf(orig_ibuf);
867 }
868 IMB_scaleImBuf(final_ibuf, orig_ibuf->x / (1 << downscale), orig_ibuf->y / (1 << downscale));
869 }
870 /* Apply possible transformation. */
871 if (transform != NULL) {
872 libmv_FloatImage input_image, output_image;
873 ibuf_to_float_image(final_ibuf, &input_image);
874 libmv_frameAccessorgetTransformRun(transform, &input_image, &output_image);
875 if (final_ibuf != orig_ibuf) {
876 IMB_freeImBuf(final_ibuf);
877 }
878 final_ibuf = float_image_to_ibuf(&output_image);
879 libmv_floatImageDestroy(&output_image);
880 }
881 /* Transform number of channels. */
882 if (input_mode == LIBMV_IMAGE_MODE_RGBA) {
883 BLI_assert(orig_ibuf->channels == 3 || orig_ibuf->channels == 4);
884 /* pass */
885 }
886 else /* if (input_mode == LIBMV_IMAGE_MODE_MONO) */ {
887 BLI_assert(input_mode == LIBMV_IMAGE_MODE_MONO);
888 if (final_ibuf->channels != 1) {
889 ImBuf *grayscale_ibuf = make_grayscale_ibuf_copy(final_ibuf);
890 if (final_ibuf != orig_ibuf) {
891 /* We dereference original frame later. */
892 IMB_freeImBuf(final_ibuf);
893 }
894 final_ibuf = grayscale_ibuf;
895 }
896 }
897 /* It's possible processing still didn't happen at this point,
898 * but we really need a copy of the buffer to be transformed
899 * and to be put to the cache.
900 */
901 if (final_ibuf == orig_ibuf) {
902 final_ibuf = IMB_dupImBuf(orig_ibuf);
903 }
904 IMB_freeImBuf(orig_ibuf);
905 BLI_spin_lock(&accessor->cache_lock);
906 /* Put final buffer to cache. */
907 accesscache_put(
908 accessor, clip_index, frame, input_mode, downscale, region, transform_key, final_ibuf);
909 BLI_spin_unlock(&accessor->cache_lock);
910 return final_ibuf;
911 }
912
accessor_get_image_callback(struct libmv_FrameAccessorUserData * user_data,int clip_index,int frame,libmv_InputMode input_mode,int downscale,const libmv_Region * region,const libmv_FrameTransform * transform,float ** destination,int * width,int * height,int * channels)913 static libmv_CacheKey accessor_get_image_callback(struct libmv_FrameAccessorUserData *user_data,
914 int clip_index,
915 int frame,
916 libmv_InputMode input_mode,
917 int downscale,
918 const libmv_Region *region,
919 const libmv_FrameTransform *transform,
920 float **destination,
921 int *width,
922 int *height,
923 int *channels)
924 {
925 TrackingImageAccessor *accessor = (TrackingImageAccessor *)user_data;
926 ImBuf *ibuf;
927
928 BLI_assert(clip_index >= 0 && clip_index < accessor->num_clips);
929
930 ibuf = accessor_get_ibuf(accessor, clip_index, frame, input_mode, downscale, region, transform);
931
932 if (ibuf) {
933 *destination = ibuf->rect_float;
934 *width = ibuf->x;
935 *height = ibuf->y;
936 *channels = ibuf->channels;
937 }
938 else {
939 *destination = NULL;
940 *width = 0;
941 *height = 0;
942 *channels = 0;
943 }
944
945 return ibuf;
946 }
947
static void accessor_release_image_callback(libmv_CacheKey cache_key)
{
  /* The cache key handed out by accessor_get_image_callback() is the ImBuf
   * itself; drop the reference taken for Libmv. */
  IMB_freeImBuf((ImBuf *)cache_key);
}
953
accessor_get_mask_for_track_callback(libmv_FrameAccessorUserData * user_data,int clip_index,int frame,int track_index,const libmv_Region * region,float ** r_destination,int * r_width,int * r_height)954 static libmv_CacheKey accessor_get_mask_for_track_callback(libmv_FrameAccessorUserData *user_data,
955 int clip_index,
956 int frame,
957 int track_index,
958 const libmv_Region *region,
959 float **r_destination,
960 int *r_width,
961 int *r_height)
962 {
963 /* Perform sanity checks first. */
964 TrackingImageAccessor *accessor = (TrackingImageAccessor *)user_data;
965 BLI_assert(clip_index < accessor->num_clips);
966 BLI_assert(track_index < accessor->num_tracks);
967 MovieTrackingTrack *track = accessor->tracks[track_index];
968 /* Early output, track does not use mask. */
969 if ((track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_MASK) == 0) {
970 return NULL;
971 }
972 MovieClip *clip = accessor->clips[clip_index];
973 /* Construct fake user so we can access movie clip. */
974 MovieClipUser user;
975 int scene_frame = BKE_movieclip_remap_clip_to_scene_frame(clip, frame);
976 BKE_movieclip_user_set_frame(&user, scene_frame);
977 user.render_size = MCLIP_PROXY_RENDER_SIZE_FULL;
978 user.render_flag = 0;
979 /* Get frame width and height so we can convert stroke coordinates
980 * and other things from normalized to pixel space.
981 */
982 int frame_width, frame_height;
983 BKE_movieclip_get_size(clip, &user, &frame_width, &frame_height);
984 /* Actual mask sampling. */
985 MovieTrackingMarker *marker = BKE_tracking_marker_get_exact(track, frame);
986 const float region_min[2] = {
987 region->min[0] - marker->pos[0] * frame_width,
988 region->min[1] - marker->pos[1] * frame_height,
989 };
990 const float region_max[2] = {
991 region->max[0] - marker->pos[0] * frame_width,
992 region->max[1] - marker->pos[1] * frame_height,
993 };
994 *r_destination = tracking_track_get_mask_for_region(
995 frame_width, frame_height, region_min, region_max, track);
996 *r_width = region->max[0] - region->min[0];
997 *r_height = region->max[1] - region->min[1];
998 return *r_destination;
999 }
1000
static void accessor_release_mask_callback(libmv_CacheKey cache_key)
{
  /* Cache key is the mask buffer allocated by the mask callback; it is NULL
   * when the track had no mask at that frame. */
  if (cache_key == NULL) {
    return;
  }
  MEM_freeN((float *)cache_key);
}
1008
tracking_image_accessor_new(MovieClip * clips[MAX_ACCESSOR_CLIP],int num_clips,MovieTrackingTrack ** tracks,int num_tracks,int start_frame)1009 TrackingImageAccessor *tracking_image_accessor_new(MovieClip *clips[MAX_ACCESSOR_CLIP],
1010 int num_clips,
1011 MovieTrackingTrack **tracks,
1012 int num_tracks,
1013 int start_frame)
1014 {
1015 TrackingImageAccessor *accessor = MEM_callocN(sizeof(TrackingImageAccessor),
1016 "tracking image accessor");
1017
1018 BLI_assert(num_clips <= MAX_ACCESSOR_CLIP);
1019
1020 accessor->cache = IMB_moviecache_create(
1021 "frame access cache", sizeof(AccessCacheKey), accesscache_hashhash, accesscache_hashcmp);
1022
1023 memcpy(accessor->clips, clips, num_clips * sizeof(MovieClip *));
1024 accessor->num_clips = num_clips;
1025 accessor->tracks = tracks;
1026 accessor->num_tracks = num_tracks;
1027 accessor->start_frame = start_frame;
1028
1029 accessor->libmv_accessor = libmv_FrameAccessorNew((libmv_FrameAccessorUserData *)accessor,
1030 accessor_get_image_callback,
1031 accessor_release_image_callback,
1032 accessor_get_mask_for_track_callback,
1033 accessor_release_mask_callback);
1034
1035 BLI_spin_init(&accessor->cache_lock);
1036
1037 return accessor;
1038 }
1039
/* Free all resources owned by the accessor: the frame cache, the Libmv
 * frame-accessor handle, the cache spin lock and the accessor itself.
 * NOTE: the tracks array is borrowed (see tracking_image_accessor_new)
 * and is intentionally not freed here. */
void tracking_image_accessor_destroy(TrackingImageAccessor *accessor)
{
  IMB_moviecache_free(accessor->cache);
  libmv_FrameAccessorDestroy(accessor->libmv_accessor);
  BLI_spin_end(&accessor->cache_lock);
  MEM_freeN(accessor);
}
1047