// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp"

#include "opencv2/calib3d.hpp"  // findHomography
#include "rlof_localflow.h"
#include "berlof_invoker.hpp"
#include "rlof_invoker.hpp"
#include "plk_invoker.hpp"
using namespace std;
using namespace cv;

namespace cv {
namespace detail {
typedef short deriv_type;
} // namespace

namespace {
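/*! Computes image derivatives for one pyramid level with a separable 3-10-3 Scharr kernel
 * (smoothing weights [3 10 3], difference weights [-1 0 1]).
 * dst is created with 2*cn channels of deriv_type (short) and holds the interleaved
 * derivatives (dI/dx, dI/dy) for each input channel; image borders are handled by
 * mirroring in BORDER_REFLECT_101 style.
 */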
static void calcSharrDeriv(const cv::Mat& src, cv::Mat& dst)
{
    using namespace cv;
    using cv::detail::deriv_type;
    int rows = src.rows, cols = src.cols, cn = src.channels(), colsn = cols * cn, depth = src.depth();
    CV_Assert(depth == CV_8U);
    dst.create(rows, cols, CV_MAKETYPE(DataType<deriv_type>::depth, cn * 2));

    int x, y, delta = (int)alignSize((cols + 2)*cn, 16);
    AutoBuffer<deriv_type> _tempBuf(delta * 2 + 64);
    deriv_type *trow0 = alignPtr(_tempBuf.data() + cn, 16), *trow1 = alignPtr(trow0 + delta, 16);

#if CV_SIMD128
    v_int16x8 c3 = v_setall_s16(3), c10 = v_setall_s16(10);
    bool haveSIMD = checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON);
#endif

    for (y = 0; y < rows; y++)
    {
        const uchar* srow0 = src.ptr<uchar>(y > 0 ? y - 1 : rows > 1 ? 1 : 0);
        const uchar* srow1 = src.ptr<uchar>(y);
        const uchar* srow2 = src.ptr<uchar>(y < rows - 1 ? y + 1 : rows > 1 ? rows - 2 : 0);
        deriv_type* drow = dst.ptr<deriv_type>(y);

        // do vertical convolution
        x = 0;
#if CV_SIMD128
        if (haveSIMD)
        {
            for (; x <= colsn - 8; x += 8)
            {
                v_int16x8 s0 = v_reinterpret_as_s16(v_load_expand(srow0 + x));
                v_int16x8 s1 = v_reinterpret_as_s16(v_load_expand(srow1 + x));
                v_int16x8 s2 = v_reinterpret_as_s16(v_load_expand(srow2 + x));

                v_int16x8 t1 = s2 - s0;
                v_int16x8 t0 = v_mul_wrap(s0 + s2, c3) + v_mul_wrap(s1, c10);

                v_store(trow0 + x, t0);
                v_store(trow1 + x, t1);
            }
        }
#endif

        for (; x < colsn; x++)
        {
            int t0 = (srow0[x] + srow2[x]) * 3 + srow1[x] * 10;
            int t1 = srow2[x] - srow0[x];
            trow0[x] = (deriv_type)t0;
            trow1[x] = (deriv_type)t1;
        }

        // make border
        int x0 = (cols > 1 ? 1 : 0)*cn, x1 = (cols > 1 ? cols - 2 : 0)*cn;
        for (int k = 0; k < cn; k++)
        {
            trow0[-cn + k] = trow0[x0 + k]; trow0[colsn + k] = trow0[x1 + k];
            trow1[-cn + k] = trow1[x0 + k]; trow1[colsn + k] = trow1[x1 + k];
        }

        // do horizontal convolution, interleave the results and store them to dst
        x = 0;
#if CV_SIMD128
        if (haveSIMD)
        {
            for (; x <= colsn - 8; x += 8)
            {
                v_int16x8 s0 = v_load(trow0 + x - cn);
                v_int16x8 s1 = v_load(trow0 + x + cn);
                v_int16x8 s2 = v_load(trow1 + x - cn);
                v_int16x8 s3 = v_load(trow1 + x);
                v_int16x8 s4 = v_load(trow1 + x + cn);

                v_int16x8 t0 = s1 - s0;
                v_int16x8 t1 = v_mul_wrap(s2 + s4, c3) + v_mul_wrap(s3, c10);

                v_store_interleave((drow + x * 2), t0, t1);
            }
        }
#endif
        for (; x < colsn; x++)
        {
            deriv_type t0 = (deriv_type)(trow0[x + cn] - trow0[x - cn]);
            deriv_type t1 = (deriv_type)((trow1[x + cn] + trow1[x - cn]) * 3 + trow1[x] * 10);
            drow[x * 2] = t0; drow[x * 2 + 1] = t1;
        }
    }
}

} // namespace
namespace optflow {

/*! Helper class for preCalcCrossSegmentation. All computations are performed on the
* image extended by the window size.
*\param data CV_8UC3 image (extended by winSize)
*\param winSize size of the support window
*\param dst CV_32SC4 bounding map
*\param threshold color difference threshold of the cross segmentation
*\param stride if true store the bounds into the last two channels of the bounding map,
*       otherwise into the first two
*/
class HorizontalCrossSegmentation : public cv::ParallelLoopBody
{
public:
    HorizontalCrossSegmentation(
            const cv::Point2f * ptList,
            int npoints,
            float pointScale,
            const cv::Mat * data,
            const int winSize,
            cv::Mat * dst,
            int threshold,
            bool stride,
            const cv::Mat * mask
    )
    {
        m_ptList        = ptList;
        m_npoints       = npoints;
        m_pointScale    = pointScale;
        m_data          = data;
        m_winSize       = winSize;
        m_dst           = dst;
        m_threshold     = threshold;
        m_stride        = stride;
        m_mask          = mask;
    }

    void operator()(const cv::Range& range) const CV_OVERRIDE
    {
        uchar channel[2];
        channel[0] = m_stride ? 2 : 0;
        channel[1] = m_stride ? 3 : 1;
        int hWinSize = (m_winSize - 1) / 2;
        std::vector<int> differenz(m_winSize);
        for( int r = range.start; r < range.end; r++ )
        {
            for(int c = hWinSize; c < m_data->cols - hWinSize; c++)
            {
                if( m_mask->at<uchar>(r,c) == 0)
                    continue;
                const Point3_<uchar> & ucval = m_data->at<Point3_<uchar>>(r,c);
                Point3i val(static_cast<int>(ucval.x), static_cast<int>(ucval.y), static_cast<int>(ucval.z));
                int x = c - hWinSize;
                Point dstPos = m_stride ? Point(r,c) : Point(c,r);
                for(int ix = 0; ix < m_winSize; ix++, x++)
                {
                    const Point3_<uchar> & valref = m_data->at<Point3_<uchar>>(r,x);
                    differenz[ix] = MAX(std::abs(static_cast<int>(valref.x) - val.x),
                                    MAX(std::abs(static_cast<int>(valref.y) - val.y),
                                    (std::abs(static_cast<int>(valref.z) - val.z))));

                }
                cv::Vec4i & bounds = m_dst->at<cv::Vec4i>(dstPos);
                bounds.val[channel[0]] = c - hWinSize;
                bounds.val[channel[1]] = c + hWinSize;
                int * diffPtr = &differenz[hWinSize];
                bool useUpperBound = false;
                bool useLowerBound = false;
                for(int ix = 1; ix <= hWinSize; ix++)
                {
                    if( !useUpperBound && diffPtr[-ix] > m_threshold)
                    {
                        useUpperBound = true;
                        bounds.val[channel[0]] = c - ix;
                    }
                    if( !useLowerBound && diffPtr[ix-1] > m_threshold)
                    {
                        useLowerBound = true;
                        bounds.val[channel[1]] = c + ix - 1;
                    }
                    if( useUpperBound && useLowerBound)
                        break;
                }
            }
        }
    }

    const cv::Point2f * m_ptList;
    int                 m_npoints;
    float               m_pointScale;
    const cv::Mat *     m_data;
    int                 m_winSize;
    cv::Mat *           m_dst;
    int                 m_threshold;
    bool                m_stride;
    const cv::Mat *     m_mask;
};

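/*! Precomputes the cross-based support-region bounds around the tracked points.
 * Two HorizontalCrossSegmentation passes run in parallel: one over the transposed image
 * (vertical bounds, written to channels 2/3 of dst) and one over the original image
 * (horizontal bounds, channels 0/1). For each pixel in the vicinity of a tracked point
 * the bounds mark how far the support region may grow before the maximum per-channel
 * color difference to the center pixel exceeds the threshold.
 */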
static
void preCalcCrossSegmentation(
    const cv::Point2f * ptList,
    int npoints,
    float pointScale,
    const cv::Mat & img,
    const int winSize,
    cv::Mat & dst,
    int threshold
)
{
    int hWinSize = (winSize - 1) / 2;
    cv::Mat data = img;
    data.adjustROI(hWinSize, hWinSize, hWinSize, hWinSize);
    if( dst.size() != data.size() || dst.type() != CV_32SC4)
    {
        dst.release();
        dst.create(data.size(), CV_32SC4);
    }
    cv::Mat mask(data.cols, data.rows, CV_8UC1);
    mask.setTo(0);
    for( unsigned int n = 0; n < static_cast<unsigned int>(npoints); n++)
    {
        cv::Point ipos( static_cast<int>(floor(ptList[n].y * pointScale)),
                        static_cast<int>(floor(ptList[n].x * pointScale) + hWinSize));
        ipos.x = MAX( MIN(ipos.x, mask.cols - 1), 0);
        int to = MIN( mask.cols - 1, ipos.x + winSize );
        int ypos = MAX( MIN(ipos.y, mask.rows - 1), 0);
        for(int x = ipos.x; x <= to ; x++)
        {
            mask.at<uchar>(ypos, x) = 255;
        }
    }
    cv::Mat datat = data.t();
    cv::Mat maskt = mask.t();
    parallel_for_(cv::Range(0, datat.rows), HorizontalCrossSegmentation(ptList, npoints, pointScale, &datat, winSize, &dst, threshold, true, &mask));
    parallel_for_(cv::Range(0, data.rows), HorizontalCrossSegmentation(ptList, npoints, pointScale, &data, winSize, &dst, threshold, false, &maskt));

}

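// A parameter set is considered "robust" if both shrinked Hampel norm parameters are below
// 255; larger values (e.g. std::numeric_limits<float>::max()) disable the robust norm and
// the trackers fall back to the standard least-squares (PLK) formulation.
// get_norm() packs {sigma0, sigma1, sigma0/(sigma0-sigma1), sigma0*sigma1}, the constants
// the robust invokers need to evaluate the norm.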
static inline
bool isrobust(const RLOFOpticalFlowParameter & param)
{
    return (param.normSigma0 < 255 && param.normSigma1 < 255);
}
static inline
std::vector<float> get_norm(float sigma0, float sigma1)
{
    std::vector<float> result = { sigma0, sigma1, sigma0 / (sigma0 - sigma1), sigma0 * sigma1 };
    return result;
}

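/*! Builds an image pyramid similar to cv::buildOpticalFlowPyramid, but with a configurable,
 * possibly non-dyadic downscale factor per axis (levelScale). Every level is padded by
 * winSize on each side so the trackers can read outside the image; with withDerivatives
 * the interleaved derivatives are stored next to each level. Returns the index of the
 * coarsest level that is still larger than the window.
 */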
static
int buildOpticalFlowPyramidScale(InputArray _img, OutputArrayOfArrays pyramid, Size winSize, int maxLevel, bool withDerivatives,
    int pyrBorder, int derivBorder, bool tryReuseInputImage, float levelScale[2])
{
    Mat img = _img.getMat();
    CV_Assert(img.depth() == CV_8U && winSize.width > 2 && winSize.height > 2);
    int pyrstep = withDerivatives ? 2 : 1;

    pyramid.create(1, (maxLevel + 1) * pyrstep, 0 /*type*/, -1, true);

    int derivType = CV_MAKETYPE(DataType<short>::depth, img.channels() * 2);

    //level 0
    bool lvl0IsSet = false;
    if (tryReuseInputImage && img.isSubmatrix() && (pyrBorder & BORDER_ISOLATED) == 0)
    {
        Size wholeSize;
        Point ofs;
        img.locateROI(wholeSize, ofs);
        if (ofs.x >= winSize.width && ofs.y >= winSize.height
            && ofs.x + img.cols + winSize.width <= wholeSize.width
            && ofs.y + img.rows + winSize.height <= wholeSize.height)
        {
            pyramid.getMatRef(0) = img;
            lvl0IsSet = true;
        }
    }

    if (!lvl0IsSet)
    {
        Mat& temp = pyramid.getMatRef(0);

        if (!temp.empty())
            temp.adjustROI(winSize.height, winSize.height, winSize.width, winSize.width);
        if (temp.type() != img.type() || temp.cols != winSize.width * 2 + img.cols || temp.rows != winSize.height * 2 + img.rows)
            temp.create(img.rows + winSize.height * 2, img.cols + winSize.width * 2, img.type());

        if (pyrBorder == BORDER_TRANSPARENT)
            img.copyTo(temp(Rect(winSize.width, winSize.height, img.cols, img.rows)));
        else
            copyMakeBorder(img, temp, winSize.height, winSize.height, winSize.width, winSize.width, pyrBorder);
        temp.adjustROI(-winSize.height, -winSize.height, -winSize.width, -winSize.width);
    }

    Size sz = img.size();
    Mat prevLevel = pyramid.getMatRef(0);
    Mat thisLevel = prevLevel;

    for (int level = 0; level <= maxLevel; ++level)
    {
        if (level != 0)
        {
            Mat& temp = pyramid.getMatRef(level * pyrstep);

            if (!temp.empty())
                temp.adjustROI(winSize.height, winSize.height, winSize.width, winSize.width);
            if (temp.type() != img.type() || temp.cols != winSize.width * 2 + sz.width || temp.rows != winSize.height * 2 + sz.height)
                temp.create(sz.height + winSize.height * 2, sz.width + winSize.width * 2, img.type());

            thisLevel = temp(Rect(winSize.width, winSize.height, sz.width, sz.height));
            pyrDown(prevLevel, thisLevel, sz);

            if (pyrBorder != BORDER_TRANSPARENT)
                copyMakeBorder(thisLevel, temp, winSize.height, winSize.height, winSize.width, winSize.width, pyrBorder | BORDER_ISOLATED);
            temp.adjustROI(-winSize.height, -winSize.height, -winSize.width, -winSize.width);
        }

        if (withDerivatives)
        {
            Mat& deriv = pyramid.getMatRef(level * pyrstep + 1);

            if (!deriv.empty())
                deriv.adjustROI(winSize.height, winSize.height, winSize.width, winSize.width);
            if (deriv.type() != derivType || deriv.cols != winSize.width * 2 + sz.width || deriv.rows != winSize.height * 2 + sz.height)
                deriv.create(sz.height + winSize.height * 2, sz.width + winSize.width * 2, derivType);

            Mat derivI = deriv(Rect(winSize.width, winSize.height, sz.width, sz.height));
            calcSharrDeriv(thisLevel, derivI);

            if (derivBorder != BORDER_TRANSPARENT)
                copyMakeBorder(derivI, deriv, winSize.height, winSize.height, winSize.width, winSize.width, derivBorder | BORDER_ISOLATED);
            deriv.adjustROI(-winSize.height, -winSize.height, -winSize.width, -winSize.width);
        }

        sz = Size(static_cast<int>((sz.width + 1) / levelScale[0]),
            static_cast<int>((sz.height + 1) / levelScale[1]));
        if (sz.width <= winSize.width || sz.height <= winSize.height)
        {
            pyramid.create(1, (level + 1) * pyrstep, 0 /*type*/, -1, true);//check this
            return level;
        }

        prevLevel = thisLevel;
    }

    return maxLevel;
}

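// Builds (or re-uses) the pyramid held by this CImageBuffer, based on either the blurred
// or the plain image. When m_Overwrite is false the previously built pyramid is kept,
// which lets the caller re-use the pyramid already computed for this frame in an earlier
// call instead of rebuilding it.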
int CImageBuffer::buildPyramid(cv::Size winSize, int maxLevel, float levelScale[2], bool withBlurredImage)
{
    if (! m_Overwrite)
        return m_maxLevel;
    if (withBlurredImage)
        m_maxLevel = buildOpticalFlowPyramidScale(m_BlurredImage, m_ImagePyramid, winSize, maxLevel, false, 4, 0, true, levelScale);
    else
        m_maxLevel = buildOpticalFlowPyramidScale(m_Image, m_ImagePyramid, winSize, maxLevel, false, 4, 0, true, levelScale);
    return m_maxLevel;
}

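/*! Core coarse-to-fine tracking loop. For every pyramid level (coarse to fine) the
 * derivatives of the previous image are computed, the cross-based support map is
 * prepared if needed, and one of eight TrackerInvoker specializations is run in parallel
 * over all points. The variant is selected by three switches: robust norm vs. plain
 * least squares (isrobust), with or without the linear illumination model, and
 * standard vs. bilinear-equation solver (SolverType).
 */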
static
void calcLocalOpticalFlowCore(
    Ptr<CImageBuffer> prevPyramids[2],
    Ptr<CImageBuffer> currPyramids[2],
    InputArray _prevPts,
    InputOutputArray _nextPts,
    const RLOFOpticalFlowParameter & param)
{

    bool useAdditionalRGB = param.supportRegionType == SR_CROSS;
    int iWinSize = param.largeWinSize;
    int winSizes[2] = { iWinSize, iWinSize };
    if (param.supportRegionType != SR_FIXED)
    {
        winSizes[0] = param.smallWinSize;
    }
    //cv::Size winSize(iWinSize, iWinSize);
    cv::TermCriteria criteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, param.maxIteration, 0.01);
    std::vector<float> rlofNorm = get_norm(param.normSigma0, param.normSigma1);
    CV_Assert(winSizes[0] <= winSizes[1]);

    bool usePreComputedCross = winSizes[0] != winSizes[1];
    Mat prevPtsMat = _prevPts.getMat();
    const int derivDepth = DataType<detail::deriv_type>::depth;

    CV_Assert(param.maxLevel >= 0 && iWinSize > 2);

    int level = 0, npoints;
    CV_Assert((npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0);

    if (!(param.useInitialFlow))
        _nextPts.create(prevPtsMat.size(), prevPtsMat.type(), -1, true);

    Mat nextPtsMat = _nextPts.getMat();
    CV_Assert(nextPtsMat.checkVector(2, CV_32F, true) == npoints);

    const Point2f* prevPts = (const Point2f*)prevPtsMat.data;
    Point2f* nextPts = (Point2f*)nextPtsMat.data;
    std::vector<uchar> status(npoints);
    std::vector<float> err(npoints);
    std::vector<Point2f> gainPts(npoints);

    float levelScale[2] = { 2.f, 2.f };

    int maxLevel = prevPyramids[0]->buildPyramid(cv::Size(iWinSize, iWinSize), param.maxLevel, levelScale);
    maxLevel = currPyramids[0]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale);

    if (useAdditionalRGB)
    {
        prevPyramids[1]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale, true);
        currPyramids[1]->buildPyramid(cv::Size(iWinSize, iWinSize), maxLevel, levelScale, true);
    }

    if ((criteria.type & TermCriteria::COUNT) == 0)
        criteria.maxCount = 30;
    else
        criteria.maxCount = std::min(std::max(criteria.maxCount, 0), 100);
    if ((criteria.type & TermCriteria::EPS) == 0)
        criteria.epsilon = 0.001;
    else
        criteria.epsilon = std::min(std::max(criteria.epsilon, 0.), 10.);
    criteria.epsilon *= criteria.epsilon;

    // dI/dx ~ Ix, dI/dy ~ Iy
    Mat derivIBuf;
    derivIBuf.create(prevPyramids[0]->m_ImagePyramid[0].rows + iWinSize * 2, prevPyramids[0]->m_ImagePyramid[0].cols + iWinSize * 2, CV_MAKETYPE(derivDepth, prevPyramids[0]->m_ImagePyramid[0].channels() * 2));

    for (level = maxLevel; level >= 0; level--)
    {
        Mat derivI;
        Size imgSize = prevPyramids[0]->getImage(level).size();
        Mat _derivI(imgSize.height + iWinSize * 2, imgSize.width + iWinSize * 2, derivIBuf.type(), derivIBuf.data);
        derivI = _derivI(Rect(iWinSize, iWinSize, imgSize.width, imgSize.height));
        calcSharrDeriv(prevPyramids[0]->getImage(level), derivI);
        copyMakeBorder(derivI, _derivI, iWinSize, iWinSize, iWinSize, iWinSize, BORDER_CONSTANT | BORDER_ISOLATED);

        cv::Mat tRGBPrevPyr;
        cv::Mat tRGBNextPyr;
        if (useAdditionalRGB)
        {
            tRGBPrevPyr = prevPyramids[1]->getImage(level);
            tRGBNextPyr = currPyramids[1]->getImage(level);

            prevPyramids[1]->m_Overwrite = false;
            currPyramids[1]->m_Overwrite = false;
        }

        cv::Mat prevImage = prevPyramids[0]->getImage(level);
        cv::Mat currImage = currPyramids[0]->getImage(level);

        cv::Mat preCrossMap;
        if( usePreComputedCross )
        {
            preCalcCrossSegmentation(prevPts, npoints, (float)(1./(1 << level)), tRGBPrevPyr, winSizes[1], preCrossMap, param.crossSegmentationThreshold);
            tRGBNextPyr = cv::Mat();
            tRGBPrevPyr = preCrossMap;
        }
        // apply the PLK-like tracker
        prevImage.adjustROI(iWinSize, iWinSize, iWinSize, iWinSize);
        currImage.adjustROI(iWinSize, iWinSize, iWinSize, iWinSize);
        derivI.adjustROI(iWinSize, iWinSize, iWinSize, iWinSize);
        if (isrobust(param) == false)
        {
            if (param.useIlluminationModel)
            {
                if (param.solverType == SolverType::ST_STANDART)
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        plk::radial::TrackerInvoker(
                            prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0], &gainPts[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            param.minEigenValue,
                            param.crossSegmentationThreshold));
                }
                else
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        beplk::radial::TrackerInvoker(
                            prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0], &gainPts[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            param.crossSegmentationThreshold,
                            param.minEigenValue));
                }
            }
            else
            {
                if (param.solverType == SolverType::ST_STANDART)
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        plk::ica::TrackerInvoker(
                            prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            param.crossSegmentationThreshold,
                            param.minEigenValue));
                }
                else
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        beplk::ica::TrackerInvoker(prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            param.crossSegmentationThreshold,
                            param.minEigenValue));
                }
            }
        }
        // for robust models
        else
        {
            if (param.useIlluminationModel)
            {
                if (param.solverType == SolverType::ST_STANDART)
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        rlof::radial::TrackerInvoker(
                            prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0], &gainPts[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            rlofNorm,
                            param.minEigenValue,
                            param.crossSegmentationThreshold));
                }
                else
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        berlof::radial::TrackerInvoker(prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0], &gainPts[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            param.crossSegmentationThreshold,
                            rlofNorm,
                            param.minEigenValue));
                }
            }
            else
            {

                if (param.solverType == SolverType::ST_STANDART)
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        rlof::ica::TrackerInvoker(
                            prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            rlofNorm,
                            param.minEigenValue,
                            param.crossSegmentationThreshold));
                }
                else
                {
                    cv::parallel_for_(cv::Range(0, npoints),
                        berlof::ica::TrackerInvoker(prevImage, derivI, currImage, tRGBPrevPyr, tRGBNextPyr,
                            prevPts, nextPts, &status[0], &err[0],
                            level, maxLevel, winSizes,
                            param.maxIteration,
                            param.useInitialFlow,
                            param.supportRegionType,
                            param.crossSegmentationThreshold,
                            rlofNorm,
                            param.minEigenValue));
                }

            }
        }

        prevPyramids[0]->m_Overwrite = true;
        currPyramids[0]->m_Overwrite = true;
    }
}

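/*! Estimates a global motion prior. A regular grid of points is tracked with a fast,
 * non-robust PLK configuration, a homography is fitted to the resulting matches with
 * RANSAC (the reprojection threshold is derived from the distribution of flow
 * magnitudes), and prevPoints are warped by this homography to initialize currPoints.
 * Only used when param.useGlobalMotionPrior is set.
 */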
static
void preprocess(Ptr<CImageBuffer> prevPyramids[2],
    Ptr<CImageBuffer> currPyramids[2],
    const std::vector<cv::Point2f> & prevPoints,
    std::vector<cv::Point2f> & currPoints,
    const RLOFOpticalFlowParameter & param)
{
    cv::Mat mask, homography;
    if (param.useGlobalMotionPrior == false)
        return;

    currPoints.resize(prevPoints.size());

    RLOFOpticalFlowParameter gmeTrackerParam = param;
    gmeTrackerParam.useGlobalMotionPrior = false;
    gmeTrackerParam.largeWinSize = 17;
    // use the non-robust tracker for global motion estimation since it is faster
    gmeTrackerParam.normSigma0 = std::numeric_limits<float>::max();
    gmeTrackerParam.maxIteration = MAX(15, param.maxIteration);
    gmeTrackerParam.minEigenValue = 0.000001f;

    std::vector<cv::Point2f> gmPrevPoints, gmCurrPoints;

    // Initialize point grid
    int stepr = prevPyramids[0]->m_Image.rows / 30;
    int stepc = prevPyramids[0]->m_Image.cols / 40;
    for (int r = stepr / 2; r < prevPyramids[0]->m_Image.rows; r += stepr)
    {
        for (int c = stepc / 2; c < prevPyramids[0]->m_Image.cols; c += stepc)
        {
            gmPrevPoints.push_back(cv::Point2f(static_cast<float>(c), static_cast<float>(r)));
        }
    }

    // perform motion estimation
    calcLocalOpticalFlowCore(prevPyramids, currPyramids, gmPrevPoints, gmCurrPoints, gmeTrackerParam);

    cv::Mat prevPointsMat(static_cast<int>(gmPrevPoints.size()), 1, CV_32FC2);
    cv::Mat currPointsMat(static_cast<int>(gmPrevPoints.size()), 1, CV_32FC2);
    cv::Mat distMat(static_cast<int>(gmPrevPoints.size()), 1, CV_32FC1);

    // Use the flow magnitude distribution to estimate a suitable RANSAC reprojection error
    int noPoints = 0;
    for (unsigned int n = 0; n < gmPrevPoints.size(); n++)
    {
        cv::Point2f flow = gmCurrPoints[n] - gmPrevPoints[n];
        prevPointsMat.at<cv::Point2f>(noPoints) = gmPrevPoints[n];
        currPointsMat.at<cv::Point2f>(noPoints) = gmCurrPoints[n];
        distMat.at<float>(noPoints) = flow.x * flow.x + flow.y * flow.y;
        if (isnan(distMat.at<float>(noPoints)) == false)
            noPoints++;
    }

    float medianDist = (param.globalMotionRansacThreshold == 0) ? 1.f :
        quickselect<float>(distMat, static_cast<int>(noPoints * static_cast<float>(param.globalMotionRansacThreshold) / 100.f));
    medianDist = sqrt(medianDist);

    if (noPoints < 8)
        return;

    cv::findHomography(prevPointsMat(cv::Rect(0, 0, 1, noPoints)), currPointsMat(cv::Rect(0, 0, 1, noPoints)), cv::RANSAC, medianDist, mask).convertTo(homography, CV_32FC1);

    if (homography.empty())
        return;

    cv::perspectiveTransform(prevPoints, currPoints, homography);
}

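/*! Entry point used by the RLOF optical flow implementations: fills the gray/RGB image
 * buffers, optionally computes the global motion prior, and runs the core tracking loop.
 * The two buffer pairs are expected to be kept alive by the caller across frames so the
 * pyramids of the previous frame can be re-used.
 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *   Ptr<CImageBuffer> prevPyr[2] = { makePtr<CImageBuffer>(), makePtr<CImageBuffer>() };
 *   Ptr<CImageBuffer> currPyr[2] = { makePtr<CImageBuffer>(), makePtr<CImageBuffer>() };
 *   std::vector<Point2f> prevPts = ...;   // points to track
 *   std::vector<Point2f> nextPts;
 *   RLOFOpticalFlowParameter param;
 *   calcLocalOpticalFlow(prevFrame, currFrame, prevPyr, currPyr, prevPts, nextPts, param);
 */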
void calcLocalOpticalFlow(
    const Mat prevImage,
    const Mat currImage,
    Ptr<CImageBuffer> prevPyramids[2],
    Ptr<CImageBuffer> currPyramids[2],
    const std::vector<Point2f> & prevPoints,
    std::vector<Point2f> & currPoints,
    const RLOFOpticalFlowParameter & param)
{
    if (prevImage.empty() == false && currImage.empty() == false)
    {
        prevPyramids[0]->m_Overwrite = true;
        currPyramids[0]->m_Overwrite = true;
        prevPyramids[1]->m_Overwrite = true;
        // perform blurring and build blur pyramid only for the prev image
        currPyramids[1]->m_Overwrite = false;
        if (prevImage.type() == CV_8UC3)
        {
            prevPyramids[0]->setGrayFromRGB(prevImage);
            currPyramids[0]->setGrayFromRGB(currImage);
            prevPyramids[1]->setImage(prevImage);
            currPyramids[1]->setImage(currImage);

            if (param.supportRegionType == SR_CROSS)
            {
                prevPyramids[1]->setBlurFromRGB(prevImage);
                currPyramids[1]->setBlurFromRGB(currImage);
            }
        }
        else
        {
            prevPyramids[0]->setImage(prevImage);
            currPyramids[0]->setImage(currImage);
        }
    }
    preprocess(prevPyramids, currPyramids, prevPoints, currPoints, param);
    RLOFOpticalFlowParameter internParam = param;
    if (param.useGlobalMotionPrior == true)
        internParam.useInitialFlow = true;
    calcLocalOpticalFlowCore(prevPyramids, currPyramids, prevPoints, currPoints, internParam);
}

}} // namespace