1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html
4 
5 // This file is based on file issued with the following license:
6 
7 /*============================================================================
8 
9 Copyright 2017 Toby Collins
10 Redistribution and use in source and binary forms, with or without
11 modification, are permitted provided that the following conditions are met:
12 
13 1. Redistributions of source code must retain the above copyright notice, this
14    list of conditions and the following disclaimer.
15 
16 2. Redistributions in binary form must reproduce the above copyright notice, this
17    list of conditions and the following disclaimer in the documentation
18    and/or other materials provided with the distribution.
19 
20 3. Neither the name of the copyright holder nor the names of its contributors may
21    be used to endorse or promote products derived from this software without
22    specific prior written permission.
23 
24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
28 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
33 USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35 
36 #include "precomp.hpp"
37 #include "ippe.hpp"
38 
39 namespace cv {
40 namespace IPPE {
//Default constructor. IPPE_SMALL is the tolerance used throughout to decide
//whether a z-coordinate is close enough to zero for the points to be treated
//as lying on the plane z=0 (see makeCanonicalObjectPoints).
PoseSolver::PoseSolver() : IPPE_SMALL(1e-3)
{
}
44 
solveGeneric(InputArray _objectPoints,InputArray _imagePoints,OutputArray _rvec1,OutputArray _tvec1,float & err1,OutputArray _rvec2,OutputArray _tvec2,float & err2)45 void PoseSolver::solveGeneric(InputArray _objectPoints, InputArray _imagePoints, OutputArray _rvec1, OutputArray _tvec1,
46                               float& err1, OutputArray _rvec2, OutputArray _tvec2, float& err2)
47 {
48     Mat normalizedImagePoints;
49     if (_imagePoints.getMat().type() == CV_32FC2)
50     {
51         _imagePoints.getMat().convertTo(normalizedImagePoints, CV_64F);
52     }
53     else
54     {
55         normalizedImagePoints = _imagePoints.getMat();
56     }
57 
58     //solve:
59     Mat Ma, Mb;
60     solveGeneric(_objectPoints, normalizedImagePoints, Ma, Mb);
61 
62     //the two poses computed by IPPE (sorted):
63     Mat M1, M2;
64 
65     //sort poses by reprojection error:
66     sortPosesByReprojError(_objectPoints, normalizedImagePoints, Ma, Mb, M1, M2, err1, err2);
67 
68     //fill outputs
69     rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
70     rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);
71 
72     M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
73     M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
74 }
75 
//Computes the two (unsorted) IPPE pose hypotheses for planar object points.
//_objectPoints: N object points (1xN or Nx1, CV_32FC3 or CV_64FC3, N >= 4).
//_normalizedInputPoints: corresponding image points in normalized camera
//coordinates (1xN or Nx1, CV_32FC2 or CV_64FC2, same N).
//_Ma, _Mb: output 4x4 CV_64FC1 rigid transforms (the two pose hypotheses).
void PoseSolver::solveGeneric(InputArray _objectPoints, InputArray _normalizedInputPoints,
                              OutputArray _Ma, OutputArray _Mb)
{
    //argument checking:
    size_t n = static_cast<size_t>(_normalizedInputPoints.rows()) * static_cast<size_t>(_normalizedInputPoints.cols()); //number of points
    int objType = _objectPoints.type();
    int type_input = _normalizedInputPoints.type();

    CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
                 "Type of _objectPoints must be CV_32FC3 or CV_64FC3" );
    CV_CheckType(type_input, type_input == CV_32FC2 || type_input == CV_64FC2,
                 "Type of _normalizedInputPoints must be CV_32FC2 or CV_64FC2" );
    CV_Assert(_objectPoints.rows() == 1 || _objectPoints.cols() == 1);
    CV_Assert(_objectPoints.rows() >= 4 || _objectPoints.cols() >= 4);
    CV_Assert(_normalizedInputPoints.rows() == 1 || _normalizedInputPoints.cols() == 1);
    CV_Assert(static_cast<size_t>(_objectPoints.rows()) * static_cast<size_t>(_objectPoints.cols()) == n);

    //work in double precision; convert only when the input is float:
    Mat normalizedInputPoints;
    if (type_input == CV_32FC2)
    {
        _normalizedInputPoints.getMat().convertTo(normalizedInputPoints, CV_64F);
    }
    else
    {
        normalizedInputPoints = _normalizedInputPoints.getMat();
    }

    Mat objectInputPoints;
    if (objType == CV_32FC3)
    {
        _objectPoints.getMat().convertTo(objectInputPoints, CV_64F);
    }
    else
    {
        objectInputPoints = _objectPoints.getMat();
    }

    Mat canonicalObjPoints;
    Mat MmodelPoints2Canonical;

    //transform object points to the canonical position (zero centred and on the plane z=0):
    makeCanonicalObjectPoints(objectInputPoints, canonicalObjPoints, MmodelPoints2Canonical);

    //compute the homography mapping the model's points to normalizedInputPoints
    //NOTE(review): the raw _normalizedInputPoints (possibly still CV_32FC2) is
    //passed here rather than the converted normalizedInputPoints — presumably
    //homographyHO accepts both depths; confirm before changing.
    Matx33d H;
    HomographyHO::homographyHO(canonicalObjPoints, _normalizedInputPoints, H);

    //now solve
    Mat MaCanon, MbCanon;
    solveCanonicalForm(canonicalObjPoints, normalizedInputPoints, H, MaCanon, MbCanon);

    //transform computed poses to account for canonical transform:
    Mat Ma = MaCanon * MmodelPoints2Canonical;
    Mat Mb = MbCanon * MmodelPoints2Canonical;

    //output poses:
    Ma.copyTo(_Ma);
    Mb.copyTo(_Mb);
}
135 
solveCanonicalForm(InputArray _canonicalObjPoints,InputArray _normalizedInputPoints,const Matx33d & H,OutputArray _Ma,OutputArray _Mb)136 void PoseSolver::solveCanonicalForm(InputArray _canonicalObjPoints, InputArray _normalizedInputPoints, const Matx33d& H,
137                                     OutputArray _Ma, OutputArray _Mb)
138 {
139     _Ma.create(4, 4, CV_64FC1);
140     _Mb.create(4, 4, CV_64FC1);
141 
142     Mat Ma = _Ma.getMat();
143     Mat Mb = _Mb.getMat();
144 
145     //initialise poses:
146     Ma.setTo(0);
147     Ma.at<double>(3, 3) = 1;
148     Mb.setTo(0);
149     Mb.at<double>(3, 3) = 1;
150 
151     //Compute the Jacobian J of the homography at (0,0):
152     double j00 = H(0, 0) - H(2, 0) * H(0, 2);
153     double j01 = H(0, 1) - H(2, 1) * H(0, 2);
154     double j10 = H(1, 0) - H(2, 0) * H(1, 2);
155     double j11 = H(1, 1) - H(2, 1) * H(1, 2);
156 
157     //Compute the transformation of (0,0) into the image:
158     double v0 = H(0, 2);
159     double v1 = H(1, 2);
160 
161     //compute the two rotation solutions:
162     Mat Ra = Ma.colRange(0, 3).rowRange(0, 3);
163     Mat Rb = Mb.colRange(0, 3).rowRange(0, 3);
164     computeRotations(j00, j01, j10, j11, v0, v1, Ra, Rb);
165 
166     //for each rotation solution, compute the corresponding translation solution:
167     Mat ta = Ma.colRange(3, 4).rowRange(0, 3);
168     Mat tb = Mb.colRange(3, 4).rowRange(0, 3);
169     computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Ra, ta);
170     computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Rb, tb);
171 }
172 
solveSquare(InputArray _objectPoints,InputArray _imagePoints,OutputArray _rvec1,OutputArray _tvec1,float & err1,OutputArray _rvec2,OutputArray _tvec2,float & err2)173 void PoseSolver::solveSquare(InputArray _objectPoints, InputArray _imagePoints, OutputArray _rvec1, OutputArray _tvec1,
174                              float& err1, OutputArray _rvec2, OutputArray _tvec2, float& err2)
175 {
176     //allocate outputs:
177     _rvec1.create(3, 1, CV_64FC1);
178     _tvec1.create(3, 1, CV_64FC1);
179     _rvec2.create(3, 1, CV_64FC1);
180     _tvec2.create(3, 1, CV_64FC1);
181 
182     Mat objectPoints2D;
183 
184     //generate the object points:
185     objectPoints2D.create(1, 4, CV_64FC2);
186     Mat objectPoints = _objectPoints.getMat();
187     double squareLength;
188     if (objectPoints.depth() == CV_32F)
189     {
190         objectPoints2D.ptr<Vec2d>(0)[0] = Vec2d(objectPoints.ptr<Vec3f>(0)[0](0), objectPoints.ptr<Vec3f>(0)[0](1));
191         objectPoints2D.ptr<Vec2d>(0)[1] = Vec2d(objectPoints.ptr<Vec3f>(0)[1](0), objectPoints.ptr<Vec3f>(0)[1](1));
192         objectPoints2D.ptr<Vec2d>(0)[2] = Vec2d(objectPoints.ptr<Vec3f>(0)[2](0), objectPoints.ptr<Vec3f>(0)[2](1));
193         objectPoints2D.ptr<Vec2d>(0)[3] = Vec2d(objectPoints.ptr<Vec3f>(0)[3](0), objectPoints.ptr<Vec3f>(0)[3](1));
194 
195         squareLength = sqrt( (objectPoints.ptr<Vec3f>(0)[1](0) - objectPoints.ptr<Vec3f>(0)[0](0))*
196                              (objectPoints.ptr<Vec3f>(0)[1](0) - objectPoints.ptr<Vec3f>(0)[0](0)) +
197                              (objectPoints.ptr<Vec3f>(0)[1](1) - objectPoints.ptr<Vec3f>(0)[0](1))*
198                              (objectPoints.ptr<Vec3f>(0)[1](1) - objectPoints.ptr<Vec3f>(0)[0](1)) );
199     }
200     else
201     {
202         objectPoints2D.ptr<Vec2d>(0)[0] = Vec2d(objectPoints.ptr<Vec3d>(0)[0](0), objectPoints.ptr<Vec3d>(0)[0](1));
203         objectPoints2D.ptr<Vec2d>(0)[1] = Vec2d(objectPoints.ptr<Vec3d>(0)[1](0), objectPoints.ptr<Vec3d>(0)[1](1));
204         objectPoints2D.ptr<Vec2d>(0)[2] = Vec2d(objectPoints.ptr<Vec3d>(0)[2](0), objectPoints.ptr<Vec3d>(0)[2](1));
205         objectPoints2D.ptr<Vec2d>(0)[3] = Vec2d(objectPoints.ptr<Vec3d>(0)[3](0), objectPoints.ptr<Vec3d>(0)[3](1));
206 
207         squareLength = sqrt( (objectPoints.ptr<Vec3d>(0)[1](0) - objectPoints.ptr<Vec3d>(0)[0](0))*
208                              (objectPoints.ptr<Vec3d>(0)[1](0) - objectPoints.ptr<Vec3d>(0)[0](0)) +
209                              (objectPoints.ptr<Vec3d>(0)[1](1) - objectPoints.ptr<Vec3d>(0)[0](1))*
210                              (objectPoints.ptr<Vec3d>(0)[1](1) - objectPoints.ptr<Vec3d>(0)[0](1)) );
211     }
212 
213     Mat H; //homography from canonical object points to normalized pixels
214 
215     Mat normalizedInputPoints;
216     if (_imagePoints.getMat().type() == CV_32FC2)
217     {
218         _imagePoints.getMat().convertTo(normalizedInputPoints, CV_64F);
219     }
220     else
221     {
222         normalizedInputPoints = _imagePoints.getMat();
223     }
224 
225     //compute H
226     homographyFromSquarePoints(normalizedInputPoints, squareLength / 2.0, H);
227 
228     //now solve
229     Mat Ma, Mb;
230     solveCanonicalForm(objectPoints2D, normalizedInputPoints, H, Ma, Mb);
231 
232     //sort poses according to reprojection error:
233     Mat M1, M2;
234     sortPosesByReprojError(_objectPoints, normalizedInputPoints, Ma, Mb, M1, M2, err1, err2);
235 
236     //fill outputs
237     rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
238     rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);
239 
240     M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
241     M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
242 }
243 
generateSquareObjectCorners3D(double squareLength,OutputArray _objectPoints)244 void PoseSolver::generateSquareObjectCorners3D(double squareLength, OutputArray _objectPoints)
245 {
246     _objectPoints.create(1, 4, CV_64FC3);
247     Mat objectPoints = _objectPoints.getMat();
248     objectPoints.ptr<Vec3d>(0)[0] = Vec3d(-squareLength / 2.0, squareLength / 2.0, 0.0);
249     objectPoints.ptr<Vec3d>(0)[1] = Vec3d(squareLength / 2.0, squareLength / 2.0, 0.0);
250     objectPoints.ptr<Vec3d>(0)[2] = Vec3d(squareLength / 2.0, -squareLength / 2.0, 0.0);
251     objectPoints.ptr<Vec3d>(0)[3] = Vec3d(-squareLength / 2.0, -squareLength / 2.0, 0.0);
252 }
253 
generateSquareObjectCorners2D(double squareLength,OutputArray _objectPoints)254 void PoseSolver::generateSquareObjectCorners2D(double squareLength, OutputArray _objectPoints)
255 {
256     _objectPoints.create(1, 4, CV_64FC2);
257     Mat objectPoints = _objectPoints.getMat();
258     objectPoints.ptr<Vec2d>(0)[0] = Vec2d(-squareLength / 2.0, squareLength / 2.0);
259     objectPoints.ptr<Vec2d>(0)[1] = Vec2d(squareLength / 2.0, squareLength / 2.0);
260     objectPoints.ptr<Vec2d>(0)[2] = Vec2d(squareLength / 2.0, -squareLength / 2.0);
261     objectPoints.ptr<Vec2d>(0)[3] = Vec2d(-squareLength / 2.0, -squareLength / 2.0);
262 }
263 
meanSceneDepth(InputArray _objectPoints,InputArray _rvec,InputArray _tvec)264 double PoseSolver::meanSceneDepth(InputArray _objectPoints, InputArray _rvec, InputArray _tvec)
265 {
266     CV_CheckType(_objectPoints.type(), _objectPoints.type() == CV_64FC3,
267                  "Type of _objectPoints must be CV_64FC3" );
268 
269     size_t n = static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols());
270     Mat R;
271     Mat q;
272     Rodrigues(_rvec, R);
273     double zBar = 0;
274 
275     for (size_t i = 0; i < n; i++)
276     {
277         Mat p(_objectPoints.getMat().at<Point3d>(static_cast<int>(i)));
278         q = R * p + _tvec.getMat();
279         double z;
280         if (q.depth() == CV_64F)
281         {
282             z = q.at<double>(2);
283         }
284         else
285         {
286             z = static_cast<double>(q.at<float>(2));
287         }
288         zBar += z;
289     }
290     return zBar / static_cast<double>(n);
291 }
292 
rot2vec(InputArray _R,OutputArray _r)293 void PoseSolver::rot2vec(InputArray _R, OutputArray _r)
294 {
295     CV_CheckType(_R.type(), _R.type() == CV_64FC1,
296                  "Type of _R must be CV_64FC1" );
297     CV_Assert(_R.rows() == 3);
298     CV_Assert(_R.cols() == 3);
299 
300     _r.create(3, 1, CV_64FC1);
301 
302     Mat R = _R.getMat();
303     Mat rvec = _r.getMat();
304 
305     double trace = R.at<double>(0, 0) + R.at<double>(1, 1) + R.at<double>(2, 2);
306     double w_norm = acos((trace - 1.0) / 2.0);
307     double eps = std::numeric_limits<float>::epsilon();
308     double d = 1 / (2 * sin(w_norm)) * w_norm;
309     if (w_norm < eps) //rotation is the identity
310     {
311         rvec.setTo(0);
312     }
313     else
314     {
315         double c0 = R.at<double>(2, 1) - R.at<double>(1, 2);
316         double c1 = R.at<double>(0, 2) - R.at<double>(2, 0);
317         double c2 = R.at<double>(1, 0) - R.at<double>(0, 1);
318         rvec.at<double>(0) = d * c0;
319         rvec.at<double>(1) = d * c1;
320         rvec.at<double>(2) = d * c2;
321     }
322 }
323 
//Computes the least-squares translation t that aligns the rotated planar object
//points with the observed normalized image points, for a fixed rotation R.
//_objectPoints: N canonical 2D points on the plane z=0 (1xN or Nx1, CV_64FC2).
//_normalizedImgPoints: corresponding normalized image points (CV_64FC2, same N).
//_R: 3x3 CV_64FC1 rotation. _t: output 3x1 CV_64FC1 translation.
void PoseSolver::computeTranslation(InputArray _objectPoints, InputArray _normalizedImgPoints, InputArray _R, OutputArray _t)
{
    //This is solved by building the linear system At = b, where t corresponds to the (unknown) translation.
    //This is then inverted with the associated normal equations to give t = inv(transpose(A)*A)*transpose(A)*b
    //For efficiency we only store the coefficients of (transpose(A)*A) and (transpose(A)*b)

    CV_CheckType(_objectPoints.type(), _objectPoints.type() == CV_64FC2,
                 "Type of _objectPoints must be CV_64FC2" );
    CV_CheckType(_normalizedImgPoints.type(), _normalizedImgPoints.type() == CV_64FC2,
                 "Type of _normalizedImgPoints must be CV_64FC2" );
    CV_CheckType(_R.type(), _R.type() == CV_64FC1,
                 "Type of _R must be CV_64FC1" );
    CV_Assert(_R.rows() == 3 && _R.cols() == 3);
    CV_Assert(_objectPoints.rows() == 1 || _objectPoints.cols() == 1);
    CV_Assert(_normalizedImgPoints.rows() == 1 || _normalizedImgPoints.cols() == 1);

    size_t n = static_cast<size_t>(_normalizedImgPoints.rows() * _normalizedImgPoints.cols());
    CV_Assert(n == static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols()));

    Mat objectPoints = _objectPoints.getMat();
    Mat imgPoints = _normalizedImgPoints.getMat();

    _t.create(3, 1, CV_64FC1);

    Mat R = _R.getMat();

    //coefficients of (transpose(A)*A)
    //each point contributes the two rows [1, 0, -u] and [0, 1, -v] to A, so
    //the (0,1)/(1,0) entries of transpose(A)*A are identically zero and are
    //not stored:
    double ATA00 = static_cast<double>(n);
    double ATA02 = 0;
    double ATA11 = static_cast<double>(n);
    double ATA12 = 0;
    double ATA20 = 0;
    double ATA21 = 0;
    double ATA22 = 0;

    //coefficients of (transpose(A)*b)
    double ATb0 = 0;
    double ATb1 = 0;
    double ATb2 = 0;

    //now loop through each point and increment the coefficients:
    for (int i = 0; i < static_cast<int>(n); i++)
    {
        //rotate the planar point: only the first two columns of R matter since z = 0
        const Vec2d& objPt = objectPoints.at<Vec2d>(i);
        double rx = R.at<double>(0, 0) * objPt(0) + R.at<double>(0, 1) * objPt(1);
        double ry = R.at<double>(1, 0) * objPt(0) + R.at<double>(1, 1) * objPt(1);
        double rz = R.at<double>(2, 0) * objPt(0) + R.at<double>(2, 1) * objPt(1);

        const Vec2d& imgPt = imgPoints.at<Vec2d>(i);
        double a2 = -imgPt(0); //the -u entry of this point's x-row of A
        double b2 = -imgPt(1); //the -v entry of this point's y-row of A

        ATA02 = ATA02 + a2;
        ATA12 = ATA12 + b2;
        ATA20 = ATA20 + a2;
        ATA21 = ATA21 + b2;
        ATA22 = ATA22 + a2 * a2 + b2 * b2;

        double bx = -a2 * rz - rx;
        double by = -b2 * rz - ry;

        ATb0 = ATb0 + bx;
        ATb1 = ATb1 + by;
        ATb2 = ATb2 + a2 * bx + b2 * by;
    }

    //determinant of transpose(A)*A, exploiting the zero (0,1)/(1,0) entries.
    //NOTE(review): no singularity guard here — presumably the callers always
    //supply well-spread points; confirm before reusing elsewhere.
    double detAInv = 1.0 / (ATA00 * ATA11 * ATA22 - ATA00 * ATA12 * ATA21 - ATA02 * ATA11 * ATA20);

    //S gives inv(transpose(A)*A)/det(A)^2
    //construct S:
    //(S is the adjugate of transpose(A)*A: multiplying S by detAInv yields the inverse)
    double S00 = ATA11 * ATA22 - ATA12 * ATA21;
    double S01 = ATA02 * ATA21;
    double S02 = -ATA02 * ATA11;
    double S10 = ATA12 * ATA20;
    double S11 = ATA00 * ATA22 - ATA02 * ATA20;
    double S12 = -ATA00 * ATA12;
    double S20 = -ATA11 * ATA20;
    double S21 = -ATA00 * ATA21;
    double S22 = ATA00 * ATA11;

    //solve t:
    Mat t = _t.getMat();
    t.at<double>(0) = detAInv * (S00 * ATb0 + S01 * ATb1 + S02 * ATb2);
    t.at<double>(1) = detAInv * (S10 * ATb0 + S11 * ATb1 + S12 * ATb2);
    t.at<double>(2) = detAInv * (S20 * ATb0 + S21 * ATb1 + S22 * ATb2);
}
410 
//Computes the two IPPE rotation solutions.
//(j00..j11): the 2x2 Jacobian of the model-to-image homography at the plane origin.
//(p, q): the normalized image point onto which the plane origin projects.
//_R1, _R2: output 3x3 CV_64FC1 rotation matrices (the two hypotheses).
void PoseSolver::computeRotations(double j00, double j01, double j10, double j11, double p, double q, OutputArray _R1, OutputArray _R2)
{
    //This is fairly optimized code which makes it hard to understand. The matlab code is certainly easier to read.
    _R1.create(3, 3, CV_64FC1);
    _R2.create(3, 3, CV_64FC1);

    //Rv rotates the viewing ray (p, q, 1) onto the z-axis; it is then transposed:
    Matx33d Rv;
    Matx31d v(p, q, 1);
    rotateVec2ZAxis(v,Rv);
    Rv = Rv.t();

    //setup the 2x2 SVD decomposition:
    double rv00 = Rv(0,0);
    double rv01 = Rv(0,1);
    double rv02 = Rv(0,2);

    double rv10 = Rv(1,0);
    double rv11 = Rv(1,1);
    double rv12 = Rv(1,2);

    double rv20 = Rv(2,0);
    double rv21 = Rv(2,1);
    double rv22 = Rv(2,2);

    //B: top-left 2x2 of Rv with the component along (p, q) removed:
    double b00 = rv00 - p * rv20;
    double b01 = rv01 - p * rv21;
    double b10 = rv10 - q * rv20;
    double b11 = rv11 - q * rv21;

    //invert B analytically (no explicit singularity check):
    double dtinv = 1.0 / ((b00 * b11 - b01 * b10));

    double binv00 = dtinv * b11;
    double binv01 = -dtinv * b01;
    double binv10 = -dtinv * b10;
    double binv11 = dtinv * b00;

    //A = inv(B) * J:
    double a00 = binv00 * j00 + binv01 * j10;
    double a01 = binv00 * j01 + binv01 * j11;
    double a10 = binv10 * j00 + binv11 * j10;
    double a11 = binv10 * j01 + binv11 * j11;

    //compute the largest singular value of A:
    double ata00 = a00 * a00 + a01 * a01;
    double ata01 = a00 * a10 + a01 * a11;
    double ata11 = a10 * a10 + a11 * a11;

    //gamma2 = largest eigenvalue of transpose(A)*A (closed form for 2x2):
    double gamma2 = 0.5 * (ata00 + ata11 + sqrt((ata00 - ata11) * (ata00 - ata11) + 4.0 * ata01 * ata01));
    if (gamma2 < 0)
        CV_Error(Error::StsNoConv, "gamma2 is negative.");

    double gamma = sqrt(gamma2);

    if (std::fabs(gamma) < std::numeric_limits<float>::epsilon())
        CV_Error(Error::StsNoConv, "gamma is zero.");

    //reconstruct the full rotation matrices:
    //rtilde = A / gamma is the upper-left 2x2 of the rotation in the rotated frame:
    double rtilde00 = a00 / gamma;
    double rtilde01 = a01 / gamma;
    double rtilde10 = a10 / gamma;
    double rtilde11 = a11 / gamma;

    double rtilde00_2 = rtilde00 * rtilde00;
    double rtilde01_2 = rtilde01 * rtilde01;
    double rtilde10_2 = rtilde10 * rtilde10;
    double rtilde11_2 = rtilde11 * rtilde11;

    //b0, b1 complete the first two columns of rtilde to unit length; the sign of
    //b1 is fixed by sp, and the two global sign choices (+/- b0, b1) yield the
    //two rotation solutions R1 and R2:
    double b0 = sqrt(-rtilde00_2 - rtilde10_2 + 1);
    double b1 = sqrt(-rtilde01_2 - rtilde11_2 + 1);
    double sp = (-rtilde00 * rtilde01 - rtilde10 * rtilde11);

    if (sp < 0)
    {
        b1 = -b1;
    }

    //store results (each full rotation is composed with Rv to undo the initial alignment):
    Mat R1 = _R1.getMat();
    Mat R2 = _R2.getMat();

    R1.at<double>(0, 0) = (rtilde00)*rv00 + (rtilde10)*rv01 + (b0)*rv02;
    R1.at<double>(0, 1) = (rtilde01)*rv00 + (rtilde11)*rv01 + (b1)*rv02;
    R1.at<double>(0, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv00 + (b0 * rtilde01 - b1 * rtilde00) * rv01 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv02;
    R1.at<double>(1, 0) = (rtilde00)*rv10 + (rtilde10)*rv11 + (b0)*rv12;
    R1.at<double>(1, 1) = (rtilde01)*rv10 + (rtilde11)*rv11 + (b1)*rv12;
    R1.at<double>(1, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv10 + (b0 * rtilde01 - b1 * rtilde00) * rv11 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv12;
    R1.at<double>(2, 0) = (rtilde00)*rv20 + (rtilde10)*rv21 + (b0)*rv22;
    R1.at<double>(2, 1) = (rtilde01)*rv20 + (rtilde11)*rv21 + (b1)*rv22;
    R1.at<double>(2, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv20 + (b0 * rtilde01 - b1 * rtilde00) * rv21 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv22;

    R2.at<double>(0, 0) = (rtilde00)*rv00 + (rtilde10)*rv01 + (-b0) * rv02;
    R2.at<double>(0, 1) = (rtilde01)*rv00 + (rtilde11)*rv01 + (-b1) * rv02;
    R2.at<double>(0, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv00 + (b1 * rtilde00 - b0 * rtilde01) * rv01 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv02;
    R2.at<double>(1, 0) = (rtilde00)*rv10 + (rtilde10)*rv11 + (-b0) * rv12;
    R2.at<double>(1, 1) = (rtilde01)*rv10 + (rtilde11)*rv11 + (-b1) * rv12;
    R2.at<double>(1, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv10 + (b1 * rtilde00 - b0 * rtilde01) * rv11 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv12;
    R2.at<double>(2, 0) = (rtilde00)*rv20 + (rtilde10)*rv21 + (-b0) * rv22;
    R2.at<double>(2, 1) = (rtilde01)*rv20 + (rtilde11)*rv21 + (-b1) * rv22;
    R2.at<double>(2, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv20 + (b1 * rtilde00 - b0 * rtilde01) * rv21 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv22;
}
510 
//Closed-form homography for the special case where the four source points are
//the corners of a centred square with half-side halfLength. This avoids a
//general DLT solve.
//_targetPoints: the 4 target points (CV_32FC2 or CV_64FC2), in the same corner
//order as generateSquareObjectCorners2D. H_: output 3x3 CV_64FC1 homography.
void PoseSolver::homographyFromSquarePoints(InputArray _targetPoints, double halfLength, OutputArray H_)
{
    CV_CheckType(_targetPoints.type(), _targetPoints.type() == CV_32FC2 || _targetPoints.type() == CV_64FC2,
                 "Type of _targetPoints must be CV_32FC2 or CV_64FC2" );

    Mat pts = _targetPoints.getMat();

    //the target coordinates are read negated: the closed-form expressions below
    //are written in terms of these sign-flipped values
    double p1x, p1y;
    double p2x, p2y;
    double p3x, p3y;
    double p4x, p4y;

    if (_targetPoints.type() == CV_32FC2)
    {
        p1x = -pts.at<Vec2f>(0)(0);
        p1y = -pts.at<Vec2f>(0)(1);

        p2x = -pts.at<Vec2f>(1)(0);
        p2y = -pts.at<Vec2f>(1)(1);

        p3x = -pts.at<Vec2f>(2)(0);
        p3y = -pts.at<Vec2f>(2)(1);

        p4x = -pts.at<Vec2f>(3)(0);
        p4y = -pts.at<Vec2f>(3)(1);
    }
    else
    {
        p1x = -pts.at<Vec2d>(0)(0);
        p1y = -pts.at<Vec2d>(0)(1);

        p2x = -pts.at<Vec2d>(1)(0);
        p2y = -pts.at<Vec2d>(1)(1);

        p3x = -pts.at<Vec2d>(2)(0);
        p3y = -pts.at<Vec2d>(2)(1);

        p4x = -pts.at<Vec2d>(3)(0);
        p4y = -pts.at<Vec2d>(3)(1);
    }

    //analytic solution:
    //(degenerate configurations, e.g. three collinear points, make det vanish)
    double det = (halfLength * (p1x * p2y - p2x * p1y - p1x * p4y + p2x * p3y - p3x * p2y + p4x * p1y + p3x * p4y - p4x * p3y));
    if (abs(det) < 1e-9)
        CV_Error(Error::StsNoConv, "Determinant is zero!");
    double detsInv = -1 / det;

    //H is normalized so that H(2, 2) = 1:
    Matx33d H;
    H(0, 0) = detsInv * (p1x * p3x * p2y - p2x * p3x * p1y - p1x * p4x * p2y + p2x * p4x * p1y - p1x * p3x * p4y + p1x * p4x * p3y + p2x * p3x * p4y - p2x * p4x * p3y);
    H(0, 1) = detsInv * (p1x * p2x * p3y - p1x * p3x * p2y - p1x * p2x * p4y + p2x * p4x * p1y + p1x * p3x * p4y - p3x * p4x * p1y - p2x * p4x * p3y + p3x * p4x * p2y);
    H(0, 2) = detsInv * halfLength * (p1x * p2x * p3y - p2x * p3x * p1y - p1x * p2x * p4y + p1x * p4x * p2y - p1x * p4x * p3y + p3x * p4x * p1y + p2x * p3x * p4y - p3x * p4x * p2y);
    H(1, 0) = detsInv * (p1x * p2y * p3y - p2x * p1y * p3y - p1x * p2y * p4y + p2x * p1y * p4y - p3x * p1y * p4y + p4x * p1y * p3y + p3x * p2y * p4y - p4x * p2y * p3y);
    H(1, 1) = detsInv * (p2x * p1y * p3y - p3x * p1y * p2y - p1x * p2y * p4y + p4x * p1y * p2y + p1x * p3y * p4y - p4x * p1y * p3y - p2x * p3y * p4y + p3x * p2y * p4y);
    H(1, 2) = detsInv * halfLength * (p1x * p2y * p3y - p3x * p1y * p2y - p2x * p1y * p4y + p4x * p1y * p2y - p1x * p3y * p4y + p3x * p1y * p4y + p2x * p3y * p4y - p4x * p2y * p3y);
    H(2, 0) = -detsInv * (p1x * p3y - p3x * p1y - p1x * p4y - p2x * p3y + p3x * p2y + p4x * p1y + p2x * p4y - p4x * p2y);
    H(2, 1) = detsInv * (p1x * p2y - p2x * p1y - p1x * p3y + p3x * p1y + p2x * p4y - p4x * p2y - p3x * p4y + p4x * p3y);
    H(2, 2) = 1.0;

    //wrap the Matx without copying, then copy into the output:
    Mat(H, false).copyTo(H_);
}
571 
//Transforms 3D object points into the canonical frame used by IPPE: the points
//become zero-centred and lie on the plane z=0.
//_objectPoints: N points (CV_32FC3 or CV_64FC3).
//_canonicalObjPoints: output 1xN CV_64FC2 points (x, y in the canonical frame).
//_MmodelPoints2Canonical: output 4x4 transform mapping model points to the
//canonical frame (a centring translation, composed with a rotation when the
//input points are not already on z=0).
void PoseSolver::makeCanonicalObjectPoints(InputArray _objectPoints, OutputArray _canonicalObjPoints, OutputArray _MmodelPoints2Canonical)
{
    int objType = _objectPoints.type();
    CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
                 "Type of _objectPoints must be CV_32FC3 or CV_64FC3" );

    int n = _objectPoints.rows() * _objectPoints.cols();

    _canonicalObjPoints.create(1, n, CV_64FC2);

    Mat objectPoints = _objectPoints.getMat();
    Mat canonicalObjPoints = _canonicalObjPoints.getMat();

    //UZero holds the points as 3xN columns (later centred in place):
    Mat UZero(3, n, CV_64FC1);

    double xBar = 0;
    double yBar = 0;
    double zBar = 0;
    //isOnZPlane: true iff every raw z-coordinate is within IPPE_SMALL of zero
    bool isOnZPlane = true;
    for (int i = 0; i < n; i++)
    {
        double x, y, z;
        if (objType == CV_32FC3)
        {
            x = static_cast<double>(objectPoints.at<Vec3f>(i)[0]);
            y = static_cast<double>(objectPoints.at<Vec3f>(i)[1]);
            z = static_cast<double>(objectPoints.at<Vec3f>(i)[2]);
        }
        else
        {
            x = objectPoints.at<Vec3d>(i)[0];
            y = objectPoints.at<Vec3d>(i)[1];
            z = objectPoints.at<Vec3d>(i)[2];
        }

        if (abs(z) > IPPE_SMALL)
        {
            isOnZPlane = false;
        }

        xBar += x;
        yBar += y;
        zBar += z;

        UZero.at<double>(0, i) = x;
        UZero.at<double>(1, i) = y;
        UZero.at<double>(2, i) = z;
    }
    xBar = xBar / static_cast<double>(n);
    yBar = yBar / static_cast<double>(n);
    zBar = zBar / static_cast<double>(n);

    //centre the points on their mean:
    for (int i = 0; i < n; i++)
    {
        UZero.at<double>(0, i) -= xBar;
        UZero.at<double>(1, i) -= yBar;
        UZero.at<double>(2, i) -= zBar;
    }

    //MCenter: the 4x4 centring translation
    Matx44d MCenter = Matx44d::eye();
    MCenter(0, 3) = -xBar;
    MCenter(1, 3) = -yBar;
    MCenter(2, 3) = -zBar;

    if (isOnZPlane)
    {
        //MmodelPoints2Canonical is given by MCenter
        Mat(MCenter, false).copyTo(_MmodelPoints2Canonical);
        for (int i = 0; i < n; i++)
        {
            canonicalObjPoints.at<Vec2d>(i)[0] = UZero.at<double>(0, i);
            canonicalObjPoints.at<Vec2d>(i)[1] = UZero.at<double>(1, i);
        }
    }
    else
    {
        Mat UZeroAligned(3, n, CV_64FC1);
        Matx33d R; //rotation that rotates objectPoints to the plane z=0

        if (!computeObjextSpaceR3Pts(objectPoints,R))
        {
            //we could not compute R, probably because there is a duplicate point in {objectPoints(0),objectPoints(1),objectPoints(2)}.
            //So we compute it with the SVD (which is slower):
            computeObjextSpaceRSvD(UZero,R);
        }

        UZeroAligned = R * UZero;

        for (int i = 0; i < n; i++)
        {
            canonicalObjPoints.at<Vec2d>(i)[0] = UZeroAligned.at<double>(0, i);
            canonicalObjPoints.at<Vec2d>(i)[1] = UZeroAligned.at<double>(1, i);
            //after rotation the points must sit on z=0 up to the tolerance,
            //otherwise the input was not planar:
            if (abs(UZeroAligned.at<double>(2, i)) > IPPE_SMALL)
                CV_Error(Error::StsNoConv, "Cannot transform object points to the plane z=0!");
        }

        //embed R into a 4x4 transform and compose with the centring translation:
        Matx44d MRot = Matx44d::zeros();
        MRot(3, 3) = 1;

        for (int i = 0; i < 3; i++)
        {
            for (int j = 0; j < 3; j++)
            {
                MRot(i,j) = R(i,j);
            }
        }
        Matx44d Mb = MRot * MCenter;
        Mat(Mb, false).copyTo(_MmodelPoints2Canonical);
    }
}
682 
evalReprojError(InputArray _objectPoints,InputArray _imagePoints,InputArray _M,float & err)683 void PoseSolver::evalReprojError(InputArray _objectPoints, InputArray _imagePoints, InputArray _M, float& err)
684 {
685     Mat projectedPoints;
686     Mat imagePoints = _imagePoints.getMat();
687     Mat r;
688     rot2vec(_M.getMat().colRange(0, 3).rowRange(0, 3), r);
689 
690     Mat K = Mat::eye(3, 3, CV_64FC1);
691     Mat dist;
692     projectPoints(_objectPoints, r, _M.getMat().colRange(3, 4).rowRange(0, 3), K, dist, projectedPoints);
693 
694     err = 0;
695     int n = _objectPoints.rows() * _objectPoints.cols();
696 
697     float dx, dy;
698     const int projPtsDepth = projectedPoints.depth();
699     for (int i = 0; i < n; i++)
700     {
701         if (projPtsDepth == CV_32F)
702         {
703             dx = projectedPoints.at<Vec2f>(i)[0] - static_cast<float>(imagePoints.at<Vec2d>(i)[0]);
704             dy = projectedPoints.at<Vec2f>(i)[1] - static_cast<float>(imagePoints.at<Vec2d>(i)[1]);
705         }
706         else
707         {
708             dx = static_cast<float>(projectedPoints.at<Vec2d>(i)[0] - imagePoints.at<Vec2d>(i)[0]);
709             dy = static_cast<float>(projectedPoints.at<Vec2d>(i)[1] - imagePoints.at<Vec2d>(i)[1]);
710         }
711 
712         err += dx * dx + dy * dy;
713     }
714     err = sqrt(err / (2.0f * n));
715 }
716 
sortPosesByReprojError(InputArray _objectPoints,InputArray _imagePoints,InputArray _Ma,InputArray _Mb,OutputArray _M1,OutputArray _M2,float & err1,float & err2)717 void PoseSolver::sortPosesByReprojError(InputArray _objectPoints, InputArray _imagePoints, InputArray _Ma, InputArray _Mb,
718                                         OutputArray _M1, OutputArray _M2, float& err1, float& err2)
719 {
720     float erra, errb;
721     evalReprojError(_objectPoints, _imagePoints, _Ma, erra);
722     evalReprojError(_objectPoints, _imagePoints, _Mb, errb);
723     if (erra < errb)
724     {
725         err1 = erra;
726         _Ma.copyTo(_M1);
727 
728         err2 = errb;
729         _Mb.copyTo(_M2);
730     }
731     else
732     {
733         err1 = errb;
734         _Mb.copyTo(_M1);
735 
736         err2 = erra;
737         _Ma.copyTo(_M2);
738     }
739 }
740 
rotateVec2ZAxis(const Matx31d & a,Matx33d & Ra)741 void PoseSolver::rotateVec2ZAxis(const Matx31d& a, Matx33d& Ra)
742 {
743     double ax = a(0);
744     double ay = a(1);
745     double az = a(2);
746 
747     double nrm = sqrt(ax*ax + ay*ay + az*az);
748     ax = ax/nrm;
749     ay = ay/nrm;
750     az = az/nrm;
751 
752     double c = az;
753 
754     if (abs(1.0+c) < std::numeric_limits<float>::epsilon())
755     {
756         Ra = Matx33d::zeros();
757         Ra(0,0) = 1.0;
758         Ra(1,1) = 1.0;
759         Ra(2,2) = -1.0;
760     }
761     else
762     {
763         double d = 1.0/(1.0+c);
764         double ax2 = ax*ax;
765         double ay2 = ay*ay;
766         double axay = ax*ay;
767 
768         Ra(0,0) = -ax2*d + 1.0;
769         Ra(0,1) = -axay*d;
770         Ra(0,2) = -ax;
771 
772         Ra(1,0) = -axay*d;
773         Ra(1,1) = -ay2*d + 1.0;
774         Ra(1,2) = -ay;
775 
776         Ra(2,0) = ax;
777         Ra(2,1) = ay;
778         Ra(2,2) = 1.0 - (ax2 + ay2)*d;
779     }
780 }
781 
computeObjextSpaceR3Pts(InputArray _objectPoints,Matx33d & R)782 bool PoseSolver::computeObjextSpaceR3Pts(InputArray _objectPoints, Matx33d& R)
783 {
784     bool ret; //return argument
785     double p1x,p1y,p1z;
786     double p2x,p2y,p2z;
787     double p3x,p3y,p3z;
788 
789     Mat objectPoints = _objectPoints.getMat();
790     if (objectPoints.type() == CV_32FC3)
791     {
792         p1x = objectPoints.at<Vec3f>(0)[0];
793         p1y = objectPoints.at<Vec3f>(0)[1];
794         p1z = objectPoints.at<Vec3f>(0)[2];
795 
796         p2x = objectPoints.at<Vec3f>(1)[0];
797         p2y = objectPoints.at<Vec3f>(1)[1];
798         p2z = objectPoints.at<Vec3f>(1)[2];
799 
800         p3x = objectPoints.at<Vec3f>(2)[0];
801         p3y = objectPoints.at<Vec3f>(2)[1];
802         p3z = objectPoints.at<Vec3f>(2)[2];
803     }
804     else
805     {
806         p1x = objectPoints.at<Vec3d>(0)[0];
807         p1y = objectPoints.at<Vec3d>(0)[1];
808         p1z = objectPoints.at<Vec3d>(0)[2];
809 
810         p2x = objectPoints.at<Vec3d>(1)[0];
811         p2y = objectPoints.at<Vec3d>(1)[1];
812         p2z = objectPoints.at<Vec3d>(1)[2];
813 
814         p3x = objectPoints.at<Vec3d>(2)[0];
815         p3y = objectPoints.at<Vec3d>(2)[1];
816         p3z = objectPoints.at<Vec3d>(2)[2];
817     }
818 
819     double nx = (p1y - p2y)*(p1z - p3z) - (p1y - p3y)*(p1z - p2z);
820     double ny = (p1x - p3x)*(p1z - p2z) - (p1x - p2x)*(p1z - p3z);
821     double nz = (p1x - p2x)*(p1y - p3y) - (p1x - p3x)*(p1y - p2y);
822 
823     double nrm = sqrt(nx*nx+ ny*ny + nz*nz);
824     if (nrm > IPPE_SMALL)
825     {
826         nx = nx/nrm;
827         ny = ny/nrm;
828         nz = nz/nrm;
829         Matx31d v(nx, ny, nz);
830         rotateVec2ZAxis(v,R);
831         ret = true;
832     }
833     else
834     {
835         ret = false;
836     }
837     return ret;
838 }
839 
computeObjextSpaceRSvD(InputArray _objectPointsZeroMean,OutputArray _R)840 void PoseSolver::computeObjextSpaceRSvD(InputArray _objectPointsZeroMean, OutputArray _R)
841 {
842     _R.create(3, 3, CV_64FC1);
843     Mat R = _R.getMat();
844 
845     //we could not compute R with the first three points, so lets use the SVD
846     SVD s;
847     Mat W, U, VT;
848     s.compute(_objectPointsZeroMean.getMat() * _objectPointsZeroMean.getMat().t(), W, U, VT);
849     double s3 = W.at<double>(2);
850     double s2 = W.at<double>(1);
851 
852     //check if points are coplanar:
853     CV_Assert(s3 / s2 < IPPE_SMALL);
854 
855     R = U.t();
856     if (determinant(R) < 0)
857     {
858         //this ensures R is a rotation matrix and not a general unitary matrix:
859         R.at<double>(2, 0) = -R.at<double>(2, 0);
860         R.at<double>(2, 1) = -R.at<double>(2, 1);
861         R.at<double>(2, 2) = -R.at<double>(2, 2);
862     }
863 }
864 } //namespace IPPE
865 
866 namespace HomographyHO {
normalizeDataIsotropic(InputArray _Data,OutputArray _DataN,OutputArray _T,OutputArray _Ti)867 void normalizeDataIsotropic(InputArray _Data, OutputArray _DataN, OutputArray _T, OutputArray _Ti)
868 {
869     Mat Data = _Data.getMat();
870     int numPoints = Data.rows * Data.cols;
871     CV_Assert(Data.rows == 1 || Data.cols == 1);
872     CV_Assert(Data.channels() == 2 || Data.channels() == 3);
873     CV_Assert(numPoints >= 4);
874 
875     int dataType = _Data.type();
876     CV_CheckType(dataType, dataType == CV_32FC2 || dataType == CV_32FC3 || dataType == CV_64FC2 || dataType == CV_64FC3,
877                  "Type of _Data must be one of CV_32FC2, CV_32FC3, CV_64FC2, CV_64FC3");
878 
879     _DataN.create(2, numPoints, CV_64FC1);
880 
881     _T.create(3, 3, CV_64FC1);
882     _Ti.create(3, 3, CV_64FC1);
883 
884     Mat DataN = _DataN.getMat();
885     Mat T = _T.getMat();
886     Mat Ti = _Ti.getMat();
887 
888     _T.setTo(0);
889     _Ti.setTo(0);
890 
891     int numChannels = Data.channels();
892     double xm = 0;
893     double ym = 0;
894     for (int i = 0; i < numPoints; i++)
895     {
896         if (numChannels == 2)
897         {
898             if (dataType == CV_32FC2)
899             {
900                 xm = xm + Data.at<Vec2f>(i)[0];
901                 ym = ym + Data.at<Vec2f>(i)[1];
902             }
903             else
904             {
905                 xm = xm + Data.at<Vec2d>(i)[0];
906                 ym = ym + Data.at<Vec2d>(i)[1];
907             }
908         }
909         else
910         {
911             if (dataType == CV_32FC3)
912             {
913                 xm = xm + Data.at<Vec3f>(i)[0];
914                 ym = ym + Data.at<Vec3f>(i)[1];
915             }
916             else
917             {
918                 xm = xm + Data.at<Vec3d>(i)[0];
919                 ym = ym + Data.at<Vec3d>(i)[1];
920             }
921         }
922     }
923     xm = xm / static_cast<double>(numPoints);
924     ym = ym / static_cast<double>(numPoints);
925 
926     double kappa = 0;
927     double xh, yh;
928 
929     for (int i = 0; i < numPoints; i++)
930     {
931 
932         if (numChannels == 2)
933         {
934             if (dataType == CV_32FC2)
935             {
936                 xh = Data.at<Vec2f>(i)[0] - xm;
937                 yh = Data.at<Vec2f>(i)[1] - ym;
938             }
939             else
940             {
941                 xh = Data.at<Vec2d>(i)[0] - xm;
942                 yh = Data.at<Vec2d>(i)[1] - ym;
943             }
944         }
945         else
946         {
947             if (dataType == CV_32FC3)
948             {
949                 xh = Data.at<Vec3f>(i)[0] - xm;
950                 yh = Data.at<Vec3f>(i)[1] - ym;
951             }
952             else
953             {
954                 xh = Data.at<Vec3d>(i)[0] - xm;
955                 yh = Data.at<Vec3d>(i)[1] - ym;
956             }
957         }
958 
959         DataN.at<double>(0, i) = xh;
960         DataN.at<double>(1, i) = yh;
961         kappa = kappa + xh * xh + yh * yh;
962     }
963     double beta = sqrt(2 * numPoints / kappa);
964     DataN = DataN * beta;
965 
966     T.at<double>(0, 0) = 1.0 / beta;
967     T.at<double>(1, 1) = 1.0 / beta;
968 
969     T.at<double>(0, 2) = xm;
970     T.at<double>(1, 2) = ym;
971 
972     T.at<double>(2, 2) = 1;
973 
974     Ti.at<double>(0, 0) = beta;
975     Ti.at<double>(1, 1) = beta;
976 
977     Ti.at<double>(0, 2) = -beta * xm;
978     Ti.at<double>(1, 2) = -beta * ym;
979 
980     Ti.at<double>(2, 2) = 1;
981 }
982 
/**
 * @brief Estimates the homography H mapping _srcPoints to _targPoints, presumably
 *        using the "HO" method of Harker & O'Leary — TODO confirm against the paper.
 *
 * Both point sets are isotropically normalized, the third row of H is found as the
 * eigenvector of a reduced 3x3 normal-equation system, the remaining entries are
 * recovered by back-substitution, and the result is denormalized and scaled so
 * that H(2,2) == 1.
 *
 * @param _srcPoints   Source points (N >= 4, formats accepted by normalizeDataIsotropic).
 * @param _targPoints  Target points, same count as _srcPoints.
 * @param H            Output 3x3 homography.
 */
void homographyHO(InputArray _srcPoints, InputArray _targPoints, Matx33d& H)
{
    Mat DataA, DataB, TA, TAi, TB, TBi;

    // Condition both point sets; TAi/TB undo the conditioning at the end.
    HomographyHO::normalizeDataIsotropic(_srcPoints, DataA, TA, TAi);
    HomographyHO::normalizeDataIsotropic(_targPoints, DataB, TB, TBi);

    int n = DataA.cols;
    CV_Assert(n == DataB.cols);

    // C1..C4 hold the bilinear terms -x'·x, -x'·y, -y'·x, -y'·y per correspondence;
    // mC1..mC4 are their means (used to center the linear system).
    Mat C1(1, n, CV_64FC1);
    Mat C2(1, n, CV_64FC1);
    Mat C3(1, n, CV_64FC1);
    Mat C4(1, n, CV_64FC1);

    double mC1 = 0, mC2 = 0, mC3 = 0, mC4 = 0;

    for (int i = 0; i < n; i++)
    {
        C1.at<double>(0, i) = -DataB.at<double>(0, i) * DataA.at<double>(0, i);
        C2.at<double>(0, i) = -DataB.at<double>(0, i) * DataA.at<double>(1, i);
        C3.at<double>(0, i) = -DataB.at<double>(1, i) * DataA.at<double>(0, i);
        C4.at<double>(0, i) = -DataB.at<double>(1, i) * DataA.at<double>(1, i);

        mC1 += C1.at<double>(0, i);
        mC2 += C2.at<double>(0, i);
        mC3 += C3.at<double>(0, i);
        mC4 += C4.at<double>(0, i);
    }

    mC1 /= n;
    mC2 /= n;
    mC3 /= n;
    mC4 /= n;

    // Mx/My: centered measurement matrices for the x- and y-equations of the
    // homography constraint, as functions of the third row h789.
    Mat Mx(n, 3, CV_64FC1);
    Mat My(n, 3, CV_64FC1);

    for (int i = 0; i < n; i++)
    {
        Mx.at<double>(i, 0) = C1.at<double>(0, i) - mC1;
        Mx.at<double>(i, 1) = C2.at<double>(0, i) - mC2;
        Mx.at<double>(i, 2) = -DataB.at<double>(0, i);

        My.at<double>(i, 0) = C3.at<double>(0, i) - mC3;
        My.at<double>(i, 1) = C4.at<double>(0, i) - mC4;
        My.at<double>(i, 2) = -DataB.at<double>(1, i);
    }

    // Explicit 2x2 inverse of DataA * DataA^T (closed form; no assertion that the
    // determinant dt is non-zero — conditioning above makes it non-degenerate for
    // non-collinear input).
    Mat DataAT, DataADataAT;

    transpose(DataA, DataAT);
    DataADataAT = DataA * DataAT;
    double dt = DataADataAT.at<double>(0, 0) * DataADataAT.at<double>(1, 1) - DataADataAT.at<double>(0, 1) * DataADataAT.at<double>(1, 0);

    Mat DataADataATi(2, 2, CV_64FC1);
    DataADataATi.at<double>(0, 0) = DataADataAT.at<double>(1, 1) / dt;
    DataADataATi.at<double>(0, 1) = -DataADataAT.at<double>(0, 1) / dt;
    DataADataATi.at<double>(1, 0) = -DataADataAT.at<double>(1, 0) / dt;
    DataADataATi.at<double>(1, 1) = DataADataAT.at<double>(0, 0) / dt;

    // Pp is the pseudo-inverse-like projector used to eliminate h12/h45 from the
    // system; Bx/By map h789 to h12/h45 (see back-substitution below).
    Mat Pp = DataADataATi * DataA;

    Mat Bx = Pp * Mx;
    Mat By = Pp * My;

    Mat Ex = DataAT * Bx;
    Mat Ey = DataAT * By;

    // D: the reduced (2n x 3) design matrix whose null direction gives h789.
    Mat D(2 * n, 3, CV_64FC1);

    for (int i = 0; i < n; i++)
    {
        D.at<double>(i, 0) = Mx.at<double>(i, 0) - Ex.at<double>(i, 0);
        D.at<double>(i, 1) = Mx.at<double>(i, 1) - Ex.at<double>(i, 1);
        D.at<double>(i, 2) = Mx.at<double>(i, 2) - Ex.at<double>(i, 2);

        D.at<double>(i + n, 0) = My.at<double>(i, 0) - Ey.at<double>(i, 0);
        D.at<double>(i + n, 1) = My.at<double>(i, 1) - Ey.at<double>(i, 1);
        D.at<double>(i + n, 2) = My.at<double>(i, 2) - Ey.at<double>(i, 2);
    }

    // h789 = eigenvector of D^T D with the smallest eigenvalue (cv::eigen returns
    // eigenvalues in descending order, so it is row 2 of U).
    Mat DT, DDT;
    transpose(D, DT);
    DDT = DT * D;

    Mat S, U;
    eigen(DDT, S, U);

    Mat h789(3, 1, CV_64FC1);
    h789.at<double>(0, 0) = U.at<double>(2, 0);
    h789.at<double>(1, 0) = U.at<double>(2, 1);
    h789.at<double>(2, 0) = U.at<double>(2, 2);

    // Back-substitute the remaining homography entries from h789.
    Mat h12 = -Bx * h789;
    Mat h45 = -By * h789;

    double h3 = -(mC1 * h789.at<double>(0, 0) + mC2 * h789.at<double>(1, 0));
    double h6 = -(mC3 * h789.at<double>(0, 0) + mC4 * h789.at<double>(1, 0));

    H(0, 0) = h12.at<double>(0, 0);
    H(0, 1) = h12.at<double>(1, 0);
    H(0, 2) = h3;

    H(1, 0) = h45.at<double>(0, 0);
    H(1, 1) = h45.at<double>(1, 0);
    H(1, 2) = h6;

    H(2, 0) = h789.at<double>(0, 0);
    H(2, 1) = h789.at<double>(1, 0);
    H(2, 2) = h789.at<double>(2, 0);

    // Denormalize and fix the scale so H(2,2) == 1 (no guard if H(2,2) is ~0,
    // which would mean a degenerate homography).
    H = Mat(TB * H * TAi);
    double h22_inv = 1 / H(2, 2);
    H = H * h22_inv;
}
1099 }
1100 } //namespace cv
1101