1 /*
2 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
3 * Released to public domain under terms of the BSD Simplified license.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of the organization nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
15 *
16 * See <http://www.opensource.org/licenses/bsd-license>
17 */
18
19 #include "precomp.hpp"
20 #include <iostream>
21 #include <map>
22 #include <set>
23
24 namespace cv
25 {
26
// Returns a copy of src with duplicates removed. The result is ordered
// ascending (std::set ordering), not by first appearance in src.
template<typename _Tp>
inline std::vector<_Tp> remove_dups(const std::vector<_Tp>& src) {
    // a set both deduplicates and sorts in one pass over the input
    std::set<_Tp> unique_elems(src.begin(), src.end());
    return std::vector<_Tp>(unique_elems.begin(), unique_elems.end());
}
40
argsort(InputArray _src,bool ascending=true)41 static Mat argsort(InputArray _src, bool ascending=true)
42 {
43 Mat src = _src.getMat();
44 if (src.rows != 1 && src.cols != 1) {
45 String error_message = "Wrong shape of input matrix! Expected a matrix with one row or column.";
46 CV_Error(Error::StsBadArg, error_message);
47 }
48 int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
49 Mat sorted_indices;
50 sortIdx(src.reshape(1,1),sorted_indices,flags);
51 return sorted_indices;
52 }
53
asRowMatrix(InputArrayOfArrays src,int rtype,double alpha=1,double beta=0)54 static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
55 // make sure the input data is a vector of matrices or vector of vector
56 if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_ARRAY_MAT &&
57 src.kind() != _InputArray::STD_VECTOR_VECTOR) {
58 String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
59 CV_Error(Error::StsBadArg, error_message);
60 }
61 // number of samples
62 size_t n = src.total();
63 // return empty matrix if no matrices given
64 if(n == 0)
65 return Mat();
66 // dimensionality of (reshaped) samples
67 size_t d = src.getMat(0).total();
68 // create data matrix
69 Mat data((int)n, (int)d, rtype);
70 // now copy data
71 for(int i = 0; i < (int)n; i++) {
72 // make sure data can be reshaped, throw exception if not!
73 if(src.getMat(i).total() != d) {
74 String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total());
75 CV_Error(Error::StsBadArg, error_message);
76 }
77 // get a hold of the current row
78 Mat xi = data.row(i);
79 // make reshape happy by cloning for non-continuous matrices
80 if(src.getMat(i).isContinuous()) {
81 src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
82 } else {
83 src.getMat(i).clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
84 }
85 }
86 return data;
87 }
88
sortMatrixColumnsByIndices(InputArray _src,InputArray _indices,OutputArray _dst)89 static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
90 if(_indices.getMat().type() != CV_32SC1) {
91 CV_Error(Error::StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
92 }
93 Mat src = _src.getMat();
94 std::vector<int> indices = _indices.getMat();
95 _dst.create(src.rows, src.cols, src.type());
96 Mat dst = _dst.getMat();
97 for(size_t idx = 0; idx < indices.size(); idx++) {
98 Mat originalCol = src.col(indices[idx]);
99 Mat sortedCol = dst.col((int)idx);
100 originalCol.copyTo(sortedCol);
101 }
102 }
103
sortMatrixColumnsByIndices(InputArray src,InputArray indices)104 static Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
105 Mat dst;
106 sortMatrixColumnsByIndices(src, indices, dst);
107 return dst;
108 }
109
110
111 template<typename _Tp> static bool
isSymmetric_(InputArray src)112 isSymmetric_(InputArray src) {
113 Mat _src = src.getMat();
114 if(_src.cols != _src.rows)
115 return false;
116 for (int i = 0; i < _src.rows; i++) {
117 for (int j = 0; j < _src.cols; j++) {
118 _Tp a = _src.at<_Tp> (i, j);
119 _Tp b = _src.at<_Tp> (j, i);
120 if (a != b) {
121 return false;
122 }
123 }
124 }
125 return true;
126 }
127
128 template<typename _Tp> static bool
isSymmetric_(InputArray src,double eps)129 isSymmetric_(InputArray src, double eps) {
130 Mat _src = src.getMat();
131 if(_src.cols != _src.rows)
132 return false;
133 for (int i = 0; i < _src.rows; i++) {
134 for (int j = 0; j < _src.cols; j++) {
135 _Tp a = _src.at<_Tp> (i, j);
136 _Tp b = _src.at<_Tp> (j, i);
137 if (std::abs(a - b) > eps) {
138 return false;
139 }
140 }
141 }
142 return true;
143 }
144
isSymmetric(InputArray src,double eps=1e-16)145 static bool isSymmetric(InputArray src, double eps=1e-16)
146 {
147 Mat m = src.getMat();
148 switch (m.type()) {
149 case CV_8SC1: return isSymmetric_<char>(m); break;
150 case CV_8UC1:
151 return isSymmetric_<unsigned char>(m); break;
152 case CV_16SC1:
153 return isSymmetric_<short>(m); break;
154 case CV_16UC1:
155 return isSymmetric_<unsigned short>(m); break;
156 case CV_32SC1:
157 return isSymmetric_<int>(m); break;
158 case CV_32FC1:
159 return isSymmetric_<float>(m, eps); break;
160 case CV_64FC1:
161 return isSymmetric_<double>(m, eps); break;
162 default:
163 break;
164 }
165 return false;
166 }
167
168
169 //------------------------------------------------------------------------------
170 // cv::subspaceProject
171 //------------------------------------------------------------------------------
subspaceProject(InputArray _W,InputArray _mean,InputArray _src)172 Mat LDA::subspaceProject(InputArray _W, InputArray _mean, InputArray _src) {
173 // get data matrices
174 Mat W = _W.getMat();
175 Mat mean = _mean.getMat();
176 Mat src = _src.getMat();
177 // get number of samples and dimension
178 int n = src.rows;
179 int d = src.cols;
180 // make sure the data has the correct shape
181 if(W.rows != d) {
182 String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
183 CV_Error(Error::StsBadArg, error_message);
184 }
185 // make sure mean is correct if not empty
186 if(!mean.empty() && (mean.total() != (size_t) d)) {
187 String error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %zu.", d, mean.total());
188 CV_Error(Error::StsBadArg, error_message);
189 }
190 // create temporary matrices
191 Mat X, Y;
192 // make sure you operate on correct type
193 src.convertTo(X, W.type());
194 // safe to do, because of above assertion
195 if(!mean.empty()) {
196 for(int i=0; i<n; i++) {
197 Mat r_i = X.row(i);
198 subtract(r_i, mean.reshape(1,1), r_i);
199 }
200 }
201 // finally calculate projection as Y = (X-mean)*W
202 gemm(X, W, 1.0, Mat(), 0.0, Y);
203 return Y;
204 }
205
206 //------------------------------------------------------------------------------
207 // cv::subspaceReconstruct
208 //------------------------------------------------------------------------------
subspaceReconstruct(InputArray _W,InputArray _mean,InputArray _src)209 Mat LDA::subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
210 {
211 // get data matrices
212 Mat W = _W.getMat();
213 Mat mean = _mean.getMat();
214 Mat src = _src.getMat();
215 // get number of samples and dimension
216 int n = src.rows;
217 int d = src.cols;
218 // make sure the data has the correct shape
219 if(W.cols != d) {
220 String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
221 CV_Error(Error::StsBadArg, error_message);
222 }
223 // make sure mean is correct if not empty
224 if(!mean.empty() && (mean.total() != (size_t) W.rows)) {
225 String error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %zu.", W.cols, mean.total());
226 CV_Error(Error::StsBadArg, error_message);
227 }
228 // initialize temporary matrices
229 Mat X, Y;
230 // copy data & make sure we are using the correct type
231 src.convertTo(Y, W.type());
232 // calculate the reconstruction
233 gemm(Y, W, 1.0, Mat(), 0.0, X, GEMM_2_T);
234 // safe to do because of above assertion
235 if(!mean.empty()) {
236 for(int i=0; i<n; i++) {
237 Mat r_i = X.row(i);
238 add(r_i, mean.reshape(1,1), r_i);
239 }
240 }
241 return X;
242 }
243
244
// Eigenvalue decomposition of a general (possibly non-symmetric) real
// matrix. Port of the JAMA EigenvalueSolver (public domain,
// MathWorks/NIST): the matrix is reduced to Hessenberg form (orthes),
// then to real Schur form (hqr2), from which eigenvalues and
// eigenvectors are recovered. Eigenvalues may be complex; only the real
// parts are exported to _eigenvalues (imaginary parts live in e[]).
class EigenvalueDecomposition {
private:

    // Holds the data dimension.
    int n;

    // Pointer to internal memory: d/e hold real/imaginary eigenvalue
    // parts, ort is scratch for the Householder reduction, V accumulates
    // eigenvectors and H is the working (Hessenberg/Schur) matrix.
    double *d, *e, *ort;
    double **V, **H;

    // Holds the computed eigenvalues.
    Mat _eigenvalues;

    // Holds the computed eigenvectors.
    Mat _eigenvectors;

    // Allocates an uninitialized 1D array of m elements (caller owns it).
    template<typename _Tp>
    _Tp *alloc_1d(int m) {
        return new _Tp[m];
    }

    // Allocates a 1D array of m elements, each initialized to val.
    template<typename _Tp>
    _Tp *alloc_1d(int m, _Tp val) {
        _Tp *arr = alloc_1d<_Tp> (m);
        for (int i = 0; i < m; i++)
            arr[i] = val;
        return arr;
    }

    // Allocates an uninitialized m x _n 2D array (row-pointer layout).
    template<typename _Tp>
    _Tp **alloc_2d(int m, int _n) {
        _Tp **arr = new _Tp*[m];
        for (int i = 0; i < m; i++)
            arr[i] = new _Tp[_n];
        return arr;
    }

    // Allocates an m x _n 2D array with every element set to val.
    template<typename _Tp>
    _Tp **alloc_2d(int m, int _n, _Tp val) {
        _Tp **arr = alloc_2d<_Tp> (m, _n);
        for (int i = 0; i < m; i++) {
            for (int j = 0; j < _n; j++) {
                arr[i][j] = val;
            }
        }
        return arr;
    }

    // Complex scalar division (xr + i*xi) / (yr + i*yi), results written
    // to cdivr/cdivi. Scales by the larger of |yr|, |yi| to avoid overflow.
    static void complex_div(double xr, double xi, double yr, double yi, double& cdivr, double& cdivi) {
        double r, dv;
        CV_DbgAssert(std::abs(yr) + std::abs(yi) > 0.0);
        if (std::abs(yr) > std::abs(yi)) {
            r = yi / yr;
            dv = yr + r * yi;
            cdivr = (xr + r * xi) / dv;
            cdivi = (xi - r * xr) / dv;
        } else {
            r = yr / yi;
            dv = yi + r * yr;
            cdivr = (r * xr + xi) / dv;
            cdivi = (r * xi - xr) / dv;
        }
    }

    // Nonsymmetric reduction from Hessenberg to real Schur form.
    // Operates in-place on H while accumulating transformations into V;
    // on return d holds real parts and e imaginary parts of the
    // eigenvalues, and V the (back-transformed) eigenvectors.
    void hqr2() {

        // This is derived from the Algol procedure hqr2,
        // by Martin and Wilkinson, Handbook for Auto. Comp.,
        // Vol.ii-Linear Algebra, and the corresponding
        // Fortran subroutine in EISPACK.

        // Initialize
        const int max_iters_count = 1000 * this->n;

        const int nn = this->n; CV_Assert(nn > 0);
        int n1 = nn - 1;
        const int low = 0;
        const int high = nn - 1;
        const double eps = std::numeric_limits<double>::epsilon();
        double exshift = 0.0;

        // Store roots isolated by balanc and compute matrix norm

        double norm = 0.0;
        for (int i = 0; i < nn; i++) {
#if 0 // 'if' condition is always false (low == 0, high == nn-1 here)
            if (i < low || i > high) {
                d[i] = H[i][i];
                e[i] = 0.0;
            }
#endif
            for (int j = std::max(i - 1, 0); j < nn; j++) {
                norm += std::abs(H[i][j]);
            }
        }

        // Outer loop over eigenvalue index
        int iter = 0;
        while (n1 >= low) {

            // Look for single small sub-diagonal element
            int l = n1;
            while (l > low) {
                if (norm < FLT_EPSILON) {
                    break;
                }
                double s = std::abs(H[l - 1][l - 1]) + std::abs(H[l][l]);
                if (s == 0.0) {
                    s = norm;
                }
                if (std::abs(H[l][l - 1]) < eps * s) {
                    break;
                }
                l--;
            }

            // Check for convergence
            if (l == n1) {
                // One root found
                H[n1][n1] = H[n1][n1] + exshift;
                d[n1] = H[n1][n1];
                e[n1] = 0.0;
                n1--;
                iter = 0;

            } else if (l == n1 - 1) {
                // Two roots found
                double w = H[n1][n1 - 1] * H[n1 - 1][n1];
                double p = (H[n1 - 1][n1 - 1] - H[n1][n1]) * 0.5;
                double q = p * p + w;
                double z = std::sqrt(std::abs(q));
                H[n1][n1] = H[n1][n1] + exshift;
                H[n1 - 1][n1 - 1] = H[n1 - 1][n1 - 1] + exshift;
                double x = H[n1][n1];

                if (q >= 0) {
                    // Real pair
                    if (p >= 0) {
                        z = p + z;
                    } else {
                        z = p - z;
                    }
                    d[n1 - 1] = x + z;
                    d[n1] = d[n1 - 1];
                    if (z != 0.0) {
                        d[n1] = x - w / z;
                    }
                    e[n1 - 1] = 0.0;
                    e[n1] = 0.0;
                    x = H[n1][n1 - 1];
                    double s = std::abs(x) + std::abs(z);
                    p = x / s;
                    q = z / s;
                    double r = std::sqrt(p * p + q * q);
                    p = p / r;
                    q = q / r;

                    // Row modification

                    for (int j = n1 - 1; j < nn; j++) {
                        z = H[n1 - 1][j];
                        H[n1 - 1][j] = q * z + p * H[n1][j];
                        H[n1][j] = q * H[n1][j] - p * z;
                    }

                    // Column modification

                    for (int i = 0; i <= n1; i++) {
                        z = H[i][n1 - 1];
                        H[i][n1 - 1] = q * z + p * H[i][n1];
                        H[i][n1] = q * H[i][n1] - p * z;
                    }

                    // Accumulate transformations

                    for (int i = low; i <= high; i++) {
                        z = V[i][n1 - 1];
                        V[i][n1 - 1] = q * z + p * V[i][n1];
                        V[i][n1] = q * V[i][n1] - p * z;
                    }

                } else {
                    // Complex pair
                    d[n1 - 1] = x + p;
                    d[n1] = x + p;
                    e[n1 - 1] = z;
                    e[n1] = -z;
                }
                n1 = n1 - 2;
                iter = 0;

            } else {
                // No convergence yet

                // Form shift
                double x = H[n1][n1];
                double y = 0.0;
                double w = 0.0;
                if (l < n1) {
                    y = H[n1 - 1][n1 - 1];
                    w = H[n1][n1 - 1] * H[n1 - 1][n1];
                }

                // Wilkinson's original ad hoc shift
                if (iter == 10) {
                    exshift += x;
                    for (int i = low; i <= n1; i++) {
                        H[i][i] -= x;
                    }
                    double s = std::abs(H[n1][n1 - 1]) + std::abs(H[n1 - 1][n1 - 2]);
                    x = y = 0.75 * s;
                    w = -0.4375 * s * s;
                }

                // MATLAB's new ad hoc shift

                if (iter == 30) {
                    double s = (y - x) * 0.5;
                    s = s * s + w;
                    if (s > 0) {
                        s = std::sqrt(s);
                        if (y < x) {
                            s = -s;
                        }
                        s = x - w / ((y - x) * 0.5 + s);
                        for (int i = low; i <= n1; i++) {
                            H[i][i] -= s;
                        }
                        exshift += s;
                        x = y = w = 0.964;
                    }
                }

                iter = iter + 1;
                if (iter > max_iters_count)
                    CV_Error(Error::StsNoConv, "Algorithm doesn't converge (complex eigen values?)");

                // NaN sentinels: these must be overwritten by the search
                // loop below before use
                double p = std::numeric_limits<double>::quiet_NaN();
                double q = std::numeric_limits<double>::quiet_NaN();
                double r = std::numeric_limits<double>::quiet_NaN();

                // Look for two consecutive small sub-diagonal elements
                int m = n1 - 2;
                while (m >= l) {
                    double z = H[m][m];
                    r = x - z;
                    double s = y - z;
                    p = (r * s - w) / H[m + 1][m] + H[m][m + 1];
                    q = H[m + 1][m + 1] - z - r - s;
                    r = H[m + 2][m + 1];
                    s = std::abs(p) + std::abs(q) + std::abs(r);
                    p = p / s;
                    q = q / s;
                    r = r / s;
                    if (m == l) {
                        break;
                    }
                    if (std::abs(H[m][m - 1]) * (std::abs(q) + std::abs(r)) < eps * (std::abs(p)
                            * (std::abs(H[m - 1][m - 1]) + std::abs(z) + std::abs(
                            H[m + 1][m + 1])))) {
                        break;
                    }
                    m--;
                }

                for (int i = m + 2; i <= n1; i++) {
                    H[i][i - 2] = 0.0;
                    if (i > m + 2) {
                        H[i][i - 3] = 0.0;
                    }
                }

                // Double QR step involving rows l:n and columns m:n

                for (int k = m; k < n1; k++) {

                    bool notlast = (k != n1 - 1);
                    if (k != m) {
                        p = H[k][k - 1];
                        q = H[k + 1][k - 1];
                        r = (notlast ? H[k + 2][k - 1] : 0.0);
                        x = std::abs(p) + std::abs(q) + std::abs(r);
                        if (x != 0.0) {
                            p = p / x;
                            q = q / x;
                            r = r / x;
                        }
                    }
                    if (x == 0.0) {
                        break;
                    }
                    double s = std::sqrt(p * p + q * q + r * r);
                    if (p < 0) {
                        s = -s;
                    }
                    if (s != 0) {
                        if (k != m) {
                            H[k][k - 1] = -s * x;
                        } else if (l != m) {
                            H[k][k - 1] = -H[k][k - 1];
                        }
                        p = p + s;
                        x = p / s;
                        y = q / s;
                        double z = r / s;
                        q = q / p;
                        r = r / p;

                        // Row modification

                        for (int j = k; j < nn; j++) {
                            p = H[k][j] + q * H[k + 1][j];
                            if (notlast) {
                                p = p + r * H[k + 2][j];
                                H[k + 2][j] = H[k + 2][j] - p * z;
                            }
                            H[k][j] -= p * x;
                            H[k + 1][j] -= p * y;
                        }

                        // Column modification

                        for (int i = 0; i <= std::min(n1, k + 3); i++) {
                            p = x * H[i][k] + y * H[i][k + 1];
                            if (notlast) {
                                p = p + z * H[i][k + 2];
                                H[i][k + 2] = H[i][k + 2] - p * r;
                            }
                            H[i][k] -= p;
                            H[i][k + 1] -= p * q;
                        }

                        // Accumulate transformations

                        for (int i = low; i <= high; i++) {
                            p = x * V[i][k] + y * V[i][k + 1];
                            if (notlast) {
                                p = p + z * V[i][k + 2];
                                V[i][k + 2] = V[i][k + 2] - p * r;
                            }
                            V[i][k] = V[i][k] - p;
                            V[i][k + 1] = V[i][k + 1] - p * q;
                        }
                    } // (s != 0)
                } // k loop
            } // check convergence
        } // while (n1 >= low)

        // Backsubstitute to find vectors of upper triangular form

        if (norm < FLT_EPSILON) {
            return;
        }

        for (n1 = nn - 1; n1 >= 0; n1--) {
            double p = d[n1];
            double q = e[n1];

            if (q == 0) {
                // Real vector
                double z = std::numeric_limits<double>::quiet_NaN();
                double s = std::numeric_limits<double>::quiet_NaN();

                int l = n1;
                H[n1][n1] = 1.0;
                for (int i = n1 - 1; i >= 0; i--) {
                    double w = H[i][i] - p;
                    double r = 0.0;
                    for (int j = l; j <= n1; j++) {
                        r = r + H[i][j] * H[j][n1];
                    }
                    if (e[i] < 0.0) {
                        z = w;
                        s = r;
                    } else {
                        l = i;
                        if (e[i] == 0.0) {
                            if (w != 0.0) {
                                H[i][n1] = -r / w;
                            } else {
                                // avoid division by zero with a tiny
                                // norm-scaled denominator
                                H[i][n1] = -r / (eps * norm);
                            }
                        } else {
                            // Solve real equations
                            CV_DbgAssert(!cvIsNaN(z));
                            double x = H[i][i + 1];
                            double y = H[i + 1][i];
                            q = (d[i] - p) * (d[i] - p) + e[i] * e[i];
                            double t = (x * s - z * r) / q;
                            H[i][n1] = t;
                            if (std::abs(x) > std::abs(z)) {
                                H[i + 1][n1] = (-r - w * t) / x;
                            } else {
                                CV_DbgAssert(z != 0.0);
                                H[i + 1][n1] = (-s - y * t) / z;
                            }
                        }

                        // Overflow control
                        double t = std::abs(H[i][n1]);
                        if ((eps * t) * t > 1) {
                            double inv_t = 1.0 / t;
                            for (int j = i; j <= n1; j++) {
                                H[j][n1] *= inv_t;
                            }
                        }
                    }
                }
            } else if (q < 0) {
                // Complex vector
                double z = std::numeric_limits<double>::quiet_NaN();
                double r = std::numeric_limits<double>::quiet_NaN();
                double s = std::numeric_limits<double>::quiet_NaN();

                int l = n1 - 1;

                // Last vector component imaginary so matrix is triangular

                if (std::abs(H[n1][n1 - 1]) > std::abs(H[n1 - 1][n1])) {
                    H[n1 - 1][n1 - 1] = q / H[n1][n1 - 1];
                    H[n1 - 1][n1] = -(H[n1][n1] - p) / H[n1][n1 - 1];
                } else {
                    complex_div(
                        0.0, -H[n1 - 1][n1],
                        H[n1 - 1][n1 - 1] - p, q,
                        H[n1 - 1][n1 - 1], H[n1 - 1][n1]
                    );
                }
                H[n1][n1 - 1] = 0.0;
                H[n1][n1] = 1.0;
                for (int i = n1 - 2; i >= 0; i--) {
                    double ra, sa, vr, vi;
                    ra = 0.0;
                    sa = 0.0;
                    for (int j = l; j <= n1; j++) {
                        ra = ra + H[i][j] * H[j][n1 - 1];
                        sa = sa + H[i][j] * H[j][n1];
                    }
                    double w = H[i][i] - p;

                    if (e[i] < 0.0) {
                        z = w;
                        r = ra;
                        s = sa;
                    } else {
                        l = i;
                        if (e[i] == 0) {
                            complex_div(
                                -ra, -sa,
                                w, q,
                                H[i][n1 - 1], H[i][n1]
                            );
                        } else {
                            // Solve complex equations

                            double x = H[i][i + 1];
                            double y = H[i + 1][i];
                            vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
                            vi = (d[i] - p) * 2.0 * q;
                            if (vr == 0.0 && vi == 0.0) {
                                vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x)
                                        + std::abs(y) + std::abs(z));
                            }
                            complex_div(
                                x * r - z * ra + q * sa, x * s - z * sa - q * ra,
                                vr, vi,
                                H[i][n1 - 1], H[i][n1]);
                            if (std::abs(x) > (std::abs(z) + std::abs(q))) {
                                H[i + 1][n1 - 1] = (-ra - w * H[i][n1 - 1] + q
                                        * H[i][n1]) / x;
                                H[i + 1][n1] = (-sa - w * H[i][n1] - q * H[i][n1
                                        - 1]) / x;
                            } else {
                                complex_div(
                                    -r - y * H[i][n1 - 1], -s - y * H[i][n1],
                                    z, q,
                                    H[i + 1][n1 - 1], H[i + 1][n1]);
                            }
                        }

                        // Overflow control

                        double t = std::max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1]));
                        if ((eps * t) * t > 1) {
                            for (int j = i; j <= n1; j++) {
                                H[j][n1 - 1] = H[j][n1 - 1] / t;
                                H[j][n1] = H[j][n1] / t;
                            }
                        }
                    }
                }
            }
        }

        // Vectors of isolated roots

#if 0 // 'if' condition is always false (low == 0, high == nn-1 here)
        for (int i = 0; i < nn; i++) {
            if (i < low || i > high) {
                for (int j = i; j < nn; j++) {
                    V[i][j] = H[i][j];
                }
            }
        }
#endif

        // Back transformation to get eigenvectors of original matrix

        for (int j = nn - 1; j >= low; j--) {
            for (int i = low; i <= high; i++) {
                double z = 0.0;
                for (int k = low; k <= std::min(j, high); k++) {
                    z += V[i][k] * H[k][j];
                }
                V[i][j] = z;
            }
        }
    }

    // Nonsymmetric reduction to Hessenberg form (in-place on H,
    // orthogonal transformations accumulated into V).
    void orthes() {
        // This is derived from the Algol procedures orthes and ortran,
        // by Martin and Wilkinson, Handbook for Auto. Comp.,
        // Vol.ii-Linear Algebra, and the corresponding
        // Fortran subroutines in EISPACK.
        int low = 0;
        int high = n - 1;

        for (int m = low + 1; m < high; m++) {

            // Scale column.

            double scale = 0.0;
            for (int i = m; i <= high; i++) {
                scale = scale + std::abs(H[i][m - 1]);
            }
            if (scale != 0.0) {

                // Compute Householder transformation.

                double h = 0.0;
                for (int i = high; i >= m; i--) {
                    ort[i] = H[i][m - 1] / scale;
                    h += ort[i] * ort[i];
                }
                double g = std::sqrt(h);
                if (ort[m] > 0) {
                    g = -g;
                }
                h = h - ort[m] * g;
                ort[m] = ort[m] - g;

                // Apply Householder similarity transformation
                // H = (I-u*u'/h)*H*(I-u*u')/h)

                for (int j = m; j < n; j++) {
                    double f = 0.0;
                    for (int i = high; i >= m; i--) {
                        f += ort[i] * H[i][j];
                    }
                    f = f / h;
                    for (int i = m; i <= high; i++) {
                        H[i][j] -= f * ort[i];
                    }
                }

                for (int i = 0; i <= high; i++) {
                    double f = 0.0;
                    for (int j = high; j >= m; j--) {
                        f += ort[j] * H[i][j];
                    }
                    f = f / h;
                    for (int j = m; j <= high; j++) {
                        H[i][j] -= f * ort[j];
                    }
                }
                ort[m] = scale * ort[m];
                H[m][m - 1] = scale * g;
            }
        }

        // Accumulate transformations (Algol's ortran).

        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                V[i][j] = (i == j ? 1.0 : 0.0);
            }
        }

        for (int m = high - 1; m > low; m--) {
            if (H[m][m - 1] != 0.0) {
                for (int i = m + 1; i <= high; i++) {
                    ort[i] = H[i][m - 1];
                }
                for (int j = m; j <= high; j++) {
                    double g = 0.0;
                    for (int i = m; i <= high; i++) {
                        g += ort[i] * V[i][j];
                    }
                    // Double division avoids possible underflow
                    g = (g / ort[m]) / H[m][m - 1];
                    for (int i = m; i <= high; i++) {
                        V[i][j] += g * ort[i];
                    }
                }
            }
        }
    }

    // Releases all internal working memory. Safe to call repeatedly:
    // each pointer is nulled after deletion.
    void release() {
        // releases the working data
        delete[] d; d = NULL;
        delete[] e; e = NULL;
        delete[] ort; ort = NULL;
        for (int i = 0; i < n; i++) {
            if (H) delete[] H[i];
            if (V) delete[] V[i];
        }
        delete[] H; H = NULL;
        delete[] V; V = NULL;
    }

    // Computes the Eigenvalue Decomposition for a matrix given in H.
    // Precondition: H has been allocated and filled, and n is set.
    void compute() {
        // Allocate memory for the working data.
        V = alloc_2d<double> (n, n, 0.0);
        d = alloc_1d<double> (n);
        e = alloc_1d<double> (n);
        ort = alloc_1d<double> (n);
        {
            // Reduce to Hessenberg form.
            orthes();
            // Reduce Hessenberg to real Schur form.
            hqr2();
            // Copy eigenvalues to OpenCV Matrix.
            _eigenvalues.create(1, n, CV_64FC1);
            for (int i = 0; i < n; i++) {
                _eigenvalues.at<double> (0, i) = d[i];
            }
            // Copy eigenvectors to OpenCV Matrix.
            _eigenvectors.create(n, n, CV_64FC1);
            for (int i = 0; i < n; i++)
                for (int j = 0; j < n; j++)
                    _eigenvectors.at<double> (i, j) = V[i][j];
            // Deallocate the memory by releasing all internal working data.
            release();
        }
    }

public:
    // Initializes & computes the Eigenvalue Decomposition for a general matrix
    // given in src. This function is a port of the EigenvalueSolver in JAMA,
    // which has been released to public domain by The MathWorks and the
    // National Institute of Standards and Technology (NIST).
    EigenvalueDecomposition() :
        n(0),
        d(NULL), e(NULL), ort(NULL),
        V(NULL), H(NULL)
    {
        // nothing
    }

    // This function computes the Eigenvalue Decomposition for a general matrix
    // given in src. This function is a port of the EigenvalueSolver in JAMA,
    // which has been released to public domain by The MathWorks and the
    // National Institute of Standards and Technology (NIST).
    // With fallbackSymmetric=true, a symmetric input is delegated to
    // cv::eigen instead of the JAMA solver.
    void compute(InputArray src, bool fallbackSymmetric = true)
    {
        CV_INSTRUMENT_REGION();

        if(fallbackSymmetric && isSymmetric(src)) {
            // Fall back to OpenCV for a symmetric matrix!
            cv::eigen(src, _eigenvalues, _eigenvectors);
        } else {
            Mat tmp;
            // Convert the given input matrix to double. Is there any way to
            // prevent allocating the temporary memory? Only used for copying
            // into working memory and deallocated after.
            src.getMat().convertTo(tmp, CV_64FC1);
            // Get dimension of the matrix.
            this->n = tmp.cols;
            // Allocate the matrix data to work on.
            this->H = alloc_2d<double> (n, n);
            // Now safely copy the data.
            for (int i = 0; i < tmp.rows; i++) {
                for (int j = 0; j < tmp.cols; j++) {
                    this->H[i][j] = tmp.at<double>(i, j);
                }
            }
            // Deallocates the temporary matrix before computing.
            tmp.release();
            // Performs the eigenvalue decomposition of H.
            compute();
        }
    }

    // Any remaining working memory (e.g. after a non-converging run that
    // threw) is freed here.
    ~EigenvalueDecomposition() { release(); }

    // Returns the eigenvalues of the Eigenvalue Decomposition.
    Mat eigenvalues() const { return _eigenvalues; }
    // Returns the eigenvectors of the Eigenvalue Decomposition.
    Mat eigenvectors() const { return _eigenvectors; }
};
955
eigenNonSymmetric(InputArray _src,OutputArray _evals,OutputArray _evects)956 void eigenNonSymmetric(InputArray _src, OutputArray _evals, OutputArray _evects)
957 {
958 CV_INSTRUMENT_REGION();
959
960 Mat src = _src.getMat();
961 int type = src.type();
962 size_t n = (size_t)src.rows;
963
964 CV_Assert(src.rows == src.cols);
965 CV_Assert(type == CV_32F || type == CV_64F);
966
967 Mat src64f;
968 if (type == CV_32F)
969 src.convertTo(src64f, CV_32FC1);
970 else
971 src64f = src;
972
973 EigenvalueDecomposition eigensystem;
974 eigensystem.compute(src64f, false);
975
976 // EigenvalueDecomposition returns transposed and non-sorted eigenvalues
977 std::vector<double> eigenvalues64f;
978 eigensystem.eigenvalues().copyTo(eigenvalues64f);
979 CV_Assert(eigenvalues64f.size() == n);
980
981 std::vector<int> sort_indexes(n);
982 cv::sortIdx(eigenvalues64f, sort_indexes, SORT_EVERY_ROW | SORT_DESCENDING);
983
984 std::vector<double> sorted_eigenvalues64f(n);
985 for (size_t i = 0; i < n; i++) sorted_eigenvalues64f[i] = eigenvalues64f[sort_indexes[i]];
986
987 Mat(sorted_eigenvalues64f).convertTo(_evals, type);
988
989 if( _evects.needed() )
990 {
991 Mat eigenvectors64f = eigensystem.eigenvectors().t(); // transpose
992 CV_Assert((size_t)eigenvectors64f.rows == n);
993 CV_Assert((size_t)eigenvectors64f.cols == n);
994 Mat_<double> sorted_eigenvectors64f((int)n, (int)n, CV_64FC1);
995 for (size_t i = 0; i < n; i++)
996 {
997 double* pDst = sorted_eigenvectors64f.ptr<double>((int)i);
998 double* pSrc = eigenvectors64f.ptr<double>(sort_indexes[(int)i]);
999 CV_Assert(pSrc != NULL);
1000 memcpy(pDst, pSrc, n * sizeof(double));
1001 }
1002 sorted_eigenvectors64f.convertTo(_evects, type);
1003 }
1004 }
1005
1006
1007 //------------------------------------------------------------------------------
1008 // Linear Discriminant Analysis implementation
1009 //------------------------------------------------------------------------------
1010
// Constructs an empty LDA model; compute() must be called before projecting.
LDA::LDA(int num_components) : _num_components(num_components) { }
1012
// Constructs the LDA model and immediately computes the discriminants
// for the given samples and labels.
LDA::LDA(InputArrayOfArrays src, InputArray labels, int num_components) : _num_components(num_components)
{
    this->compute(src, labels); //! compute eigenvectors and eigenvalues
}
1017
// Destructor: Mat members release themselves.
LDA::~LDA() {}
1019
save(const String & filename) const1020 void LDA::save(const String& filename) const
1021 {
1022 FileStorage fs(filename, FileStorage::WRITE);
1023 if (!fs.isOpened()) {
1024 CV_Error(Error::StsError, "File can't be opened for writing!");
1025 }
1026 this->save(fs);
1027 fs.release();
1028 }
1029
1030 // Deserializes this object from a given filename.
load(const String & filename)1031 void LDA::load(const String& filename) {
1032 FileStorage fs(filename, FileStorage::READ);
1033 if (!fs.isOpened())
1034 CV_Error(Error::StsError, "File can't be opened for reading!");
1035 this->load(fs);
1036 fs.release();
1037 }
1038
1039 // Serializes this object to a given FileStorage.
save(FileStorage & fs) const1040 void LDA::save(FileStorage& fs) const {
1041 // write matrices
1042 fs << "num_components" << _num_components;
1043 fs << "eigenvalues" << _eigenvalues;
1044 fs << "eigenvectors" << _eigenvectors;
1045 }
1046
// Deserializes this object from a given FileStorage.
void LDA::load(const FileStorage& fs) {
    // read the matrices written by save(FileStorage&)
    fs["num_components"] >> _num_components;
    fs["eigenvalues"] >> _eigenvalues;
    fs["eigenvectors"] >> _eigenvectors;
}
1054
lda(InputArrayOfArrays _src,InputArray _lbls)1055 void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
1056 // get data
1057 Mat src = _src.getMat();
1058 std::vector<int> labels;
1059 // safely copy the labels
1060 {
1061 Mat tmp = _lbls.getMat();
1062 for(unsigned int i = 0; i < tmp.total(); i++) {
1063 labels.push_back(tmp.at<int>(i));
1064 }
1065 }
1066 // turn into row sampled matrix
1067 Mat data;
1068 // ensure working matrix is double precision
1069 src.convertTo(data, CV_64FC1);
1070 // maps the labels, so they're ascending: [0,1,...,C]
1071 std::vector<int> mapped_labels(labels.size());
1072 std::vector<int> num2label = remove_dups(labels);
1073 std::map<int, int> label2num;
1074 for (int i = 0; i < (int)num2label.size(); i++)
1075 label2num[num2label[i]] = i;
1076 for (size_t i = 0; i < labels.size(); i++)
1077 mapped_labels[i] = label2num[labels[i]];
1078 // get sample size, dimension
1079 int N = data.rows;
1080 int D = data.cols;
1081 // number of unique labels
1082 int C = (int)num2label.size();
1083 // we can't do a LDA on one class, what do you
1084 // want to separate from each other then?
1085 if(C == 1) {
1086 String error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!";
1087 CV_Error(Error::StsBadArg, error_message);
1088 }
1089 // throw error if less labels, than samples
1090 if (labels.size() != static_cast<size_t>(N)) {
1091 String error_message = format("The number of samples must equal the number of labels. Given %zu labels, %d samples. ", labels.size(), N);
1092 CV_Error(Error::StsBadArg, error_message);
1093 }
1094 // warn if within-classes scatter matrix becomes singular
1095 if (N < D) {
1096 std::cout << "Warning: Less observations than feature dimension given!"
1097 << "Computation will probably fail."
1098 << std::endl;
1099 }
1100 // clip number of components to be a valid number
1101 if ((_num_components <= 0) || (_num_components >= C)) {
1102 _num_components = (C - 1);
1103 }
1104 // holds the mean over all classes
1105 Mat meanTotal = Mat::zeros(1, D, data.type());
1106 // holds the mean for each class
1107 std::vector<Mat> meanClass(C);
1108 std::vector<int> numClass(C);
1109 // initialize
1110 for (int i = 0; i < C; i++) {
1111 numClass[i] = 0;
1112 meanClass[i] = Mat::zeros(1, D, data.type()); //! Dx1 image vector
1113 }
1114 // calculate sums
1115 for (int i = 0; i < N; i++) {
1116 Mat instance = data.row(i);
1117 int classIdx = mapped_labels[i];
1118 add(meanTotal, instance, meanTotal);
1119 add(meanClass[classIdx], instance, meanClass[classIdx]);
1120 numClass[classIdx]++;
1121 }
1122 // calculate total mean
1123 meanTotal.convertTo(meanTotal, meanTotal.type(), 1.0 / static_cast<double> (N));
1124 // calculate class means
1125 for (int i = 0; i < C; i++) {
1126 meanClass[i].convertTo(meanClass[i], meanClass[i].type(), 1.0 / static_cast<double> (numClass[i]));
1127 }
1128 // subtract class means
1129 for (int i = 0; i < N; i++) {
1130 int classIdx = mapped_labels[i];
1131 Mat instance = data.row(i);
1132 subtract(instance, meanClass[classIdx], instance);
1133 }
1134 // calculate within-classes scatter
1135 Mat Sw = Mat::zeros(D, D, data.type());
1136 mulTransposed(data, Sw, true);
1137 // calculate between-classes scatter
1138 Mat Sb = Mat::zeros(D, D, data.type());
1139 for (int i = 0; i < C; i++) {
1140 Mat tmp;
1141 subtract(meanClass[i], meanTotal, tmp);
1142 mulTransposed(tmp, tmp, true);
1143 add(Sb, tmp, Sb);
1144 }
1145 // invert Sw
1146 Mat Swi = Sw.inv();
1147 // M = inv(Sw)*Sb
1148 Mat M;
1149 gemm(Swi, Sb, 1.0, Mat(), 0.0, M);
1150 EigenvalueDecomposition es;
1151 es.compute(M);
1152 _eigenvalues = es.eigenvalues();
1153 _eigenvectors = es.eigenvectors();
1154 // reshape eigenvalues, so they are stored by column
1155 _eigenvalues = _eigenvalues.reshape(1, 1);
1156 // get sorted indices descending by their eigenvalue
1157 std::vector<int> sorted_indices = argsort(_eigenvalues, false);
1158 // now sort eigenvalues and eigenvectors accordingly
1159 _eigenvalues = sortMatrixColumnsByIndices(_eigenvalues, sorted_indices);
1160 _eigenvectors = sortMatrixColumnsByIndices(_eigenvectors, sorted_indices);
1161 // and now take only the num_components and we're out!
1162 _eigenvalues = Mat(_eigenvalues, Range::all(), Range(0, _num_components));
1163 _eigenvectors = Mat(_eigenvectors, Range::all(), Range(0, _num_components));
1164 }
1165
compute(InputArrayOfArrays _src,InputArray _lbls)1166 void LDA::compute(InputArrayOfArrays _src, InputArray _lbls) {
1167 switch(_src.kind()) {
1168 case _InputArray::STD_VECTOR_MAT:
1169 case _InputArray::STD_ARRAY_MAT:
1170 lda(asRowMatrix(_src, CV_64FC1), _lbls);
1171 break;
1172 case _InputArray::MAT:
1173 lda(_src.getMat(), _lbls);
1174 break;
1175 default:
1176 String error_message= format("InputArray Datatype %d is not supported.", _src.kind());
1177 CV_Error(Error::StsBadArg, error_message);
1178 break;
1179 }
1180 }
1181
// Projects one or more row aligned samples into the LDA subspace.
Mat LDA::project(InputArray src) {
    // empty Mat disables mean subtraction inside subspaceProject
    return subspaceProject(_eigenvectors, Mat(), src);
}
1186
// Reconstructs projections from the LDA subspace from one or more row aligned samples.
Mat LDA::reconstruct(InputArray src) {
    // empty Mat disables mean addition inside subspaceReconstruct
    return subspaceReconstruct(_eigenvectors, Mat(), src);
}
1191
1192 }
1193