// This is core/vnl/algo/vnl_svd.hxx
#ifndef vnl_svd_hxx_
#define vnl_svd_hxx_
//:
// \file
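//
// Implementation of vnl_svd<T>: the singular value decomposition M = U * W * V'.
// A minimal usage sketch (illustrative only; assumes a filled 3x2 double matrix A,
// a length-3 right-hand side b, and the default constructor/rank arguments
// declared in vnl_svd.h):
// \code
//   #include <vnl/algo/vnl_svd.h>
//   vnl_matrix<double> A(3, 2);
//   vnl_vector<double> b(3);
//   // ... fill A and b ...
//   vnl_svd<double> svd(A);
//   vnl_matrix<double> A_pinv = svd.pinverse(); // Moore-Penrose pseudo-inverse
//   vnl_vector<double> x = svd.solve(b);        // least-squares solution of A x = b
// \endcode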

#include <cstdlib>
#include <complex>
#include <iostream>
#include <algorithm>
#include "vnl_svd.h"

#include <cassert>
#ifdef _MSC_VER
#  include <vcl_msvc_warnings.h>
#endif

#include <vnl/vnl_math.h>
#include <vnl/vnl_fortran_copy.h>
#include <vnl/algo/vnl_netlib.h> // dsvdc_()

// use C++ overloading to call the right linpack routine from the template code:
#define macro(p, T) \
inline void vnl_linpack_svdc(vnl_netlib_svd_proto(T)) \
{ v3p_netlib_##p##svdc_(vnl_netlib_svd_params); }
macro(s, float);
macro(d, double);
macro(c, std::complex<float>);
macro(z, std::complex<double>);
#undef macro
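// For example, for T = double the macro above expands (roughly) to
//   inline void vnl_linpack_svdc(vnl_netlib_svd_proto(double))
//   { v3p_netlib_dsvdc_(vnl_netlib_svd_params); }
// so ordinary overload resolution on T picks the correct single/double,
// real/complex LINPACK routine at each call site.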

//--------------------------------------------------------------------------------

static bool vnl_svd_test_heavily = false;
#include <vnl/vnl_matlab_print.h>

template <class T>
vnl_svd<T>::vnl_svd(vnl_matrix<T> const& M, double zero_out_tol):
  m_(M.rows()),
  n_(M.columns()),
  U_(m_, n_),
  W_(n_),
  Winverse_(n_),
  V_(n_, n_)
{
  assert(m_ > 0);
  assert(n_ > 0);

  {
    long n = M.rows();
    long p = M.columns();
    long mm = std::min(n+1L,p);

    // Copy source matrix into fortran storage
    // SVD is slow, don't worry about the cost of this transpose.
    vnl_fortran_copy<T> X(M);

    // Make workspace vectors.
    vnl_vector<T> work(n, T(0));
    vnl_vector<T> uspace(n*p, T(0));
    vnl_vector<T> vspace(p*p, T(0));
    vnl_vector<T> wspace(mm, T(0)); // complex fortran routine actually _wants_ complex W!
    vnl_vector<T> espace(p, T(0));

    // Call Linpack SVD
    long info = 0;
    constexpr long job = 21; // min(n,p) svs in U, n svs in V (i.e. economy size)
    vnl_linpack_svdc((T*)X, &n, &n, &p,
                     wspace.data_block(),
                     espace.data_block(),
                     uspace.data_block(), &n,
                     vspace.data_block(), &p,
                     work.data_block(),
                     &job, &info);

    // Error return?
    if (info != 0)
    {
      // If info is non-zero, it contains the number of singular values
      // for which the SVD algorithm failed to converge. The condition is
      // not bogus: even if the returned singular values are sensible,
      // the singular vectors can be utterly wrong.

      // It is possible the failure was due to NaNs or infinities in the
      // matrix. Check for that now.
      M.assert_finite();

      // If we get here it might be because
      // 1. The scalar type has such extreme precision that too few
      // iterations were performed to converge to within machine precision
      // (that is the svdc criterion). One solution is to increase the
      // maximum number of iterations in the netlib code.
      //
      // 2. The LINPACK dsvdc_ code expects correct IEEE rounding behaviour,
      // which some platforms (notably x86 processors) have trouble doing.
      // For example, gcc can output code at -O2 and static-linked code that
      // causes this problem. One solution is to persuade gcc to output
      // slightly different code by adding the -fPIC option to the command
      // line for v3p/netlib/dsvdc.c. If that doesn't work, try adding
      // -ffloat-store, which should fix the problem at the expense of being
      // significantly slower for big problems. Note that if this is the
      // cause, core/vnl/tests/test_svd should have failed.
      //
      // You may be able to diagnose the problem here by printing a warning message.
      std::cerr << __FILE__ ": suspicious return value (" << info << ") from SVDC\n"
                << __FILE__ ": M is " << M.rows() << 'x' << M.cols() << std::endl;

      vnl_matlab_print(std::cerr, M, "M", vnl_matlab_print_format_long);
      valid_ = false;
    }
    else
      valid_ = true;

    // Copy fortran outputs into our storage
    {
      const T *d = uspace.data_block();
      for (int j = 0; j < p; ++j)
        for (int i = 0; i < n; ++i)
          U_(i,j) = *d++;
    }

    for (int j = 0; j < mm; ++j)
      W_(j, j) = std::abs(wspace(j)); // we get rid of complexness here.

    for (int j = mm; j < n_; ++j)
      W_(j, j) = 0;

    {
      const T *d = vspace.data_block();
      for (int j = 0; j < p; ++j)
        for (int i = 0; i < p; ++i)
          V_(i,j) = *d++;
    }
  }

  if (vnl_svd_test_heavily)
  {
    // Test that the recomposed matrix == M
    typedef typename vnl_numeric_traits<T>::abs_t abs_t;
    abs_t recomposition_residual = std::abs((recompose() - M).fro_norm());
    abs_t n = std::abs(M.fro_norm());
    abs_t thresh = abs_t(m_) * abs_t(vnl_math::eps) * n;
    if (recomposition_residual > thresh)
    {
      std::cerr << "vnl_svd<T>::vnl_svd() -- Warning, recomposition_residual = "
                << recomposition_residual << std::endl
                << "fro_norm(M) = " << n << std::endl
                << "eps*fro_norm(M) = " << thresh << std::endl
                << "Press return to continue\n";
      char x;
      std::cin.get(x); // wait for the user to press return
    }
  }

  if (zero_out_tol >= 0)
    // Zero out small sv's and update rank count.
    zero_out_absolute(double(+zero_out_tol));
  else
    // negative tolerance implies relative to max elt.
    zero_out_relative(double(-zero_out_tol));
}

template <class T>
std::ostream& operator<<(std::ostream& s, const vnl_svd<T>& svd)
{
  s << "vnl_svd<T>:\n"
//  << "M = [\n" << M << "]\n"
    << "U = [\n" << svd.U() << "]\n"
    << "W = " << svd.W() << '\n'
    << "V = [\n" << svd.V() << "]\n"
    << "rank = " << svd.rank() << std::endl;
  return s;
}

//-----------------------------------------------------------------------------
// Chunky bits.

//: find weights below threshold tol, zero them out, and update W_ and Winverse_
template <class T>
void
vnl_svd<T>::zero_out_absolute(double tol)
{
  last_tol_ = tol;
  rank_ = W_.rows();
  for (unsigned k = 0; k < W_.rows(); k++)
  {
    singval_t& weight = W_(k, k);
    if (std::abs(weight) <= tol)
    {
      Winverse_(k,k) = 0;
      weight = 0;
      --rank_;
    }
    else
    {
      Winverse_(k,k) = singval_t(1.0)/weight;
    }
  }
}

//: find weights below tol*max(w) and zero them out
template <class T>
void vnl_svd<T>::zero_out_relative(double tol) // sqrt(machine epsilon)
{
  zero_out_absolute(tol * std::abs(sigma_max()));
}
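// For example (illustrative): svd.zero_out_relative(1e-8) treats every singular
// value at or below 1e-8 * sigma_max() as zero, updating rank_ and Winverse_.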

static bool w = false;
inline bool vnl_svn_warned() { if (w) return true; else { w = true; return false; } }

//: Calculate determinant as product of diagonals in W.
template <class T>
typename vnl_svd<T>::singval_t vnl_svd<T>::determinant_magnitude() const
{
  if (!vnl_svn_warned() && m_ != n_)
    std::cerr << __FILE__ ": called determinant_magnitude() on SVD of non-square matrix\n"
              << "(This warning is displayed only once)\n";
  singval_t product = W_(0, 0);
  for (unsigned long k = 1; k < W_.columns(); k++)
    product *= W_(k, k);

  return product;
}

template <class T>
typename vnl_svd<T>::singval_t vnl_svd<T>::norm() const
{
  return std::abs(sigma_max());
}

//: Recompose SVD to U*W*V'
template <class T>
vnl_matrix<T> vnl_svd<T>::recompose(unsigned int rnk) const
{
  if (rnk > rank_) rnk=rank_;
  vnl_matrix<T> Wmatr(W_.rows(),W_.columns());
  Wmatr.fill(T(0));
  for (unsigned int i=0;i<rnk;++i)
    Wmatr(i,i)=W_(i,i);

  return U_*Wmatr*V_.conjugate_transpose();
}


//: Calculate pseudo-inverse.
template <class T>
vnl_matrix<T> vnl_svd<T>::pinverse(unsigned int rnk) const
{
  if (rnk > rank_) rnk=rank_;
  vnl_matrix<T> W_inverse(Winverse_.rows(),Winverse_.columns());
  W_inverse.fill(T(0));
  for (unsigned int i=0;i<rnk;++i)
    W_inverse(i,i)=Winverse_(i,i);

  return V_ * W_inverse * U_.conjugate_transpose();
}


//: Calculate (pseudo-)inverse of transpose.
template <class T>
vnl_matrix<T> vnl_svd<T>::tinverse(unsigned int rnk) const
{
  if (rnk > rank_) rnk=rank_;
  vnl_matrix<T> W_inverse(Winverse_.rows(),Winverse_.columns());
  W_inverse.fill(T(0));
  for (unsigned int i=0;i<rnk;++i)
    W_inverse(i,i)=Winverse_(i,i);

  return U_ * W_inverse * V_.conjugate_transpose();
}


//: Solve the matrix equation M X = B, returning X
template <class T>
vnl_matrix<T> vnl_svd<T>::solve(vnl_matrix<T> const& B) const
{
  vnl_matrix<T> x;                                      // solution matrix
  if (U_.rows() < U_.columns()) {                       // augment B with extra rows of
    vnl_matrix<T> yy(U_.rows(), B.columns(), T(0));     // zeros, so that it matches
    yy.update(B);                                       // cols of U.transpose.
    x = U_.conjugate_transpose() * yy;
  }
  else
    x = U_.conjugate_transpose() * B;
  for (unsigned long i = 0; i < x.rows(); ++i) {        // multiply with diagonal 1/W
    T weight = W_(i, i);
    if (weight != T(0)) // vnl_numeric_traits<T>::zero
      weight = T(1) / weight;
    for (unsigned long j = 0; j < x.columns(); ++j)
      x(i, j) *= weight;
  }
  x = V_ * x;                                           // premultiply with V.
  return x;
}
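// Note (illustrative): solve() applies V * diag(1/w_i) * U' to the right-hand
// side, with 1/w_i taken as 0 for singular values that have been zeroed out,
// so it effectively multiplies by the pseudo-inverse and returns a
// least-squares solution of M X = B.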

//: Solve the matrix-vector system M x = y, returning x.
template <class T>
vnl_vector<T> vnl_svd<T>::solve(vnl_vector<T> const& y) const
{
  // fsm sanity check :
  if (y.size() != U_.rows())
  {
    std::cerr << __FILE__ << ": size of rhs is incompatible with no. of rows in U_\n"
              << "y =" << y << '\n'
              << "m_=" << m_ << '\n'
              << "n_=" << n_ << '\n'
              << "U_=\n" << U_
              << "V_=\n" << V_
              << "W_=\n" << W_;
  }

  vnl_vector<T> x(V_.rows());                   // Solution vector.
  if (U_.rows() < U_.columns()) {               // Augment y with extra rows of
    vnl_vector<T> yy(U_.rows(), T(0));          // zeros, so that it matches
    if (yy.size() < y.size()) { // fsm
      std::cerr << "yy=" << yy << std::endl
                << "y =" << y  << std::endl;
      // the update() call on the next line will abort...
    }
    yy.update(y);                               // cols of U.transpose.
    x = U_.conjugate_transpose() * yy;
  }
  else
    x = U_.conjugate_transpose() * y;

  for (unsigned i = 0; i < x.size(); i++) {        // multiply with diagonal 1/W
    T weight = W_(i, i), zero_(0);
    if (weight != zero_)
      x[i] /= weight;
    else
      x[i] = zero_;
  }
  return V_ * x;                                // premultiply with V.
}

template <class T> // FIXME. this should implement the above, not the other way round.
void vnl_svd<T>::solve(T const *y, T *x) const
{
  solve(vnl_vector<T>(y, m_)).copy_out(x);
}

//: Solve the matrix-vector system M x = y.
// Assume that the singular values W have been preinverted by the caller.
template <class T>
void vnl_svd<T>::solve_preinverted(vnl_vector<T> const& y, vnl_vector<T>* x_out) const
{
  vnl_vector<T> x;                              // solution vector
  if (U_.rows() < U_.columns()) {               // augment y with extra rows of
    std::cout << "vnl_svd<T>::solve_preinverted() -- Augmenting y\n";
    vnl_vector<T> yy(U_.rows(), T(0));          // zeros, so that it matches
    yy.update(y);                               // cols of U.transpose.
    x = U_.conjugate_transpose() * yy;
  }
  else
    x = U_.conjugate_transpose() * y;
  for (unsigned i = 0; i < x.size(); i++)  // multiply with diagonal W, assumed inverted
    x[i] *= W_(i, i);

  *x_out = V_ * x;                              // premultiply with V.
}

//-----------------------------------------------------------------------------
//: Return N s.t. M * N = 0
template <class T>
vnl_matrix<T> vnl_svd<T>::nullspace() const
{
  int k = rank();
  if (k == n_)
    std::cerr << "vnl_svd<T>::nullspace() -- Matrix is full rank (tol = " << last_tol_ << ")" << std::endl;
  return nullspace(n_-k);
}

//-----------------------------------------------------------------------------
//: Return N s.t. M * N = 0
template <class T>
vnl_matrix<T> vnl_svd<T>::nullspace(int required_nullspace_dimension) const
{
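  // The last required_nullspace_dimension columns of V (those associated with
  // the smallest singular values) span the numerical nullspace of M.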
  return V_.extract(V_.rows(), required_nullspace_dimension, 0, n_ - required_nullspace_dimension);
}

//-----------------------------------------------------------------------------
//: Return N s.t. M' * N = 0
template <class T>
vnl_matrix<T> vnl_svd<T>::left_nullspace() const
{
  int k = rank();
  if (k == n_)
    std::cerr << "vnl_svd<T>::left_nullspace() -- Matrix is full rank (tol = " << last_tol_ << ")" << std::endl;
  return U_.extract(U_.rows(), n_-k, 0, k);
}

//:
// \todo Implementation to be done yet; currently returns left_nullspace(). - PVr.
template <class T>
vnl_matrix<T> vnl_svd<T>::left_nullspace(int /*required_nullspace_dimension*/) const
{
  return left_nullspace();
}


//-----------------------------------------------------------------------------
//: Return the rightmost column of V.
//  Does not check whether the matrix actually was rank-deficient:
//  the caller is assumed to have examined W and satisfied themselves on that point.
template <class T>
vnl_vector<T> vnl_svd<T>::nullvector() const
{
  vnl_vector<T> ret(n_);
  for (int i = 0; i < n_; ++i)
    ret(i) = V_(i, n_-1);
  return ret;
}

//-----------------------------------------------------------------------------
//: Return the rightmost column of U.
//  Does not check whether the matrix actually was rank-deficient.
template <class T>
vnl_vector<T> vnl_svd<T>::left_nullvector() const
{
  vnl_vector<T> ret(m_);
  int col = std::min(m_, n_) - 1;
  for (int i = 0; i < m_; ++i)
    ret(i) = U_(i, col);
  return ret;
}

//--------------------------------------------------------------------------------

#undef VNL_SVD_INSTANTIATE
#define VNL_SVD_INSTANTIATE(T) \
template class VNL_ALGO_EXPORT vnl_svd<T >; \
template VNL_ALGO_EXPORT std::ostream& operator<<(std::ostream &, vnl_svd<T > const &)

#endif // vnl_svd_hxx_