/*  _______________________________________________________________________

    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
    Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
    This software is distributed under the GNU Lesser General Public License.
    For more information, see the README file in the top Dakota directory.
    _______________________________________________________________________ */

#include "ExperimentDataUtils.hpp"
#include "DakotaResponse.hpp"
#include <algorithm>
#include <iostream>
#include <numeric>

namespace Dakota {

/** This assumes the source gradient/Hessian are sized less than or equal to
    the destination response, and that the leading part is to be populated. */
void copy_field_data(const RealVector& fn_vals, RealMatrix& fn_grad,
                     const RealSymMatrixArray& fn_hess, size_t offset,
                     size_t num_fns, Response& response)
{
  const ShortArray& asv = response.active_set_request_vector();
  for (size_t i=0; i<num_fns; i++) {
    if (asv[i] & 1) {
      response.function_value(fn_vals[i], offset+i);
    }
    // populate only the part of the gradients/Hessians for this
    // experiment, for the active submodel derivative variables
    if (asv[i] & 2) {
      size_t num_sm_cv = fn_grad.numRows();
      RealVector resp_grad = response.function_gradient_view(offset+i);
      resp_grad = 0.0;
      for (size_t j=0; j<num_sm_cv; ++j)
        resp_grad[j] = fn_grad(j, i);
    }
    if (asv[i] & 4) {
      size_t num_sm_cv = fn_hess[i].numRows();
      RealSymMatrix resp_hess = response.function_hessian_view(offset+i);
      resp_hess = 0.0;
      for (size_t j=0; j<num_sm_cv; ++j)
        for (size_t k=0; k<num_sm_cv; ++k)
          resp_hess(j,k) = fn_hess[i](j,k);
    }
  }
}

/** This assumes the source gradient/Hessian are sized less than or equal to
    the destination response, and that the leading part is to be populated. */
void copy_field_data(const RealVector& fn_vals, RealMatrix& fn_grad,
                     const RealSymMatrixArray& fn_hess, size_t offset,
                     size_t num_fns, short total_asv, Response& response)
{
  for (size_t i=0; i<num_fns; i++) {
    if (total_asv & 1) {
      response.function_value(fn_vals[i], offset+i);
    }
    if (total_asv & 2) {
      size_t num_sm_cv = fn_grad.numRows();
      RealVector resp_grad = response.function_gradient_view(offset+i);
      resp_grad = 0.0;
      for (size_t j=0; j<num_sm_cv; ++j)
        resp_grad[j] = fn_grad(j, i);
    }
    if (total_asv & 4) {
      size_t num_sm_cv = fn_hess[i].numRows();
      RealSymMatrix resp_hess = response.function_hessian_view(offset+i);
      resp_hess = 0.0;
      for (size_t j=0; j<num_sm_cv; ++j)
        for (size_t k=0; k<num_sm_cv; ++k)
          resp_hess(j,k) = fn_hess[i](j,k);
    }
  }
}

//----------------------------------------------------------------

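/** Interpolate the simulation data for one field response onto the
    experiment coordinates and copy the interpolated values (and any
    requested gradients/Hessians) into interp_resp, starting at
    interp_resp_offset. */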
void interpolate_simulation_field_data( const Response &sim_resp,
                                        const RealMatrix &exp_coords,
                                        size_t field_num, short total_asv,
                                        size_t interp_resp_offset,
                                        Response &interp_resp ){

  const RealMatrix& sim_coords = sim_resp.field_coords_view(field_num);
  const RealVector sim_vals = sim_resp.field_values_view(field_num);
  RealMatrix sim_grads;
  // TODO(JDJ): Decide whether total_asv is fine-grained enough.  At the
  // moment it is per experiment, but at this level it could be applied at
  // the finer granularity of per field.
  if ( total_asv & 2 )
    sim_grads = sim_resp.field_gradients_view(field_num);

  RealSymMatrixArray sim_hessians;
  if ( total_asv & 4 )
    sim_hessians = sim_resp.field_hessians_view(field_num);

  RealVector interp_vals;
  RealMatrix interp_grads;
  RealSymMatrixArray interp_hessians;
  linear_interpolate_1d( sim_coords, sim_vals, sim_grads, sim_hessians,
                         exp_coords, interp_vals, interp_grads,
                         interp_hessians );

  size_t field_size = interp_vals.length();

  copy_field_data(interp_vals, interp_grads, interp_hessians,
                  interp_resp_offset, field_size, total_asv,
                  interp_resp);
}

//----------------------------------------------------------------

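/** Piecewise-linear interpolation of 1D field data.  For a prediction
    point x in [x_k, x_{k+1}], the interpolated value is
    f(x_k) + ( f(x_{k+1}) - f(x_k) ) / ( x_{k+1} - x_k ) * ( x - x_k );
    the same weights are applied entrywise to the gradients and Hessians.
    Points outside the range of build_pts use constant extrapolation. */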
void linear_interpolate_1d( const RealMatrix &build_pts,
                            const RealVector &build_vals,
                            const RealMatrix &build_grads,
                            const RealSymMatrixArray &build_hessians,
                            const RealMatrix &pred_pts,
                            RealVector &pred_vals,
                            RealMatrix &pred_grads,
                            RealSymMatrixArray &pred_hessians )
{
  int num_pred_pts = pred_pts.numRows();
  int num_build_pts = build_pts.numRows();

  // The following code assumes that values are always interpolated and,
  // if Hessians are requested, that gradients are provided
  bool interp_grads = ( ( build_grads.numRows() > 0 ) &&
                        ( build_grads.numCols() > 0 ) );
  bool interp_hessians = ( build_hessians.size() > 0 );

  if ( ( interp_hessians ) && ( !interp_grads ) )
    throw( std::runtime_error("Hessians were provided, but gradients were missing") );

  if ( build_pts.numCols()!=1 )
    throw( std::runtime_error("build pts must be Nx1") );
  if ( pred_pts.numCols()!=1 )
    throw( std::runtime_error("pred pts must be Mx1") );
  // The following is for when we activate multivariate interpolation of field data
  //if ( pred_pts.numCols()!=build_pts.numCols() )
  //  throw( std::runtime_error("build pts and pred pts must have the same number of columns") );

  int num_vars = build_grads.numRows();
  RealVector pred_pts_1d( Teuchos::View, pred_pts.values(), num_pred_pts );
  RealVector build_pts_1d( Teuchos::View, build_pts.values(), num_build_pts );

  // Initialize memory for interpolated data
  if ( pred_vals.length() != num_pred_pts )
    pred_vals.sizeUninitialized( num_pred_pts );
  // Only initialize gradient memory if build gradients was not empty
  if ( ( interp_grads ) &&
       ( ( pred_grads.numRows() != num_vars ) ||
         ( pred_grads.numCols() != num_pred_pts ) ) )
    pred_grads.shapeUninitialized( num_vars, num_pred_pts );
  // Only initialize Hessian memory if build Hessians was not empty
  if ( ( interp_hessians ) && ( pred_hessians.size() != num_pred_pts ) )
    pred_hessians.resize( num_pred_pts );

  for ( int i = 0; i < num_pred_pts; i++ ){
    // enforce constant extrapolation when the prediction point is outside
    // the range of build_pts
    if ( pred_pts_1d[i] <= build_pts_1d[0] ){
      pred_vals[i] = build_vals[0];
      for (int k=0; k<build_grads.numRows(); k++)
        pred_grads(k,i) = build_grads(k,0);
      if ( interp_hessians ){
        if ( pred_hessians[i].numRows() != num_vars )
          pred_hessians[i].shapeUninitialized( num_vars );
        for (int k=0; k<build_grads.numRows(); k++)
          for (int j=0; j<=k; j++)
            pred_hessians[i](j,k) = build_hessians[0](j,k);
      }
    }else if ( pred_pts_1d[i] >= build_pts_1d[num_build_pts-1] ){
      pred_vals[i] = build_vals[num_build_pts-1];
      for (int k=0; k<build_grads.numRows(); k++)
        pred_grads(k,i) = build_grads(k,num_build_pts-1);
      if ( interp_hessians ){
        if ( pred_hessians[i].numRows() != num_vars )
          pred_hessians[i].shapeUninitialized( num_vars );
        for (int k=0; k<build_grads.numRows(); k++)
          for (int j=0; j<=k; j++)
            pred_hessians[i](j,k) = build_hessians[num_build_pts-1](j,k);
      }
    }else{
      // assumes binary_search returns the index of the closest point in
      // build_pts to the left of pred_pts_1d[i]
      int index = binary_search( pred_pts_1d[i], build_pts_1d );

      // interpolate function values
      pred_vals[i] = build_vals[index] +
        ( (build_vals[index+1]-build_vals[index] ) /
          (build_pts_1d[index+1]-build_pts_1d[index]) ) *
        ( pred_pts_1d[i] - build_pts_1d[index] );
      // interpolate gradient values
      if ( interp_grads ){
        for (int j=0; j<build_grads.numRows(); j++){
          pred_grads(j,i) = build_grads(j,index) +
            ( (build_grads(j,index+1)-build_grads(j,index) ) /
              (build_pts_1d[index+1]-build_pts_1d[index]) ) *
            ( pred_pts_1d[i] - build_pts_1d[index] );
        }
      }
      // interpolate Hessian values
      if ( interp_hessians ){
        if ( pred_hessians[i].numRows() != num_vars )
          pred_hessians[i].shapeUninitialized( num_vars );
        for (int k=0; k<build_grads.numRows(); k++){
          for (int j=0; j<=k; j++){
            pred_hessians[i](j,k) = build_hessians[index](j,k) +
              ( (build_hessians[index+1](j,k)-build_hessians[index](j,k) ) /
                (build_pts_1d[index+1]-build_pts_1d[index]) ) *
              ( pred_pts_1d[i] - build_pts_1d[index] );
          }
        }
      }
    }
  }
}

//----------------------------------------------------------------

CovarianceMatrix::CovarianceMatrix() : numDOF_(0), covIsDiagonal_(false) {}

CovarianceMatrix::CovarianceMatrix( const CovarianceMatrix &source ){
  copy( source );
}

CovarianceMatrix::~CovarianceMatrix(){}

void CovarianceMatrix::copy( const CovarianceMatrix &source ){
  numDOF_=source.numDOF_;
  covIsDiagonal_ = source.covIsDiagonal_;
  if ( source.covDiagonal_.length() > 0 ) {
    // use assign instead of operator= to disconnect any Teuchos::View
    covDiagonal_.sizeUninitialized(source.covDiagonal_.length());
    covDiagonal_.assign(source.covDiagonal_);
  }
  else if ( source.covMatrix_.numRows() > 0 )
  {
    // use assign instead of operator= to disconnect any Teuchos::View
    covMatrix_.shapeUninitialized(source.covMatrix_.numRows());
    covMatrix_.assign(source.covMatrix_);
    // Copy covariance matrix Cholesky factor from source.
    // WARNING: Using Teuchos::SerialDenseSpdSolver prevents copying of the
    // covariance Cholesky factor, so it must be recomputed here.
    factor_covariance_matrix(); // The source has already been factored in-place? RWH
  }
  else
    // No covariance matrix is present in source
    return;
}

CovarianceMatrix& CovarianceMatrix::operator=( const CovarianceMatrix &source ){
  if(this == &source)
    return(*this); // Special case of source same as target

  copy( source );
  return *this;
}

void CovarianceMatrix::dense_covariance(RealSymMatrix &cov) const
{
  // reshape could be dangerous as it will disconnect any Teuchos::View
  if (cov.numRows() != numDOF_)
    cov.shape(numDOF_);
  cov = 0.0;
  if ( !covIsDiagonal_ ) {
    for (int i=0; i<numDOF_; i++)
      for (int j=0; j<=i; j++)
        cov(i,j) = covMatrix_(i,j);
  } else {
    for (int i=0; i<numDOF_; i++) {
      cov(i,i) = covDiagonal_[i];
    }
  }
}

void CovarianceMatrix::set_covariance( const RealMatrix &cov )
{
  if ( cov.numRows() != cov.numCols() ){
    std::string msg = "Covariance matrix must be square.";
    throw( std::runtime_error( msg ) );
  }

  numDOF_ = cov.numRows();
  covMatrix_.shape(numDOF_);
  for (int j=0; j<numDOF_; j++)
    for (int i=j; i<numDOF_; i++){
      covMatrix_(i,j) = cov(i,j);
      covMatrix_(j,i) = cov(i,j);
    }

  covIsDiagonal_ = false;
  factor_covariance_matrix();
}

void CovarianceMatrix::set_covariance( Real cov )
{
  RealVector cov_diag(1,false);
  cov_diag[0] = cov;
  set_covariance( cov_diag );
}

void CovarianceMatrix::set_covariance( const RealVector & cov )
{
  covDiagonal_.sizeUninitialized( cov.length() );
  covDiagonal_.assign( cov );
  covIsDiagonal_ = true;
  numDOF_ = cov.length();
}

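/** Compute the quadratic form v^T C^{-1} v by applying the inverse
    Cholesky factor (or inverse diagonal square root) to v and taking the
    squared two-norm of the result. */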
Real CovarianceMatrix::apply_covariance_inverse( const RealVector &vector ) const
{
  RealVector result;
  apply_covariance_inverse_sqrt( vector, result );
  return result.dot( result );
}

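/** Compute the Cholesky factorization C = L L^T of the dense covariance
    block using the Teuchos SPD solver, then invert the factor. */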
void CovarianceMatrix::factor_covariance_matrix()
{
  covCholFactor_ = covMatrix_;
  covSlvr_.setMatrix( rcp(&covCholFactor_, false) );
  // Equilibrate must be false (which is the default) when the Cholesky
  // factor is used to compute the inverse sqrt
  //covSlvr_.factorWithEquilibration(true);
  int info = covSlvr_.factor();
  if ( info > 0 ){
    std::string msg = "The covariance matrix is not positive definite\n";
    throw( std::runtime_error( msg ) );
  }
  invert_cholesky_factor();
}

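/** Copy the relevant (lower or upper) triangular part of the Cholesky
    factor into cholFactorInv_ and invert it in place with LAPACK TRTRI,
    giving L^{-1} for use in the whitening operations below. */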
void CovarianceMatrix::invert_cholesky_factor()
{
  // covCholFactor_ stores the factor of a symmetric matrix, so make a copy
  // that only copies the relevant lower or upper triangular part
  cholFactorInv_.shape( numDOF_, numDOF_ );
  if ( covCholFactor_.UPLO()=='L' ){
    for (int j=0; j<numDOF_; j++)
      for (int i=j; i<numDOF_; i++)
        cholFactorInv_(i,j)=covCholFactor_(i,j);
  }else{
    for (int i=0; i<numDOF_; i++)
      for (int j=i; j<numDOF_; j++)
        cholFactorInv_(i,j)=covCholFactor_(i,j);
  }

  int info = 0;
  Teuchos::LAPACK<int, Real> la;
  // always assume non_unit_diag even if covSlvr_.equilibratedA()==true
  la.TRTRI( covCholFactor_.UPLO(), 'N',
            numDOF_, cholFactorInv_.values(),
            cholFactorInv_.stride(), &info );
  if ( info > 0 ){
    std::string msg = "Inverting the covariance Cholesky factor failed\n";
    throw( std::runtime_error( msg ) );
  }
}

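/** Whiten a residual vector: result = L^{-1} * vector for a dense block,
    or result[i] = vector[i] / sqrt(covDiagonal_[i]) for a diagonal block. */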
void CovarianceMatrix::apply_covariance_inverse_sqrt( const RealVector &vector,
                                                      RealVector &result ) const
{
  if ( vector.length() != numDOF_ ){
    std::string msg = "Vector and covariance are incompatible for ";
    msg += "multiplication.";
    throw( std::runtime_error( msg ) );
  }

  if ( result.length() != numDOF_ )
    result.sizeUninitialized( numDOF_ );
  if ( covIsDiagonal_ ) {
    for (int i=0; i<numDOF_; i++)
      result[i] = vector[i] / std::sqrt( covDiagonal_[i] );
  }else{
    result.multiply( Teuchos::NO_TRANS, Teuchos::NO_TRANS,
                     1.0, cholFactorInv_, vector, 0.0 );
  }
}

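/** Apply the inverse Cholesky factor to each row of the gradient matrix:
    for gradients stored as (num_grads x numDOF_), the dense case computes
    result = gradients * L^{-T}. */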
void CovarianceMatrix::apply_covariance_inverse_sqrt_to_gradients(
          const RealMatrix &gradients,
          RealMatrix &result ) const
{
  if ( gradients.numCols() != numDOF_ ){
    std::string msg = "Gradients and covariance are incompatible for ";
    msg += "multiplication.";
    throw( std::runtime_error( msg ) );
  }
  int num_grads = gradients.numRows();
  // BMA: relaxed this to allow result to have extra rows that aren't
  // populated (don't want to resize the result gradients); may want
  // to throw error or change API.
  if ( ( result.numRows() < num_grads ) || ( result.numCols() != numDOF_ ) )
    result.shapeUninitialized( num_grads, numDOF_ );
  if ( covIsDiagonal_ ) {
    for (int j=0; j<numDOF_; j++)
      for (int i=0; i<num_grads; i++)
        result(i,j) = gradients(i,j) / std::sqrt( covDiagonal_[j] );
  }else{
    // Let A = cholFactorInv_ and B = gradients. We want to compute C' = AB'
    // so compute C = (AB')' = BA'
    result.multiply( Teuchos::NO_TRANS, Teuchos::TRANS,
                     1.0, gradients, cholFactorInv_, 0.0 );
  }
}

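/** Whiten the Hessian blocks in place: for each (i,j) entry, gather the
    values across the numDOF_ Hessians starting at index start, apply the
    inverse covariance square root, and write the scaled values back. */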
void CovarianceMatrix::apply_covariance_inverse_sqrt_to_hessian(
                          RealSymMatrixArray &hessians, int start ) const
{
  if ( (hessians.size()-start) < numDOF_ ){
    std::string msg = "Hessians and covariance are incompatible for ";
    msg += "multiplication.";
    throw( std::runtime_error( msg ) );
  }
  int num_rows = hessians[start].numRows();
  if (!num_rows) return; // if Hessian inactive for this fn, no contribution
  if ( covIsDiagonal_ ) {
    for (int k=0; k<numDOF_; k++){
      // Must only loop over the lower or upper triangular part because the
      // accessor function (i,j) adjusts both triangular parts
      hessians[start+k] *= 1./std::sqrt( covDiagonal_[k] );
    }
  }else{
    for (int k=0; k<numDOF_; k++) {
      if (!hessians[start+k].numRows()) {
        Cerr << "Error: all Hessians must be defined in CovarianceMatrix::"
             << "apply_covariance_inverse_sqrt_to_hessian()." << std::endl;
        abort_handler(OTHER_ERROR);
      }
    }
    RealVector hess_ij_res( numDOF_, false );
    RealVector scaled_hess_ij_res( numDOF_, false );
    // Must only loop over the lower or upper triangular part because the
    // accessor function (i,j) adjusts both triangular parts
    for (int j=0; j<num_rows; j++){
      for (int i=0; i<=j; i++){
        // Extract the ij Hessian components for each degree of freedom
        for (int k=0; k<numDOF_; k++)
          hess_ij_res[k] = hessians[start+k](i,j);
        // Apply the covariance matrix to the extracted Hessian components
        apply_covariance_inverse_sqrt( hess_ij_res, scaled_hess_ij_res );
        // Copy the result back into hessians
        for (int k=0; k<numDOF_; k++)
          hessians[start+k](i,j) = scaled_hess_ij_res[k];
      }
    }
  }
}

int CovarianceMatrix::num_dof() const {
  return numDOF_;
}

void CovarianceMatrix::print() const {
  if ( covIsDiagonal_ ) {
    std::cout << " Covariance is Diagonal " << '\n';
    covDiagonal_.print(std::cout);
  }
  else {
    std::cout << " Covariance is Full " << '\n';
    covMatrix_.print(std::cout);
  }
}

void CovarianceMatrix::get_main_diagonal( RealVector &diagonal ) const {
  // Only resize the vector if the memory has not already been allocated
  if ( diagonal.length() != num_dof() )
    diagonal.sizeUninitialized( num_dof() );

  if ( covIsDiagonal_ ) {
    for (int i=0; i<num_dof(); i++ )
      diagonal[i] = covDiagonal_[i];
  }else{
    for (int i=0; i<num_dof(); i++ )
      diagonal[i] = covMatrix_(i,i);
  }
}

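/** Convert this covariance block to a correlation matrix:
    corr(i,j) = cov(i,j) / ( sqrt(cov(i,i)) * sqrt(cov(j,j)) ), with ones
    on the diagonal.  Assumes corr_mat has already been sized by the caller. */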
void CovarianceMatrix::as_correlation(RealSymMatrix& corr_mat) const
{
  corr_mat = 0.0;
  if (covIsDiagonal_) {
    for (int i=0; i<num_dof(); ++i)
      corr_mat(i, i) = 1.0;
  }
  else {
    for (int i=0; i<num_dof(); ++i) {
      corr_mat(i, i) = 1.0;
      for (int j=0; j<i; ++j)
        corr_mat(i,j) = covMatrix_(i,j) / std::sqrt(covMatrix_(i,i)) /
          std::sqrt(covMatrix_(j,j));
    }
  }
}


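/** Determinant of this covariance block: the product of the diagonal
    variances, or of the squared Cholesky diagonal for a dense block. */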
Real CovarianceMatrix::determinant() const
{
  Real det = 1.0;
  if (covIsDiagonal_) {
    for (int i=0; i<num_dof(); i++)
      det *= covDiagonal_[i];
  }
  else {
    for (int i=0; i<num_dof(); i++)
      det *= covCholFactor_(i,i)*covCholFactor_(i,i);
  }
  return det;
}


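/** Log-determinant of this covariance block, accumulated as a sum of logs
    to avoid overflow or underflow for large blocks. */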
Real CovarianceMatrix::log_determinant() const
{
  Real log_det = 0.0;
  if (covIsDiagonal_) {
    for (int i=0; i<num_dof(); i++)
      log_det += std::log(covDiagonal_[i]);
  }
  else {
    for (int i=0; i<num_dof(); i++)
      log_det += std::log(covCholFactor_(i,i))+std::log(covCholFactor_(i,i));
  }
  return log_det;
}


ExperimentCovariance & ExperimentCovariance::operator=(const ExperimentCovariance& source)
{
  if(this == &source)
    return *this; // Special case of source same as target

  numBlocks_ = source.numBlocks_;
  numDOF_ = source.numDOF_;
  covMatrices_.resize(source.covMatrices_.size());
  for( size_t i=0; i<source.covMatrices_.size(); ++i)
    covMatrices_[i] = source.covMatrices_[i];

  return *this;
}

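/** Assemble the block-diagonal experiment covariance from full matrix,
    diagonal, and scalar blocks; each map index vector gives the position
    of the corresponding block in the experiment-wide block ordering.
    Illustrative use with two scalar variances (hypothetical data):
    \code
      std::vector<RealMatrix> mats;   // no full-matrix blocks
      std::vector<RealVector> diags;  // no diagonal blocks
      RealVector scalars(2);  scalars[0] = 0.1;  scalars[1] = 0.2;
      IntVector mat_map, diag_map, scalar_map(2);
      scalar_map[0] = 0;  scalar_map[1] = 1;
      ExperimentCovariance exp_cov;
      exp_cov.set_covariance_matrices(mats, diags, scalars,
                                      mat_map, diag_map, scalar_map);
    \endcode */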
void ExperimentCovariance::set_covariance_matrices(
  std::vector<RealMatrix> &matrices,
  std::vector<RealVector> &diagonals,
  RealVector &scalars,
  IntVector matrix_map_indices,
  IntVector diagonal_map_indices,
  IntVector scalar_map_indices ){

  if ( matrices.size() != matrix_map_indices.length() ){
    std::string msg = "must specify an index map for each full ";
    msg += "covariance matrix.";
    throw( std::runtime_error( msg ) );
  }
  if ( diagonals.size() != diagonal_map_indices.length() ){
    std::string msg = "must specify an index map for each diagonal ";
    msg += "covariance matrix.";
    throw( std::runtime_error( msg ) );
  }
  if ( scalars.length() != scalar_map_indices.length() ){
    std::string msg = "must specify an index map for each scalar ";
    msg += "covariance matrix.";
    throw( std::runtime_error( msg ) );
  }

  numDOF_ = 0;

  numBlocks_ = matrix_map_indices.length() + diagonal_map_indices.length() +
    scalar_map_indices.length();

  covMatrices_.resize( numBlocks_ );

  for (int i=0; i<matrices.size(); i++ ){
    int index = matrix_map_indices[i];
    if ( index >= numBlocks_ )
      throw( std::runtime_error( "matrix_map_indices was out of bounds." ) );
    covMatrices_[index].set_covariance( matrices[i] );
    numDOF_ += matrices[i].numRows();
  }

  for (int i=0; i<diagonals.size(); i++){
    int index = diagonal_map_indices[i];
    if ( index >= numBlocks_ )
      throw( std::runtime_error( "diagonal_map_indices was out of bounds." ) );
    covMatrices_[index].set_covariance( diagonals[i] );
    numDOF_ += diagonals[i].length();
  }

  for (int i=0; i<scalars.length(); i++ ){
    int index = scalar_map_indices[i];
    if ( index >= numBlocks_ )
      throw( std::runtime_error( "scalar_map_indices was out of bounds." ) );
    covMatrices_[index].set_covariance( scalars[i] );
  }
  numDOF_ += scalars.length();
}

Real ExperimentCovariance::apply_experiment_covariance( const RealVector &vector )
  const{
  if ( vector.length() != num_dof() ){
    throw(std::runtime_error("apply_covariance_inverse: vector is inconsistent with covariance matrix"));
  }

  int shift = 0;
  Real result = 0.;
  for (int i=0; i<covMatrices_.size(); i++ ){
    int num_dof = covMatrices_[i].num_dof();
    RealVector sub_vector( Teuchos::View, vector.values()+shift, num_dof );
    result += covMatrices_[i].apply_covariance_inverse( sub_vector );
    shift += num_dof;
  }
  return result;
}

void ExperimentCovariance::apply_experiment_covariance_inverse_sqrt(
                  const RealVector &vector, RealVector &result ) const{
  if ( vector.length() != num_dof() )
    throw(std::runtime_error("apply_covariance_inverse_sqrt: vector is inconsistent with covariance matrix"));

  int shift = 0;
  result.sizeUninitialized( vector.length() );
  for (int i=0; i<covMatrices_.size(); i++ ){
    int num_dof = covMatrices_[i].num_dof();
    RealVector sub_vector( Teuchos::View, vector.values()+shift, num_dof );
    RealVector sub_result( Teuchos::View, result.values()+shift, num_dof );
    covMatrices_[i].apply_covariance_inverse_sqrt( sub_vector, sub_result );
    shift += num_dof;
  }
}

void ExperimentCovariance::apply_experiment_covariance_inverse_sqrt_to_gradients(
  const RealMatrix &gradients, RealMatrix &result ) const{

  if ( gradients.numCols() != num_dof() )
    throw(std::runtime_error("apply_covariance_inverse_sqrt_to_gradients: gradients is inconsistent with covariance matrix"));

  int shift = 0;
  int num_grads = gradients.numRows();
  result.shape( num_grads, gradients.numCols() );
  for (int i=0; i<covMatrices_.size(); i++ ){
    int num_dof = covMatrices_[i].num_dof();
    RealMatrix sub_matrix( Teuchos::View, gradients, num_grads, num_dof, 0, shift );
    RealMatrix sub_result( Teuchos::View, result, num_grads, num_dof, 0, shift );
    covMatrices_[i].apply_covariance_inverse_sqrt_to_gradients( sub_matrix,
                                                                sub_result );
    shift += num_dof;
  }
}

void ExperimentCovariance::apply_experiment_covariance_inverse_sqrt_to_hessians(
        const RealSymMatrixArray &hessians, RealSymMatrixArray &result ) const {

  if ( hessians.size() != num_dof() )
    throw(std::runtime_error("apply_covariance_inverse_sqrt_to_hessians: hessians is inconsistent with covariance matrix"));

  // perform a deep copy of the Hessians
  result.resize( hessians.size() );
  for (int i=0; i<hessians.size(); i++ )
    if (hessians[i].numRows()) { // else don't bother to assign empty matrix
      result[i].shapeUninitialized( hessians[i].numRows() );
      result[i].assign( hessians[i] );
    }
  int shift = 0;
  for (int i=0; i<covMatrices_.size(); i++ ){
    int num_dof = covMatrices_[i].num_dof();
    // modify in place the Hessian copies stored in result
    covMatrices_[i].apply_covariance_inverse_sqrt_to_hessian( result, shift );
    shift += num_dof;
  }
}

void ExperimentCovariance::print_covariance_blocks() const {

  for (int i=0; i<covMatrices_.size(); i++ ){
    std::cout << "Covariance Matrix " << i << '\n';
    covMatrices_[i].print();
  }
}

void ExperimentCovariance::get_main_diagonal( RealVector &diagonal ) const {

  // Get the number of entries along the main diagonal
  int num_diagonal_entries = 0;
  for (int i=0; i<covMatrices_.size(); i++ )
    num_diagonal_entries += covMatrices_[i].num_dof();
  diagonal.sizeUninitialized( num_diagonal_entries );

  // Extract the diagonal of each sub-block of the experiment covariance matrix
  int global_row_num = 0;
  for (int i=0; i<covMatrices_.size(); i++ ){
    RealVector sub_diagonal( Teuchos::View, diagonal.values()+global_row_num,
                             covMatrices_[i].num_dof() );
    covMatrices_[i].get_main_diagonal( sub_diagonal );
    global_row_num += covMatrices_[i].num_dof();
  }
}


void ExperimentCovariance::dense_covariance(RealSymMatrix& cov_mat) const
{
  // reshape could be dangerous as it will disconnect any Teuchos::View
  if (cov_mat.numRows() != num_dof())
    cov_mat.shape(num_dof());
  int global_row_num = 0;
  for (int i=0; i<covMatrices_.size(); ++i) {
    RealSymMatrix sub_matrix(Teuchos::View, cov_mat, covMatrices_[i].num_dof(),
                             global_row_num);
    covMatrices_[i].dense_covariance(sub_matrix);
    global_row_num += covMatrices_[i].num_dof();
  }
}


void ExperimentCovariance::as_correlation(RealSymMatrix& corr_mat) const
{
  // reshape could be dangerous as it will disconnect any Teuchos::View
  if (corr_mat.numRows() != num_dof())
    corr_mat.shape(num_dof());
  int global_row_num = 0;
  for (int i=0; i<covMatrices_.size(); ++i) {
    RealSymMatrix sub_matrix(Teuchos::View, corr_mat, covMatrices_[i].num_dof(),
                             global_row_num);
    covMatrices_[i].as_correlation(sub_matrix);
    global_row_num += covMatrices_[i].num_dof();
  }
}


Real ExperimentCovariance::determinant() const
{
  Real det = 1.0;
  for (int i=0; i<numBlocks_; ++i)
    det *= covMatrices_[i].determinant();
  return det;
}


Real ExperimentCovariance::log_determinant() const
{
  Real log_det = 0.0;
  for (int i=0; i<numBlocks_; ++i)
    log_det += covMatrices_[i].log_determinant();
  return log_det;
}


//----------------------------------------------------------------

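/** Compute the eigenvalues and eigenvectors of a real symmetric matrix
    using LAPACK SYEV (via Teuchos::LAPACK), with a workspace-size query
    followed by the actual decomposition. */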
void symmetric_eigenvalue_decomposition( const RealSymMatrix &matrix,
                                         RealVector &eigenvalues,
                                         RealMatrix &eigenvectors )
{
  Teuchos::LAPACK<int, Real> la;

  int N( matrix.numRows() );
  eigenvectors.shapeUninitialized( N, N );
  //eigenvectors.assign( matrix );
  for ( int j=0; j<N; j++)
    for ( int i=0; i<=j; i++)
      eigenvectors(i,j) = matrix( i,j );

  char jobz = 'V'; // compute eigenvectors
  char uplo = 'U'; // assume only upper triangular part of matrix is stored

  eigenvalues.sizeUninitialized( N );

  int info;        // Teuchos::LAPACK output flag
  RealVector work; // Teuchos::LAPACK work array
  int lwork = -1;  // size of Teuchos::LAPACK work array

  // Compute optimal size of work array
  work.sizeUninitialized( 1 ); // temporary work array
  la.SYEV( jobz, uplo, N, eigenvectors.values(), eigenvectors.stride(),
           eigenvalues.values(), work.values(), lwork, &info );

  lwork = (int)work[0];
  work.sizeUninitialized( lwork );

  la.SYEV( jobz, uplo, N, eigenvectors.values(), eigenvectors.stride(),
           eigenvalues.values(), work.values(), lwork, &info );

  if ( info > 0 )
    {
      std::stringstream msg;
      msg << "The algorithm failed to converge. " << info
          << " off-diagonal elements of an intermediate tridiagonal "
          << "form did not converge to zero.";
      throw( std::runtime_error( msg.str() ) );
    }
  else if ( info < 0 )
    {
      std::stringstream msg;
      msg << "The " << std::abs( info ) << "-th argument had an illegal value.";
      throw( std::runtime_error( msg.str() ) );
    }
}

//----------------------------------------------------------------

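/** Compute the mean of each column of matrix and return the means in
    avg_vals, sized to the number of columns. */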
void compute_column_means( RealMatrix & matrix, RealVector & avg_vals )
{
  int num_cols = matrix.numCols();
  int num_rows = matrix.numRows();

  avg_vals.resize(num_cols);

  RealVector ones_vec(num_rows);
  ones_vec.putScalar(1.0);

  for( int i=0; i<num_cols; ++i ) {
    const RealVector & col_vec = Teuchos::getCol(Teuchos::View, matrix, i);
    avg_vals(i) = col_vec.dot(ones_vec)/(Real) num_rows;
  }
}

//----------------------------------------------------------------

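/** Return in sort_vec the values of vec in ascending order, and in indices
    the permutation that produces that order. */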
void sort_vector( const RealVector & vec, RealVector & sort_vec, IntVector & indices )
{
  if( indices.length() != vec.length() )
    indices.resize(vec.length()); // avoid initialization to 0.0

  // initialize indices from 0 to vec.length()-1
  std::iota(indices.values(), indices.values()+vec.length(), 0);

  // Sort indices using values from the incoming vec
  std::sort( indices.values(), indices.values()+vec.length(),
             [&](int i, int j){ return vec[i] < vec[j]; } );

  if( sort_vec.length() != vec.length() )
    sort_vec.resize(vec.length()); // avoid initialization to 0.0

  // Copy into target vector in sorted order
  for( int i=0; i<vec.length(); ++i )
    sort_vec[i] = vec[indices[i]];
}

//----------------------------------------------------------------

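/** Sort each column of mat independently in ascending order; sort_mat
    receives the sorted columns and indices the corresponding per-column
    permutations. */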
void sort_matrix_columns( const RealMatrix & mat, RealMatrix & sort_mat, IntMatrix & indices )
{
  if( (mat.numRows() != sort_mat.numRows()) ||
      (mat.numCols() != sort_mat.numCols()) )
    sort_mat.shapeUninitialized(mat.numRows(), mat.numCols()); // avoid initialization to 0.0

  if( (mat.numRows() != indices.numRows()) ||
      (mat.numCols() != indices.numCols()) )
    indices.shapeUninitialized(mat.numRows(), mat.numCols()); // avoid initialization to 0.0

  for( int i=0; i<mat.numCols(); ++i )
  {
    RealMatrix & nonconst_mat = const_cast<RealMatrix&>(mat);
    const RealVector & unsrt_vec = Teuchos::getCol(Teuchos::View, nonconst_mat, i);
    RealVector sorted_vec   = Teuchos::getCol(Teuchos::View, sort_mat, i);
    IntVector  sort_indices = Teuchos::getCol(Teuchos::View, indices, i);
    sort_vector( unsrt_vec, sorted_vec, sort_indices );
  }
}

//----------------------------------------------------------------

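/** Return true if matrix is square and equal to its transpose
    (exact elementwise comparison). */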
bool is_matrix_symmetric( const RealMatrix & matrix )
{
  int num_cols = matrix.numCols();
  int num_rows = matrix.numRows();

  if( num_cols != num_rows )
    return false;

  bool is_symmetric = true;

  for( int i=0; i<num_cols; ++i )
    for( int j=i+1; j<num_cols; ++j ) {
      if( matrix(i,j) != matrix(j,i) ) {
        is_symmetric = false;
        break;
      }
    }

  return is_symmetric;
}

} // namespace Dakota