/*
 *    This file is part of CasADi.
 *
 *    CasADi -- A symbolic framework for dynamic optimization.
 *    Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
 *                            K.U. Leuven. All rights reserved.
 *    Copyright (C) 2011-2014 Greg Horn
 *
 *    CasADi is free software; you can redistribute it and/or
 *    modify it under the terms of the GNU Lesser General Public
 *    License as published by the Free Software Foundation; either
 *    version 3 of the License, or (at your option) any later version.
 *
 *    CasADi is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *    Lesser General Public License for more details.
 *
 *    You should have received a copy of the GNU Lesser General Public
 *    License along with CasADi; if not, write to the Free Software
 *    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */


#include "blocksqp.hpp"
#include "casadi/core/casadi_misc.hpp"
#include "casadi/core/conic.hpp"

using namespace std;
namespace casadi {

  extern "C"
  int CASADI_NLPSOL_BLOCKSQP_EXPORT
  casadi_register_nlpsol_blocksqp(Nlpsol::Plugin* plugin) {
    plugin->creator = Blocksqp::creator;
    plugin->name = "blocksqp";
    plugin->doc = Blocksqp::meta_doc.c_str();
    plugin->version = CASADI_VERSION;
    plugin->options = &Blocksqp::options_;
    plugin->deserialize = &Blocksqp::deserialize;
    return 0;
  }

  extern "C"
  void CASADI_NLPSOL_BLOCKSQP_EXPORT casadi_load_nlpsol_blocksqp() {
    Nlpsol::registerPlugin(casadi_register_nlpsol_blocksqp);
  }

  Blocksqp::Blocksqp(const std::string& name, const Function& nlp)
    : Nlpsol(name, nlp) {
  }


  Blocksqp::~Blocksqp() {
    clear_mem();
  }
  const Options Blocksqp::options_
  = {{&Nlpsol::options_},
     {{"qpsol",
       {OT_STRING,
        "The QP solver to be used by the SQP method"}},
      {"qpsol_options",
       {OT_DICT,
        "Options to be passed to the QP solver"}},
      {"linsol",
       {OT_STRING,
        "The linear solver to be used by the QP method"}},
      {"print_header",
       {OT_BOOL,
        "Print solver header at startup"}},
      {"print_iteration",
       {OT_BOOL,
        "Print SQP iterations"}},
      {"eps",
       {OT_DOUBLE,
        "Values smaller than this are regarded as numerically zero"}},
      {"opttol",
       {OT_DOUBLE,
        "Optimality tolerance"}},
      {"nlinfeastol",
       {OT_DOUBLE,
        "Nonlinear feasibility tolerance"}},
      {"schur",
       {OT_BOOL,
        "Use qpOASES Schur complement approach"}},
      {"globalization",
       {OT_BOOL,
        "Enable globalization"}},
      {"restore_feas",
       {OT_BOOL,
        "Use feasibility restoration phase"}},
      {"max_line_search",
       {OT_INT,
        "Maximum number of steps in line search"}},
      {"max_consec_reduced_steps",
       {OT_INT,
        "Maximum number of consecutive reduced steps"}},
      {"max_consec_skipped_updates",
       {OT_INT,
        "Maximum number of consecutive skipped updates"}},
      {"max_iter",
       {OT_INT,
        "Maximum number of SQP iterations"}},
      {"warmstart",
       {OT_BOOL,
        "Use warmstarting"}},
      {"qp_init",
       {OT_BOOL,
        "Re-initialize the QP solver for each solve"}},
      {"max_it_qp",
       {OT_INT,
        "Maximum number of QP iterations per SQP iteration"}},
      {"block_hess",
       {OT_INT,
        "Blockwise Hessian approximation?"}},
      {"hess_scaling",
       {OT_INT,
        "Scaling strategy for Hessian approximation"}},
      {"fallback_scaling",
       {OT_INT,
        "If indefinite update is used, the type of fallback scaling strategy"}},
      {"max_time_qp",
       {OT_DOUBLE,
        "Maximum time in seconds per QP solve per SQP iteration"}},
      {"ini_hess_diag",
       {OT_DOUBLE,
        "Initial Hessian guess: diagonal matrix diag(iniHessDiag)"}},
      {"col_eps",
       {OT_DOUBLE,
        "Epsilon for COL scaling strategy"}},
      {"col_tau1",
       {OT_DOUBLE,
        "tau1 for COL scaling strategy"}},
      {"col_tau2",
       {OT_DOUBLE,
        "tau2 for COL scaling strategy"}},
      {"hess_damp",
       {OT_INT,
        "Activate Powell damping for BFGS"}},
      {"hess_damp_fac",
       {OT_DOUBLE,
        "Damping factor for BFGS Powell modification"}},
      {"hess_update",
       {OT_INT,
        "Type of Hessian approximation"}},
      {"fallback_update",
       {OT_INT,
        "If indefinite update is used, the type of fallback update strategy"}},
      {"hess_lim_mem",
       {OT_INT,
        "Full or limited memory"}},
      {"hess_memsize",
       {OT_INT,
        "Memory size for L-BFGS updates"}},
      {"which_second_derv",
       {OT_INT,
        "For which block should second derivatives be provided by the user"}},
      {"skip_first_globalization",
       {OT_BOOL,
        "No globalization strategy in first iteration"}},
      {"conv_strategy",
       {OT_INT,
        "Convexification strategy"}},
      {"max_conv_qp",
       {OT_INT,
        "How many additional QPs may be solved for convexification per iteration?"}},
      {"max_soc_iter",
       {OT_INT,
        "Maximum number of SOC line search iterations"}},
      {"gamma_theta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"gamma_f",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_soc",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_f",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"theta_max",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"theta_min",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"delta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"s_theta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"s_f",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_minus",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_plus",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"kappa_plus_max",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"delta_h0",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"eta",
       {OT_DOUBLE,
        "Filter line search parameter, cf. IPOPT paper"}},
      {"obj_lo",
       {OT_DOUBLE,
        "Lower bound on objective function [-inf]"}},
      {"obj_up",
       {OT_DOUBLE,
        "Upper bound on objective function [inf]"}},
      {"rho",
       {OT_DOUBLE,
        "Feasibility restoration phase parameter"}},
      {"zeta",
       {OT_DOUBLE,
        "Feasibility restoration phase parameter"}},
      {"print_maxit_reached",
       {OT_BOOL,
        "Print error when maximum number of SQP iterations reached"}}
     }
  };
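
  // Usage sketch (not part of the plugin itself): the options above are
  // passed through CasADi's generic nlpsol interface. Variable names and
  // option values below are illustrative, not prescribed by this file.
  //
  //   MX x = MX::sym("x", 2);
  //   MXDict nlp = {{"x", x}, {"f", dot(x, x)}, {"g", x(0) + x(1)}};
  //   Dict opts = {{"max_iter", 50}, {"print_iteration", true}};
  //   Function solver = nlpsol("solver", "blocksqp", nlp, opts);
  //   DMDict res = solver(DMDict{{"x0", std::vector<double>{1.0, 2.0}},
  //                              {"lbg", 1.0}, {"ubg", 1.0}});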

  void Blocksqp::init(const Dict& opts) {
    // Call the init method of the base class
    Nlpsol::init(opts);

    // Set default options
    //string qpsol_plugin = "qpoases";
    //Dict qpsol_options;
    linsol_plugin_ = "ma27";
    print_header_ = true;
    print_iteration_ = true;
    eps_ = 1.0e-16;
    opttol_ = 1.0e-6;
    nlinfeastol_ = 1.0e-6;
    schur_ = true;
    globalization_ = true;
    restore_feas_ = true;
    max_line_search_ = 20;
    max_consec_reduced_steps_ = 100;
    max_consec_skipped_updates_ = 100;
    max_iter_ = 100;
    warmstart_ = false;
    max_it_qp_ = 5000;
    block_hess_ = true;
    hess_scaling_ = 2;
    fallback_scaling_ = 4;
    max_time_qp_ = 10000.0;
    ini_hess_diag_ = 1.0;
    col_eps_ = 0.1;
    col_tau1_ = 0.5;
    col_tau2_ = 1.0e4;
    hess_damp_ = 1;
    hess_damp_fac_ = 0.2;
    hess_update_ = 1;
    fallback_update_ = 2;
    hess_lim_mem_ = 1;
    hess_memsize_ = 20;
    which_second_derv_ = 0;
    skip_first_globalization_ = false;
    conv_strategy_ = 0;
    max_conv_qp_ = 1;
    max_soc_iter_ = 3;
    gamma_theta_ = 1.0e-5;
    gamma_f_ = 1.0e-5;
    kappa_soc_ = 0.99;
    kappa_f_ = 0.999;
    theta_max_ = 1.0e7;
    theta_min_ = 1.0e-5;
    delta_ = 1.0;
    s_theta_ = 1.1;
    s_f_ = 2.3;
    kappa_minus_ = 0.333;
    kappa_plus_ = 8.0;
    kappa_plus_max_ = 100.0;
    delta_h0_ = 1.0e-4;
    eta_ = 1.0e-4;
    obj_lo_ = -inf;
    obj_up_ = inf;
    rho_ = 1.0e3;
    zeta_ = 1.0e-3;
    print_maxit_reached_ = true;
    qp_init_ = true;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="qpsol") {
        //qpsol_plugin = op.second.to_string();
        casadi_warning("Option 'qpsol' currently not supported, ignored");
      } else if (op.first=="qpsol_options") {
        //qpsol_options = op.second;
        casadi_warning("Option 'qpsol_options' currently not supported, ignored");
      } else if (op.first=="linsol") {
        linsol_plugin_ = string(op.second);
      } else if (op.first=="print_header") {
        print_header_ = op.second;
      } else if (op.first=="print_iteration") {
        print_iteration_ = op.second;
      } else if (op.first=="eps") {
        eps_ = op.second;
      } else if (op.first=="opttol") {
        opttol_ = op.second;
      } else if (op.first=="nlinfeastol") {
        nlinfeastol_ = op.second;
      } else if (op.first=="schur") {
        schur_ = op.second;
      } else if (op.first=="globalization") {
        globalization_ = op.second;
      } else if (op.first=="restore_feas") {
        restore_feas_ = op.second;
      } else if (op.first=="max_line_search") {
        max_line_search_ = op.second;
      } else if (op.first=="max_consec_reduced_steps") {
        max_consec_reduced_steps_ = op.second;
      } else if (op.first=="max_consec_skipped_updates") {
        max_consec_skipped_updates_ = op.second;
      } else if (op.first=="max_iter") {
        max_iter_ = op.second;
      } else if (op.first=="warmstart") {
        warmstart_ = op.second;
      } else if (op.first=="qp_init") {
        qp_init_ = op.second;
      } else if (op.first=="max_it_qp") {
        max_it_qp_ = op.second;
      } else if (op.first=="block_hess") {
        block_hess_ = op.second;
      } else if (op.first=="hess_scaling") {
        hess_scaling_ = op.second;
      } else if (op.first=="fallback_scaling") {
        fallback_scaling_ = op.second;
      } else if (op.first=="max_time_qp") {
        max_time_qp_ = op.second;
      } else if (op.first=="ini_hess_diag") {
        ini_hess_diag_ = op.second;
      } else if (op.first=="col_eps") {
        col_eps_ = op.second;
      } else if (op.first=="col_tau1") {
        col_tau1_ = op.second;
      } else if (op.first=="col_tau2") {
        col_tau2_ = op.second;
      } else if (op.first=="hess_damp") {
        hess_damp_ = op.second;
      } else if (op.first=="hess_damp_fac") {
        hess_damp_fac_ = op.second;
      } else if (op.first=="hess_update") {
        hess_update_ = op.second;
      } else if (op.first=="fallback_update") {
        fallback_update_ = op.second;
      } else if (op.first=="hess_lim_mem") {
        hess_lim_mem_ = op.second;
      } else if (op.first=="hess_memsize") {
        hess_memsize_ = op.second;
      } else if (op.first=="which_second_derv") {
        which_second_derv_ = op.second;
      } else if (op.first=="skip_first_globalization") {
        skip_first_globalization_ = op.second;
      } else if (op.first=="conv_strategy") {
        conv_strategy_ = op.second;
      } else if (op.first=="max_conv_qp") {
        max_conv_qp_ = op.second;
      } else if (op.first=="max_soc_iter") {
        max_soc_iter_ = op.second;
      } else if (op.first=="gamma_theta") {
        gamma_theta_ = op.second;
      } else if (op.first=="gamma_f") {
        gamma_f_ = op.second;
      } else if (op.first=="kappa_soc") {
        kappa_soc_ = op.second;
      } else if (op.first=="kappa_f") {
        kappa_f_ = op.second;
      } else if (op.first=="theta_max") {
        theta_max_ = op.second;
      } else if (op.first=="theta_min") {
        theta_min_ = op.second;
      } else if (op.first=="delta") {
        delta_ = op.second;
      } else if (op.first=="s_theta") {
        s_theta_ = op.second;
      } else if (op.first=="s_f") {
        s_f_ = op.second;
      } else if (op.first=="kappa_minus") {
        kappa_minus_ = op.second;
      } else if (op.first=="kappa_plus") {
        kappa_plus_ = op.second;
      } else if (op.first=="kappa_plus_max") {
        kappa_plus_max_ = op.second;
      } else if (op.first=="delta_h0") {
        delta_h0_ = op.second;
      } else if (op.first=="eta") {
        eta_ = op.second;
      } else if (op.first=="obj_lo") {
        obj_lo_ = op.second;
      } else if (op.first=="obj_up") {
        obj_up_ = op.second;
      } else if (op.first=="rho") {
        rho_ = op.second;
      } else if (op.first=="zeta") {
        zeta_ = op.second;
      } else if (op.first=="print_maxit_reached") {
        print_maxit_reached_ = op.second;
      }
    }

    // If second derivatives of the constraints are computed, switch to a
    // finite difference Hessian approximation (convenience)
    if (which_second_derv_ == 2) {
      hess_update_ = 4;
      block_hess_ = true;
    }

    // If we don't use limited memory BFGS, we only need to store one vector.
    if (!hess_lim_mem_) hess_memsize_ = 1;
    if (!schur_ && hess_update_ == 1) {
      print("***WARNING: SR1 update only works with qpOASES Schur complement version. "
             "Using BFGS updates instead.\n");
      hess_update_ = 2;
      hess_scaling_ = fallback_scaling_;
    }

    // Setup feasibility restoration phase
    if (restore_feas_) {
      // Get the original NLP
      Function nlp = oracle_;
      vector<MX> resv;
      vector<MX> argv = nlp.mx_in();

      // Build the feasibility restoration phase NLP
      MX p = MX::sym("p", nlp.size1_in("x"));
      MX s = MX::sym("s", nlp.size1_out("g"));

      MX d = fmin(1.0, 1.0/abs(p)) * (argv.at(0) - p);
      MX f_rp = 0.5 * rho_ * dot(s, s) + zeta_/2.0 * dot(d, d);
      MX g_rp = nlp(argv).at(1) - s;

      MXDict nlp_rp = {{"x", MX::vertcat({argv.at(0), s})},
                       {"p", MX::vertcat({argv.at(1), p})},
                       {"f", f_rp},
                       {"g", g_rp}};
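
      // In formulas (a restatement of the expressions above, not extra
      // logic): with reference point x_ref passed in as parameter p, the
      // restoration NLP reads
      //   min_{x,s}  0.5*rho*||s||^2 + 0.5*zeta*||D*(x - x_ref)||^2
      //   s.t.       lbg <= g(x) - s <= ubg,
      // where D = diag(min(1, 1/|x_ref|)) scales the distance-to-reference
      // term.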

      // Set options for the SQP method for the restoration problem
      Dict solver_options;
      solver_options["globalization"] = true;
      solver_options["which_second_derv"] = 0;
      solver_options["restore_feas"] = false;
      solver_options["hess_update"] = 2;
      solver_options["hess_lim_mem"] = 1;
      solver_options["hess_scaling"] = 2;
      solver_options["opttol"] = opttol_;
      solver_options["nlinfeastol"] = nlinfeastol_;
      solver_options["max_iter"] = 1;
      solver_options["print_time"] = false;
      solver_options["print_header"] = false;
      solver_options["print_iteration"] = false;
      solver_options["print_maxit_reached"] = false;

      // Create and initialize solver for the restoration problem
      rp_solver_ = nlpsol("rpsolver", "blocksqp", nlp_rp, solver_options);
    }

    // Setup NLP functions
    create_function("nlp_fg", {"x", "p"}, {"f", "g"});
    Function gf_jg = create_function("nlp_gf_jg", {"x", "p"},
                                     {"f", "g", "grad:f:x", "jac:g:x"});
    Asp_ = gf_jg.sparsity_out("jac_g_x");

    if (!block_hess_) {
      // No block-structured Hessian
      blocks_ = {0, nx_};
      which_second_derv_ = 0;
      Hsp_ = Sparsity::dense(nx_, nx_);
    } else {
      // Detect block structure

      // Get the sparsity pattern for the Hessian of the Lagrangian
      Function grad_lag = oracle_.factory("grad_lag",
                                          {"x", "p", "lam:f", "lam:g"}, {"grad:gamma:x"},
                                          {{"gamma", {"f", "g"}}});
      Hsp_ = grad_lag.sparsity_jac("x", "grad_gamma_x", false, true);

      // Make sure diagonal exists
      Hsp_ = Hsp_ + Sparsity::diag(nx_);

      // Find the strongly connected components of the Hessian
      // Unlike Sparsity::scc, assume ordered
      const casadi_int* colind = Hsp_.colind();
      const casadi_int* row = Hsp_.row();
      blocks_.push_back(0);
      casadi_int ind = 0;
      while (ind < nx_) {
        // Find the next cutoff
        casadi_int next=ind+1;
        while (ind<next && ind<nx_) {
          for (casadi_int k=colind[ind]; k<colind[ind+1]; ++k) next = max(next, 1+row[k]);
          ind++;
        }
        blocks_.push_back(next);
      }
    }
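
    // Worked example (a sketch): for nx_ = 5 and a Hessian sparsity pattern
    //   [x x . . .]
    //   [x x . . .]
    //   [. . x . .]
    //   [. . . x x]
    //   [. . . x x]
    // the scan above yields blocks_ = {0, 2, 3, 5}, i.e. three diagonal
    // blocks of sizes 2, 1 and 2.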

    // Number of blocks
    nblocks_ = blocks_.size()-1;

    // Blocksizes
    dim_.resize(nblocks_);
    casadi_int max_size = 0;
    nnz_H_ = 0;
    for (casadi_int i=0; i<nblocks_; ++i) {
      dim_[i] = blocks_[i+1]-blocks_[i];
      max_size = max(max_size, dim_[i]);
      nnz_H_ += dim_[i]*dim_[i];
    }

    create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
                    {"hess:gamma:x:x"}, {{"gamma", {"f", "g"}}});
    exact_hess_lag_sp_ = get_function("nlp_hess_l").sparsity_out(0);

    if (verbose_) casadi_message(str(nblocks_) + " blocks of max size " + str(max_size) + ".");

    // Allocate a QP solver
    //casadi_assert(!qpsol_plugin.empty(), "'qpsol' option has not been set");
    //qpsol_ = conic("qpsol", qpsol_plugin, {{"h", Hsp_}, {"a", Asp_}},
    //               qpsol_options);
    //alloc(qpsol_);

    // Allocate memory
    alloc_w(Asp_.nnz(), true); // jac
    alloc_w(nx_, true); // lam_xk
    alloc_w(ng_, true); // lam_gk
    alloc_w(ng_, true); // gk
    alloc_w(nx_, true); // grad_fk
    alloc_w(nx_, true); // grad_lagk
    alloc_w(nx_+ng_, true); // lam_qp
    alloc_w(nblocks_, true); // delta_norm
    alloc_w(nblocks_, true); // delta_norm_old
    alloc_w(nblocks_, true); // delta_gamma
    alloc_w(nblocks_, true); // delta_gamma_old
    alloc_w(nblocks_, true); // delta_h
    alloc_w(nx_, true); // trial_xk
    alloc_w(nx_, true); // lbx_qp
    alloc_w(nx_, true); // ubx_qp
    alloc_w(ng_, true); // lba_qp
    alloc_w(ng_, true); // uba_qp
    alloc_w(ng_, true); // jac_times_dxk
    alloc_w(nx_*hess_memsize_, true); // deltaMat
    alloc_w(nx_*hess_memsize_, true); // gammaMat
    alloc_w(Asp_.nnz(), true); // jac_g
    alloc_w(nnz_H_, true); // hess_lag
    alloc_iw(nblocks_, true); // noUpdateCounter

    // Allocate block diagonal Hessian(s)
    casadi_int n_hess = hess_update_==1 || hess_update_==4 ? 2 : 1;
    alloc_res(nblocks_*n_hess, true);
    alloc_w(n_hess*nnz_H_, true);
    alloc_iw(nnz_H_ + (nx_+1) + nx_, true); // hessIndRow
    alloc_w(exact_hess_lag_sp_.nnz(), true); // exact_hess_lag
  }

  int Blocksqp::init_mem(void* mem) const {
    if (Nlpsol::init_mem(mem)) return 1;
    auto m = static_cast<BlocksqpMemory*>(mem);

    // Create qpOASES memory
    if (schur_) {
      m->qpoases_mem = new QpoasesMemory();
      m->qpoases_mem->linsol_plugin = linsol_plugin_;
    }

    m->qp = nullptr;
    m->colind.resize(Asp_.size2()+1);
    m->row.resize(Asp_.nnz());
    return 0;
  }

  void Blocksqp::set_work(void* mem, const double**& arg, double**& res,
                          casadi_int*& iw, double*& w) const {
    auto m = static_cast<BlocksqpMemory*>(mem);

    // Set work in base classes
    Nlpsol::set_work(mem, arg, res, iw, w);

    // Temporary memory
    m->jac = w; w += Asp_.nnz();
    m->lam_xk = w; w += nx_;
    m->lam_gk = w; w += ng_;
    m->gk = w; w += ng_;
    m->grad_fk = w; w += nx_;
    m->grad_lagk = w; w += nx_;
    m->lam_qp = w; w += nx_+ng_;
    m->delta_norm = w; w += nblocks_;
    m->delta_norm_old = w; w += nblocks_;
    m->delta_gamma = w; w += nblocks_;
    m->delta_gamma_old = w; w += nblocks_;
    m->delta_h = w; w += nblocks_;
    m->trial_xk = w; w += nx_;
    m->lbx_qp = w; w += nx_;
    m->ubx_qp = w; w += nx_;
    m->lba_qp = w; w += ng_;
    m->uba_qp = w; w += ng_;
    m->jac_times_dxk = w; w += ng_;
    m->deltaMat = w; w += nx_*hess_memsize_;
    m->gammaMat = w; w += nx_*hess_memsize_;
    m->jac_g = w; w += Asp_.nnz();
    m->hess_lag = w; w += nnz_H_;
    m->hessIndRow = reinterpret_cast<int*>(iw); iw += nnz_H_ + (nx_+1) + nx_;
    m->noUpdateCounter = iw; iw += nblocks_;

    // First Hessian
    m->hess1 = res; res += nblocks_;
    for (casadi_int b=0; b<nblocks_; b++) {
      m->hess1[b] = w; w += dim_[b]*dim_[b];
    }

    // Second Hessian, for SR1 or finite differences
    if (hess_update_ == 1 || hess_update_ == 4) {
      m->hess2 = res; res += nblocks_;
      for (casadi_int b=0; b<nblocks_; b++) {
        m->hess2[b] = w; w += dim_[b]*dim_[b];
      }
    } else {
      m->hess2 = nullptr;
    }

    m->exact_hess_lag = w; w += exact_hess_lag_sp_.nnz();
  }

  int Blocksqp::solve(void* mem) const {
    auto m = static_cast<BlocksqpMemory*>(mem);
    auto d_nlp = &m->d_nlp;

    casadi_int ret = 0;

    // Create problem evaluation object
    vector<casadi_int> blocks = blocks_;

    /*-------------------------------------------------*/
    /* Create blockSQP method object and run algorithm */
    /*-------------------------------------------------*/
    m->itCount = 0;
    m->qpItTotal = 0;
    m->qpIterations = 0;
    m->qpIterations2 = 0;
    m->qpResolve = 0;
    m->rejectedSR1 = 0;
    m->hessSkipped = 0;
    m->hessDamped = 0;
    m->averageSizingFactor = 0.0;
    m->nFunCalls = 0;
    m->nDerCalls = 0;
    m->nRestHeurCalls = 0;
    m->nRestPhaseCalls = 0;

    m->nTotalUpdates = 0;
    m->nTotalSkippedUpdates = 0;

    casadi_int maxblocksize = 1;

    for (casadi_int k=0; k<nblocks_+1; k++) {
      if (k > 0)
        if (blocks_[k] - blocks_[k-1] > maxblocksize)
          maxblocksize = blocks_[k] - blocks_[k-1];
    }

    if (hess_lim_mem_ && hess_memsize_ == 0)
      const_cast<Blocksqp*>(this)->hess_memsize_ = maxblocksize;

    // Reset the SQP method
    reset_sqp(m);

    // Free existing memory, if any
    if (qp_init_) {
      delete m->qp;
      m->qp = nullptr;
    }
    if (!m->qp) {
      if (schur_) {
        m->qp = new qpOASES::SQProblemSchur(nx_, ng_, qpOASES::HST_UNKNOWN, 50,
                                            m->qpoases_mem,
                                            QpoasesInterface::qpoases_init,
                                            QpoasesInterface::qpoases_sfact,
                                            QpoasesInterface::qpoases_nfact,
                                            QpoasesInterface::qpoases_solve);
      } else {
        m->qp = new qpOASES::SQProblem(nx_, ng_);
      }
    }

    // Print header and information about the algorithmic parameters
    if (print_header_) printInfo(m);

    // Open output files
    initStats(m);
    initIterate(m);

    // Initialize filter with pair (maxConstrViolation, objLowerBound)
    initializeFilter(m);

    // Primal-dual initial guess
    casadi_copy(d_nlp->lam, nx_, m->lam_xk);
    casadi_scal(nx_, -1., m->lam_xk);
    casadi_copy(d_nlp->lam + nx_, ng_, m->lam_gk);
    casadi_scal(ng_, -1., m->lam_gk);

    casadi_copy(m->lam_xk, nx_, m->lam_qp);
    casadi_copy(m->lam_gk, ng_, m->lam_qp+nx_);

    ret = run(m, max_iter_, warmstart_);

    m->success = ret==0;
    m->ret_ = ret;

    if (ret==1) {
      if (print_maxit_reached_) print("***WARNING: Maximum number of iterations reached\n");
      m->unified_return_status = SOLVER_RET_LIMITED;
    }


    // Get optimal cost
    d_nlp->f = m->obj;
    // Get constraints at solution
    casadi_copy(m->gk, ng_, d_nlp->z + nx_);
    // Get dual solution (simple bounds)
    if (d_nlp->lam) {
      casadi_copy(m->lam_xk, nx_, d_nlp->lam);
      casadi_scal(nx_, -1., d_nlp->lam);
    }
    // Get dual solution (nonlinear bounds)
    casadi_copy(m->lam_gk, ng_, d_nlp->lam + nx_);
    casadi_scal(ng_, -1., d_nlp->lam + nx_);
    return 0;
  }

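  // Return code convention of run() (as read off the return statements
  // below): 0 means converged, 1 means no convergence within the iteration
  // budget (mapped to SOLVER_RET_LIMITED by solve()), and -1 signals an
  // unrecoverable QP, line search or evaluation error.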
  casadi_int Blocksqp::run(BlocksqpMemory* m, casadi_int maxIt, casadi_int warmStart) const {
    casadi_int it, infoQP = 0;
    bool skipLineSearch = false;
    bool hasConverged = false;

    if (warmStart == 0 || m->itCount == 0) {
      // SQP iteration 0

      /// Set initial Hessian approximation
      calcInitialHessian(m);

      /// Evaluate all functions and gradients for xk_0
      switch (evaluate(m, &m->obj, m->gk, m->grad_fk, m->jac_g)) {
        case -1:
          m->unified_return_status = SOLVER_RET_NAN;
          return -1;
        case 0:
          break;
        default:
          return 1;
      }
      m->nDerCalls++;

      /// Check if converged
      hasConverged = calcOptTol(m);
      if (print_iteration_) printProgress(m);
      updateStats(m);
      if (hasConverged) {
        if (print_iteration_ && m->steptype < 2) {
          print("\n***CONVERGENCE ACHIEVED!***\n");
        }
        return 0;
      }
      m->itCount++;
    }

    /*
     * SQP Loop: during first iteration, m->itCount = 1
     */
    for (it=0; it<maxIt; it++) {
      /// Solve QP subproblem with qpOASES or QPOPT
      updateStepBounds(m, 0);
      infoQP = solveQP(m, m->dxk, m->lam_qp);

      if (infoQP == 1) {
        // 1.) Maximum number of iterations reached
        print("***WARNING: Maximum number of QP iterations exceeded.***\n");
      } else if (infoQP == 2 || infoQP > 3) {
        // 2.) QP error (e.g., unbounded), solve again with pos.def. diagonal matrix (identity)
        print("***WARNING: QP error. Solve again with identity matrix.***\n");
        resetHessian(m);
        infoQP = solveQP(m, m->dxk, m->lam_qp);
        if (infoQP) {
          // If there is still an error, terminate.
          print("***WARNING: QP error. Stop.***\n");
          return -1;
        } else {
          m->steptype = 1;
        }
      } else if (infoQP == 3) {
        // 3.) QP infeasible, try to restore feasibility
        bool qpError = true;
        skipLineSearch = true; // don't do line search with restoration step

        // Try to reduce constraint violation by heuristic
        if (m->steptype < 2) {
          print("***WARNING: QP infeasible. Trying to reduce constraint violation ...");
          qpError = feasibilityRestorationHeuristic(m);
          if (!qpError) {
            m->steptype = 2;
            print("Success.***\n");
          } else {
            print("Failure.***\n");
          }
        }

        // Invoke feasibility restoration phase
        //if (qpError && m->steptype < 3 && restore_feas_)
        if (qpError && restore_feas_ && m->cNorm > 0.01 * nlinfeastol_) {
          print("***Start feasibility restoration phase.***\n");
          m->steptype = 3;
          qpError = feasibilityRestorationPhase(m);
        }

        // If everything failed, abort.
        if (qpError) {
          print("***WARNING: QP error. Stop.\n");
          return -1;
        }
      }

      /// Determine steplength alpha
      if (!globalization_ || (skip_first_globalization_ && m->itCount == 1)) {
        // No globalization strategy, but reduce step if function cannot be evaluated
        if (fullstep(m)) {
          print("***WARNING: Constraint or objective could "
                "not be evaluated at new point. Stop.***\n");
          return -1;
        }
        m->steptype = 0;
      } else if (globalization_ && !skipLineSearch) {
        // Filter line search based on Waechter et al., 2006 (Ipopt paper)
        if (filterLineSearch(m) || m->reducedStepCount > max_consec_reduced_steps_) {
          // Filter line search did not produce a step. Now there are a few things we can try ...
          bool lsError = true;

          // Heuristic 1: Check if the full step reduces the KKT error by at
          // least kappaF, if so, accept the step.
          lsError = kktErrorReduction(m);
          if (!lsError)
            m->steptype = -1;

          // Heuristic 2: Try to reduce constraint violation by closing
          // continuity gaps to produce an admissible iterate
          if (lsError && m->cNorm > 0.01 * nlinfeastol_ && m->steptype < 2) {
            // Don't do this twice in a row!
            print("***WARNING: Steplength too short. "
                  "Trying to reduce constraint violation...");

            // Integration over whole time interval
            lsError = feasibilityRestorationHeuristic(m);
            if (!lsError) {
              m->steptype = 2;
              print("Success.***\n");
            } else {
              print("***WARNING: Failed.***\n");
            }
          }

          // Heuristic 3: Recompute step with a diagonal Hessian
          if (lsError && m->steptype != 1 && m->steptype != 2) {
            // After closing continuity gaps, we already take a step with initial Hessian.
            // If this step is not accepted then this will cause an infinite loop!

            print("***WARNING: Steplength too short. "
                  "Trying to find a new step with identity Hessian.***\n");
            m->steptype = 1;

            resetHessian(m);
            continue;
          }

          // If this does not yield a successful step, start restoration phase
          if (lsError && m->cNorm > 0.01 * nlinfeastol_ && restore_feas_) {
            print("***WARNING: Steplength too short. "
                  "Start feasibility restoration phase.***\n");
            m->steptype = 3;

            // Solve NLP with minimum norm objective
            lsError = feasibilityRestorationPhase(m);
          }

          // If everything failed, abort.
          if (lsError) {
            print("***WARNING: Line search error. Stop.***\n");
            return -1;
          }
        } else {
          m->steptype = 0;
        }
      }

      /// Calculate "old" Lagrange gradient: gamma = dL(xi_k, lambda_k+1)
      calcLagrangeGradient(m, m->gamma, 0);

      /// Evaluate functions and gradients at the new xk
      (void)evaluate(m, &m->obj, m->gk, m->grad_fk, m->jac_g);
      m->nDerCalls++;

      /// Check if converged
      hasConverged = calcOptTol(m);

      /// Print one line of output for the current iteration
      if (print_iteration_) printProgress(m);
      updateStats(m);
      if (hasConverged && m->steptype < 2) {
        if (print_iteration_) print("\n***CONVERGENCE ACHIEVED!***\n");
        m->itCount++;
        return 0; // Convergence achieved!
      }

      /// Calculate difference of old and new Lagrange gradient:
      // gamma = -gamma + dL(xi_k+1, lambda_k+1)
      calcLagrangeGradient(m, m->gamma, 1);

      /// Revise Hessian approximation
      if (hess_update_ < 4 && !hess_lim_mem_) {
        calcHessianUpdate(m, hess_update_, hess_scaling_);
      } else if (hess_update_ < 4 && hess_lim_mem_) {
        calcHessianUpdateLimitedMemory(m, hess_update_, hess_scaling_);
      } else if (hess_update_ == 4) {
        calcHessianUpdateExact(m);
      }

      // If limited memory updates are used, set pointers deltaXi and
      // gamma to the next column in deltaMat and gammaMat
      updateDeltaGamma(m);

      m->itCount++;
      skipLineSearch = false;
    }

    return 1;
  }

  /**
   * Compute gradient of Lagrangian or difference of Lagrangian gradients (sparse version)
   *
   * flag == 0: output dL(xk, lambda)
   * flag == 1: output dL(xi_k+1, lambda_k+1) - dL(xi_k, lambda_k+1)
   * flag == 2: output dL(xi_k+1, lambda_k+1) - df(xi)
   */
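  // In formulas (restating what the loops below compute):
  //   grad_lag = grad_f - J_g^T * lam_g - lam_x
  // where J_g is the constraint Jacobian whose nonzeros are passed
  // column-compressed in jacNz, with the sparsity pattern of Asp_.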
  void Blocksqp::
  calcLagrangeGradient(BlocksqpMemory* m,
    const double* lam_x, const double* lam_g,
    const double* grad_f, const double *jacNz,
    double* grad_lag, casadi_int flag) const {

    // Objective gradient
    if (flag == 0) {
      casadi_copy(grad_f, nx_, grad_lag);
    } else if (flag == 1) {
      casadi_scal(nx_, -1., grad_lag);
      casadi_axpy(nx_, 1., grad_f, grad_lag);
    } else {
      casadi_fill(grad_lag, nx_, 0.);
    }

    // - lambdaT * constrJac
    const casadi_int* jacIndRow = Asp_.row();
    const casadi_int* jacIndCol = Asp_.colind();
    for (casadi_int iVar=0; iVar<nx_; iVar++) {
      for (casadi_int iCon=jacIndCol[iVar]; iCon<jacIndCol[iVar+1]; iCon++) {
        grad_lag[iVar] -= lam_g[jacIndRow[iCon]] * jacNz[iCon];
      }
    }

    // - lambdaT * simpleBounds
    casadi_axpy(nx_, -1., lam_x, grad_lag);
  }

  /**
   * Wrapper if called with standard arguments
   */
  void Blocksqp::
  calcLagrangeGradient(BlocksqpMemory* m, double* grad_lag, casadi_int flag) const {
    calcLagrangeGradient(m, m->lam_xk, m->lam_gk, m->grad_fk, m->jac_g,
      grad_lag, flag);
  }


  /**
   * Compute optimality conditions:
   * ||grad_lag(xk,lambda)||_infty / (1 + ||lambda||_infty) <= TOL
   * and
   * ||constrViolation||_infty / (1 + ||xi||_infty) <= TOL
   */
  bool Blocksqp::calcOptTol(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    // scaled norm of Lagrangian gradient
    calcLagrangeGradient(m, m->grad_lagk, 0);
    m->gradNorm = casadi_norm_inf(nx_, m->grad_lagk);
    m->tol = m->gradNorm/(1.0+fmax(casadi_norm_inf(nx_, m->lam_xk),
                                   casadi_norm_inf(ng_, m->lam_gk)));

    // norm of constraint violation
    m->cNorm  = lInfConstraintNorm(m, d_nlp->z, m->gk);
    m->cNormS = m->cNorm /(1.0 + casadi_norm_inf(nx_, d_nlp->z));

    return m->tol <= opttol_ && m->cNormS <= nlinfeastol_;
  }

  void Blocksqp::printInfo(BlocksqpMemory* m) const {
    char hessString1[100];
    char hessString2[100];
    char globString[100];
    char qpString[100];

    /* QP Solver */
    if (schur_)
      strcpy(qpString, "sparse, Schur complement approach");
    else
      strcpy(qpString, "sparse, reduced Hessian factorization");

    /* Globalization */
    if (globalization_) {
      strcpy(globString, "filter line search");
    } else {
      strcpy(globString, "none (full step)");
    }

    /* Hessian approximation */
    if (block_hess_ && (hess_update_ == 1 || hess_update_ == 2))
      strcpy(hessString1, "block ");
    else
      strcpy(hessString1, "");

    if (hess_lim_mem_ && (hess_update_ == 1 || hess_update_ == 2))
      strcat(hessString1, "L-");

    /* Fallback Hessian */
    if (hess_update_ == 1 || hess_update_ == 4
        || (hess_update_ == 2 && !hess_damp_)) {
      strcpy(hessString2, hessString1);

      /* Fallback Hessian update type */
      if (fallback_update_ == 0) {
        strcat(hessString2, "Id");
      } else if (fallback_update_ == 1) {
        strcat(hessString2, "SR1");
      } else if (fallback_update_ == 2) {
        strcat(hessString2, "BFGS");
      } else if (fallback_update_ == 4) {
        strcat(hessString2, "Finite differences");
      }

      /* Fallback Hessian scaling */
      if (fallback_scaling_ == 1) {
        strcat(hessString2, ", SP");
      } else if (fallback_scaling_ == 2) {
        strcat(hessString2, ", OL");
      } else if (fallback_scaling_ == 3) {
        strcat(hessString2, ", mean");
      } else if (fallback_scaling_ == 4) {
        strcat(hessString2, ", selective sizing");
      }
    } else {
      strcpy(hessString2, "-");
    }

    /* First Hessian update type */
    if (hess_update_ == 0) {
      strcat(hessString1, "Id");
    } else if (hess_update_ == 1) {
      strcat(hessString1, "SR1");
    } else if (hess_update_ == 2) {
      strcat(hessString1, "BFGS");
    } else if (hess_update_ == 4) {
      strcat(hessString1, "Finite differences");
    }

    /* First Hessian scaling */
    if (hess_scaling_ == 1) {
      strcat(hessString1, ", SP");
    } else if (hess_scaling_ == 2) {
      strcat(hessString1, ", OL");
    } else if (hess_scaling_ == 3) {
      strcat(hessString1, ", mean");
    } else if (hess_scaling_ == 4) {
      strcat(hessString1, ", selective sizing");
    }

    print("\n+---------------------------------------------------------------+\n");
    print("| Starting blockSQP with the following algorithmic settings:    |\n");
    print("+---------------------------------------------------------------+\n");
    print("| qpOASES flavor            | %-34s|\n", qpString);
    print("| Globalization             | %-34s|\n", globString);
    print("| 1st Hessian approximation | %-34s|\n", hessString1);
    print("| 2nd Hessian approximation | %-34s|\n", hessString2);
    print("+---------------------------------------------------------------+\n\n");
  }

  void Blocksqp::
  acceptStep(BlocksqpMemory* m, double alpha) const {
    acceptStep(m, m->dxk, m->lam_qp, alpha, 0);
  }

  void Blocksqp::
  acceptStep(BlocksqpMemory* m, const double* deltaXi,
    const double* lambdaQP, double alpha, casadi_int nSOCS) const {
    auto d_nlp = &m->d_nlp;
    double lStpNorm;

    // Current alpha
    m->alpha = alpha;
    m->nSOCS = nSOCS;

    // Set new x by accepting the current trial step
    for (casadi_int k=0; k<nx_; k++) {
      d_nlp->z[k] = m->trial_xk[k];
      m->dxk[k] = alpha * deltaXi[k];
    }

    // Store the infinity norm of the multiplier step
    m->lambdaStepNorm = 0.0;
    for (casadi_int k=0; k<nx_; k++)
      if ((lStpNorm = fabs(alpha*lambdaQP[k] - alpha*m->lam_xk[k])) > m->lambdaStepNorm)
        m->lambdaStepNorm = lStpNorm;
    for (casadi_int k=0; k<ng_; k++)
      if ((lStpNorm = fabs(alpha*lambdaQP[nx_+k] - alpha*m->lam_gk[k])) > m->lambdaStepNorm)
        m->lambdaStepNorm = lStpNorm;

    // Set new multipliers
    for (casadi_int k=0; k<nx_; k++)
      m->lam_xk[k] = (1.0 - alpha)*m->lam_xk[k] + alpha*lambdaQP[k];
    for (casadi_int k=0; k<ng_; k++)
      m->lam_gk[k] = (1.0 - alpha)*m->lam_gk[k] + alpha*lambdaQP[nx_+k];

    // Count consecutive reduced steps
    if (m->alpha < 1.0)
      m->reducedStepCount++;
    else
      m->reducedStepCount = 0;
  }

  void Blocksqp::
  reduceStepsize(BlocksqpMemory* m, double *alpha) const {
    *alpha = (*alpha) * 0.5;
  }

  void Blocksqp::
  reduceSOCStepsize(BlocksqpMemory* m, double *alphaSOC) const {
    auto d_nlp = &m->d_nlp;
    // Update bounds on linearized constraints for the next SOC QP:
    // That is different from the update for the first SOC QP!
    for (casadi_int i=0; i<ng_; i++) {
      double lbg = d_nlp->lbz[i + nx_];
      double ubg = d_nlp->ubz[i + nx_];
      if (lbg != inf) {
        m->lba_qp[i] = *alphaSOC * m->lba_qp[i] - m->gk[i];
      } else {
        m->lba_qp[i] = inf;
      }

      if (ubg != inf) {
        m->uba_qp[i] = *alphaSOC * m->uba_qp[i] - m->gk[i];
      } else {
        m->uba_qp[i] = inf;
      }
    }

    *alphaSOC *= 0.5;
  }

  /**
   * Take a full Quasi-Newton step, except when integrator fails:
   * xk = xk + deltaXi
   * lambda = lambdaQP
   */
  casadi_int Blocksqp::fullstep(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    double alpha;
    double objTrial, cNormTrial;

    // Backtracking line search
    alpha = 1.0;
    for (casadi_int k=0; k<10; k++) {
      // Compute new trial point
      for (casadi_int i=0; i<nx_; i++)
        m->trial_xk[i] = d_nlp->z[i] + alpha * m->dxk[i];

      // Compute problem functions at trial point
      casadi_int info = evaluate(m, m->trial_xk, &objTrial, m->gk);
      m->nFunCalls++;
      cNormTrial = lInfConstraintNorm(m, m->trial_xk, m->gk);
      // Reduce step if evaluation fails, if lower bound is violated
      // or if objective or a constraint is NaN
      if (info != 0 || objTrial < obj_lo_ || objTrial > obj_up_
          || !(objTrial == objTrial) || !(cNormTrial == cNormTrial)) {
        print("info=%i, objTrial=%g\n", info, objTrial);
        // evaluation error, reduce stepsize
        reduceStepsize(m, &alpha);
        continue;
      } else {
        acceptStep(m, alpha);
        return 0;
      }
    }
    return 1;
  }

  /**
   *
   * Backtracking line search based on a filter
   * as described in Ipopt paper (Waechter 2006)
   *
   */
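  // Acceptance logic implemented below, in formulas (with theta denoting the
  // constraint violation): a trial point is accepted if
  //   theta(x_trial) <= (1 - gamma_theta) * theta(x_k)   or
  //   f(x_trial)     <= f(x_k) - gamma_f * theta(x_k),
  // unless the switching condition holds near feasibility, in which case an
  // Armijo condition on the objective is required instead.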
  casadi_int Blocksqp::filterLineSearch(BlocksqpMemory* m) const {
    auto d_nlp = &m->d_nlp;
    double alpha = 1.0;
    double cNormTrial=0, objTrial, dfTdeltaXi=0;

    // Compute ||constr(xi)|| at old point
    double cNorm = lInfConstraintNorm(m, d_nlp->z, m->gk);

    // Backtracking line search
    casadi_int k;
    for (k=0; k<max_line_search_; k++) {
      // Compute new trial point
      for (casadi_int i=0; i<nx_; i++)
        m->trial_xk[i] = d_nlp->z[i] + alpha * m->dxk[i];

      // Compute grad(f)^T * deltaXi
      dfTdeltaXi = 0.0;
      for (casadi_int i=0; i<nx_; i++)
        dfTdeltaXi += m->grad_fk[i] * m->dxk[i];

      // Compute objective and ||constr(trial_xk)|| at trial point
      casadi_int info = evaluate(m, m->trial_xk, &objTrial, m->gk);
      m->nFunCalls++;
      cNormTrial = lInfConstraintNorm(m, m->trial_xk, m->gk);
      // Reduce step if evaluation fails, if lower bound is violated or if objective is NaN
      if (info != 0 || objTrial < obj_lo_ || objTrial > obj_up_
          || !(objTrial == objTrial) || !(cNormTrial == cNormTrial)) {
        // evaluation error, reduce stepsize
        reduceStepsize(m, &alpha);
        continue;
      }

      // Check acceptability to the filter
      if (pairInFilter(m, cNormTrial, objTrial)) {
        // Trial point is in the prohibited region defined by
        // the filter, try second order correction
        if (secondOrderCorrection(m, cNorm, cNormTrial, dfTdeltaXi, 0, k)) {
          break; // SOC yielded suitable alpha, stop
        } else {
          reduceStepsize(m, &alpha);
          continue;
        }
      }

      // Check sufficient decrease, case I:
      // If we are (almost) feasible and a "switching condition" is satisfied,
      // require sufficient progress in the objective instead of the bi-objective condition
      if (cNorm <= theta_min_) {
        // Switching condition, part 1: grad(f)^T * deltaXi < 0 ?
        if (dfTdeltaXi < 0)
          // Switching condition, part 2: alpha * (- grad(f)^T * deltaXi)**sF
          // > delta * cNorm**sTheta ?
          if (alpha * pow((-dfTdeltaXi), s_f_)
              > delta_ * pow(cNorm, s_theta_)) {
            // Switching conditions hold: Require satisfaction of Armijo condition for objective
            if (objTrial > m->obj + eta_*alpha*dfTdeltaXi) {
              // Armijo condition violated, try second order correction
              if (secondOrderCorrection(m, cNorm, cNormTrial, dfTdeltaXi, 1, k)) {
                break; // SOC yielded suitable alpha, stop
              } else {
                reduceStepsize(m, &alpha);
                continue;
              }
            } else {
              // found suitable alpha, stop
              acceptStep(m, alpha);
              break;
            }
          }
      }

      // Check sufficient decrease, case II:
      // Bi-objective (filter) condition
      if (cNormTrial < (1.0 - gamma_theta_) * cNorm
          || objTrial < m->obj - gamma_f_ * cNorm) {
        // found suitable alpha, stop
        acceptStep(m, alpha);
        break;
      } else {
        // Trial point is dominated by current point, try second order correction
        if (secondOrderCorrection(m, cNorm, cNormTrial, dfTdeltaXi, 0, k)) {
          break; // SOC yielded suitable alpha, stop
        } else {
          reduceStepsize(m, &alpha);
          continue;
        }
      }
    }

    // No step could be found by the line search
    if (k == max_line_search_) return 1;

    // Augment the filter if switching condition or Armijo condition does not hold
    if (dfTdeltaXi >= 0) {
      augmentFilter(m, cNormTrial, objTrial);
    } else if (alpha * pow((-dfTdeltaXi), s_f_) > delta_ * pow(cNorm, s_theta_)) {
      // careful with neg. exponents!
      augmentFilter(m, cNormTrial, objTrial);
    } else if (objTrial <= m->obj + eta_*alpha*dfTdeltaXi) {
      augmentFilter(m, cNormTrial, objTrial);
    }

    return 0;
  }


  /**
   *
   * Perform a second order correction step, i.e. solve the QP:
   *
   * min_d d^TBd + d^T grad_fk
   * s.t.  bl <= A^Td + constr(xi+alpha*deltaXi) - A^TdeltaXi <= bu
   *
   */
  bool Blocksqp::
  secondOrderCorrection(BlocksqpMemory* m, double cNorm, double cNormTrial,
    double dfTdeltaXi, bool swCond, casadi_int it) const {
    auto d_nlp = &m->d_nlp;

    // Perform SOC only on the first iteration of backtracking line search
    if (it > 0) return false;
    // If constraint violation of the trial step is lower than the current one, skip SOC
    if (cNormTrial < cNorm) return false;

    casadi_int nSOCS = 0;
    double cNormTrialSOC, cNormOld, objTrialSOC;

    // m->gk contains result at first trial point: c(xi+deltaXi)
    // m->jac_times_dxk and m->grad_fk are unchanged so far.

    // First SOC step
    std::vector<double> deltaXiSOC(nx_, 0.);
    std::vector<double> lambdaQPSOC(nx_+ng_, 0.);

    // Second order correction loop
    cNormOld = cNorm;
    for (casadi_int k=0; k<max_soc_iter_; k++) {
      nSOCS++;

      // Update bounds for SOC QP
      updateStepBounds(m, 1);

      // Solve SOC QP to obtain new, corrected deltaXi
      // (store in separate vector to avoid conflict with original deltaXi
      // -> need it in linesearch!)
      casadi_int info = solveQP(m, get_ptr(deltaXiSOC), get_ptr(lambdaQPSOC), false);
      if (info != 0) return false; // Could not solve QP, abort SOC

      // Set new SOC trial point
      for (casadi_int i=0; i<nx_; i++) {
        m->trial_xk[i] = d_nlp->z[i] + deltaXiSOC[i];
      }

      // Compute objective and ||constr(trialXiSOC)|| at SOC trial point
      info = evaluate(m, m->trial_xk, &objTrialSOC, m->gk);
      m->nFunCalls++;
      cNormTrialSOC = lInfConstraintNorm(m, m->trial_xk, m->gk);
      if (info != 0 || objTrialSOC < obj_lo_ || objTrialSOC > obj_up_
          || !(objTrialSOC == objTrialSOC) || !(cNormTrialSOC == cNormTrialSOC)) {
        return false; // evaluation error, abort SOC
      }

      // Check acceptability to the filter (in SOC)
      if (pairInFilter(m, cNormTrialSOC, objTrialSOC)) {
        // Trial point is in the prohibited region defined by the filter, abort SOC
        return false;
      }

      // Check sufficient decrease, case I (in SOC)
      // (Almost feasible and switching condition holds for line search alpha)
      if (cNorm <= theta_min_ && swCond) {
        if (objTrialSOC > m->obj + eta_*dfTdeltaXi) {
          // Armijo condition does not hold for SOC step, next SOC step

          // If constraint violation gets worse by SOC, abort
          if (cNormTrialSOC > kappa_soc_ * cNormOld) {
            return false;
          } else {
            cNormOld = cNormTrialSOC;
          }
          continue;
        } else {
          // found suitable alpha during SOC, stop
          acceptStep(m, get_ptr(deltaXiSOC), get_ptr(lambdaQPSOC), 1.0, nSOCS);
          return true;
        }
      }

      // Check sufficient decrease, case II (in SOC)
      if (cNorm > theta_min_ || !swCond) {
        if (cNormTrialSOC < (1.0 - gamma_theta_) * cNorm
            || objTrialSOC < m->obj - gamma_f_ * cNorm) {
          // found suitable alpha during SOC, stop
          acceptStep(m, get_ptr(deltaXiSOC), get_ptr(lambdaQPSOC), 1.0, nSOCS);
          return true;
        } else {
          // Trial point is dominated by current point, next SOC step

          // If constraint violation gets worse by SOC, abort
          if (cNormTrialSOC > kappa_soc_ * cNormOld) {
            return false;
          } else {
            cNormOld = cNormTrialSOC;
          }
          continue;
        }
      }
    }

    return false;
  }
1429 
1430   /**
1431    * Minimize constraint violation by solving an NLP with minimum norm objective
1432    *
1433    * "The dreaded restoration phase" -- Nick Gould
1434    */
feasibilityRestorationPhase(BlocksqpMemory * m) const1435   casadi_int Blocksqp::feasibilityRestorationPhase(BlocksqpMemory* m) const {
1436     auto d_nlp = &m->d_nlp;
1437     // No Feasibility restoration phase
1438     if (!restore_feas_) return -1;
1439 
1440     m->nRestPhaseCalls++;
1441 
1442     casadi_int ret, info;
1443     casadi_int maxRestIt = 100;
1444     casadi_int warmStart;
1445     double cNormTrial, objTrial, lStpNorm;
1446 
1447 
1448     // Create Input for the minimum norm NLP
1449     DMDict solver_in;
1450 
1451     // The reference point is the starting value for the restoration phase
1452     vector<double> in_x0(d_nlp->z, d_nlp->z+nx_);
1453 
1454     // Initialize slack variables such that the constraints are feasible
1455     for (casadi_int i=0; i<ng_; i++) {
1456       if (m->gk[i] <= d_nlp->lbz[i+nx_])
1457         in_x0.push_back(m->gk[i] - d_nlp->lbz[i+nx_]);
1458       else if (m->gk[i] > d_nlp->ubz[i+nx_])
1459         in_x0.push_back(m->gk[i] - d_nlp->ubz[i+nx_]);
1460       else
1461         in_x0.push_back(0.0);
1462     }
1463 
1464     // Add current iterate xk to parameter p
1465     vector<double> in_p(d_nlp->p, d_nlp->p+np_);
1466     vector<double> in_p2(d_nlp->z, d_nlp->z+nx_);
1467     in_p.insert(in_p.end(), in_p2.begin(), in_p2.end());
1468 
1469     // Set bounds for variables
1470     vector<double> in_lbx(d_nlp->lbz, d_nlp->lbz+nx_);
1471     vector<double> in_ubx(d_nlp->ubz, d_nlp->ubz+nx_);
1472     for (casadi_int i=0; i<ng_; i++) {
1473       in_lbx.push_back(-inf);
1474       in_ubx.push_back(inf);
1475     }
1476 
1477     // Set bounds for constraints
1478     vector<double> in_lbg(d_nlp->lbz+nx_, d_nlp->lbz+nx_+ng_);
1479     vector<double> in_ubg(d_nlp->ubz+nx_, d_nlp->ubz+nx_+ng_);
1480 
1481     solver_in["x0"] = in_x0;
1482     solver_in["p"] = in_p;
1483     solver_in["lbx"] = in_lbx;
1484     solver_in["ubx"] = in_ubx;
1485     solver_in["lbg"] = in_lbg;
1486     solver_in["ubg"] = in_ubg;
1487 
1488     /*
1489 
1490       Consider the following simple call:
1491         auto solver_out = rp_solver_(solver_in);
1492 
1493       This call in fact allocates memory,
1494       calls a memory-less eval(),
1495       and cleans up the memory again.
1496 
1497       Since we want to access the memory later on,
1498       we need to unravel the simple call into its parts
1499       and avoid the memory cleanup.
1500 
1501     */
1502 
1503     // perform first iteration for the minimum norm NLP
1504 
1505     // Get the number of inputs and outputs
1506     int n_in = rp_solver_.n_in();
1507     int n_out = rp_solver_.n_out();
1508 
1509     // Get default inputs
1510     vector<DM> arg_v(n_in);
1511     for (casadi_int i=0; i<arg_v.size(); i++)
1512       arg_v[i] = DM::repmat(rp_solver_.default_in(i), rp_solver_.size1_in(i), 1);
1513 
1514     // Assign provided inputs
1515     for (auto&& e : solver_in) arg_v.at(rp_solver_.index_in(e.first)) = e.second;
1516 
1517     // Check sparsities
1518     for (casadi_int i=0; i<arg_v.size(); i++)
1519       casadi_assert_dev(arg_v[i].sparsity()==rp_solver_.sparsity_in(i));
1520 
1521     // Allocate results
1522     std::vector<DM> res(n_out);
1523     for (casadi_int i=0; i<n_out; i++) {
1524       if (res[i].sparsity()!=rp_solver_.sparsity_out(i))
1525         res[i] = DM::zeros(rp_solver_.sparsity_out(i));
1526     }
1527 
1528     // Allocate temporary memory if needed
1529     std::vector<casadi_int> iw_tmp(rp_solver_.sz_iw());
1530     std::vector<double> w_tmp(rp_solver_.sz_w());
1531 
1532     // Get pointers to input arguments
1533     std::vector<const double*> argp(rp_solver_.sz_arg());
1534     for (casadi_int i=0; i<n_in; ++i) argp[i] = get_ptr(arg_v[i]);
1535 
1536     // Get pointers to output arguments
1537     std::vector<double*> resp(rp_solver_.sz_res());
1538     for (casadi_int i=0; i<n_out; i++) resp[i] = get_ptr(res[i]);
1539 
1540     void* mem2 = rp_solver_.memory(0);
1541 
1542     // Evaluate the restoration solver, reusing its persistent memory object
1543     rp_solver_->eval(get_ptr(argp), get_ptr(resp), get_ptr(iw_tmp), get_ptr(w_tmp), mem2);
1544 
1545     // Get BlocksqpMemory and Blocksqp from restoration phase
1546     auto m2 = static_cast<BlocksqpMemory*>(mem2);
1547     const Blocksqp* bp = static_cast<const Blocksqp*>(rp_solver_.get());
1548     ret = m2->ret_;
1549 
1550     warmStart = 1;
1551     for (casadi_int it=0; it<maxRestIt; it++) {
1552       // One iteration for minimum norm NLP
1553       if (it > 0)
1554         ret = bp->run(m2, 1, warmStart);
1555 
1556       // If the restoration solver fails, stop the restoration phase
1557       if (ret == -1)
1558         break;
1559 
1560       // Get new xi from the restoration phase
1561       for (casadi_int i=0; i<nx_; i++)
1562         m->trial_xk[i] = m2->d_nlp.z[i];
1563 
1564       // Compute objective at trial point
1565       info = evaluate(m, m->trial_xk, &objTrial, m->gk);
1566       m->nFunCalls++;
1567       cNormTrial = lInfConstraintNorm(m, m->trial_xk, m->gk);
1568       if ( info != 0 || objTrial < obj_lo_ || objTrial > obj_up_ ||
1569           !(objTrial == objTrial) || !(cNormTrial == cNormTrial) ) // x != x detects NaN
1570         continue;
1571 
1572       // Is this iterate acceptable for the filter?
1573       if (!pairInFilter(m, cNormTrial, objTrial)) {
1574         // success
1575         print("Found a point acceptable for the filter.\n");
1576         ret = 0;
1577         break;
1578       }
1579 
1580       // If minimum norm NLP has converged, declare local infeasibility
1581       if (m2->tol < opttol_ && m2->cNormS < nlinfeastol_) {
1582         ret = 1;
1583         break;
1584       }
1585     }
1586 
1587     // Success or locally infeasible
1588     if (ret == 0 || ret == 1) {
1589         // Store the infinity norm of the multiplier step
1590         m->lambdaStepNorm = 0.0;
1591         // Compute restoration step
1592         for (casadi_int k=0; k<nx_; k++) {
1593             m->dxk[k] = d_nlp->z[k];
1594 
1595             d_nlp->z[k] = m->trial_xk[k];
1596 
1597             // Store lInf norm of dual step
1598             if ((lStpNorm = fabs(m2->lam_xk[k] - m->lam_xk[k])) > m->lambdaStepNorm)
1599                 m->lambdaStepNorm = lStpNorm;
1600             m->lam_xk[k] = m2->lam_xk[k];
1601             m->lam_qp[k] = m2->lam_qp[k];
1602 
1603             m->dxk[k] -= d_nlp->z[k];
1604         }
1605         for (casadi_int k=0; k<ng_; k++) {
1606             // skip the dual variables for the slack variables in the restoration problem
1607             if ((lStpNorm = fabs(m2->lam_gk[k] - m->lam_gk[k])) > m->lambdaStepNorm)
1608                 m->lambdaStepNorm = lStpNorm;
1609             m->lam_gk[k] = m2->lam_gk[k];
1610             m->lam_qp[k] = m2->lam_qp[nx_+ng_+k];
1611         }
1612         m->alpha = 1.0;
1613         m->nSOCS = 0;
1614 
1615         // reset reduced step counter
1616         m->reducedStepCount = 0;
1617 
1618         // reset Hessian and limited memory information
1619         resetHessian(m);
1620     }
1621 
1622     if (ret == 1) {
1623         if (print_iteration_) printProgress(m);
1624         updateStats(m);
1625         print("The problem seems to be locally infeasible. Infeasibilities minimized.\n");
1626     }
1627 
1628     return ret;
1629   }
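
  /* Shape of the minimum norm NLP solved by rp_solver_ (a sketch, not the
   * literal formulation): one slack s_i is added per constraint so that the
   * constraint bounds are always attainable. With x_ref the reference point
   * passed via the parameter vector above, the problem reads roughly
   *
   *   minimize_{x, s}   0.5*rho*||s||^2 + 0.5*zeta*||x - x_ref||^2
   *   subject to        lbg <= g(x) - s <= ubg,
   *
   * where the weights are assumed to correspond to the options rho_ and
   * zeta_. The slack initialization loop above picks s so that the starting
   * point (x_k, s) is feasible for the restoration problem.
   */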
1630 
1631 
1632   /**
1633    * Try to (partly) improve constraint violation by satisfying
1634    * the (pseudo) continuity constraints, i.e. do a single shooting
1635    * iteration with the current controls and measurement weights q and w
1636    */
1637   casadi_int Blocksqp::feasibilityRestorationHeuristic(BlocksqpMemory* m) const {
1638     auto d_nlp = &m->d_nlp;
1639     m->nRestHeurCalls++;
1640 
1641     // Call problem specific heuristic to reduce constraint violation.
1642     // For shooting methods that means setting consistent values for
1643     // shooting nodes by one forward integration.
1644     for (casadi_int k=0; k<nx_; k++) // input: last successful step
1645       m->trial_xk[k] = d_nlp->z[k];
1646 
1647     // FIXME(@jaeandersson) Not implemented
1648     return -1;
1649   }
1650 
1651 
1652   /**
1653    * If the line search fails, check if the full step reduces the KKT error by a factor kappaF.
1654    */
1655   casadi_int Blocksqp::kktErrorReduction(BlocksqpMemory* m) const {
1656     auto d_nlp = &m->d_nlp;
1657     casadi_int info = 0;
1658     double objTrial, cNormTrial, trialGradNorm, trialTol;
1659 
1660     // Compute new trial point
1661     for (casadi_int i=0; i<nx_; i++)
1662       m->trial_xk[i] = d_nlp->z[i] + m->dxk[i];
1663 
1664     // Compute objective and ||constr(trial_xk)|| at trial point
1665     std::vector<double> trialConstr(ng_, 0.);
1666     info = evaluate(m, m->trial_xk, &objTrial, get_ptr(trialConstr));
1667     m->nFunCalls++;
1668     cNormTrial = lInfConstraintNorm(m, m->trial_xk, get_ptr(trialConstr));
1669     if (info != 0 || objTrial < obj_lo_ || objTrial > obj_up_
1670       || !(objTrial == objTrial) || !(cNormTrial == cNormTrial)) {
1671       // evaluation error
1672       return 1;
1673     }
1674 
1675     // Compute KKT error of the new point
1676 
1677     // scaled norm of Lagrangian gradient
1678     std::vector<double> trialGradLagrange(nx_, 0.);
1679     calcLagrangeGradient(m, m->lam_qp, m->lam_qp+nx_, m->grad_fk,
1680                          m->jac_g,
1681                          get_ptr(trialGradLagrange), 0);
1682 
1683     trialGradNorm = casadi_norm_inf(nx_, get_ptr(trialGradLagrange));
1684     trialTol = trialGradNorm/(1.0+casadi_norm_inf(nx_+ng_, m->lam_qp));
1685 
1686     if (fmax(cNormTrial, trialTol) < kappa_f_ * fmax(m->cNorm, m->tol)) {
1687       acceptStep(m, 1.0);
1688       return 0;
1689     } else {
1690       return 1;
1691     }
1692   }
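
  /* The acceptance test above in formula form: with the scaled stationarity
   * measure
   *
   *   trialTol = ||grad_L(trial_xk, lam_qp)||_inf / (1 + ||lam_qp||_inf),
   *
   * the full step is taken despite the failed line search if and only if
   *
   *   max(cNormTrial, trialTol) < kappa_f_ * max(m->cNorm, m->tol).
   */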
1693 
1694   /**
1695    * Check if current entry is accepted to the filter:
1696    * (cNorm, obj) in F_k
1697    */
1698   bool Blocksqp::
1699   pairInFilter(BlocksqpMemory* m, double cNorm, double obj) const {
1700     /*
1701      * A pair is in the filter if:
1702      * - it increases the objective and
1703      * - it also increases the constraint violation
1704      * The second expression in the if-clause states that we exclude
1705      * entries that are within the feasibility tolerance, e.g.
1706      * if an entry improves the constraint violation from 1e-16 to 1e-17,
1707      * but increases the objective considerably we also think of this entry
1708      * as dominated
1709      */
1710 
1711     for (auto&& f : m->filter) {
1712       if ((cNorm >= (1.0 - gamma_theta_) * f.first ||
1713            (cNorm < 0.01 * nlinfeastol_ && f.first < 0.01 * nlinfeastol_)) &&
1714           obj >= f.second - gamma_f_ * f.first) {
1715         return 1;
1716       }
1717     }
1718 
1719     return 0;
1720   }
1721 
1722 
1723   void Blocksqp::initializeFilter(BlocksqpMemory* m) const {
1724     std::pair<double, double> initPair(theta_max_, obj_lo_);
1725 
1726     // Remove all elements
1727     m->filter.clear();
1733 
1734     // Initialize with pair (maxConstrViolation, objLowerBound);
1735     m->filter.insert(initPair);
1736   }
1737 
1738   /**
1739    * Augment the filter:
1740    * F_k+1 = F_k U { (c,f) | c > (1-gammaTheta)*cNorm and f > obj - gammaF*cNorm }
1741    */
1742   void Blocksqp::
1743   augmentFilter(BlocksqpMemory* m, double cNorm, double obj) const {
1744     std::pair<double, double> entry((1-gamma_theta_)*cNorm,
1745                                     obj-gamma_f_*cNorm);
1746 
1747     // Augment filter by current element
1748     m->filter.insert(entry);
1749 
1750     // Remove dominated elements
1751     auto iter=m->filter.begin();
1752     while (iter != m->filter.end()) {
1753       if (iter->first > entry.first && iter->second > entry.second) {
1754         auto iterToRemove = iter;
1755         iter++;
1756         m->filter.erase(iterToRemove);
1757       } else {
1758         iter++;
1759       }
1760     }
1761   }
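
  /* Tiny worked example (illustrative, with gamma_theta_ = gamma_f_ = 0.1):
   * accepting a point with cNorm = 1.0 and obj = 2.0 inserts the envelope
   * corner (0.9, 1.9). An existing entry (1.2, 2.5) is then dominated, since
   * 1.2 > 0.9 and 2.5 > 1.9, and is erased by the loop above; an entry such
   * as (0.5, 3.0) survives because it has a smaller constraint violation.
   */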
1762 
1763   /**
1764    * Initial Hessian: Identity matrix
1765    */
1766   void Blocksqp::calcInitialHessian(BlocksqpMemory* m) const {
1767     for (casadi_int b=0; b<nblocks_; b++)
1768       //if objective derv is computed exactly, don't set the last block!
1769       if (!(which_second_derv_ == 1 && block_hess_
1770         && b == nblocks_-1))
1771         calcInitialHessian(m, b);
1772   }
1773 
1774 
1775   /**
1776    * Initial Hessian for one block: Identity matrix
1777    */
1778   void Blocksqp::calcInitialHessian(BlocksqpMemory* m, casadi_int b) const {
1779     casadi_int dim = dim_[b];
1780     casadi_fill(m->hess[b], dim*dim, 0.);
1781 
1782     // Each block is a diagonal matrix
1783     for (casadi_int i=0; i<dim; i++)
1784       m->hess[b][i+i*dim] = ini_hess_diag_;
1785 
1786     // If we maintain 2 Hessians, also reset the second one
1787     if (m->hess2 != nullptr) {
1788       casadi_fill(m->hess2[b], dim*dim, 0.);
1789       for (casadi_int i=0; i<dim; i++)
1790         m->hess2[b][i+i*dim] = ini_hess_diag_;
1791     }
1792   }
1793 
1794 
1795   void Blocksqp::resetHessian(BlocksqpMemory* m) const {
1796     for (casadi_int b=0; b<nblocks_; b++) {
1797       if (!(which_second_derv_ == 1 && block_hess_ && b == nblocks_ - 1)) {
1798         // if objective derv is computed exactly, don't set the last block!
1799         resetHessian(m, b);
1800       }
1801     }
1802   }
1803 
1804 
1805   void Blocksqp::resetHessian(BlocksqpMemory* m, casadi_int b) const {
1806     casadi_int dim = dim_[b];
1807 
1808     // smallGamma and smallDelta are either subvectors of gamma and delta
1809     // or submatrices of gammaMat, deltaMat, i.e. subvectors of gamma and delta
1810     // from m prev. iterations (for L-BFGS)
1811     double *smallGamma = m->gammaMat + blocks_[b];
1812     double *smallDelta = m->deltaMat + blocks_[b];
1813 
1814     for (casadi_int i=0; i<hess_memsize_; ++i) {
1815       // Remove past information on Lagrangian gradient difference
1816       casadi_fill(smallGamma, dim, 0.);
1817       smallGamma += nx_;
1818 
1819       // Remove past information on steps
1820       casadi_fill(smallDelta, dim, 0.);
1821       smallDelta += nx_;
1822     }
1823 
1824     // Remove information on old scalars (used for COL sizing)
1825     m->delta_norm[b] = 1.0;
1826     m->delta_gamma[b] = 0.0;
1827     m->delta_norm_old[b] = 1.0;
1828     m->delta_gamma_old[b] = 0.0;
1829 
1830     m->noUpdateCounter[b] = -1;
1831 
1832     calcInitialHessian(m, b);
1833   }
1834 
1835   void Blocksqp::
1836   sizeInitialHessian(BlocksqpMemory* m, const double* gamma,
1837                      const double* delta, casadi_int b, casadi_int option) const {
1838     casadi_int dim = dim_[b];
1839     double scale;
1840     double myEps = 1.0e3 * eps_;
1841 
1842     if (option == 1) {
1843       // Shanno-Phua
1844       scale = casadi_dot(dim, gamma, gamma)
1845         / fmax(casadi_dot(dim, delta, gamma), myEps);
1846     } else if (option == 2) {
1847       // Oren-Luenberger
1848       scale = casadi_dot(dim, delta, gamma)
1849         / fmax(casadi_dot(dim, delta, delta), myEps);
1850       scale = fmin(scale, 1.0);
1851     } else if (option == 3) {
1852       // Geometric mean of 1 and 2
1853       scale = sqrt(casadi_dot(dim, gamma, gamma)
1854         / fmax(casadi_dot(dim, delta, delta), myEps));
1855     } else {
1856       // Invalid option, ignore
1857       return;
1858     }
1859 
1860     if (scale > 0.0) {
1861       scale = fmax(scale, myEps);
1862       for (casadi_int i=0; i<dim; i++)
1863         for (casadi_int j=0; j<dim; j++)
1864           m->hess[b][i+j*dim] *= scale;
1865     } else {
1866       scale = 1.0;
1867     }
1868 
1869     // statistics: average sizing factor
1870     m->averageSizingFactor += scale;
1871   }
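
  /* The three sizing factors above in the usual quasi-Newton notation, with
   * s = delta and y = gamma:
   *
   *   option 1 (Shanno-Phua):      sigma = y'y / s'y
   *   option 2 (Oren-Luenberger):  sigma = min(1, s'y / s's)
   *   option 3:                    sigma = sqrt(y'y / s's)
   *
   * Option 3 is the geometric mean of options 1 and 2 (without the cap),
   * since (y'y / s'y) * (s'y / s's) = y'y / s's.
   */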
1872 
1873 
1874   void Blocksqp::
1875   sizeHessianCOL(BlocksqpMemory* m, const double* gamma,
1876                  const double* delta, casadi_int b) const {
1877     casadi_int dim = dim_[b];
1878     double theta, scale, myEps = 1.0e3 * eps_;
1879     double deltaNorm, deltaNormOld, deltaGamma, deltaGammaOld, deltaBdelta;
1880 
1881     // Get sTs, sTs_, sTy, sTy_, sTBs
1882     deltaNorm = m->delta_norm[b];
1883     deltaGamma = m->delta_gamma[b];
1884     deltaNormOld = m->delta_norm_old[b];
1885     deltaGammaOld = m->delta_gamma_old[b];
1886     deltaBdelta = 0.0;
1887     for (casadi_int i=0; i<dim; i++)
1888       for (casadi_int j=0; j<dim; j++)
1889         deltaBdelta += delta[i] * m->hess[b][i+j*dim] * delta[j];
1890 
1891     // Centered Oren-Luenberger factor
1892     if (m->noUpdateCounter[b] == -1) {
1893       // in the first iteration, this should equal the OL factor
1894       theta = 1.0;
1895     } else {
1896       theta = fmin(col_tau1_, col_tau2_ * deltaNorm);
1897     }
1898     if (deltaNorm > myEps && deltaNormOld > myEps) {
1899       scale = (1.0 - theta)*deltaGammaOld / deltaNormOld + theta*deltaBdelta / deltaNorm;
1900       if (scale > eps_)
1901         scale = ((1.0 - theta)*deltaGammaOld / deltaNormOld + theta*deltaGamma / deltaNorm) / scale;
1902     } else {
1903       scale = 1.0;
1904     }
1905 
1906     // Size only if factor is between zero and one
1907     if (scale < 1.0 && scale > 0.0) {
1908       scale = fmax(col_eps_, scale);
1909       //print("Sizing value (COL) block %i = %g\n", b, scale);
1910       for (casadi_int i=0; i<dim; i++)
1911         for (casadi_int j=0; j<dim; j++)
1912           m->hess[b][i+j*dim] *= scale;
1913 
1914       // statistics: average sizing factor
1915       m->averageSizingFactor += scale;
1916     } else {
1917       m->averageSizingFactor += 1.0;
1918     }
1919   }
1920 
1921   /**
1922    * Apply BFGS or SR1 update blockwise and size blocks
1923    */
1924   void Blocksqp::
1925   calcHessianUpdate(BlocksqpMemory* m, casadi_int updateType, casadi_int hessScaling) const {
1926     casadi_int nBlocks;
1927     bool firstIter;
1928 
1929     //if objective derv is computed exactly, don't set the last block!
1930     if (which_second_derv_ == 1 && block_hess_)
1931       nBlocks = nblocks_ - 1;
1932     else
1933       nBlocks = nblocks_;
1934 
1935     // Statistics: how often is damping active, what is the average COL sizing factor?
1936     m->hessDamped = 0;
1937     m->averageSizingFactor = 0.0;
1938 
1939     for (casadi_int b=0; b<nBlocks; b++) {
1940       casadi_int dim = dim_[b];
1941 
1942       // smallGamma and smallDelta are subvectors of gamma and delta,
1943       // corresponding to the partially separable structure
1944       double* smallGamma = m->gammaMat + blocks_[b];
1945       double* smallDelta = m->deltaMat + blocks_[b];
1946 
1947       // Is this the first iteration or the first after a Hessian reset?
1948       firstIter = (m->noUpdateCounter[b] == -1);
1949 
1950       // Update sTs, sTs_ and sTy, sTy_
1951       m->delta_norm_old[b] = m->delta_norm[b];
1952       m->delta_gamma_old[b] = m->delta_gamma[b];
1953       m->delta_norm[b] = casadi_dot(dim, smallDelta, smallDelta);
1954       m->delta_gamma[b] = casadi_dot(dim, smallDelta, smallGamma);
1955 
1956       // Sizing before the update
1957       if (hessScaling < 4 && firstIter)
1958         sizeInitialHessian(m, smallGamma, smallDelta, b, hessScaling);
1959       else if (hessScaling == 4)
1960         sizeHessianCOL(m, smallGamma, smallDelta, b);
1961 
1962       // Compute the new update
1963       if (updateType == 1) {
1964         calcSR1(m, smallGamma, smallDelta, b);
1965 
1966         // Prepare to compute fallback update as well
1967         m->hess = m->hess2;
1968 
1969         // Sizing the fallback update
1970         if (fallback_scaling_ < 4 && firstIter)
1971           sizeInitialHessian(m, smallGamma, smallDelta, b, fallback_scaling_);
1972         else if (fallback_scaling_ == 4)
1973           sizeHessianCOL(m, smallGamma, smallDelta, b);
1974 
1975         // Compute fallback update
1976         if (fallback_update_ == 2)
1977           calcBFGS(m, smallGamma, smallDelta, b);
1978 
1979         // Reset pointer
1980         m->hess = m->hess1;
1981       } else if (updateType == 2) {
1982         calcBFGS(m, smallGamma, smallDelta, b);
1983       }
1984 
1985       // If an update is skipped too often, reset the Hessian block
1986       if (m->noUpdateCounter[b] > max_consec_skipped_updates_) {
1987         resetHessian(m, b);
1988       }
1989     }
1990 
1991     // statistics: average sizing factor
1992     m->averageSizingFactor /= nBlocks;
1993   }
1994 
1995 
1996   void Blocksqp::
1997   calcHessianUpdateLimitedMemory(BlocksqpMemory* m,
1998       casadi_int updateType, casadi_int hessScaling) const {
1999     casadi_int nBlocks;
2000     casadi_int m2, pos, posOldest, posNewest;
2001     casadi_int hessDamped, hessSkipped;
2002     double averageSizingFactor;
2003 
2004     //if objective derv is computed exactly, don't set the last block!
2005     if (which_second_derv_ == 1 && block_hess_) {
2006       nBlocks = nblocks_ - 1;
2007     } else {
2008       nBlocks = nblocks_;
2009     }
2010 
2011     // Statistics: how often is damping active, what is the average COL sizing factor?
2012     m->hessDamped = 0;
2013     m->hessSkipped = 0;
2014     m->averageSizingFactor = 0.0;
2015 
2016     for (casadi_int b=0; b<nBlocks; b++) {
2017       casadi_int dim = dim_[b];
2018 
2019       // smallGamma and smallDelta are submatrices of gammaMat, deltaMat,
2020       // i.e. subvectors of gamma and delta from m prev. iterations
2021       double *smallGamma = m->gammaMat + blocks_[b];
2022       double *smallDelta = m->deltaMat + blocks_[b];
2023 
2024       // Memory structure
2025       if (m->itCount > hess_memsize_) {
2026         m2 = hess_memsize_;
2027         posOldest = m->itCount % m2;
2028         posNewest = (m->itCount-1) % m2;
2029       } else {
2030         m2 = m->itCount;
2031         posOldest = 0;
2032         posNewest = m2-1;
2033       }
2034 
2035       // Set B_0 (pretend it's the first step)
2036       calcInitialHessian(m, b);
2037       m->delta_norm[b] = 1.0;
2038       m->delta_norm_old[b] = 1.0;
2039       m->delta_gamma[b] = 0.0;
2040       m->delta_gamma_old[b] = 0.0;
2041       m->noUpdateCounter[b] = -1;
2042 
2043       // Size the initial update, but with the most recent delta/gamma-pair
2044       double *gammai = smallGamma + nx_*posNewest;
2045       double *deltai = smallDelta + nx_*posNewest;
2046       sizeInitialHessian(m, gammai, deltai, b, hessScaling);
2047 
2048       for (casadi_int i=0; i<m2; i++) {
2049         pos = (posOldest+i) % m2;
2050 
2051         // Get new vector from list
2052         gammai = smallGamma + nx_*pos;
2053         deltai = smallDelta + nx_*pos;
2054 
2055         // Update sTs, sTs_ and sTy, sTy_
2056         m->delta_norm_old[b] = m->delta_norm[b];
2057         m->delta_gamma_old[b] = m->delta_gamma[b];
2058         m->delta_norm[b] = casadi_dot(dim, deltai, deltai);
2059         m->delta_gamma[b] = casadi_dot(dim, gammai, deltai);
2060 
2061         // Save statistics; we want to record them only for the most recent update
2062         averageSizingFactor = m->averageSizingFactor;
2063         hessDamped = m->hessDamped;
2064         hessSkipped = m->hessSkipped;
2065 
2066         // Selective sizing before the update
2067         if (hessScaling == 4) sizeHessianCOL(m, gammai, deltai, b);
2068 
2069         // Compute the new update
2070         if (updateType == 1) {
2071           calcSR1(m, gammai, deltai, b);
2072         } else if (updateType == 2) {
2073           calcBFGS(m, gammai, deltai, b);
2074         }
2075 
2076         m->nTotalUpdates++;
2077 
2078         // Count damping statistics only for the most recent update
2079         if (pos != posNewest) {
2080           m->hessDamped = hessDamped;
2081           m->hessSkipped = hessSkipped;
2082           if (hessScaling == 4)
2083             m->averageSizingFactor = averageSizingFactor;
2084         }
2085       }
2086 
2087       // If an update is skipped too often, reset the Hessian block
2088       if (m->noUpdateCounter[b] > max_consec_skipped_updates_) {
2089         resetHessian(m, b);
2090       }
2091     }
2092     // end of loop over blocks
2093     m->averageSizingFactor /= nBlocks;
2094   }
2095 
2096 
2097   void Blocksqp::
2098   calcHessianUpdateExact(BlocksqpMemory* m) const {
2099     // Compute the exact Hessian of the Lagrangian
2100     (void)evaluate(m, m->exact_hess_lag);
2101 
2102     // assign exact hessian to blocks
2103     const casadi_int* col = exact_hess_lag_sp_.colind();
2104     const casadi_int* row = exact_hess_lag_sp_.row();
2105     casadi_int s, dim;
2106 
2107     for (casadi_int k=0; k<nblocks_; k++) {
2108       s = blocks_[k];
2109       dim = dim_[k];
2110       for (casadi_int i=0; i<dim; i++)
2111         // Set diagonal to 0 (may have been 1 because of calcInitialHessian)
2112         m->hess[k][i + i*dim] = 0.0;
2113       for (casadi_int j=0; j<dim; j++) {
2114         for (casadi_int i=col[j+s]; i<col[j+1+s]; i++) {
2115           m->hess[k][row[i]-row[col[s]] + j*dim] = m->exact_hess_lag[i];
2116           if (row[i]-row[col[s]] < j)
2117             m->hess[k][j + (row[i]-row[col[s]])*dim] = m->exact_hess_lag[i];
2118         }
2119       }
2120     }
2121 
2122     // Prepare to compute fallback update as well
2123     m->hess = m->hess2;
2124     if (fallback_update_ == 2 && !hess_lim_mem_)
2125         calcHessianUpdate(m, fallback_update_, fallback_scaling_);
2126     else if (fallback_update_ == 0)
2127         calcInitialHessian(m);  // Set fallback as Identity
2128 
2129     // Reset pointer
2130     m->hess = m->hess1;
2131   }
2132 
2133 
2134   void Blocksqp::
2135   calcBFGS(BlocksqpMemory* m, const double* gamma,
2136     const double* delta, casadi_int b) const {
2137     casadi_int dim = dim_[b];
2138     double h1 = 0.0;
2139     double h2 = 0.0;
2140     double thetaPowell = 0.0;
2141     casadi_int damped;
2142 
2143     /* Work with a local copy of gamma because damping may need to change gamma.
2144      * Note that m->gamma needs to remain unchanged!
2145      * This may be important in a limited memory context:
2146      * When information is "forgotten", B_i-1 is different and the
2147      *  original gamma might lead to an undamped update with the new B_i-1! */
2148     std::vector<double> gamma2(gamma, gamma+dim);
2149 
2150     double *B = m->hess[b];
2151 
2152     // Bdelta = B*delta (if sizing is enabled, B is the sized B!)
2153     // h1 = delta^T * B * delta
2154     // h2 = delta^T * gamma
2155     vector<double> Bdelta(dim, 0.0);
2156     for (casadi_int i=0; i<dim; i++) {
2157       for (casadi_int k=0; k<dim; k++)
2158         Bdelta[i] += B[i+k*dim] * delta[k];
2159 
2160       h1 += delta[i] * Bdelta[i];
2161       //h2 += delta[i] * gamma[i];
2162     }
2163     h2 = m->delta_gamma[b];
2164 
2165     /* Powell's damping strategy to maintain pos. def. (Nocedal/Wright p.537; SNOPT paper)
2166      * Interpolates between current approximation and unmodified BFGS */
2167     damped = 0;
2168     if (hess_damp_)
2169       if (h2 < hess_damp_fac_ * h1 / m->alpha && fabs(h1 - h2) > 1.0e-12) {
2170         // At the first iteration h1 and h2 are equal due to COL scaling
2171 
2172         thetaPowell = (1.0 - hess_damp_fac_)*h1 / (h1 - h2);
2173 
2174         // Redefine gamma and h2 = delta^T * gamma
2175         h2 = 0.0;
2176         for (casadi_int i=0; i<dim; i++) {
2177           gamma2[i] = thetaPowell*gamma2[i] + (1.0 - thetaPowell)*Bdelta[i];
2178           h2 += delta[i] * gamma2[i];
2179         }
2180 
2181         // Also redefine deltaGamma for computation of sizing factor in the next iteration
2182         m->delta_gamma[b] = h2;
2183 
2184         damped = 1;
2185       }
2186 
2187     // For statistics: count number of damped blocks
2188     m->hessDamped += damped;
2189 
2190     // B_k+1 = B_k - Bdelta * (Bdelta)^T / h1 + gamma * gamma^T / h2
2191     double myEps = 1.0e2 * eps_;
2192     if (fabs(h1) < myEps || fabs(h2) < myEps) {
2193       // don't perform update because of bad condition, might introduce negative eigenvalues
2194       m->noUpdateCounter[b]++;
2195       m->hessDamped -= damped;
2196       m->hessSkipped++;
2197       m->nTotalSkippedUpdates++;
2198     } else {
2199       for (casadi_int i=0; i<dim; i++)
2200         for (casadi_int j=0; j<dim; j++)
2201           B[i+j*dim] += - Bdelta[i]*Bdelta[j]/h1 + gamma2[i]*gamma2[j]/h2;
2202 
2203       m->noUpdateCounter[b] = 0;
2204     }
2205   }
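
  /* Powell-damped BFGS in formula form (s = delta, y = gamma, B the current
   * block): with h1 = s'Bs and h2 = s'y, whenever the damping condition
   * triggers, y is replaced by
   *
   *   y_bar = theta*y + (1 - theta)*B*s,
   *   theta = (1 - hess_damp_fac_)*h1 / (h1 - h2),
   *
   * which yields s'y_bar = hess_damp_fac_*h1 > 0, so the update
   *
   *   B+ = B - (B*s)(B*s)'/h1 + y_bar*y_bar'/(s'y_bar)
   *
   * keeps the block positive definite.
   */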
2206 
2207 
2208   void Blocksqp::
2209   calcSR1(BlocksqpMemory* m, const double* gamma,
2210           const double* delta, casadi_int b) const {
2211     casadi_int dim = dim_[b];
2212     double *B = m->hess[b];
2213     double myEps = 1.0e2 * eps_;
2214     double r = 1.0e-8;
2215     double h = 0.0;
2216 
2217     // gmBdelta = gamma - B*delta
2218     // h = (gamma - B*delta)^T * delta
2219     vector<double> gmBdelta(dim);
2220     for (casadi_int i=0; i<dim; i++) {
2221       gmBdelta[i] = gamma[i];
2222       for (casadi_int k=0; k<dim; k++)
2223         gmBdelta[i] -= B[i+k*dim] * delta[k];
2224 
2225       h += (gmBdelta[i] * delta[i]);
2226     }
2227 
2228     // B_k+1 = B_k + gmBdelta * gmBdelta^T / h
2229     if (fabs(h) < r * casadi_norm_2(dim, delta)
2230                   * casadi_norm_2(dim, get_ptr(gmBdelta)) || fabs(h) < myEps) {
2231       // Skip update if denominator is too small
2232       m->noUpdateCounter[b]++;
2233       m->hessSkipped++;
2234       m->nTotalSkippedUpdates++;
2235     } else {
2236       for (casadi_int i=0; i<dim; i++)
2237         for (casadi_int j=0; j<dim; j++)
2238           B[i+j*dim] += gmBdelta[i]*gmBdelta[j]/h;
2239       m->noUpdateCounter[b] = 0;
2240     }
2241   }
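
  /* SR1 in formula form (s = delta, y = gamma):
   *
   *   B+ = B + (y - B*s)(y - B*s)' / h,   h = (y - B*s)'s,
   *
   * skipped when |h| < r*||s||*||y - B*s|| (the standard 1e-8 safeguard
   * against a blowing-up rank-1 term). Unlike BFGS, SR1 blocks may become
   * indefinite, which is why solveQP() can fall back to convex combinations
   * with the (convex) fallback Hessian.
   */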
2242 
2243 
2244   /**
2245    * Set deltaXi and gamma as a column in the matrix containing
2246    * the m most recent delta and gamma
2247    */
2248   void Blocksqp::updateDeltaGamma(BlocksqpMemory* m) const {
2249     if (hess_memsize_ == 1) return;
2250 
2251     m->dxk = m->deltaMat + nx_*(m->itCount % hess_memsize_);
2252     m->gamma = m->gammaMat + nx_*(m->itCount % hess_memsize_);
2253   }
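
  /* The L-BFGS history is a ring buffer: column (itCount % hess_memsize_)
   * of deltaMat and gammaMat is the slot written in the current iteration,
   * so m->dxk and m->gamma are simply repointed there. For hess_memsize_ == 1
   * the single column is reused in place and no repointing is needed.
   */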
2254 
2255   void Blocksqp::
2256   computeNextHessian(BlocksqpMemory* m, casadi_int idx, casadi_int maxQP) const {
2257     // Compute fallback update only once
2258     if (idx == 1) {
2259         // Switch storage
2260         m->hess = m->hess2;
2261 
2262         // If last block contains exact Hessian, we need to copy it
2263         if (which_second_derv_ == 1) {
2264           casadi_int dim = dim_[nblocks_-1];
2265           casadi_copy(m->hess1[nblocks_-1], dim*dim, m->hess2[nblocks_-1]);
2266         }
2267 
2268         // Limited memory: compute fallback update only when needed
2269         if (hess_lim_mem_) {
2270             m->itCount--;
2271             casadi_int hessDampSave = hess_damp_;
2272             const_cast<Blocksqp*>(this)->hess_damp_ = 1;
2273             calcHessianUpdateLimitedMemory(m, fallback_update_, fallback_scaling_);
2274             const_cast<Blocksqp*>(this)->hess_damp_ = hessDampSave;
2275             m->itCount++;
2276           }
2277         /* Full memory: both updates must be computed in every iteration
2278          * so switching storage is enough */
2279       }
2280 
2281     // 'Nontrivial' convex combinations
2282     if (maxQP > 2) {
2283         /* Convexification parameter: mu_l = l / (maxQP-1).
2284          * Compute it only in the first iteration, afterwards update
2285          * by recursion: mu_l/mu_(l-1) */
2286         double idxF = idx;
2287         double mu = (idx==1) ? 1.0 / (maxQP-1) : idxF / (idxF - 1.0);
2288         double mu1 = 1.0 - mu;
2289         for (casadi_int b=0; b<nblocks_; b++) {
2290           casadi_int dim = dim_[b];
2291           for (casadi_int i=0; i<dim; i++) {
2292             for (casadi_int j=0; j<dim; j++) {
2293               m->hess2[b][i+j*dim] *= mu;
2294               m->hess2[b][i+j*dim] += mu1 * m->hess1[b][i+j*dim];
2295             }
2296           }
2297         }
2298     }
2299   }
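
  /* The convex combination above, written out: iterate idx = l should hold
   *
   *   B2_l = mu_l * B2_0 + (1 - mu_l) * B1,   mu_l = l / (maxQP-1).
   *
   * Rather than recomputing from B2_0, the code updates in place with
   * mu = mu_l / mu_{l-1} = l/(l-1):
   *
   *   (l/(l-1)) * B2_{l-1} + (1 - l/(l-1)) * B1
   *     = mu_l * B2_0 + (1 - mu_l) * B1,
   *
   * so only the previous combination and B1 need to be kept around.
   */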
2300 
2301 
2302   /**
2303    * Inner loop of SQP algorithm:
2304    * Solve a sequence of QPs until pos. def. assumption (G3*) is satisfied.
2305    */
2306   casadi_int Blocksqp::
2307   solveQP(BlocksqpMemory* m, double* deltaXi, double* lambdaQP,
2308     bool matricesChanged) const {
2309     casadi_int maxQP, l;
2310     if (globalization_ &&
2311         (hess_update_ == 1 || hess_update_ == 4) &&
2312         matricesChanged &&
2313         m->itCount > 1) {
2314       maxQP = max_conv_qp_ + 1;
2315     } else {
2316       maxQP = 1;
2317     }
2318 
2319     /*
2320      * Prepare for qpOASES
2321      */
2322 
2323     // Setup QProblem data
2324     if (matricesChanged) {
2325       delete m->A;
2326       m->A = nullptr;
2327       copy_vector(Asp_.colind(), m->colind);
2328       copy_vector(Asp_.row(), m->row);
2329       int* jacIndRow = get_ptr(m->row);
2330       int* jacIndCol = get_ptr(m->colind);
2331       m->A = new qpOASES::SparseMatrix(ng_, nx_,
2332                                        jacIndRow, jacIndCol, m->jac_g);
2333     }
2334     double *g = m->grad_fk;
2335     double *lb = m->lbx_qp;
2336     double *lu = m->ubx_qp;
2337     double *lbA = m->lba_qp;
2338     double *luA = m->uba_qp;
2339 
2340     // qpOASES options
2341     qpOASES::Options opts;
2342     if (matricesChanged && maxQP > 1)
2343       opts.enableInertiaCorrection = qpOASES::BT_FALSE;
2344     else
2345       opts.enableInertiaCorrection = qpOASES::BT_TRUE;
2346     opts.enableEqualities = qpOASES::BT_TRUE;
2347     opts.initialStatusBounds = qpOASES::ST_INACTIVE;
2348     opts.printLevel = qpOASES::PL_NONE;
2349     opts.numRefinementSteps = 2;
2350     opts.epsLITests = 2.2204e-08;
2351     m->qp->setOptions(opts);
2352 
2353     // Other variables for qpOASES
2354     double cpuTime = matricesChanged ? max_time_qp_ : 0.1*max_time_qp_;
2355     int maxIt = matricesChanged ? max_it_qp_ : static_cast<int>(0.1*max_it_qp_);
2356     qpOASES::SolutionAnalysis solAna;
2357     qpOASES::returnValue ret = qpOASES::RET_INFO_UNDEFINED;
2358 
2359     /*
2360      * QP solving loop for convex combinations (sequential)
2361      */
2362     for (l=0; l<maxQP; l++) {
2363         /*
2364          * Compute a new Hessian
2365          */
2366         if (l > 0) {
2367           // If the solution of the first QP was rejected, consider second Hessian
2368           m->qpResolve++;
2369           computeNextHessian(m, l, maxQP);
2370         }
2371 
2372         if (l == maxQP-1) {
2373           // Enable inertia correction for supposedly convex QPs, just in case
2374           opts.enableInertiaCorrection = qpOASES::BT_TRUE;
2375           m->qp->setOptions(opts);
2376         }
2377 
2378         /*
2379          * Prepare the current Hessian for qpOASES
2380          */
2381         if (matricesChanged) {
2382           // Convert block-Hessian to sparse format
2383           convertHessian(m);
2384           delete m->H;
2385           m->H = nullptr;
2386           m->H = new qpOASES::SymSparseMat(nx_, nx_,
2387                                            m->hessIndRow, m->hessIndCol,
2388                                            m->hess_lag);
2389           m->H->createDiagInfo();
2390         }
2391 
2392         /*
2393          * Call qpOASES
2394          */
2395         if (matricesChanged) {
2396             maxIt = max_it_qp_;
2397             cpuTime = max_time_qp_;
2398             if (m->qp->getStatus() == qpOASES::QPS_HOMOTOPYQPSOLVED ||
2399                 m->qp->getStatus() == qpOASES::QPS_SOLVED) {
2400                 ret = m->qp->hotstart(m->H, g, m->A, lb, lu, lbA, luA, maxIt, &cpuTime);
2401               } else {
2402                 if (warmstart_) {
2403                   ret = m->qp->init(m->H, g, m->A, lb, lu, lbA, luA, maxIt, &cpuTime,
2404                     deltaXi, lambdaQP);
2405                 } else {
2406                   ret = m->qp->init(m->H, g, m->A, lb, lu, lbA, luA, maxIt, &cpuTime);
2407                 }
2408               }
2409           } else {
2410             // Second order correction: H and A do not change
2411             maxIt = static_cast<int>(0.1*max_it_qp_);
2412             cpuTime = 0.1*max_time_qp_;
2413             ret = m->qp->hotstart(g, lb, lu, lbA, luA, maxIt, &cpuTime);
2414           }
2415 
2416         /*
2417          * Check assumption (G3*) if nonconvex QP was solved
2418          */
2419         if (l < maxQP-1 && matricesChanged) {
2420             if (ret == qpOASES::SUCCESSFUL_RETURN) {
2421                 if (schur_) {
2422                   ret = solAna.checkCurvatureOnStronglyActiveConstraints(
2423                     dynamic_cast<qpOASES::SQProblemSchur*>(m->qp));
2424                 } else {
2425                   ret = solAna.checkCurvatureOnStronglyActiveConstraints(m->qp);
2426                 }
2427               }
2428 
2429             if (ret == qpOASES::SUCCESSFUL_RETURN) {
2430               // QP was solved successfully and curvature is positive after removing bounds
2431                 m->qpIterations = maxIt + 1;
2432                 break; // Success!
2433               } else {
2434               // QP solution is rejected, save statistics
2435                 if (ret == qpOASES::RET_SETUP_AUXILIARYQP_FAILED)
2436                   m->qpIterations2++;
2437                 else
2438                   m->qpIterations2 += maxIt + 1;
2439                 m->rejectedSR1++;
2440               }
2441           } else {
2442             // Convex QP was solved, no need to check assumption (G3*)
2443             m->qpIterations += maxIt + 1;
2444           }
2445 
2446       } // End of QP solving loop
2447 
2448     /*
2449      * Post-processing
2450      */
2451 
2452     // Get solution from qpOASES
2453     m->qp->getPrimalSolution(deltaXi);
2454     m->qp->getDualSolution(lambdaQP);
2455     m->qpObj = m->qp->getObjVal();
2456 
2457     // Compute constrJac*deltaXi, needed for the second order correction step
2458     casadi_fill(m->jac_times_dxk, ng_, 0.);
2459     casadi_mv(m->jac_g, Asp_, deltaXi, m->jac_times_dxk, 0);
2460 
2461     // Print qpOASES error code, if any
2462     if (ret != qpOASES::SUCCESSFUL_RETURN && matricesChanged)
2463       print("***WARNING: qpOASES error message: \"%s\"\n",
2464               qpOASES::MessageHandling::getErrorCodeMessage(ret));
2465 
2466     // Point Hessian again to the first Hessian
2467     m->hess = m->hess1;
2468 
2469     /* For full-memory Hessian: Restore fallback Hessian if convex combinations
2470      * were used during the loop */
2471     if (!hess_lim_mem_ && maxQP > 2 && matricesChanged) {
2472         double mu = 1.0 / l;
2473         double mu1 = 1.0 - mu;
2474         casadi_int nBlocks = (which_second_derv_ == 1) ? nblocks_-1 : nblocks_;
2475         for (casadi_int b=0; b<nBlocks; b++) {
2476           casadi_int dim = dim_[b];
2477           for (casadi_int i=0; i<dim; i++) {
2478             for (casadi_int j=0; j<dim; j++) {
2479               m->hess2[b][i+j*dim] *= mu;
2480               m->hess2[b][i+j*dim] += mu1 * m->hess1[b][i+j*dim];
2481             }
2482           }
2483         }
2484     }
2485 
2486     /* Return code depending on qpOASES return value
2487      * 0: Success
2488      * 1: Maximum number of iterations reached
2489      * 2: Unbounded
2490      * 3: Infeasible
2491      * 4: Other error */
2492      switch (ret) {
2493         case qpOASES::SUCCESSFUL_RETURN:
2494           return 0;
2495         case qpOASES::RET_MAX_NWSR_REACHED:
2496           return 1;
2497         case qpOASES::RET_HESSIAN_NOT_SPD:
2498         case qpOASES::RET_HESSIAN_INDEFINITE:
2499         case qpOASES::RET_INIT_FAILED_UNBOUNDEDNESS:
2500         case qpOASES::RET_QP_UNBOUNDED:
2501         case qpOASES::RET_HOTSTART_STOPPED_UNBOUNDEDNESS:
2502           return 2;
2503         case qpOASES::RET_INIT_FAILED_INFEASIBILITY:
2504         case qpOASES::RET_QP_INFEASIBLE:
2505         case qpOASES::RET_HOTSTART_STOPPED_INFEASIBILITY:
2506           return 3;
2507         default:
2508           return 4;
2509      }
2510   }
2511 
2512   /**
2513    * Set bounds on the step (in the QP), either according
2514    * to variable bounds in the NLP or according to
2515    * trust region box radius
2516    */
2517   void Blocksqp::updateStepBounds(BlocksqpMemory* m, bool soc) const {
2518     auto d_nlp = &m->d_nlp;
2519     // Bounds on step
2520     for (casadi_int i=0; i<nx_; i++) {
2521       double lbx = d_nlp->lbz[i];
2522       if (lbx != inf) {
2523         m->lbx_qp[i] = lbx - d_nlp->z[i];
2524       } else {
2525         m->lbx_qp[i] = inf;
2526       }
2527 
2528       double ubx = d_nlp->ubz[i];
2529       if (ubx != inf) {
2530         m->ubx_qp[i] = ubx - d_nlp->z[i];
2531       } else {
2532         m->ubx_qp[i] = inf;
2533       }
2534     }
2535 
2536     // Bounds on linearized constraints
2537     for (casadi_int i=0; i<ng_; i++) {
2538       double lbg = d_nlp->lbz[i+nx_];
2539       if (lbg != inf) {
2540         m->lba_qp[i] = lbg - m->gk[i];
2541         if (soc) m->lba_qp[i] += m->jac_times_dxk[i];
2542       } else {
2543         m->lba_qp[i] = inf;
2544       }
2545 
2546       double ubg = d_nlp->ubz[i+nx_];
2547       if (ubg != inf) {
2548         m->uba_qp[i] = ubg - m->gk[i];
2549         if (soc) m->uba_qp[i] += m->jac_times_dxk[i];
2550       } else {
2551         m->uba_qp[i] = inf;
2552       }
2553     }
2554   }
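
  /* In QP terms: with d the step and J the constraint Jacobian, the loops
   * above set up
   *
   *   lbx - x_k        <=  d  <= ubx - x_k
   *   lbg - g(x_k) + c <= J*d <= ubg - g(x_k) + c,
   *
   * where c = J*dxk (m->jac_times_dxk) for a second order correction
   * (soc == true) and c = 0 otherwise.
   */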
2555 
2556   void Blocksqp::printProgress(BlocksqpMemory* m) const {
2557     /*
2558      * m->steptype:
2559      *-1: full step was accepted because it reduces the KKT error although line search failed
2560      * 0: standard line search step
2561      * 1: Hessian has been reset to identity
2562      * 2: feasibility restoration heuristic has been called
2563      * 3: feasibility restoration phase has been called
2564      */
2565 
2566      // Print headline every twenty iterations
2567     if (m->itCount % 20 == 0) {
2568       print("%-8s", "   it");
2569       print("%-21s", " qpIt");
2570       print("%-9s", "obj");
2571       print("%-11s", "feas");
2572       print("%-7s", "opt");
2573       print("%-11s", "|lgrd|");
2574       print("%-9s", "|stp|");
2575       print("%-10s", "|lstp|");
2576       print("%-8s", "alpha");
2577       print("%-6s", "nSOCS");
2578       print("%-18s", "sk, da, sca");
2579       print("%-6s", "QPr,mu");
2580       print("\n");
2581     }
2582 
2583     if (m->itCount == 0) {
2584       // Values for first iteration
2585       print("%5i  ", m->itCount);
2586       print("%11i ", 0);
2587       print("% 10e  ", m->obj);
2588       print("%-10.2e", m->cNormS);
2589       print("%-10.2e", m->tol);
2590       print("\n");
2591     } else {
2592       // All values
2593       print("%5i  ", m->itCount);
2594       print("%5i+%5i ", m->qpIterations, m->qpIterations2);
2595       print("% 10e  ", m->obj);
2596       print("%-10.2e", m->cNormS);
2597       print("%-10.2e", m->tol);
2598       print("%-10.2e", m->gradNorm);
2599       print("%-10.2e", casadi_norm_inf(nx_, m->dxk));
2600       print("%-10.2e", m->lambdaStepNorm);
2601       print("%-9.1e", m->alpha);
2602       print("%5i", m->nSOCS);
2603       print("%3i, %3i, %-9.1e", m->hessSkipped, m->hessDamped, m->averageSizingFactor);
2604       print("%i, %-9.1e", m->qpResolve, casadi_norm_1(nblocks_, m->delta_h)/nblocks_);
2605       print("\n");
2606     }
2607   }
2608 
2609   void Blocksqp::initStats(BlocksqpMemory* m) const {
2610     m->itCount = 0;
2611     m->qpItTotal = 0;
2612     m->qpIterations = 0;
2613     m->hessSkipped = 0;
2614     m->hessDamped = 0;
2615     m->averageSizingFactor = 0.0;
2616   }
2617 
2618   void Blocksqp::updateStats(BlocksqpMemory* m) const {
2619     // Do not accidentally print hessSkipped in the next iteration
2620     m->hessSkipped = 0;
2621     m->hessDamped = 0;
2622 
2623     // qpIterations = number of iterations for the QP that determines the step;
2624     // this can include a resolve (+SOC)
2625     // qpIterations2 = number of iterations for a QP whose solution was discarded
2626     m->qpItTotal += m->qpIterations;
2627     m->qpItTotal += m->qpIterations2;
2628     m->qpIterations = 0;
2629     m->qpIterations2 = 0;
2630     m->qpResolve = 0;
2631   }
2632 
2633   /**
2634    * Allocate memory for variables
2635    * required by all optimization
2636    * algorithms except for the Jacobian
2637    */
2638   void Blocksqp::reset_sqp(BlocksqpMemory* m) const {
2639     // dual variables (for general constraints and variable bounds)
2640     casadi_fill(m->lam_xk, nx_, 0.);
2641     casadi_fill(m->lam_gk, ng_, 0.);
2642 
2643     // constraint vector with lower and upper bounds
2644     // (Box constraints are not included in the constraint list)
2645     casadi_fill(m->gk, ng_, 0.);
2646 
2647     // gradient of objective
2648     casadi_fill(m->grad_fk, nx_, 0.);
2649 
2650     // gradient of Lagrangian
2651     casadi_fill(m->grad_lagk, nx_, 0.);
2652 
2653     // current step
2654     casadi_fill(m->deltaMat, nx_*hess_memsize_, 0.);
2655     m->dxk = m->deltaMat;
2656 
2657     // trial step (temporary variable, for line search)
2658     casadi_fill(m->trial_xk, nx_, 0.);
2659 
2660     // bounds for step (QP subproblem)
2661     casadi_fill(m->lbx_qp, nx_, 0.);
2662     casadi_fill(m->ubx_qp, nx_, 0.);
2663     casadi_fill(m->lba_qp, ng_, 0.);
2664     casadi_fill(m->uba_qp, ng_, 0.);
2665 
2666     // product of constraint Jacobian with step (deltaXi)
2667     casadi_fill(m->jac_times_dxk, ng_, 0.);
2668 
2669     // dual variables of QP (simple bounds and general constraints)
2670     casadi_fill(m->lam_qp, nx_+ng_, 0.);
2671 
2672     // line search parameters
2673     casadi_fill(m->delta_h, nblocks_, 0.);
2674 
2675     // filter as a set of pairs
2676     m->filter.clear();
2677 
2678     // difference of Lagrangian gradients
2679     casadi_fill(m->gammaMat, nx_*hess_memsize_, 0.);
2680     m->gamma = m->gammaMat;
2681 
2682     // Scalars that are used in various Hessian update procedures
2683     casadi_fill(m->noUpdateCounter, nblocks_, casadi_int(-1));
2684 
2685     // For selective sizing: for each block save sTs, sTs_, sTy, sTy_
2686     casadi_fill(m->delta_norm, nblocks_, 1.);
2687     casadi_fill(m->delta_norm_old, nblocks_, 1.);
2688     casadi_fill(m->delta_gamma, nblocks_, 0.);
2689     casadi_fill(m->delta_gamma_old, nblocks_, 0.);
2690 
2691     // Create one Matrix for one diagonal block in the Hessian
2692     for (casadi_int b=0; b<nblocks_; b++) {
2693       casadi_int dim = dim_[b];
2694       casadi_fill(m->hess1[b], dim*dim, 0.);
2695     }
2696 
2697     // For SR1 or finite differences, maintain two Hessians
2698     if (hess_update_ == 1 || hess_update_ == 4) {
2699       for (casadi_int b=0; b<nblocks_; b++) {
2700         casadi_int dim = dim_[b];
2701         casadi_fill(m->hess2[b], dim*dim, 0.);
2702       }
2703     }
2704 
2705     // Set Hessian pointer
2706     m->hess = m->hess1;
2707   }
2708 
2709   /**
2710    * Convert array *hess to a single symmetric sparse matrix in
2711    * Harwell-Boeing format (as used by qpOASES)
2712    */
2713   void Blocksqp::
2714   convertHessian(BlocksqpMemory* m) const {
2715     casadi_int count, colCountTotal, rowOffset;
2716     casadi_int nnz;
2717 
2718     // 1) count nonzero elements
2719     nnz = 0;
2720     for (casadi_int b=0; b<nblocks_; b++) {
2721       casadi_int dim = dim_[b];
2722       for (casadi_int i=0; i<dim; i++) {
2723         for (casadi_int j=0; j<dim; j++) {
2724           if (fabs(m->hess[b][i+j*dim]) > eps_) {
2725             nnz++;
2726           }
2727         }
2728       }
2729     }
2730 
2731     m->hessIndCol = m->hessIndRow + nnz;
2732     m->hessIndLo = m->hessIndCol + (nx_+1);
2733 
2734     // 2) store matrix entries columnwise in hessNz
2735     count = 0; // runs over all nonzero elements
2736     colCountTotal = 0; // keep track of position in large matrix
2737     rowOffset = 0;
2738     for (casadi_int b=0; b<nblocks_; b++) {
2739       casadi_int dim = dim_[b];
2740 
2741       for (casadi_int i=0; i<dim; i++) {
2742         // column 'colCountTotal' starts at element 'count'
2743         m->hessIndCol[colCountTotal] = count;
2744 
2745         for (casadi_int j=0; j<dim; j++) {
2746           if (fabs(m->hess[b][i+j*dim]) > eps_) {
2747               m->hess_lag[count] = m->hess[b][i+j*dim];
2748               m->hessIndRow[count] = j + rowOffset;
2749               count++;
2750           }
2751         }
2752         colCountTotal++;
2753       }
2754       rowOffset += dim;
2755     }
2756     m->hessIndCol[colCountTotal] = count;
2757 
2758     // 3) Set reference to lower triangular matrix
2759     for (casadi_int j=0; j<nx_; j++) {
2760       casadi_int i;
2761       for (i=m->hessIndCol[j]; i<m->hessIndCol[j+1] && m->hessIndRow[i]<j; i++) {}
2762       m->hessIndLo[j] = i;
2763     }
2764 
2765     if (count != nnz)
2766       print("***WARNING: Error in convertHessian: %i elements processed, "
2767             "should be %i elements!\n", count, nnz);
2768   }
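
  /* Worked example of the target format (illustrative): a single 2x2 block
   *
   *   [2 1]
   *   [1 3]
   *
   * is stored as hess_lag = {2, 1, 1, 3}, hessIndRow = {0, 1, 0, 1} and
   * hessIndCol = {0, 2, 4}, i.e. compressed sparse columns. hessIndLo then
   * records, per column, the first entry on or below the diagonal (here
   * {0, 3}), which qpOASES uses to address the lower triangle.
   */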
2769 
2770   void Blocksqp::initIterate(BlocksqpMemory* m) const {
2771     m->alpha = 1.0;
2772     m->nSOCS = 0;
2773     m->reducedStepCount = 0;
2774     m->steptype = 0;
2775 
2776     m->obj = inf;
2777     m->tol = inf;
2778     m->cNorm = theta_max_;
2779     m->gradNorm = inf;
2780     m->lambdaStepNorm = 0.0;
2781   }
2782 
2783   casadi_int Blocksqp::
2784   evaluate(BlocksqpMemory* m,
2785            double *f, double *g,
2786            double *grad_f, double *jac_g) const {
2787     auto d_nlp = &m->d_nlp;
2788     m->arg[0] = d_nlp->z; // x
2789     m->arg[1] = d_nlp->p; // p
2790     m->res[0] = f; // f
2791     m->res[1] = g; // g
2792     m->res[2] = grad_f; // grad:f:x
2793     m->res[3] = jac_g; // jac:g:x
2794     return calc_function(m, "nlp_gf_jg");
2795   }
2796 
2797   casadi_int Blocksqp::
2798   evaluate(BlocksqpMemory* m, const double *xk, double *f,
2799            double *g) const {
2800     auto d_nlp = &m->d_nlp;
2801     m->arg[0] = xk; // x
2802     m->arg[1] = d_nlp->p; // p
2803     m->res[0] = f; // f
2804     m->res[1] = g; // g
2805     return calc_function(m, "nlp_fg");
2806   }
2807 
2808   casadi_int Blocksqp::
2809   evaluate(BlocksqpMemory* m,
2810            double *exact_hess_lag) const {
2811     auto d_nlp = &m->d_nlp;
2812     // Multiplier for the objective Hessian contribution (all ones)
2813     std::vector<double> ones(nx_, 1.0);
2814     // The Lagrangian in blocksqp is L = f - lambda^T * g, whereas CasADi uses a
2815     // plus sign, so the constraint multipliers are passed with flipped sign
2816     std::vector<double> minus_lam_gk(ng_);
2817     for (casadi_int i=0; i<ng_; ++i) minus_lam_gk[i] = -m->lam_gk[i];
2819     m->arg[0] = d_nlp->z; // x
2820     m->arg[1] = d_nlp->p; // p
2821     m->arg[2] = get_ptr(ones); // lam:f
2822     m->arg[3] = get_ptr(minus_lam_gk); // lam:g
2823     m->res[0] = exact_hess_lag; // hess:gamma:x:x
2824     return calc_function(m, "nlp_hess_l");
2825   }
2826 
2827   BlocksqpMemory::BlocksqpMemory() {
2828     qpoases_mem = nullptr;
2829     H = nullptr;
2830     A = nullptr;
2831     qp = nullptr;
2832   }
2833 
2834   BlocksqpMemory::~BlocksqpMemory() {
2835     delete qpoases_mem;
2836     delete H;
2837     delete A;
2838     delete qp;
2839   }
2840 
2841   double Blocksqp::
2842   lInfConstraintNorm(BlocksqpMemory* m, const double* xk, const double* g) const {
2843     auto d_nlp = &m->d_nlp;
2844     return fmax(casadi_max_viol(nx_, xk, d_nlp->lbz, d_nlp->ubz),
2845                 casadi_max_viol(ng_, g, d_nlp->lbz+nx_, d_nlp->ubz+nx_));
2846   }
2847 
2848 
2849   Blocksqp::Blocksqp(DeserializingStream& s) : Nlpsol(s) {
2850     s.version("Blocksqp", 1);
2851     s.unpack("Blocksqp::nblocks", nblocks_);
2852     s.unpack("Blocksqp::blocks", blocks_);
2853     s.unpack("Blocksqp::dim", dim_);
2854     s.unpack("Blocksqp::nnz_H", nnz_H_);
2855     s.unpack("Blocksqp::Asp", Asp_);
2856     s.unpack("Blocksqp::Hsp", Hsp_);
2857     s.unpack("Blocksqp::exact_hess_lag_sp_", exact_hess_lag_sp_);
2858     s.unpack("Blocksqp::linsol_plugin", linsol_plugin_);
2859     s.unpack("Blocksqp::print_header", print_header_);
2860     s.unpack("Blocksqp::print_iteration", print_iteration_);
2861     s.unpack("Blocksqp::eps", eps_);
2862     s.unpack("Blocksqp::opttol", opttol_);
2863     s.unpack("Blocksqp::nlinfeastol", nlinfeastol_);
2864     s.unpack("Blocksqp::schur", schur_);
2865     s.unpack("Blocksqp::globalization", globalization_);
2866     s.unpack("Blocksqp::restore_feas", restore_feas_);
2867     s.unpack("Blocksqp::max_line_search", max_line_search_);
2868     s.unpack("Blocksqp::max_consec_reduced_steps", max_consec_reduced_steps_);
2869     s.unpack("Blocksqp::max_consec_skipped_updates", max_consec_skipped_updates_);
2870     s.unpack("Blocksqp::max_it_qp", max_it_qp_);
2871     s.unpack("Blocksqp::max_iter", max_iter_);
2872     s.unpack("Blocksqp::warmstart", warmstart_);
2873     s.unpack("Blocksqp::qp_init", qp_init_);
2874     s.unpack("Blocksqp::block_hess", block_hess_);
2875     s.unpack("Blocksqp::hess_scaling", hess_scaling_);
2876     s.unpack("Blocksqp::fallback_scaling", fallback_scaling_);
2877     s.unpack("Blocksqp::max_time_qp", max_time_qp_);
2878     s.unpack("Blocksqp::ini_hess_diag", ini_hess_diag_);
2879     s.unpack("Blocksqp::col_eps", col_eps_);
2880     s.unpack("Blocksqp::col_tau1", col_tau1_);
2881     s.unpack("Blocksqp::col_tau2", col_tau2_);
2882     s.unpack("Blocksqp::hess_damp", hess_damp_);
2883     s.unpack("Blocksqp::hess_damp_fac", hess_damp_fac_);
2884     s.unpack("Blocksqp::hess_update", hess_update_);
2885     s.unpack("Blocksqp::fallback_update", fallback_update_);
2886     s.unpack("Blocksqp::hess_lim_mem", hess_lim_mem_);
2887     s.unpack("Blocksqp::hess_memsize", hess_memsize_);
2888     s.unpack("Blocksqp::which_second_derv", which_second_derv_);
2889     s.unpack("Blocksqp::skip_first_globalization", skip_first_globalization_);
2890     s.unpack("Blocksqp::conv_strategy", conv_strategy_);
2891     s.unpack("Blocksqp::max_conv_qp", max_conv_qp_);
2892     s.unpack("Blocksqp::max_soc_iter", max_soc_iter_);
2893     s.unpack("Blocksqp::gamma_theta", gamma_theta_);
2894     s.unpack("Blocksqp::gamma_f", gamma_f_);
2895     s.unpack("Blocksqp::kappa_soc", kappa_soc_);
2896     s.unpack("Blocksqp::kappa_f", kappa_f_);
2897     s.unpack("Blocksqp::theta_max", theta_max_);
2898     s.unpack("Blocksqp::theta_min", theta_min_);
2899     s.unpack("Blocksqp::delta", delta_);
2900     s.unpack("Blocksqp::s_theta", s_theta_);
2901     s.unpack("Blocksqp::s_f", s_f_);
2902     s.unpack("Blocksqp::kappa_minus", kappa_minus_);
2903     s.unpack("Blocksqp::kappa_plus", kappa_plus_);
2904     s.unpack("Blocksqp::kappa_plus_max", kappa_plus_max_);
2905     s.unpack("Blocksqp::delta_h0", delta_h0_);
2906     s.unpack("Blocksqp::eta", eta_);
2907     s.unpack("Blocksqp::obj_lo", obj_lo_);
2908     s.unpack("Blocksqp::obj_up", obj_up_);
2909     s.unpack("Blocksqp::rho", rho_);
2910     s.unpack("Blocksqp::zeta", zeta_);
2911     s.unpack("Blocksqp::rp_solver", rp_solver_);
2912     s.unpack("Blocksqp::print_maxit_reached", print_maxit_reached_);
2913 
2914   }
2915 
2916   void Blocksqp::serialize_body(SerializingStream &s) const {
2917     Nlpsol::serialize_body(s);
2918     s.version("Blocksqp", 1);
2919     s.pack("Blocksqp::nblocks", nblocks_);
2920     s.pack("Blocksqp::blocks", blocks_);
2921     s.pack("Blocksqp::dim", dim_);
2922     s.pack("Blocksqp::nnz_H", nnz_H_);
2923     s.pack("Blocksqp::Asp", Asp_);
2924     s.pack("Blocksqp::Hsp", Hsp_);
2925     s.pack("Blocksqp::exact_hess_lag_sp_", exact_hess_lag_sp_);
2926     s.pack("Blocksqp::linsol_plugin", linsol_plugin_);
2927     s.pack("Blocksqp::print_header", print_header_);
2928     s.pack("Blocksqp::print_iteration", print_iteration_);
2929     s.pack("Blocksqp::eps", eps_);
2930     s.pack("Blocksqp::opttol", opttol_);
2931     s.pack("Blocksqp::nlinfeastol", nlinfeastol_);
2932     s.pack("Blocksqp::schur", schur_);
2933     s.pack("Blocksqp::globalization", globalization_);
2934     s.pack("Blocksqp::restore_feas", restore_feas_);
2935     s.pack("Blocksqp::max_line_search", max_line_search_);
2936     s.pack("Blocksqp::max_consec_reduced_steps", max_consec_reduced_steps_);
2937     s.pack("Blocksqp::max_consec_skipped_updates", max_consec_skipped_updates_);
2938     s.pack("Blocksqp::max_it_qp", max_it_qp_);
2939     s.pack("Blocksqp::max_iter", max_iter_);
2940     s.pack("Blocksqp::warmstart", warmstart_);
2941     s.pack("Blocksqp::qp_init", qp_init_);
2942     s.pack("Blocksqp::block_hess", block_hess_);
2943     s.pack("Blocksqp::hess_scaling", hess_scaling_);
2944     s.pack("Blocksqp::fallback_scaling", fallback_scaling_);
2945     s.pack("Blocksqp::max_time_qp", max_time_qp_);
2946     s.pack("Blocksqp::ini_hess_diag", ini_hess_diag_);
2947     s.pack("Blocksqp::col_eps", col_eps_);
2948     s.pack("Blocksqp::col_tau1", col_tau1_);
2949     s.pack("Blocksqp::col_tau2", col_tau2_);
2950     s.pack("Blocksqp::hess_damp", hess_damp_);
2951     s.pack("Blocksqp::hess_damp_fac", hess_damp_fac_);
2952     s.pack("Blocksqp::hess_update", hess_update_);
2953     s.pack("Blocksqp::fallback_update", fallback_update_);
2954     s.pack("Blocksqp::hess_lim_mem", hess_lim_mem_);
2955     s.pack("Blocksqp::hess_memsize", hess_memsize_);
2956     s.pack("Blocksqp::which_second_derv", which_second_derv_);
2957     s.pack("Blocksqp::skip_first_globalization", skip_first_globalization_);
2958     s.pack("Blocksqp::conv_strategy", conv_strategy_);
2959     s.pack("Blocksqp::max_conv_qp", max_conv_qp_);
2960     s.pack("Blocksqp::max_soc_iter", max_soc_iter_);
2961     s.pack("Blocksqp::gamma_theta", gamma_theta_);
2962     s.pack("Blocksqp::gamma_f", gamma_f_);
2963     s.pack("Blocksqp::kappa_soc", kappa_soc_);
2964     s.pack("Blocksqp::kappa_f", kappa_f_);
2965     s.pack("Blocksqp::theta_max", theta_max_);
2966     s.pack("Blocksqp::theta_min", theta_min_);
2967     s.pack("Blocksqp::delta", delta_);
2968     s.pack("Blocksqp::s_theta", s_theta_);
2969     s.pack("Blocksqp::s_f", s_f_);
2970     s.pack("Blocksqp::kappa_minus", kappa_minus_);
2971     s.pack("Blocksqp::kappa_plus", kappa_plus_);
2972     s.pack("Blocksqp::kappa_plus_max", kappa_plus_max_);
2973     s.pack("Blocksqp::delta_h0", delta_h0_);
2974     s.pack("Blocksqp::eta", eta_);
2975     s.pack("Blocksqp::obj_lo", obj_lo_);
2976     s.pack("Blocksqp::obj_up", obj_up_);
2977     s.pack("Blocksqp::rho", rho_);
2978     s.pack("Blocksqp::zeta", zeta_);
2979     s.pack("Blocksqp::rp_solver", rp_solver_);
2980     s.pack("Blocksqp::print_maxit_reached", print_maxit_reached_);
2981   }
2982 
2983 } // namespace casadi
2984