// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_MOREAUYOSIDAPENALTYSTEP_H
#define ROL_MOREAUYOSIDAPENALTYSTEP_H

#include "ROL_MoreauYosidaPenalty.hpp"
#include "ROL_Types.hpp"
#include "ROL_AugmentedLagrangianStep.hpp"
#include "ROL_CompositeStep.hpp"
#include "ROL_FletcherStep.hpp"
#include "ROL_BundleStep.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_LineSearchStep.hpp"
#include "ROL_Algorithm.hpp"
#include "ROL_ConstraintStatusTest.hpp"
#include "ROL_BundleStatusTest.hpp"
#include "ROL_ParameterList.hpp"

/** @ingroup step_group
    \class ROL::MoreauYosidaPenaltyStep
    \brief Implements the computation of optimization steps using Moreau-Yosida
           regularized bound constraints.

    To describe the generalized Moreau-Yosida penalty method, we consider the
    following abstract setting.  Suppose \f$\mathcal{X}\f$ is a Hilbert space
    of functions mapping \f$\Xi\f$ to \f$\mathbb{R}\f$.  For example,
    \f$\Xi\subset\mathbb{R}^n\f$ and \f$\mathcal{X}=L^2(\Xi)\f$ or
    \f$\Xi = \{1,\ldots,n\}\f$ and \f$\mathcal{X}=\mathbb{R}^n\f$. We assume
    \f$ f:\mathcal{X}\to\mathbb{R}\f$ is twice continuously Fréchet
    differentiable and \f$a,\,b\in\mathcal{X}\f$ with \f$a\le b\f$ almost
    everywhere in \f$\Xi\f$.  Note that the generalized Moreau-Yosida penalty
    method will also work with secant approximations of the Hessian.

    The generalized Moreau-Yosida penalty method is a provably convergent
    algorithm for convex optimization problems, but it may not converge for
    general nonlinear, nonconvex problems.  The algorithm solves
    \f[
       \min_x \quad f(x) \quad \text{s.t.} \quad c(x) = 0, \quad a \le x \le b.
    \f]
    We can represent the bound constraints using the indicator function
    \f$\iota_{[a,b]}\f$, where \f$\iota_{[a,b]}(x) = 0\f$ if
    \f$a \le x \le b\f$ and \f$\iota_{[a,b]}(x) = \infty\f$ otherwise.
    Using this indicator function, we can write our optimization
    problem as the (nonsmooth) equality constrained program
    \f[
       \min_x \quad f(x) + \iota_{[a,b]}(x) \quad \text{s.t.}\quad c(x) = 0.
    \f]
    Since the indicator function is not continuously Fréchet
    differentiable, we cannot apply our existing algorithms (such as Composite
    Step SQP) to the above equality constrained problem.  To circumvent this
    issue, we smooth the indicator function using generalized Moreau-Yosida
    regularization, i.e., we replace \f$\iota_{[a,b]}\f$ in the objective
    function with
    \f[
       \varphi(x,\mu,c) = \inf_y\; \{\; \iota_{[a,b]}(x-y)
         + \langle \mu, y\rangle_{\mathcal{X}}
         + \frac{c}{2}\|y\|_{\mathcal{X}}^2 \;\}.
    \f]
    One can show that \f$\varphi(\cdot,\mu,c)\f$ for any \f$\mu\in\mathcal{X}\f$
    and \f$c > 0\f$ is continuously Fréchet
    differentiable with respect to \f$x\f$.  Thus, using this penalty,
    Step::compute solves the following subproblem: given
    \f$c_k>0\f$ and \f$\mu_k\in\mathcal{X}\f$, determine \f$x_k\in\mathcal{X}\f$
    that solves
    \f[
      \min_{x} \quad f(x) + \varphi(x,\mu_k,c_k)\quad\text{s.t.}\quad
         c(x) = 0.
    \f]
    The multipliers \f$\mu_k\f$ are then updated in Step::update as
    \f$\mu_{k+1} = \nabla_x\varphi(x_k,\mu_k,c_k)\f$ and \f$c_k\f$ is
    potentially increased (although this is not always necessary).

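    For the bound constraint, the infimum defining \f$\varphi\f$ can be
    computed explicitly: substituting \f$z = x - y\f$ with \f$z\in[a,b]\f$
    shows that the minimizer is the projection
    \f$z^* = P_{[a,b]}(x + \mu/c)\f$, which yields the closed-form gradient
    \f[
       \nabla_x\varphi(x,\mu,c)
         = \mu + c\,\bigl(x - P_{[a,b]}(x + \mu/c)\bigr),
    \f]
    so the multiplier update above reduces to a projection computation.
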
    For more information on this method see:
    \li D. P. Bertsekas. "Approximation Procedures Based on the Method of
    Multipliers." Journal of Optimization Theory and Applications,
    Vol. 23(4), 1977.
    \li K. Ito, K. Kunisch. "Augmented Lagrangian Methods for Nonsmooth,
    Convex Optimization in Hilbert Spaces." Nonlinear Analysis, 2000.
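
    A minimal configuration sketch (the parameter names below are the ones
    parsed by the constructor; the values shown are the illustrative
    defaults, and the \c double instantiation is just an example):
    \code
    ROL::ParameterList parlist;
    ROL::ParameterList &steplist
      = parlist.sublist("Step").sublist("Moreau-Yosida Penalty");
    steplist.set("Initial Penalty Parameter",       1.e1);
    steplist.set("Penalty Parameter Growth Factor", 1.e1);
    steplist.set("Update Penalty",                  true);
    ROL::ParameterList &sublist = steplist.sublist("Subproblem");
    sublist.set("Optimality Tolerance",  1.e-8);
    sublist.set("Feasibility Tolerance", 1.e-8);
    sublist.set("Iteration Limit",       1000);
    sublist.set("Print History",         false);
    sublist.set("Step Type",             "Composite Step");

    ROL::Ptr<ROL::MoreauYosidaPenaltyStep<double>> step
      = ROL::makePtr<ROL::MoreauYosidaPenaltyStep<double>>(parlist);
    \endcode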
*/


namespace ROL {

template<class Real>
class AugmentedLagrangianStep;

template <class Real>
class MoreauYosidaPenaltyStep : public Step<Real> {
private:
  ROL::Ptr<StatusTest<Real>>      status_;
  ROL::Ptr<Step<Real>>            step_;
  ROL::Ptr<Algorithm<Real>>       algo_;
  ROL::Ptr<Vector<Real>>          x_;
  ROL::Ptr<Vector<Real>>          g_;
  ROL::Ptr<Vector<Real>>          l_;
  ROL::Ptr<BoundConstraint<Real>> bnd_;

  Real compViolation_;
  Real gLnorm_;
  Real tau_;
  bool print_;
  bool updatePenalty_;

  ROL::ParameterList parlist_;
  int subproblemIter_;
  bool hasEquality_;

  EStep stepType_;
  std::string stepname_;

  void updateState(const Vector<Real> &x, const Vector<Real> &l,
                   Objective<Real> &obj,
                   Constraint<Real> &con, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    Real zerotol = std::sqrt(ROL_EPSILON<Real>());
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    // Update objective and constraint.
    myPen.update(x,true,algo_state.iter);
    con.update(x,true,algo_state.iter);
    // Compute norm of the gradient of the Lagrangian
    algo_state.value = myPen.value(x, zerotol);
    myPen.gradient(*(state->gradientVec), x, zerotol);
    con.applyAdjointJacobian(*g_,l,x,zerotol);
    state->gradientVec->plus(*g_);
    gLnorm_ = (state->gradientVec)->norm();
    // Compute constraint violation
    con.value(*(state->constraintVec),x, zerotol);
    algo_state.cnorm = (state->constraintVec)->norm();
    compViolation_ = myPen.testComplementarity(x);
    algo_state.gnorm = std::max(gLnorm_,compViolation_);
    // Update state
    algo_state.nfval++;
    algo_state.ngrad++;
    algo_state.ncval++;
  }

  void updateState(const Vector<Real> &x,
                   Objective<Real> &obj,
                   BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    Real zerotol = std::sqrt(ROL_EPSILON<Real>());
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    // Update objective and constraint.
    myPen.update(x,true,algo_state.iter);
    // Compute norm of the gradient of the Lagrangian
    algo_state.value = myPen.value(x, zerotol);
    myPen.gradient(*(state->gradientVec), x, zerotol);
    gLnorm_ = (state->gradientVec)->norm();
    // Compute constraint violation
    algo_state.cnorm = static_cast<Real>(0);
    compViolation_ = myPen.testComplementarity(x);
    algo_state.gnorm = std::max(gLnorm_,compViolation_);
    // Update state
    algo_state.nfval++;
    algo_state.ngrad++;
  }

public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  ~MoreauYosidaPenaltyStep() {}

  MoreauYosidaPenaltyStep(ROL::ParameterList &parlist)
    : Step<Real>(), algo_(ROL::nullPtr),
      x_(ROL::nullPtr), g_(ROL::nullPtr), l_(ROL::nullPtr),
      tau_(10), print_(false), parlist_(parlist), subproblemIter_(0),
      hasEquality_(false) {
    // Parse parameters
    Real ten(10), oem6(1.e-6), oem8(1.e-8);
    ROL::ParameterList& steplist = parlist.sublist("Step").sublist("Moreau-Yosida Penalty");
    Step<Real>::getState()->searchSize = steplist.get("Initial Penalty Parameter",ten);
    tau_ = steplist.get("Penalty Parameter Growth Factor",ten);
    updatePenalty_ = steplist.get("Update Penalty",true);
    print_ = steplist.sublist("Subproblem").get("Print History",false);
    // Set parameters for step subproblem
    Real gtol = steplist.sublist("Subproblem").get("Optimality Tolerance",oem8);
    Real ctol = steplist.sublist("Subproblem").get("Feasibility Tolerance",oem8);
    Real stol = oem6*std::min(gtol,ctol);
    int maxit = steplist.sublist("Subproblem").get("Iteration Limit",1000);
    parlist_.sublist("Status Test").set("Gradient Tolerance",   gtol);
    parlist_.sublist("Status Test").set("Constraint Tolerance", ctol);
    parlist_.sublist("Status Test").set("Step Tolerance",       stol);
    parlist_.sublist("Status Test").set("Iteration Limit",      maxit);
    // Get step name from parameter list
    stepname_ = steplist.sublist("Subproblem").get("Step Type","Composite Step");
    stepType_ = StringToEStep(stepname_);
  }


  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    hasEquality_ = true;
    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();
    // Initialize additional storage
    x_ = x.clone();
    g_ = g.clone();
    l_ = l.clone();
    // Project x onto the feasible set
    if ( bnd.isActivated() ) {
      bnd.project(x);
    }
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;
    updateState(x,l,obj,con,bnd,algo_state);
  }

  /** \brief Initialize step without equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Objective<Real> &obj, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    // Initialize step state
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    // Initialize additional storage
    x_ = x.clone();
    g_ = g.clone();
    // Project x onto the feasible set
    if ( bnd.isActivated() ) {
      bnd.project(x);
    }
    // Initialize the algorithm state
    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;
    updateState(x,obj,bnd,algo_state);

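    // The bounds are enforced through the Moreau-Yosida penalty itself,
    // so the subproblem is solved with a deactivated bound constraint.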
    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
  }

  /** \brief Compute step (equality and bound constraints).
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, Constraint<Real> &con,
                BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    Real one(1);
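    // Solve the Moreau-Yosida regularized subproblem
    //   min f(x) + phi(x,mu_k,c_k)  subject to  c(x) = 0
    // using the step type selected in the parameter list.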
    Ptr<Objective<Real>> penObj;
    if (stepType_ == STEP_AUGMENTEDLAGRANGIAN) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<AugmentedLagrangian<Real>>(raw_obj,raw_con,l,one,x,*(state->constraintVec),parlist_);
      step_  = makePtr<AugmentedLagrangianStep<Real>>(parlist_);
    }
    else if (stepType_ == STEP_FLETCHER) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<Fletcher<Real>>(raw_obj,raw_con,x,*(state->constraintVec),parlist_);
      step_  = makePtr<FletcherStep<Real>>(parlist_);
    }
    else {
      penObj    = makePtrFromRef(obj);
      stepname_ = "Composite Step";
      stepType_ = STEP_COMPOSITESTEP;
      step_     = makePtr<CompositeStep<Real>>(parlist_);
    }
    status_ = makePtr<ConstraintStatusTest<Real>>(parlist_);
    algo_   = ROL::makePtr<Algorithm<Real>>(step_,status_,false);
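    // Run the subproblem solver from the current iterate; the step is
    // recovered below as the difference between the subproblem solution
    // and the current point x.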
    x_->set(x); l_->set(l);
    algo_->run(*x_,*l_,*penObj,con,print_);
    s.set(*x_); s.axpy(-one,x);
    subproblemIter_ = (algo_->getState())->iter;
  }

  /** \brief Compute step for bound constraints.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
                BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    Real one(1);
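    // Solve the bound-free subproblem min f(x) + phi(x,mu_k,c_k) using a
    // bundle, line-search, or trust-region step.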
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    if (stepType_ == STEP_BUNDLE) {
      status_ = makePtr<BundleStatusTest<Real>>(parlist_);
      step_   = makePtr<BundleStep<Real>>(parlist_);
    }
    else if (stepType_ == STEP_LINESEARCH) {
      status_ = makePtr<StatusTest<Real>>(parlist_);
      step_   = makePtr<LineSearchStep<Real>>(parlist_);
    }
    else {
      status_ = makePtr<StatusTest<Real>>(parlist_);
      step_   = makePtr<TrustRegionStep<Real>>(parlist_);
    }
    algo_ = ROL::makePtr<Algorithm<Real>>(step_,status_,false);
    x_->set(x);
    algo_->run(*x_,myPen,*bnd_,print_);
    s.set(*x_); s.axpy(-one,x);
    subproblemIter_ = (algo_->getState())->iter;
  }


  /** \brief Update step, if successful (equality and bound constraints).
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, Constraint<Real> &con,
               BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->SPiter = subproblemIter_;
    state->descentVec->set(s);
    // Update iterate and Lagrange multiplier
    x.plus(s);
    l.set(*l_);
    // Update objective and constraint
    algo_state.iter++;
    con.update(x,true,algo_state.iter);
    myPen.update(x,true,algo_state.iter);
    // Update state
    updateState(x,l,obj,con,bnd,algo_state);
    // Increase penalty parameter, if requested
    if (updatePenalty_) {
      state->searchSize *= tau_;
    }
    // Update Moreau-Yosida multipliers: mu_{k+1} = grad_x phi(x_k,mu_k,c_k)
    myPen.updateMultipliers(state->searchSize,x);
    algo_state.nfval += myPen.getNumberFunctionEvaluations() + ((algo_->getState())->nfval);
    algo_state.ngrad += myPen.getNumberGradientEvaluations() + ((algo_->getState())->ngrad);
    algo_state.ncval += (algo_->getState())->ncval;
    algo_state.snorm = s.norm();
    algo_state.iterateVec->set(x);
    algo_state.lagmultVec->set(l);
  }

  /** \brief Update step, for bound constraints.
  */
  void update( Vector<Real> &x, const Vector<Real> &s,
               Objective<Real> &obj, BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    MoreauYosidaPenalty<Real> &myPen
      = dynamic_cast<MoreauYosidaPenalty<Real>&>(obj);
    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec->set(s);
    // Update iterate
    x.plus(s);
    // Update objective and constraint
    algo_state.iter++;
    myPen.update(x,true,algo_state.iter);
    // Update state
    updateState(x,obj,bnd,algo_state);
    // Increase penalty parameter, if requested
    if (updatePenalty_) {
      state->searchSize *= tau_;
    }
    // Update Moreau-Yosida multipliers: mu_{k+1} = grad_x phi(x_k,mu_k,c_k)
    myPen.updateMultipliers(state->searchSize,x);
    algo_state.nfval += myPen.getNumberFunctionEvaluations() + ((algo_->getState())->nfval);
    algo_state.ngrad += myPen.getNumberGradientEvaluations() + ((algo_->getState())->ngrad);
    algo_state.snorm = s.norm();
    algo_state.iterateVec->set(x);
  }

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;
    hist << "  ";
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "fval";
    if (hasEquality_) {
      hist << std::setw(15) << std::left << "cnorm";
    }
    hist << std::setw(15) << std::left << "gnorm";
    hist << std::setw(15) << std::left << "ifeas";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(10) << std::left << "penalty";
    hist << std::setw(8) << std::left << "#fval";
    hist << std::setw(8) << std::left << "#grad";
    if (hasEquality_) {
      hist << std::setw(8) << std::left << "#cval";
    }
    hist << std::setw(8) << std::left << "subIter";
    hist << "\n";
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << "\n" << " Moreau-Yosida Penalty solver";
    hist << "\n";
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << gLnorm_;
      hist << std::setw(15) << std::left << compViolation_;
      hist << std::setw(15) << std::left << " ";
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << "\n";
    }
    else {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << gLnorm_;
      hist << std::setw(15) << std::left << compViolation_;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Step<Real>::getStepState()->searchSize;
      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8) << std::left << algo_state.nfval;
      hist << std::setw(8) << std::left << algo_state.ngrad;
      if (hasEquality_) {
        hist << std::setw(8) << std::left << algo_state.ncval;
      }
      hist << std::setw(8) << std::left << subproblemIter_;
      hist << "\n";
    }
    return hist.str();
  }

}; // class MoreauYosidaPenaltyStep

} // namespace ROL

#endif