ROL_InteriorPointStep.hpp
// @HEADER
// *****************************************************************************
// Rapid Optimization Library (ROL) Package
//
// Copyright 2014 NTESS and the ROL contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#ifndef ROL_INTERIORPOINTSTEP_H
#define ROL_INTERIORPOINTSTEP_H

#include "ROL_CompositeStep.hpp"
#include "ROL_AugmentedLagrangianStep.hpp"
#include "ROL_FletcherStep.hpp"
#include "ROL_BundleStep.hpp"
#include "ROL_TrustRegionStep.hpp"
#include "ROL_LineSearchStep.hpp"
#include "ROL_AugmentedLagrangian.hpp"
#include "ROL_Fletcher.hpp"
#include "ROL_InteriorPoint.hpp"
#include "ROL_Types.hpp"


namespace ROL {

template<class Real>
class AugmentedLagrangianStep;

template <class Real>
class InteriorPointStep : public Step<Real> {

  typedef InteriorPoint::PenalizedObjective<Real> IPOBJ;
  typedef Constraint_Partitioned<Real>            IPCON;

private:

  ROL::Ptr<StatusTest<Real> >      status_;
  ROL::Ptr<Step<Real> >            step_;
  ROL::Ptr<Algorithm<Real> >       algo_;
  ROL::Ptr<BoundConstraint<Real> > bnd_;
  ROL::ParameterList               parlist_;

  // Storage
  ROL::Ptr<Vector<Real> > x_;
  ROL::Ptr<Vector<Real> > g_;
  ROL::Ptr<Vector<Real> > l_;
  ROL::Ptr<Vector<Real> > c_;

  Real mu_;      // Barrier parameter
  Real mumin_;   // Minimum value of the barrier parameter
  Real mumax_;   // Maximum value of the barrier parameter
  Real rho_;     // Barrier parameter reduction factor

  // For the subproblem
  int subproblemIter_;  // Number of iterations taken by the subproblem solver

  int verbosity_;       // Adjust level of detail in printing step information
  bool print_;

  bool hasEquality_;     // Whether an equality constraint is present

  EStep stepType_;       // Subproblem step type
  std::string stepname_; // Subproblem step name

public:

  using Step<Real>::initialize;
  using Step<Real>::compute;
  using Step<Real>::update;

  InteriorPointStep(ROL::ParameterList &parlist) :
    Step<Real>(),
    status_(ROL::nullPtr),
    step_(ROL::nullPtr),
    algo_(ROL::nullPtr),
    parlist_(parlist),
    x_(ROL::nullPtr),
    g_(ROL::nullPtr),
    l_(ROL::nullPtr),
    c_(ROL::nullPtr),
    hasEquality_(false),
    stepType_(STEP_COMPOSITESTEP),
    stepname_("Composite Step") {

    using ROL::ParameterList;

    verbosity_ = parlist.sublist("General").get("Print Verbosity",0);

    // List of general Interior Point parameters
    ParameterList& iplist = parlist.sublist("Step").sublist("Interior Point");
    mu_    = iplist.get("Initial Barrier Penalty",1.0);
    mumin_ = iplist.get("Minimum Barrier Penalty",1.e-4);
    mumax_ = iplist.get("Maximum Barrier Penalty",1e8);
    rho_   = iplist.get("Barrier Penalty Reduction Factor",0.5);

    // Subproblem step information
    print_ = iplist.sublist("Subproblem").get("Print History",false);
    Real gtol = iplist.sublist("Subproblem").get("Optimality Tolerance",1e-8);
    Real ctol = iplist.sublist("Subproblem").get("Feasibility Tolerance",1e-8);
    Real stol = static_cast<Real>(1e-6)*std::min(gtol,ctol);
    int maxit = iplist.sublist("Subproblem").get("Iteration Limit",1000);
    parlist_.sublist("Status Test").set("Gradient Tolerance",   gtol);
    parlist_.sublist("Status Test").set("Constraint Tolerance", ctol);
    parlist_.sublist("Status Test").set("Step Tolerance",       stol);
    parlist_.sublist("Status Test").set("Iteration Limit",      maxit);

    // Get the step name from the parameter list
    stepname_ = iplist.sublist("Subproblem").get("Step Type","Composite Step");
    stepType_ = StringToEStep(stepname_);
  }

  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
    hasEquality_ = true;

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();

    // Initialize storage
    x_ = x.clone();
    g_ = g.clone();
    l_ = l.clone();
    c_ = c.clone();

    x_->set(x);

    auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    auto& ipcon = dynamic_cast<IPCON&>(con);

    // Set the initial barrier penalty
    ipobj.updatePenalty(mu_);

    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    Real zerotol = 0.0;
    obj.update(x,true,algo_state.iter);
    algo_state.value = obj.value(x,zerotol);

    obj.gradient(*g_,x,zerotol);
    algo_state.gnorm = g_->norm();

    con.value(*c_,x,zerotol);
    algo_state.cnorm = c_->norm();

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();
    algo_state.ncval += ipcon.getNumberConstraintEvaluations();
  }


  /** \brief Initialize step with equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, Constraint<Real> &con,
                   BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    bnd.projectInterior(x);
    initialize(x,g,l,c,obj,con,algo_state);
  }

  /** \brief Initialize step with no equality constraint.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g,
                   Objective<Real> &obj, BoundConstraint<Real> &bnd,
                   AlgorithmState<Real> &algo_state ) {
    bnd.projectInterior(x);

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->descentVec  = x.clone();
    state->gradientVec = g.clone();

    // Initialize storage
    x_ = x.clone(); x_->set(x);
    g_ = g.clone();

    // Set the initial barrier penalty
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    ipobj.updatePenalty(mu_);

    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    Real zerotol = std::sqrt(ROL_EPSILON<Real>());
    obj.update(x,true,algo_state.iter);
    algo_state.value = obj.value(x,zerotol);

    obj.gradient(*g_,x,zerotol);
    algo_state.gnorm = g_->norm();

    algo_state.cnorm = static_cast<Real>(0);

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();

    bnd_ = ROL::makePtr<BoundConstraint<Real>>();
    bnd_->deactivate();
  }



  /** \brief Compute step (equality constraints).
  */
  void compute( Vector<Real> &s,
                const Vector<Real> &x,
                const Vector<Real> &l,
                Objective<Real> &obj,
                Constraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
    // Grab interior point objective and constraint
    //auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    //auto& ipcon = dynamic_cast<IPCON&>(con);

    Real one(1);
    // Create the algorithm
    Ptr<Objective<Real>> penObj;
    if (stepType_ == STEP_AUGMENTEDLAGRANGIAN) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<AugmentedLagrangian<Real>>(raw_obj,raw_con,l,one,x,*(state->constraintVec),parlist_);
      step_  = makePtr<AugmentedLagrangianStep<Real>>(parlist_);
    }
    else if (stepType_ == STEP_FLETCHER) {
      Ptr<Objective<Real>>  raw_obj = makePtrFromRef(obj);
      Ptr<Constraint<Real>> raw_con = makePtrFromRef(con);
      Ptr<StepState<Real>>  state   = Step<Real>::getState();
      penObj = makePtr<Fletcher<Real>>(raw_obj,raw_con,x,*(state->constraintVec),parlist_);
      step_  = makePtr<FletcherStep<Real>>(parlist_);
    }
    else {
      penObj    = makePtrFromRef(obj);
      stepname_ = "Composite Step";
      stepType_ = STEP_COMPOSITESTEP;
      step_     = makePtr<CompositeStep<Real>>(parlist_);
    }
    status_ = makePtr<ConstraintStatusTest<Real>>(parlist_);
    algo_   = ROL::makePtr<Algorithm<Real>>(step_,status_,false);

    // Run the algorithm
    x_->set(x); l_->set(l);
    algo_->run(*x_,*g_,*l_,*c_,*penObj,con,print_);
    s.set(*x_); s.axpy(-one,x);

    // Get the number of iterations from the subproblem solve
    subproblemIter_ = (algo_->getState())->iter;
  }

  void compute( Vector<Real> &s,
                const Vector<Real> &x,
                const Vector<Real> &l,
                Objective<Real> &obj,
                Constraint<Real> &con,
                BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    compute(s,x,l,obj,con,algo_state);
  }

  // Bound constrained
  void compute( Vector<Real> &s,
                const Vector<Real> &x,
                Objective<Real> &obj,
                BoundConstraint<Real> &bnd,
                AlgorithmState<Real> &algo_state ) {
    // Grab the interior point objective
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);

    // Create the algorithm
    if (stepType_ == STEP_BUNDLE) {
      status_ = makePtr<BundleStatusTest<Real>>(parlist_);
      step_   = makePtr<BundleStep<Real>>(parlist_);
    }
    else if (stepType_ == STEP_LINESEARCH) {
      status_ = makePtr<StatusTest<Real>>(parlist_);
      step_   = makePtr<LineSearchStep<Real>>(parlist_);
    }
    else {
      status_ = makePtr<StatusTest<Real>>(parlist_);
      step_   = makePtr<TrustRegionStep<Real>>(parlist_);
    }
    algo_ = ROL::makePtr<Algorithm<Real>>(step_,status_,false);

    // Run the algorithm
    x_->set(x);
    algo_->run(*x_,*g_,ipobj,*bnd_,print_);
    s.set(*x_); s.axpy(static_cast<Real>(-1),x);

    // Get the number of iterations from the subproblem solve
    subproblemIter_ = (algo_->getState())->iter;
  }



  /** \brief Update step, if successful (equality constraints).
  */
  void update( Vector<Real> &x,
               Vector<Real> &l,
               const Vector<Real> &s,
               Objective<Real> &obj,
               Constraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
    // Grab interior point objective and constraint
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);
    auto& ipcon = dynamic_cast<IPCON&>(con);

    // If we can change the barrier parameter, do so
    if( (rho_ < 1.0 && mu_ > mumin_) || (rho_ > 1.0 && mu_ < mumax_) ) {
      mu_ *= rho_;
      ipobj.updatePenalty(mu_);
    }

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    state->SPiter = subproblemIter_;

    // Update optimization vector
    x.plus(s);

    algo_state.iterateVec->set(x);
    state->descentVec->set(s);
    algo_state.snorm = s.norm();
    algo_state.iter++;

    Real zerotol = 0.0;

    // Evaluate the barrier objective, then record the original (unpenalized) objective value
    algo_state.value = ipobj.value(x,zerotol);
    algo_state.value = ipobj.getObjectiveValue();

    ipcon.value(*c_,x,zerotol);
    state->constraintVec->set(*c_);

    ipobj.gradient(*g_,x,zerotol);
    state->gradientVec->set(*g_);

    ipcon.applyAdjointJacobian(*g_,*l_,x,zerotol);
    state->gradientVec->plus(*g_);

    algo_state.gnorm = g_->norm();
    algo_state.cnorm = state->constraintVec->norm();
    algo_state.snorm = s.norm();

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();
    algo_state.ncval += ipcon.getNumberConstraintEvaluations();
  }

  void update( Vector<Real> &x,
               Vector<Real> &l,
               const Vector<Real> &s,
               Objective<Real> &obj,
               Constraint<Real> &con,
               BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    update(x,l,s,obj,con,algo_state);

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();
    x_->set(x);
    x_->axpy(static_cast<Real>(-1),state->gradientVec->dual());
    bnd.project(*x_);
    x_->axpy(static_cast<Real>(-1),x);
    algo_state.gnorm = x_->norm();
  }

  void update( Vector<Real> &x,
               const Vector<Real> &s,
               Objective<Real> &obj,
               BoundConstraint<Real> &bnd,
               AlgorithmState<Real> &algo_state ) {
    // Grab interior point objective
    auto& ipobj = dynamic_cast<IPOBJ&>(obj);

    // If we can change the barrier parameter, do so
    if( (rho_ < 1.0 && mu_ > mumin_) || (rho_ > 1.0 && mu_ < mumax_) ) {
      mu_ *= rho_;
      ipobj.updatePenalty(mu_);
    }

    ROL::Ptr<StepState<Real> > state = Step<Real>::getState();

    // Update optimization vector
    x.plus(s);

    algo_state.iterateVec->set(x);
    state->descentVec->set(s);
    algo_state.snorm = s.norm();
    algo_state.iter++;

    Real zerotol = std::sqrt(ROL_EPSILON<Real>());

    // Evaluate the barrier objective, then record the original (unpenalized) objective value
    algo_state.value = ipobj.value(x,zerotol);
    algo_state.value = ipobj.getObjectiveValue();

    ipobj.gradient(*g_,x,zerotol);
    state->gradientVec->set(*g_);

    // Measure the norm of the projected gradient step
    x_->set(x);
    x_->axpy(static_cast<Real>(-1),state->gradientVec->dual());
    bnd.project(*x_);
    x_->axpy(static_cast<Real>(-1),x);

    algo_state.gnorm = x_->norm();
    algo_state.snorm = s.norm();

    algo_state.nfval += ipobj.getNumberFunctionEvaluations();
    algo_state.ngrad += ipobj.getNumberGradientEvaluations();
  }

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;

    if( verbosity_ > 0 ) {

      hist << std::string(116,'-') << "\n";
      hist << "Interior Point status output definitions\n\n";

      hist << "  IPiter  - Number of interior point steps taken\n";
      hist << "  SPiter  - Number of subproblem solver iterations\n";
      hist << "  penalty - Penalty parameter multiplying the barrier objective\n";
      hist << "  fval    - Objective function value\n";
      if (hasEquality_) {
        hist << "  cnorm   - Norm of the composite constraint\n";
        hist << "  gLnorm  - Norm of the Lagrangian's gradient\n";
      }
      else {
        hist << "  gnorm   - Norm of the projected objective gradient\n";
      }
      hist << "  snorm   - Norm of step (update to optimization and slack vector)\n";
      hist << "  #fval   - Number of objective function evaluations\n";
      hist << "  #grad   - Number of gradient evaluations\n";
      if (hasEquality_) {
        hist << "  #cval   - Number of composite constraint evaluations\n";
      }
      hist << std::string(116,'-') << "\n";
    }

    hist << "  ";
    hist << std::setw(9)  << std::left << "IPiter";
    hist << std::setw(9)  << std::left << "SPiter";
    hist << std::setw(15) << std::left << "penalty";
    hist << std::setw(15) << std::left << "fval";
    if (hasEquality_) {
      hist << std::setw(15) << std::left << "cnorm";
      hist << std::setw(15) << std::left << "gLnorm";
    }
    else {
      hist << std::setw(15) << std::left << "gnorm";
    }
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(8)  << std::left << "#fval";
    hist << std::setw(8)  << std::left << "#grad";
    if (hasEquality_) {
      hist << std::setw(8) << std::left << "#cval";
    }

    hist << "\n";
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << "\n" << "Primal Interior Point Solver\n";
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool pHeader = false ) const {
    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(9)  << std::left << algo_state.iter;
      hist << std::setw(9)  << std::left << subproblemIter_;
      hist << std::setw(15) << std::left << mu_;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << "\n";
    }
    else {
      hist << "  ";
      hist << std::setw(9)  << std::left << algo_state.iter;
      hist << std::setw(9)  << std::left << subproblemIter_;
      hist << std::setw(15) << std::left << mu_;
      hist << std::setw(15) << std::left << algo_state.value;
      if (hasEquality_) {
        hist << std::setw(15) << std::left << algo_state.cnorm;
      }
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.snorm;
//      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8)  << std::left << algo_state.nfval;
      hist << std::setw(8)  << std::left << algo_state.ngrad;
      if (hasEquality_) {
        hist << std::setw(8) << std::left << algo_state.ncval;
      }
      hist << "\n";
    }
    return hist.str();
  }

}; // class InteriorPointStep

} // namespace ROL

#endif // ROL_INTERIORPOINTSTEP_H
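
The constructor above pulls every setting it uses from nested sublists of a ROL::ParameterList. As a quick reference, the sketch below (not part of the header) assembles such a list. The helper name makeInteriorPointParameters is hypothetical; the sublist and key strings are exactly the ones queried in the constructor, and the values shown are simply the defaults those get() calls would fall back to anyway.

// Illustrative sketch only: shows the parameter-list entries read by
// InteriorPointStep; adjust the values as needed for a given problem.
#include "ROL_ParameterList.hpp"

inline ROL::ParameterList makeInteriorPointParameters() {
  ROL::ParameterList parlist;

  // Controls how much detail printHeader() emits.
  parlist.sublist("General").set("Print Verbosity", 0);

  // Barrier continuation: mu_ starts at the initial penalty and is multiplied
  // by the reduction factor after each accepted step, within [minimum, maximum].
  ROL::ParameterList &iplist = parlist.sublist("Step").sublist("Interior Point");
  iplist.set("Initial Barrier Penalty",          1.0);
  iplist.set("Minimum Barrier Penalty",          1.e-4);
  iplist.set("Maximum Barrier Penalty",          1.e8);
  iplist.set("Barrier Penalty Reduction Factor", 0.5);

  // Settings forwarded to the subproblem solver's status test.
  ROL::ParameterList &sub = iplist.sublist("Subproblem");
  sub.set("Print History",         false);
  sub.set("Optimality Tolerance",  1.e-8);  // becomes "Gradient Tolerance"
  sub.set("Feasibility Tolerance", 1.e-8);  // becomes "Constraint Tolerance"
  sub.set("Iteration Limit",       1000);
  sub.set("Step Type",             "Composite Step"); // e.g. "Augmented Lagrangian",
                                                      // "Fletcher", "Trust Region", ...
  return parlist;
}

With such a list in hand, the step is constructed as InteriorPointStep<double> step(parlist); the "Step Type" string is converted to an EStep by StringToEStep and selects which subproblem solver compute() assembles internally.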