ROL_CompositeStep.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_COMPOSITESTEP_H
#define ROL_COMPOSITESTEP_H

#include "ROL_Types.hpp"
#include "ROL_Step.hpp"
#include <sstream>
#include <iomanip>
#include "Teuchos_SerialDenseMatrix.hpp"
#include "Teuchos_LAPACK.hpp"
/** @ingroup step_group
    \class ROL::CompositeStep
    \brief Implements the computation of optimization steps
           with composite-step trust-region methods.
*/

namespace ROL {

template <class Real>
class CompositeStep : public Step<Real> {
private:

  // Vectors used for cloning.
  Teuchos::RCP<Vector<Real> > xvec_;
  Teuchos::RCP<Vector<Real> > gvec_;
  Teuchos::RCP<Vector<Real> > cvec_;
  Teuchos::RCP<Vector<Real> > lvec_;

  // Diagnostic return flags for subalgorithms.
  int flagCG_;
  int flagAC_;
  int iterCG_;

  // Stopping conditions.
  int  maxiterCG_;
  int  maxiterOSS_;
  Real tolCG_;
  Real tolOSS_;
  bool tolOSSfixed_;

  // Tolerances and stopping conditions for subalgorithms.
  Real lmhtol_;
  Real qntol_;
  Real pgtol_;
  Real projtol_;
  Real tangtol_;
  Real tntmax_;

  // Trust-region parameters.
  Real zeta_;
  Real Delta_;
  Real penalty_;
  Real eta_;

  Real ared_;
  Real pred_;
  Real snorm_;
  Real nnorm_;
  Real tnorm_;

  // Output flags.
  bool infoQN_;
  bool infoLM_;
  bool infoTS_;
  bool infoAC_;
  bool infoLS_;
  bool infoALL_;

  // Performance summary.
  int totalIterCG_;
  int totalProj_;
  int totalNegCurv_;
  int totalRef_;
  int totalCallLS_;
  int totalIterLS_;

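  // Signum helper: returns -1, 0, or +1 according to the sign of val.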
  template <typename T> int sgn(T val) {
    return (T(0) < val) - (val < T(0));
  }

  void printInfoLS(const std::vector<Real> &res) const {
    if (infoLS_) {
      std::stringstream hist;
      hist << std::scientific << std::setprecision(8);
      hist << "\n    Augmented System Solver:\n";
      hist << "    True Residual\n";
      for (unsigned j=0; j<res.size(); j++) {
        hist << "    " << std::left << std::setw(14) << res[j] << "\n";
      }
      hist << "\n";
      std::cout << hist.str();
    }
  }

  Real setTolOSS(const Real intol) const {
    return tolOSSfixed_ ? tolOSS_ : intol;
  }

public:

  virtual ~CompositeStep() {}

  CompositeStep( Teuchos::ParameterList & parlist ) : Step<Real>() {
    //Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
    flagCG_ = 0;
    flagAC_ = 0;
    iterCG_ = 0;

    Teuchos::ParameterList& steplist = parlist.sublist("Step").sublist("Composite Step");

    //maxiterOSS_  = steplist.sublist("Optimality System Solver").get("Iteration Limit", 50);
    tolOSS_      = steplist.sublist("Optimality System Solver").get("Nominal Relative Tolerance", 1e-8);
    tolOSSfixed_ = steplist.sublist("Optimality System Solver").get("Fix Tolerance", true);

    maxiterCG_   = steplist.sublist("Tangential Subproblem Solver").get("Iteration Limit", 20);
    tolCG_       = steplist.sublist("Tangential Subproblem Solver").get("Relative Tolerance", 1e-2);

    int outLvl   = steplist.get("Output Level", 0);

    lmhtol_  = tolOSS_;
    qntol_   = tolOSS_;
    pgtol_   = tolOSS_;
    projtol_ = tolOSS_;
    tangtol_ = tolOSS_;
    tntmax_  = 2.0;

    zeta_    = 0.8;
    Delta_   = 1e2;
    penalty_ = 1.0;
    eta_     = 1e-8;

    snorm_   = 0.0;
    nnorm_   = 0.0;
    tnorm_   = 0.0;

    infoALL_ = false;
    if (outLvl > 0) {
      infoALL_ = true;
    }
    infoQN_  = false;
    infoLM_  = false;
    infoTS_  = false;
    infoAC_  = false;
    infoLS_  = false;
    infoQN_  = infoQN_ || infoALL_;
    infoLM_  = infoLM_ || infoALL_;
    infoTS_  = infoTS_ || infoALL_;
    infoAC_  = infoAC_ || infoALL_;
    infoLS_  = infoLS_ || infoALL_;

    totalIterCG_  = 0;
    totalProj_    = 0;
    totalNegCurv_ = 0;
    totalRef_     = 0;
    totalCallLS_  = 0;
    totalIterLS_  = 0;
  }
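
  // Usage sketch (not part of the original file): the sublist and key names
  // below are the ones read by the constructor above; standard
  // Teuchos::ParameterList semantics are assumed.
  //
  //   Teuchos::ParameterList parlist;
  //   Teuchos::ParameterList &cs = parlist.sublist("Step").sublist("Composite Step");
  //   cs.sublist("Optimality System Solver").set("Nominal Relative Tolerance", 1e-8);
  //   cs.sublist("Optimality System Solver").set("Fix Tolerance", true);
  //   cs.sublist("Tangential Subproblem Solver").set("Iteration Limit", 20);
  //   cs.sublist("Tangential Subproblem Solver").set("Relative Tolerance", 1e-2);
  //   cs.set("Output Level", 1);
  //   ROL::CompositeStep<double> step(parlist);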

  /** \brief Initialize step.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &g, Vector<Real> &l, const Vector<Real> &c,
                   Objective<Real> &obj, EqualityConstraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
    //Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
    Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
    state->descentVec    = x.clone();
    state->gradientVec   = g.clone();
    state->constraintVec = c.clone();

    xvec_ = x.clone();
    gvec_ = g.clone();
    lvec_ = l.clone();
    cvec_ = c.clone();

    Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();
    Teuchos::RCP<Vector<Real> > gl  = gvec_->clone();

    algo_state.nfval = 0;
    algo_state.ncval = 0;
    algo_state.ngrad = 0;

    Real zerotol = std::sqrt(ROL_EPSILON);

    // Update objective and constraint.
    obj.update(x, true, algo_state.iter);
    algo_state.value = obj.value(x, zerotol);
    algo_state.nfval++;
    con.update(x, true, algo_state.iter);
    con.value(*cvec_, x, zerotol);
    algo_state.cnorm = cvec_->norm();
    algo_state.ncval++;
    obj.gradient(*gvec_, x, zerotol);

    // Compute gradient of Lagrangian at new multiplier guess.
    computeLagrangeMultiplier(l, x, *gvec_, con);
    con.applyAdjointJacobian(*ajl, l, x, zerotol);
    gl->set(*gvec_); gl->plus(*ajl);
    algo_state.ngrad++;
    algo_state.gnorm = gl->norm();
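    // Note: gnorm is the norm of the Lagrangian gradient g + c'(x)*l,
    // which is what the "gLnorm" column of printHeader() reports.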
  }

  /** \brief Compute step.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, const Vector<Real> &l,
                Objective<Real> &obj, EqualityConstraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {
    //Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
    Real zerotol = std::sqrt(ROL_EPSILON);
    Real f = 0.0;
    Teuchos::RCP<Vector<Real> > n   = xvec_->clone();
    Teuchos::RCP<Vector<Real> > c   = cvec_->clone();
    Teuchos::RCP<Vector<Real> > t   = xvec_->clone();
    Teuchos::RCP<Vector<Real> > tCP = xvec_->clone();
    Teuchos::RCP<Vector<Real> > g   = gvec_->clone();
    Teuchos::RCP<Vector<Real> > gf  = gvec_->clone();
    Teuchos::RCP<Vector<Real> > Wg  = xvec_->clone();
    Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();

    Real f_new = 0.0;
    Teuchos::RCP<Vector<Real> > l_new  = lvec_->clone();
    Teuchos::RCP<Vector<Real> > c_new  = cvec_->clone();
    Teuchos::RCP<Vector<Real> > g_new  = gvec_->clone();
    Teuchos::RCP<Vector<Real> > gf_new = gvec_->clone();

    // Evaluate objective ... should have been stored.
    f = obj.value(x, zerotol);
    algo_state.nfval++;
    // Compute gradient of objective ... should have been stored.
    obj.gradient(*gf, x, zerotol);
    // Evaluate constraint ... should have been stored.
    con.value(*c, x, zerotol);

    // Compute quasi-normal step.
    computeQuasinormalStep(*n, *c, x, zeta_*Delta_, con);

    // Compute gradient of Lagrangian ... should have been stored.
    con.applyAdjointJacobian(*ajl, l, x, zerotol);
    g->set(*gf);
    g->plus(*ajl);
    algo_state.ngrad++;

    // Solve tangential subproblem.
    solveTangentialSubproblem(*t, *tCP, *Wg, x, *g, *n, l, Delta_, obj, con);
    totalIterCG_ += iterCG_;

    // Check acceptance of subproblem solutions, adjust merit function penalty parameter, ensure global convergence.
    accept(s, *n, *t, f_new, *c_new, *gf_new, *l_new, *g_new, x, l, f, *gf, *c, *g, *tCP, *Wg, obj, con, algo_state);
  }

  /** \brief Update step, if successful.
  */
  void update( Vector<Real> &x, Vector<Real> &l, const Vector<Real> &s,
               Objective<Real> &obj, EqualityConstraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {
    //Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();

    Real zero    = 0.0;
    Real half    = 0.5;
    Real zerotol = std::sqrt(ROL_EPSILON); //zero;
    Real ratio   = zero;

    Teuchos::RCP<Vector<Real> > g   = gvec_->clone();
    Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();
    Teuchos::RCP<Vector<Real> > gl  = gvec_->clone();
    Teuchos::RCP<Vector<Real> > c   = cvec_->clone();

    // Determine if the step gives sufficient reduction in the merit function,
    // update the trust-region radius.
    ratio = ared_/pred_;
    if ((std::abs(ared_) < 1e-12) && (std::abs(pred_) < 1e-12)) {
      ratio = 1.0;
    }
    if (ratio >= eta_) {
      x.plus(s);
      if (ratio >= 0.9) {
        Delta_ = std::max(7.0*snorm_, Delta_);
      }
      else if (ratio >= 0.8) {
        Delta_ = std::max(2.0*snorm_, Delta_);
      }
      obj.update(x, true, algo_state.iter);
      con.update(x, true, algo_state.iter);
      flagAC_ = 1;
    }
    else {
      Delta_ = half*std::max(nnorm_, tnorm_);
      obj.update(x, false, algo_state.iter);
      con.update(x, false, algo_state.iter);
      flagAC_ = 0;
    } // if (ratio >= eta_)

    Real val = obj.value(x, zerotol);
    algo_state.nfval++;
    obj.gradient(*g, x, zerotol);
    computeLagrangeMultiplier(l, x, *g, con);
    con.applyAdjointJacobian(*ajl, l, x, zerotol);
    gl->set(*g); gl->plus(*ajl);
    algo_state.ngrad++;
    con.value(*c, x, zerotol);

    Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
    state->gradientVec->set(*gl);
    state->constraintVec->set(*c);

    algo_state.value = val;
    algo_state.gnorm = gl->norm();
    algo_state.cnorm = c->norm();
    algo_state.iter++;
    algo_state.snorm = snorm_;

    // Update algorithm state.
    //(algo_state.iterateVec)->set(x);
  }

  /** \brief Compute step for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj, BoundConstraint<Real> &con,
                AlgorithmState<Real> &algo_state ) {}

  /** \brief Update step, for bound constraints; here only to satisfy the
             interface requirements, does nothing, needs refactoring.
  */
  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj, BoundConstraint<Real> &con,
               AlgorithmState<Real> &algo_state ) {}

  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;
    hist << "  ";
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "fval";
    hist << std::setw(15) << std::left << "cnorm";
    hist << std::setw(15) << std::left << "gLnorm";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(10) << std::left << "delta";
    hist << std::setw(10) << std::left << "nnorm";
    hist << std::setw(10) << std::left << "tnorm";
    hist << std::setw(8)  << std::left << "#fval";
    hist << std::setw(8)  << std::left << "#grad";
    hist << std::setw(8)  << std::left << "iterCG";
    hist << std::setw(8)  << std::left << "flagCG";
    hist << std::setw(8)  << std::left << "accept";
    hist << std::setw(8)  << std::left << "linsys";
    hist << "\n";
    return hist.str();
  }

  std::string printName( void ) const {
    std::stringstream hist;
    hist << "\n" << " Composite-step trust-region solver";
    hist << "\n";
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> & algo_state, bool pHeader = false ) const {
    //const Teuchos::RCP<const StepState<Real> >& step_state = Step<Real>::getStepState();

    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( pHeader ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << "\n";
    }
    else {
      hist << "  ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.cnorm;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::scientific << std::setprecision(2);
      hist << std::setw(10) << std::left << Delta_;
      hist << std::setw(10) << std::left << nnorm_;
      hist << std::setw(10) << std::left << tnorm_;
      hist << std::scientific << std::setprecision(6);
      hist << std::setw(8) << std::left << algo_state.nfval;
      hist << std::setw(8) << std::left << algo_state.ngrad;
      hist << std::setw(8) << std::left << iterCG_;
      hist << std::setw(8) << std::left << flagCG_;
      hist << std::setw(8) << std::left << flagAC_;
      hist << std::left << totalCallLS_ << "/" << totalIterLS_;
      hist << "\n";
    }
    return hist.str();
  }

  /** \brief Compute Lagrange multipliers by solving the least-squares
             problem of minimizing the gradient of the Lagrangian, via the
             augmented system formulation.
  */
  void computeLagrangeMultiplier(Vector<Real> &l, const Vector<Real> &x, const Vector<Real> &gf,
                                 EqualityConstraint<Real> &con) {

    Real zerotol = std::sqrt(ROL_EPSILON);
    std::vector<Real> augiters;

    if (infoLM_) {
      std::stringstream hist;
      hist << "\n  Lagrange multiplier step\n";
      std::cout << hist.str();
    }

    /* Apply adjoint of constraint Jacobian to current multiplier. */
    Teuchos::RCP<Vector<Real> > ajl = gvec_->clone();
    con.applyAdjointJacobian(*ajl, l, x, zerotol);

    /* Form right-hand side of the augmented system. */
    Teuchos::RCP<Vector<Real> > b1 = gvec_->clone();
    Teuchos::RCP<Vector<Real> > b2 = cvec_->clone();
    // b1 is the negative gradient of the Lagrangian.
    b1->set(gf); b1->plus(*ajl); b1->scale(-1.0);
    // b2 is zero.
    b2->zero();

    /* Declare left-hand side of augmented system. */
    Teuchos::RCP<Vector<Real> > v1 = xvec_->clone();
    Teuchos::RCP<Vector<Real> > v2 = lvec_->clone();

    /* Compute linear solver tolerance. */
    Real b1norm = b1->norm();
    Real tol = setTolOSS(lmhtol_*b1norm);

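    /* Note: the augmented system solved below has the saddle-point form
           [ B      A'(x)* ] [v1]   [b1]
           [ A'(x)  0      ] [v2] = [b2],
       where A'(x) is the constraint Jacobian and B an identity or Riesz
       operator (see EqualityConstraint::solveAugmentedSystem). With
       b1 = -grad L(x,l) and b2 = 0, the component v2 is the least-squares
       multiplier update applied below. */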
    /* Solve augmented system. */
    augiters = con.solveAugmentedSystem(*v1, *v2, *b1, *b2, x, tol);
    totalCallLS_++;
    totalIterLS_ = totalIterLS_ + augiters.size();
    printInfoLS(augiters);

    /* Return updated Lagrange multiplier. */
    // v2 is the multiplier update.
    l.plus(*v2);

  } // computeLagrangeMultiplier


  /** \brief Compute quasi-normal step by minimizing the norm of
             the linearized constraint.

             The step is computed with a dogleg method that combines a
             (scaled) Cauchy point and a minimum-norm 'Newton' point for
             the model  min || c'(x_k)*n + c(x_k) ||^2,  ||n|| <= delta.
  */
  void computeQuasinormalStep(Vector<Real> &n, const Vector<Real> &c, const Vector<Real> &x, Real delta,
                              EqualityConstraint<Real> &con) {

    if (infoQN_) {
      std::stringstream hist;
      hist << "\n  Quasi-normal step\n";
      std::cout << hist.str();
    }

    Real zero = 0.0;
    Real one  = 1.0;
    Real zerotol = std::sqrt(ROL_EPSILON); //zero;
    std::vector<Real> augiters;

    /* Compute Cauchy step nCP. */
    Teuchos::RCP<Vector<Real> > nCP     = xvec_->clone();
    Teuchos::RCP<Vector<Real> > nCPdual = gvec_->clone();
    Teuchos::RCP<Vector<Real> > nN      = xvec_->clone();
    Teuchos::RCP<Vector<Real> > ctemp   = cvec_->clone();
    Teuchos::RCP<Vector<Real> > dualc0  = lvec_->clone();
    dualc0->set(c.dual());
    con.applyAdjointJacobian(*nCPdual, *dualc0, x, zerotol);
    nCP->set(nCPdual->dual());
    con.applyJacobian(*ctemp, *nCP, x, zerotol);

    Real normsquare_ctemp = ctemp->dot(*ctemp);
    if (normsquare_ctemp != zero) {
      nCP->scale( -(nCP->dot(*nCP))/normsquare_ctemp );
    }
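
    // Note: the scaling factor -||nCP||^2/||c'(x)nCP||^2 used above is the
    // exact minimizer of alpha -> ||c'(x)(alpha*nCP) + c(x)||^2 along the
    // steepest-descent direction nCP of the linearized infeasibility,
    // under the dual-space identifications made here.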

    /* If the Cauchy step nCP is outside the trust region,
       return the scaled Cauchy step. */
    Real norm_nCP = nCP->norm();
    if (norm_nCP >= delta) {
      n.set(*nCP);
      n.scale( delta/norm_nCP );
      if (infoQN_) {
        std::stringstream hist;
        hist << "  taking partial Cauchy step\n";
        std::cout << hist.str();
      }
      return;
    }

    /* Compute 'Newton' step, for example, by solving a problem
       related to finding the minimum norm solution of
       min || c'(x_k)*s + c(x_k) ||^2. */
    // Compute tolerance for linear solver.
    con.applyJacobian(*ctemp, *nCP, x, zerotol);
    ctemp->plus(c);
    Real tol = setTolOSS(qntol_*ctemp->norm());
    // Form right-hand side.
    ctemp->scale(-one);
    nCPdual->set(nCP->dual());
    nCPdual->scale(-one);
    // Declare left-hand side of augmented system.
    Teuchos::RCP<Vector<Real> > dn = xvec_->clone();
    Teuchos::RCP<Vector<Real> > y  = lvec_->clone();
    // Solve augmented system.
    augiters = con.solveAugmentedSystem(*dn, *y, *nCPdual, *ctemp, x, tol);
    totalCallLS_++;
    totalIterLS_ = totalIterLS_ + augiters.size();
    printInfoLS(augiters);

    nN->set(*dn);
    nN->plus(*nCP);

    /* Either take full or partial Newton step, depending on
       the trust-region constraint. */
    Real norm_nN = nN->norm();
    if (norm_nN <= delta) {
      // Take full feasibility step.
      n.set(*nN);
      if (infoQN_) {
        std::stringstream hist;
        hist << "  taking full Newton step\n";
        std::cout << hist.str();
      }
    }
    else {
      // Take the convex combination n = nCP+tau*(nN-nCP),
      // so that ||n|| = delta. In other words, solve the
      // scalar quadratic equation: ||nCP+tau*(nN-nCP)||^2 = delta^2.
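      // With dn = nN-nCP, expanding ||nCP+tau*dn||^2 = delta^2 gives
      //   aa*tau^2 + 2*bb*tau + cc = 0,
      // where aa = ||dn||^2, bb = dn.nCP, cc = ||nCP||^2 - delta^2.
      // Since ||nCP|| < delta at this point, cc < 0, the discriminant
      // bb*bb - aa*cc is positive, and the positive root is taken below.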
      Real aa  = dn->dot(*dn);
      Real bb  = dn->dot(*nCP);
      Real cc  = norm_nCP*norm_nCP - delta*delta;
      Real tau = (-bb+std::sqrt(bb*bb-aa*cc))/aa;
      n.set(*nCP);
      n.axpy(tau, *dn);
      if (infoQN_) {
        std::stringstream hist;
        hist << "  taking dogleg step\n";
        std::cout << hist.str();
      }
    }

  } // computeQuasinormalStep


  /** \brief Solve tangential subproblem.
  */
  void solveTangentialSubproblem(Vector<Real> &t, Vector<Real> &tCP, Vector<Real> &Wg,
                                 const Vector<Real> &x, const Vector<Real> &g, const Vector<Real> &n, const Vector<Real> &l,
                                 Real delta, Objective<Real> &obj, EqualityConstraint<Real> &con) {

    /* Initialization of the CG step. */
    bool orthocheck = true; // set to true if want to check orthogonality
                            // of Wr and r, otherwise set to false
    Real tol_ortho = 0.5;   // orthogonality measure; represents a bound on norm( \hat{S}, 2), where
                            // \hat{S} is defined in Heinkenschloss/Ridzal, "A Matrix-Free Trust-Region SQP Method"
    Real S_max = 1.0;       // another orthogonality measure; norm(S) needs to be bounded by
                            // a modest constant; norm(S) is small if the approximation of
                            // the null space projector is good
    Real zero    = 0.0;
    Real one     = 1.0;
    Real zerotol = std::sqrt(ROL_EPSILON);
    std::vector<Real> augiters;
    iterCG_ = 1;
    flagCG_ = 0;
    t.zero();
    tCP.zero();
    Teuchos::RCP<Vector<Real> > r     = gvec_->clone();
    Teuchos::RCP<Vector<Real> > pdesc = xvec_->clone();
    Teuchos::RCP<Vector<Real> > tprev = xvec_->clone();
    Teuchos::RCP<Vector<Real> > Wr    = xvec_->clone();
    Teuchos::RCP<Vector<Real> > Hp    = gvec_->clone();
    Teuchos::RCP<Vector<Real> > xtemp = xvec_->clone();
    Teuchos::RCP<Vector<Real> > gtemp = gvec_->clone();
    Teuchos::RCP<Vector<Real> > ltemp = lvec_->clone();
    Teuchos::RCP<Vector<Real> > czero = cvec_->clone();
    czero->zero();
    r->set(g);
    obj.hessVec(*gtemp, n, x, zerotol);
    r->plus(*gtemp);
    con.applyAdjointHessian(*gtemp, l, n, x, zerotol);
    r->plus(*gtemp);
    Real normg  = r->norm();
    Real normWg = zero;
    Real pHp    = zero;
    Real rp     = zero;
    Real alpha  = zero;
    Real normp  = zero;
    Real normr  = zero;
    Real normt  = zero;
    std::vector<Real> normWr(maxiterCG_+1, zero);

    std::vector<Teuchos::RCP<Vector<Real> > > p;   // stores search directions
    std::vector<Teuchos::RCP<Vector<Real> > > Hps; // stores duals of hessvec's applied to p's
    std::vector<Teuchos::RCP<Vector<Real> > > rs;  // stores duals of residuals
    std::vector<Teuchos::RCP<Vector<Real> > > Wrs; // stores duals of projected residuals

    Real rptol = 1e-12;

    if (infoTS_) {
      std::stringstream hist;
      hist << "\n  Tangential subproblem\n";
      hist << std::setw(6) << std::right << "iter" << std::setw(18) << "||Wr||/||Wr0||" << std::setw(15) << "||s||";
      hist << std::setw(15) << "delta" << std::setw(15) << "||c'(x)s||" << "\n";
      std::cout << hist.str();
    }

    if (normg == 0) {
      if (infoTS_) {
        std::stringstream hist;
        hist << "    >>> Tangential subproblem: Initial gradient is zero! \n";
        std::cout << hist.str();
      }
      iterCG_ = 0; Wg.zero(); flagCG_ = 0;
      return;
    }

    /* Start CG loop. */
    while (iterCG_ < maxiterCG_) {

      // Store tangential Cauchy point (which is the current iterate in the second iteration).
      if (iterCG_ == 2) {
        tCP.set(t);
      }

      // Compute (inexact) projection W*r.
      if (iterCG_ == 1) {
        // Solve augmented system.
        Real tol = setTolOSS(pgtol_);
        augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
        totalCallLS_++;
        totalIterLS_ = totalIterLS_ + augiters.size();
        printInfoLS(augiters);

        Wg.set(*Wr);
        normWg = Wg.norm();
        if (orthocheck) {
          Wrs.push_back(xvec_->clone());
          (Wrs[iterCG_-1])->set(*Wr);
        }
        // Check if done (small initial projected residual).
        if (normWg == zero) {
          flagCG_ = 0;
          iterCG_--;
          if (infoTS_) {
            std::stringstream hist;
            hist << "  Initial projected residual is close to zero! \n";
            std::cout << hist.str();
          }
          return;
        }
        // Set first residual to projected gradient.
        // change r->set(Wg);
        r->set(Wg.dual());
        if (orthocheck) {
          rs.push_back(xvec_->clone());
          // change (rs[0])->set(*r);
          (rs[0])->set(r->dual());
        }
      }
      else {
        // Solve augmented system.
        Real tol = setTolOSS(projtol_);
        augiters = con.solveAugmentedSystem(*Wr, *ltemp, *r, *czero, x, tol);
        totalCallLS_++;
        totalIterLS_ = totalIterLS_ + augiters.size();
        printInfoLS(augiters);

        if (orthocheck) {
          Wrs.push_back(xvec_->clone());
          (Wrs[iterCG_-1])->set(*Wr);
        }
      }

      normWr[iterCG_-1] = Wr->norm();

      if (infoTS_) {
        Teuchos::RCP<Vector<Real> > ct = cvec_->clone();
        con.applyJacobian(*ct, t, x, zerotol);
        Real linc = ct->norm();
        std::stringstream hist;
        hist << std::scientific << std::setprecision(6);
        hist << std::setw(6) << std::right << iterCG_-1 << std::setw(18) << normWr[iterCG_-1]/normWg << std::setw(15) << t.norm();
        hist << std::setw(15) << delta << std::setw(15) << linc << "\n";
        std::cout << hist.str();
      }

      // Check if done (small relative residual).
      if (normWr[iterCG_-1]/normWg < tolCG_) {
        flagCG_ = 0;
        iterCG_ = iterCG_-1;
        if (infoTS_) {
          std::stringstream hist;
          hist << "  || W(g + H*(n+s)) || <= cgtol*|| W(g + H*n)|| \n";
          std::cout << hist.str();
        }
        return;
      }

      // Check nonorthogonality, one-norm of (WR*R/diag^2 - I).
      if (orthocheck) {
        Teuchos::SerialDenseMatrix<int,Real> Wrr(iterCG_,iterCG_); // holds matrix Wrs'*rs
        Teuchos::SerialDenseMatrix<int,Real> T(iterCG_,iterCG_);   // holds matrix T=(1/diag)*Wrs'*rs*(1/diag)
        Teuchos::SerialDenseMatrix<int,Real> Tm1(iterCG_,iterCG_); // holds matrix Tm1=T-I
        for (int i=0; i<iterCG_; i++) {
          for (int j=0; j<iterCG_; j++) {
            Wrr(i,j) = (Wrs[i])->dot(*rs[j]);
            T(i,j)   = Wrr(i,j)/(normWr[i]*normWr[j]);
            Tm1(i,j) = T(i,j);
            if (i==j) {
              Tm1(i,j) = Tm1(i,j) - 1.0;
            }
          }
        }
        if (Tm1.normOne() >= tol_ortho) {
          Teuchos::LAPACK<int,Real> lapack;
          std::vector<int> ipiv(iterCG_);
          int info;
          std::vector<Real> work(3*iterCG_);
          // Compute inverse of T.
          lapack.GETRF(iterCG_, iterCG_, T.values(), T.stride(), &ipiv[0], &info);
          lapack.GETRI(iterCG_, T.values(), T.stride(), &ipiv[0], &work[0], 3*iterCG_, &info);
          Tm1 = T;
          for (int i=0; i<iterCG_; i++) {
            Tm1(i,i) = Tm1(i,i) - 1.0;
          }
          if (Tm1.normOne() > S_max) {
            flagCG_ = 4;
            if (infoTS_) {
              std::stringstream hist;
              hist << "  large nonorthogonality in W(R)'*R detected \n";
              std::cout << hist.str();
            }
            return;
          }
        }
      }

      // Full orthogonalization.
      p.push_back(xvec_->clone());
      (p[iterCG_-1])->set(*Wr);
      (p[iterCG_-1])->scale(-one);
      for (int j=1; j<iterCG_; j++) {
        Real scal = (p[iterCG_-1])->dot(*(Hps[j-1])) / (p[j-1])->dot(*(Hps[j-1]));
        Teuchos::RCP<Vector<Real> > pj = xvec_->clone();
        pj->set(*p[j-1]);
        pj->scale(-scal);
        (p[iterCG_-1])->plus(*pj);
      }

      // change Hps.push_back(gvec_->clone());
      Hps.push_back(xvec_->clone());
      // change obj.hessVec(*(Hps[iterCG_-1]), *(p[iterCG_-1]), x, zerotol);
      obj.hessVec(*Hp, *(p[iterCG_-1]), x, zerotol);
      con.applyAdjointHessian(*gtemp, l, *(p[iterCG_-1]), x, zerotol);
      // change (Hps[iterCG_-1])->plus(*gtemp);
      Hp->plus(*gtemp);
      // "Preconditioning" step.
      (Hps[iterCG_-1])->set(Hp->dual());

      pHp = (p[iterCG_-1])->dot(*(Hps[iterCG_-1]));
      // change rp = (p[iterCG_-1])->dot(*r);
      rp  = (p[iterCG_-1])->dot(*(rs[iterCG_-1]));

      normp = (p[iterCG_-1])->norm();
      normr = r->norm();

      // Negative curvature stopping condition.
      if (pHp <= 0) {
        pdesc->set(*(p[iterCG_-1])); // p is the descent direction
        if ((std::abs(rp) >= rptol*normp*normr) && (sgn(rp) == 1)) {
          pdesc->scale(-one); // -p is the descent direction
        }
        flagCG_ = 2;
        Real a = pdesc->dot(*pdesc);
        Real b = pdesc->dot(t);
        Real c = t.dot(t) - delta*delta;
        // Positive root of a*theta^2 + 2*b*theta + c = 0.
        Real theta = (-b + std::sqrt(b*b - a*c)) / a;
        xtemp->set(*(p[iterCG_-1]));
        xtemp->scale(theta);
        t.plus(*xtemp);
        // Store as tangential Cauchy point if terminating in first iteration.
        if (iterCG_ == 1) {
          tCP.set(t);
        }
        if (infoTS_) {
          std::stringstream hist;
          hist << "  negative curvature detected \n";
          std::cout << hist.str();
        }
        return;
      }

      // Want to enforce nonzero alpha's.
      if (std::abs(rp) < rptol*normp*normr) {
        flagCG_ = 5;
        if (infoTS_) {
          std::stringstream hist;
          hist << "  Zero alpha due to inexactness. \n";
          std::cout << hist.str();
        }
        return;
      }

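      // Step length: with p initialized to the negative projected residual,
      // one has rp = p.r < 0 in exact arithmetic for a positive-definite
      // reduced Hessian, so alpha = -rp/pHp > 0, as in standard CG.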
      alpha = - rp/pHp;

      // Iterate update.
      tprev->set(t);
      xtemp->set(*(p[iterCG_-1]));
      xtemp->scale(alpha);
      t.plus(*xtemp);

      // Trust-region stopping condition.
      normt = t.norm();
      if (normt >= delta) {
        pdesc->set(*(p[iterCG_-1])); // p is the descent direction
        if (sgn(rp) == 1) {
          pdesc->scale(-one); // -p is the descent direction
        }
        Real a = pdesc->dot(*pdesc);
        Real b = pdesc->dot(*tprev);
        Real c = tprev->dot(*tprev) - delta*delta;
        // Positive root of a*theta^2 + 2*b*theta + c = 0.
        Real theta = (-b + std::sqrt(b*b - a*c)) / a;
        xtemp->set(*(p[iterCG_-1]));
        xtemp->scale(theta);
        t.set(*tprev);
        t.plus(*xtemp);
        // Store as tangential Cauchy point if terminating in first iteration.
        if (iterCG_ == 1) {
          tCP.set(t);
        }
        flagCG_ = 3;
        if (infoTS_) {
          std::stringstream hist;
          hist << "  trust-region condition active \n";
          std::cout << hist.str();
        }
        return;
      }

      // Residual update.
      xtemp->set(*(Hps[iterCG_-1]));
      xtemp->scale(alpha);
      // change r->plus(*gtemp);
      r->plus(xtemp->dual());
      if (orthocheck) {
        // change rs.push_back(gvec_->clone());
        rs.push_back(xvec_->clone());
        // change (rs[iterCG_])->set(*r);
        (rs[iterCG_])->set(r->dual());
      }

      iterCG_++;

    } // while (iterCG_ < maxiterCG_)

    flagCG_ = 1;
    if (infoTS_) {
      std::stringstream hist;
      hist << "  maximum number of iterations reached \n";
      std::cout << hist.str();
    }

  } // solveTangentialSubproblem
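
  // Summary of the flagCG_ codes set above (reported in the "flagCG"
  // column of printHeader()): 0 = residual tolerance met, 1 = iteration
  // limit reached, 2 = negative curvature detected, 3 = trust-region
  // boundary reached, 4 = loss of orthogonality, 5 = zero step due to
  // inexactness.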

  /** \brief Check acceptance of subproblem solutions, adjust merit function
             penalty parameter, ensure global convergence.
  */
  void accept(Vector<Real> &s, Vector<Real> &n, Vector<Real> &t, Real f_new, Vector<Real> &c_new,
              Vector<Real> &gf_new, Vector<Real> &l_new, Vector<Real> &g_new,
              const Vector<Real> &x, const Vector<Real> &l, Real f, const Vector<Real> &gf, const Vector<Real> &c,
              const Vector<Real> &g, Vector<Real> &tCP, Vector<Real> &Wg,
              Objective<Real> &obj, EqualityConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {

    Real beta         = 1e-8;  // predicted reduction parameter
    Real tol_red_tang = 1e-3;  // internal reduction factor for tangtol
    Real tol_red_all  = 1e-1;  // internal reduction factor for qntol, lmhtol, pgtol, projtol, tangtol
    //bool glob_refine = true; // true  - if subsolver tolerances are adjusted in this routine, keep adjusted values globally
    //                         // false - if subsolver tolerances are adjusted in this routine, discard adjusted values
    Real tol_fdiff    = 1e-12; // relative objective function difference for ared computation
    int ct_max        = 10;    // maximum number of globalization tries
    Real mintol       = 1e-16; // smallest projection tolerance value

    // Determines max value of |rpred|/pred.
    Real rpred_over_pred = 0.5*(1-eta_);

    if (infoAC_) {
      std::stringstream hist;
      hist << "\n  Composite step acceptance\n";
      std::cout << hist.str();
    }

    Real zero    = 0.0;
    Real one     = 1.0;
    Real two     = 2.0;
    Real half    = one/two;
    Real zerotol = std::sqrt(ROL_EPSILON);
    std::vector<Real> augiters;

    Real pred          = zero;
    Real ared          = zero;
    Real rpred         = zero;
    Real part_pred     = zero;
    Real linc_preproj  = zero;
    Real linc_postproj = zero;
    Real tangtol_start = zero;
    Real tangtol       = tangtol_;
    //Real projtol     = projtol_;
    bool flag          = false;
    int num_proj       = 0;
    bool try_tCP       = false;
    Real fdiff         = zero;

    Teuchos::RCP<Vector<Real> > xtrial  = xvec_->clone();
    Teuchos::RCP<Vector<Real> > Jl      = gvec_->clone();
    Teuchos::RCP<Vector<Real> > gfJl    = gvec_->clone();
    Teuchos::RCP<Vector<Real> > Jnc     = cvec_->clone();
    Teuchos::RCP<Vector<Real> > t_orig  = xvec_->clone();
    Teuchos::RCP<Vector<Real> > t_dual  = gvec_->clone();
    Teuchos::RCP<Vector<Real> > Jt_orig = cvec_->clone();
    Teuchos::RCP<Vector<Real> > t_m_tCP = xvec_->clone();
    Teuchos::RCP<Vector<Real> > ltemp   = lvec_->clone();
    Teuchos::RCP<Vector<Real> > xtemp   = xvec_->clone();
    Teuchos::RCP<Vector<Real> > rt      = cvec_->clone();
    Teuchos::RCP<Vector<Real> > Hn      = gvec_->clone();
    Teuchos::RCP<Vector<Real> > Hto     = gvec_->clone();
    Teuchos::RCP<Vector<Real> > cxxvec  = gvec_->clone();
    Teuchos::RCP<Vector<Real> > czero   = cvec_->clone();
    czero->zero();
    Real Jnc_normsquared = zero;
    Real c_normsquared   = zero;

    // Compute and store some quantities for later use. Necessary
    // because of the function and constraint updates below.
    con.applyAdjointJacobian(*Jl, l, x, zerotol);
    con.applyJacobian(*Jnc, n, x, zerotol);
    Jnc->plus(c);
    Jnc_normsquared = Jnc->dot(*Jnc);
    c_normsquared   = c.dot(c);
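    // Note: Jnc = c'(x)n + c(x) is the linearized constraint value at the
    // quasi-normal step, so c_normsquared - Jnc_normsquared measures the
    // decrease in linearized infeasibility achieved by n.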

    for (int ct=0; ct<ct_max; ct++) {

      try_tCP = true;
      t_m_tCP->set(t);
      t_m_tCP->scale(-one);
      t_m_tCP->plus(tCP);
      if (t_m_tCP->norm() == zero) {
        try_tCP = false;
      }

      t_orig->set(t);
      con.applyJacobian(*Jt_orig, *t_orig, x, zerotol);
      linc_preproj = Jt_orig->norm();
      pred  = one;
      rpred = two*rpred_over_pred*pred;
      flag  = false;
      num_proj = 1;
      tangtol_start = tangtol;

      while (std::abs(rpred)/pred > rpred_over_pred) {
        // Compute projected tangential step.
        if (flag) {
          tangtol = tol_red_tang*tangtol;
          num_proj++;
          if (tangtol < mintol) {
            if (infoAC_) {
              std::stringstream hist;
              hist << "\n  The projection of the tangential step cannot be done with sufficient precision.\n";
              hist << "  Is the quasi-normal step very small? Continuing with no global convergence guarantees.\n";
              std::cout << hist.str();
            }
            break;
          }
        }
        // Solve augmented system.
        Real tol = setTolOSS(tangtol);
        // change augiters = con.solveAugmentedSystem(t, *ltemp, *t_orig, *czero, x, tol);
        t_dual->set(t_orig->dual());
        augiters = con.solveAugmentedSystem(t, *ltemp, *t_dual, *czero, x, tol);
        totalCallLS_++;
        totalIterLS_ = totalIterLS_ + augiters.size();
        printInfoLS(augiters);
        totalProj_++;
        con.applyJacobian(*rt, t, x, zerotol);
        linc_postproj = rt->norm();

        // Compute composite step.
        s.set(t);
        s.plus(n);

        // Compute some quantities before updating the objective and the constraint.
        obj.hessVec(*Hn, n, x, zerotol);
        con.applyAdjointHessian(*cxxvec, l, n, x, zerotol);
        Hn->plus(*cxxvec);
        obj.hessVec(*Hto, *t_orig, x, zerotol);
        con.applyAdjointHessian(*cxxvec, l, *t_orig, x, zerotol);
        Hto->plus(*cxxvec);

        // Compute objective, constraint, etc. values at the trial point.
        xtrial->set(x);
        xtrial->plus(s);
        obj.update(*xtrial, false, algo_state.iter);
        con.update(*xtrial, false, algo_state.iter);
        f_new = obj.value(*xtrial, zerotol);
        obj.gradient(gf_new, *xtrial, zerotol);
        con.value(c_new, *xtrial, zerotol);
        l_new.set(l);
        computeLagrangeMultiplier(l_new, *xtrial, gf_new, con);

        // Penalty parameter update.
        part_pred = - Wg.dot(*t_orig);
        gfJl->set(gf);
        gfJl->plus(*Jl);
        // change part_pred -= gfJl->dot(n);
        part_pred -= n.dot(gfJl->dual());
        // change part_pred -= half*Hn->dot(n);
        part_pred -= half*n.dot(Hn->dual());
        // change part_pred -= half*Hto->dot(*t_orig);
        part_pred -= half*t_orig->dot(Hto->dual());
        ltemp->set(l_new);
        ltemp->axpy(-one, l);
        // change part_pred -= Jnc->dot(*ltemp);
        part_pred -= Jnc->dot(ltemp->dual());

        if ( part_pred < -half*penalty_*(c_normsquared-Jnc_normsquared) ) {
          penalty_ = ( -two * part_pred / (c_normsquared-Jnc_normsquared) ) + beta;
        }

        pred = part_pred + penalty_*(c_normsquared-Jnc_normsquared);

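        // Note: pred is the predicted reduction of the augmented Lagrangian
        // merit function f(x) + <l,c(x)> + penalty*||c(x)||^2; the penalty
        // update above enforces pred >= (penalty_/2)*(c_normsquared -
        // Jnc_normsquared), which is positive whenever the quasi-normal
        // step reduces the linearized infeasibility.
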
        // Computation of rpred.
        // change rpred = - ltemp->dot(*rt) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
        rpred = - rt->dot(ltemp->dual()) - penalty_ * rt->dot(*rt) - two * penalty_ * rt->dot(*Jnc);
        // change Teuchos::RCP<Vector<Real> > lrt = lvec_->clone();
        //lrt->set(*rt);
        //rpred = - ltemp->dot(*rt) - penalty_ * std::pow(rt->norm(), 2) - two * penalty_ * lrt->dot(*Jnc);
        flag = true;

      } // while (std::abs(rpred)/pred > rpred_over_pred)

      tangtol = tangtol_start;

      // Check if the solution of the tangential subproblem is
      // disproportionally large compared to total trial step.
      xtemp->set(n);
      xtemp->plus(t);
      if ( t_orig->norm()/xtemp->norm() < tntmax_ ) {
        break;
      }
      else {
        t_m_tCP->set(*t_orig);
        t_m_tCP->scale(-one);
        t_m_tCP->plus(tCP);
        if ((t_m_tCP->norm() > 0) && try_tCP) {
          if (infoAC_) {
            std::stringstream hist;
            hist << "  ---> now trying tangential Cauchy point\n";
            std::cout << hist.str();
          }
          t.set(tCP);
        }
        else {
          if (infoAC_) {
            std::stringstream hist;
            hist << "  ---> recomputing quasi-normal step and re-solving tangential subproblem\n";
            std::cout << hist.str();
          }
          totalRef_++;
          // Reset global quantities.
          obj.update(x);
          con.update(x);
          /*lmhtol  = tol_red_all*lmhtol;
            qntol   = tol_red_all*qntol;
            pgtol   = tol_red_all*pgtol;
            projtol = tol_red_all*projtol;
            tangtol = tol_red_all*tangtol;
            if (glob_refine) {
              lmhtol_  = lmhtol;
              qntol_   = qntol;
              pgtol_   = pgtol;
              projtol_ = projtol;
              tangtol_ = tangtol;
            }*/
          if (!tolOSSfixed_) {
            lmhtol_  *= tol_red_all;
            qntol_   *= tol_red_all;
            pgtol_   *= tol_red_all;
            projtol_ *= tol_red_all;
            tangtol_ *= tol_red_all;
          }
          // Recompute the quasi-normal step.
          computeQuasinormalStep(n, c, x, zeta_*Delta_, con);
          // Solve tangential subproblem.
          solveTangentialSubproblem(t, tCP, Wg, x, g, n, l, Delta_, obj, con);
          totalIterCG_ += iterCG_;
          if (flagCG_ == 1) {
            totalNegCurv_++;
          }
        }
      } // else w.r.t. ( t_orig->norm()/xtemp->norm() < tntmax_ )

    } // for (int ct=0; ct<ct_max; ct++)

    // Compute actual reduction.
    fdiff = f - f_new;
    // Heuristic 1: If fdiff is very small compared to f, replace it by a
    // tiny positive value, in order to prevent machine precision issues.
    if (std::abs(fdiff / (f+1e-24)) < tol_fdiff) {
      fdiff = 1e-14;
    }
    // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(c.dot(c) - c_new.dot(c_new));
    // change ared = fdiff + (l.dot(c) - l_new.dot(c_new)) + penalty_*(std::pow(c.norm(),2) - std::pow(c_new.norm(),2));
    ared = fdiff + (c.dot(l.dual()) - c_new.dot(l_new.dual())) + penalty_*(c.dot(c) - c_new.dot(c_new));
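    // Note: ared is the actual reduction of the same merit function,
    // phi(x,l) = f(x) + <l,c(x)> + penalty*||c(x)||^2, between the current
    // and trial points (with multipliers l and l_new, respectively).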

    // Store actual and predicted reduction.
    ared_ = ared;
    pred_ = pred;

    // Store step and vector norms.
    snorm_ = s.norm();
    nnorm_ = n.norm();
    tnorm_ = t.norm();

    // Print diagnostics.
    if (infoAC_) {
      std::stringstream hist;
      hist << "\n  Trial step info ...\n";
      hist << "    n_norm              = " << nnorm_ << "\n";
      hist << "    t_norm              = " << tnorm_ << "\n";
      hist << "    s_norm              = " << snorm_ << "\n";
      hist << "    xtrial_norm         = " << xtrial->norm() << "\n";
      hist << "    f_old               = " << f << "\n";
      hist << "    f_trial             = " << f_new << "\n";
      hist << "    f_old-f_trial       = " << f-f_new << "\n";
      hist << "    ||c_old||           = " << c.norm() << "\n";
      hist << "    ||c_trial||         = " << c_new.norm() << "\n";
      hist << "    ||Jac*t_preproj||   = " << linc_preproj << "\n";
      hist << "    ||Jac*t_postproj||  = " << linc_postproj << "\n";
      hist << "    ||t_tilde||/||t||   = " << t_orig->norm() / t.norm() << "\n";
      hist << "    ||t_tilde||/||n+t|| = " << t_orig->norm() / snorm_ << "\n";
      hist << "    # projections       = " << num_proj << "\n";
      hist << "    penalty param       = " << penalty_ << "\n";
      hist << "    ared                = " << ared_ << "\n";
      hist << "    pred                = " << pred_ << "\n";
      hist << "    ared/pred           = " << ared_/pred_ << "\n";
      std::cout << hist.str();
    }

  } // accept

}; // class CompositeStep

} // namespace ROL

#endif