ROL_TrustRegionStep.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_TRUSTREGIONSTEP_H
#define ROL_TRUSTREGIONSTEP_H

#include "ROL_Step.hpp"
#include "ROL_Types.hpp"
#include "ROL_Secant.hpp"
#include "ROL_TrustRegion.hpp"
#include <sstream>
#include <iomanip>

namespace ROL {

/** \class ROL::TrustRegionStep
    \brief Provides the interface to compute optimization steps with trust regions.
*/
template <class Real>
class TrustRegionStep : public Step<Real> {
private:

  Teuchos::RCP<Secant<Real> >      secant_;       ///< Container for secant approximation.
  Teuchos::RCP<TrustRegion<Real> > trustRegion_;  ///< Container for trust-region object.

  Teuchos::RCP<Vector<Real> > xnew_;  ///< Container for updated iteration vector.
  Teuchos::RCP<Vector<Real> > xold_;  ///< Container for previous iteration vector.
  Teuchos::RCP<Vector<Real> > gp_;    ///< Container for previous gradient vector.

  ETrustRegion etr_;   ///< Trust-region subproblem solver type.
  ESecant      esec_;  ///< Secant type.

  bool useSecantHessVec_;  ///< Flag whether to use a secant Hessian.
  bool useSecantPrecond_;  ///< Flag whether to use a secant preconditioner.
  bool useProjectedGrad_;  ///< Flag whether to use the projected gradient criticality measure.

  std::vector<bool> useInexact_;  ///< Flags for inexact (0) objective function, (1) gradient, (2) Hessian.
  int TRflag_;    ///< Trust-region exit flag.
  int TR_nfval_;  ///< Trust-region function evaluation counter.
  int TR_ngrad_;  ///< Trust-region gradient evaluation counter.
  int CGflag_;    ///< Truncated CG termination flag.
  int CGiter_;    ///< Truncated CG iteration count.

  Real delMax_;  ///< Maximum trust-region radius.

  Real alpha_init_;  ///< Initial line-search parameter for projected methods.
  int  max_fval_;    ///< Maximum function evaluations in the line search for projected methods.

  Real scale0_;  ///< Scale for inexact gradient computation.
  Real scale1_;  ///< Scale for inexact gradient computation.

  bool softUp_;    ///< Flag whether the objective function may change between iterations ("Variable Objective Function").
  Real scaleEps_;  ///< Scale for epsilon-active sets.

  /** \brief Update gradient to iteratively satisfy inexactness condition.
  */
  void updateGradient( Vector<Real> &x, Objective<Real> &obj, BoundConstraint<Real> &con,
                       AlgorithmState<Real> &algo_state ) {
    Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();
    if ( useInexact_[1] ) {
      // Iteratively tighten the gradient tolerance until it is consistent with
      // the current criticality measure and trust-region radius.
      Real c     = scale0_*std::max(1.e-2,std::min(1.0,1.e4*algo_state.gnorm));
      Real gtol1 = c*(state->searchSize);
      Real gtol0 = scale1_*gtol1 + 1.0;
      while ( gtol0 > gtol1*scale1_ ) {
        obj.gradient(*(state->gradientVec),x,gtol1);
        algo_state.gnorm = computeCriticalityMeasure(*(state->gradientVec),x,con);
        gtol0 = gtol1;
        c     = scale0_*std::max(1.e-2,std::min(1.0,1.e4*algo_state.gnorm));
        gtol1 = c*std::min(algo_state.gnorm,state->searchSize);
      }
      algo_state.ngrad++;
    }
    else {
      Real gtol = std::sqrt(ROL_EPSILON);
      obj.gradient(*(state->gradientVec),x,gtol);
      algo_state.ngrad++;
      algo_state.gnorm = computeCriticalityMeasure(*(state->gradientVec),x,con);
    }
  }

  /** \brief Compute the criticality measure.
  */
  Real computeCriticalityMeasure( const Vector<Real> &g, const Vector<Real> &x,
                                  BoundConstraint<Real> &con ) {
    if ( con.isActivated() ) {
      if ( useProjectedGrad_ ) {
        // Norm of the projected gradient.
        gp_->set(g);
        con.computeProjectedGradient( *gp_, x );
        return gp_->norm();
      }
      else {
        // Norm of the projected gradient step, || P(x - g) - x ||.
        xnew_->set(x);
        xnew_->axpy(-1.0,g.dual());
        con.project(*xnew_);
        xnew_->axpy(-1.0,x);
        return xnew_->norm();
      }
    }
    else {
      return g.norm();
    }
  }

public:

  virtual ~TrustRegionStep() {}

  /** \brief Constructor.

      Reads settings from a Teuchos::ParameterList and builds the trust-region
      and secant objects internally.
  */
  TrustRegionStep( Teuchos::ParameterList &parlist )
    : Step<Real>(),
      secant_(Teuchos::null), trustRegion_(Teuchos::null),
      xnew_(Teuchos::null), xold_(Teuchos::null), gp_(Teuchos::null),
      etr_(TRUSTREGION_DOGLEG), esec_(SECANT_LBFGS),
      useSecantHessVec_(false), useSecantPrecond_(false),
      useProjectedGrad_(false),
      TRflag_(0), TR_nfval_(0), TR_ngrad_(0),
      CGflag_(0), CGiter_(0),
      delMax_(1.e4),
      alpha_init_(1.), max_fval_(20),
      scale0_(1.), scale1_(1.),
      softUp_(false), scaleEps_(1.) {
    Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
    // Trust-Region Parameters
    step_state->searchSize = parlist.sublist("Step").sublist("Trust Region").get("Initial Radius", -1.0);
    delMax_ = parlist.sublist("Step").sublist("Trust Region").get("Maximum Radius", 1000.0);
    // Inexactness Information
    useInexact_.clear();
    useInexact_.push_back(parlist.sublist("General").get("Inexact Objective Function", false));
    useInexact_.push_back(parlist.sublist("General").get("Inexact Gradient", false));
    useInexact_.push_back(parlist.sublist("General").get("Inexact Hessian-Times-A-Vector", false));
    // Trust-Region Inexactness Parameters
    scale0_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Tolerance Scaling",1.e-1);
    scale1_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Relative Tolerance",2.0);
    // Initialize Trust Region Subproblem Solver Object
    etr_ = StringToETrustRegion(parlist.sublist("Step").sublist("Trust Region").get("Subproblem Solver","Dogleg"));
    useProjectedGrad_ = parlist.sublist("General").get("Projected Gradient Criticality Measure", false);
    max_fval_   = parlist.sublist("Step").sublist("Line Search").get("Function Evaluation Limit", 20);
    alpha_init_ = parlist.sublist("Step").sublist("Line Search").get("Initial Step Size", 1.0);
    trustRegion_ = TrustRegionFactory<Real>(parlist);
    // Secant Object
    esec_ = StringToESecant(parlist.sublist("General").sublist("Secant").get("Type","Limited-Memory BFGS"));
    useSecantPrecond_ = parlist.sublist("General").sublist("Secant").get("Use as Preconditioner", false);
    useSecantHessVec_ = parlist.sublist("General").sublist("Secant").get("Use as Hessian", false);
    secant_ = SecantFactory<Real>(parlist);
    // Changing Objective Functions
    softUp_ = parlist.sublist("General").get("Variable Objective Function",false);
    // Scale for epsilon active sets
    scaleEps_ = parlist.sublist("General").get("Scale for Epsilon Active Sets",1.0);
  }

  /** \brief Constructor.

      Accepts a user-provided secant object; the remaining settings are read
      from a Teuchos::ParameterList as in the primary constructor.
  */
  TrustRegionStep( Teuchos::RCP<Secant<Real> > &secant, Teuchos::ParameterList &parlist )
    : Step<Real>(),
      secant_(secant), trustRegion_(Teuchos::null),
      xnew_(Teuchos::null), xold_(Teuchos::null), gp_(Teuchos::null),
      etr_(TRUSTREGION_DOGLEG), esec_(SECANT_LBFGS),
      useSecantHessVec_(false), useSecantPrecond_(false),
      useProjectedGrad_(false),
      TRflag_(0), TR_nfval_(0), TR_ngrad_(0),
      CGflag_(0), CGiter_(0),
      delMax_(1.e4),
      alpha_init_(1.), max_fval_(20),
      scale0_(1.), scale1_(1.),
      softUp_(false), scaleEps_(1.) {
    Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
    // Trust-Region Parameters
    step_state->searchSize = parlist.sublist("Step").sublist("Trust Region").get("Initial Radius", -1.0);
    delMax_ = parlist.sublist("Step").sublist("Trust Region").get("Maximum Radius", 1000.0);
    // Inexactness Information
    useInexact_.clear();
    useInexact_.push_back(parlist.sublist("General").get("Inexact Objective Function", false));
    useInexact_.push_back(parlist.sublist("General").get("Inexact Gradient", false));
    useInexact_.push_back(parlist.sublist("General").get("Inexact Hessian-Times-A-Vector", false));
    // Trust-Region Inexactness Parameters
    scale0_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Tolerance Scaling",1.e-1);
    scale1_ = parlist.sublist("Step").sublist("Trust Region").sublist("Inexact").sublist("Gradient").get("Relative Tolerance",2.0);
    // Initialize Trust Region Subproblem Solver Object
    etr_ = StringToETrustRegion(parlist.sublist("Step").sublist("Trust Region").get("Subproblem Solver","Dogleg"));
    useProjectedGrad_ = parlist.sublist("General").get("Projected Gradient Criticality Measure", false);
    max_fval_   = parlist.sublist("Step").sublist("Line Search").get("Function Evaluation Limit", 20);
    alpha_init_ = parlist.sublist("Step").sublist("Line Search").get("Initial Step Size", 1.0);
    trustRegion_ = TrustRegionFactory<Real>(parlist);
    // Secant Object
    useSecantPrecond_ = parlist.sublist("General").sublist("Secant").get("Use as Preconditioner", false);
    useSecantHessVec_ = parlist.sublist("General").sublist("Secant").get("Use as Hessian", false);
    if ( secant_ == Teuchos::null ) {
      // Fall back to a default limited-memory BFGS secant if none was supplied.
      Teuchos::ParameterList Slist;
      Slist.sublist("General").sublist("Secant").set("Type","Limited-Memory BFGS");
      Slist.sublist("General").sublist("Secant").set("Maximum Storage",10);
      secant_ = SecantFactory<Real>(Slist);
    }
    // Changing Objective Functions
    softUp_ = parlist.sublist("General").get("Variable Objective Function",false);
    // Scale for epsilon active sets
    scaleEps_ = parlist.sublist("General").get("Scale for Epsilon Active Sets",1.0);
  }

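  /* Example configuration (a minimal sketch): the constructors above read their
     settings from nested sublists of a Teuchos::ParameterList.  Only parameter
     names that appear in this file are used below; the values are illustrative.

       Teuchos::ParameterList parlist;
       parlist.sublist("Step").sublist("Trust Region").set("Subproblem Solver", "Dogleg");
       parlist.sublist("Step").sublist("Trust Region").set("Initial Radius",    10.0);
       parlist.sublist("Step").sublist("Trust Region").set("Maximum Radius",    1.e4);
       parlist.sublist("General").sublist("Secant").set("Type", "Limited-Memory BFGS");
       parlist.sublist("General").sublist("Secant").set("Use as Hessian", false);
       ROL::TrustRegionStep<double> step(parlist);
  */
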
  /** \brief Initialize step.
  */
  void initialize( Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
                   Objective<Real> &obj, BoundConstraint<Real> &con,
                   AlgorithmState<Real> &algo_state ) {
    Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();

    trustRegion_->initialize(x,s,g);

    Real htol = std::sqrt(ROL_EPSILON);
    Real ftol = 0.1*ROL_OVERFLOW;

    step_state->descentVec  = s.clone();
    step_state->gradientVec = g.clone();

    if ( con.isActivated() ) {
      con.project(x);
      xnew_ = x.clone();
      xold_ = x.clone();
    }
    gp_ = g.clone();

    // Update approximate gradient and approximate objective function.
    obj.update(x,true,algo_state.iter);
    updateGradient(x,obj,con,algo_state);
    algo_state.snorm = 1.e10;
    algo_state.value = obj.value(x,ftol);
    algo_state.nfval++;

    // Try to apply the inverse Hessian; if this fails, fall back to a secant Hessian.
    if ( !useSecantHessVec_ &&
         (etr_ == TRUSTREGION_DOGLEG || etr_ == TRUSTREGION_DOUBLEDOGLEG) ) {
      try {
        Teuchos::RCP<Vector<Real> > v  = g.clone();
        Teuchos::RCP<Vector<Real> > hv = x.clone();
        obj.invHessVec(*hv,*v,x,htol);
      }
      catch (std::exception &e) {
        useSecantHessVec_ = true;
      }
    }

    // If no initial radius was given, determine one from the Cauchy point.
    if ( step_state->searchSize <= 0.0 ) {
      Teuchos::RCP<Vector<Real> > Bg = g.clone();
      if ( useSecantHessVec_ ) {
        secant_->applyB(*Bg,(step_state->gradientVec)->dual(),x);
      }
      else {
        obj.hessVec(*Bg,(step_state->gradientVec)->dual(),x,htol);
      }
      Real gBg   = Bg->dot(*(step_state->gradientVec));
      Real alpha = 1.0;
      if ( gBg > ROL_EPSILON ) {
        alpha = algo_state.gnorm*algo_state.gnorm/gBg;
      }
      // Evaluate the objective function at the Cauchy point
      Teuchos::RCP<Vector<Real> > cp = s.clone();
      cp->set((step_state->gradientVec)->dual());
      cp->scale(-alpha);
      Teuchos::RCP<Vector<Real> > xcp = x.clone();
      xcp->set(x);
      xcp->plus(*cp);
      if ( con.isActivated() ) {
        con.project(*xcp);
      }
      obj.update(*xcp);
      Real fnew = obj.value(*xcp,ftol); // MUST DO SOMETHING HERE WITH FTOL
      algo_state.nfval++;
      // Perform cubic interpolation to determine the initial trust-region radius:
      // fit m(t) = f(x) + gs*t + b*t^2 + a*t^3 along the Cauchy direction, where
      // b = 0.5*alpha^2*gBg and a is chosen so that m(1) matches the objective
      // value at the Cauchy point, then take the minimizing t.
      Real gs = cp->dot((step_state->gradientVec)->dual());
      Real a  = fnew - algo_state.value - gs - 0.5*alpha*alpha*gBg;
      if ( std::abs(a) < ROL_EPSILON ) {
        // a = 0 implies the objective is quadratic in the negative gradient direction
        step_state->searchSize = std::min(alpha*algo_state.gnorm,delMax_);
      }
      else {
        Real b = 0.5*alpha*alpha*gBg;
        Real c = gs;
        if ( b*b-3.0*a*c > ROL_EPSILON ) {
          // There is at least one critical point
          Real t1 = (-b-std::sqrt(b*b-3.0*a*c))/(3.0*a);
          Real t2 = (-b+std::sqrt(b*b-3.0*a*c))/(3.0*a);
          if ( 6.0*a*t1 + 2.0*b > 0.0 ) {
            // t1 is the minimizer
            step_state->searchSize = std::min(t1*alpha*algo_state.gnorm,delMax_);
          }
          else {
            // t2 is the minimizer
            step_state->searchSize = std::min(t2*alpha*algo_state.gnorm,delMax_);
          }
        }
        else {
          step_state->searchSize = std::min(alpha*algo_state.gnorm,delMax_);
        }
      }
    }
  }

  /** \brief Compute step.
  */
  void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
                BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {
    Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();

    // Scale the epsilon-active set by the current criticality measure.
    Real eps = 0.0;
    if ( con.isActivated() ) {
      eps = scaleEps_*algo_state.gnorm;
    }
    ProjectedObjective<Real> pObj(obj,con,secant_,useSecantPrecond_,useSecantHessVec_,eps);

    // Solve the trust-region subproblem.
    CGflag_ = 0;
    CGiter_ = 0;
    trustRegion_->run(s,algo_state.snorm,step_state->searchSize,CGflag_,CGiter_,
                      x,*(step_state->gradientVec),algo_state.gnorm,pObj);

    // With active bounds, replace the trial step by its projected counterpart.
    if ( con.isActivated() ) {
      xnew_->set(x);
      xnew_->plus(s);
      con.project(*xnew_);
      s.set(*xnew_);
      s.axpy(-1.0,x);
    }
  }

  /** \brief Update step, if successful.
  */
  void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
               BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {
    Teuchos::RCP<StepState<Real> > state = Step<Real>::getState();

    Real tol = std::sqrt(ROL_EPSILON);

    Real eps = 0.0;
    if ( con.isActivated() ) {
      eps = algo_state.gnorm;
    }
    ProjectedObjective<Real> pObj(obj,con,secant_,useSecantPrecond_,useSecantHessVec_,eps);

    // Store previous step for constraint computations
    if ( con.isActivated() ) {
      xold_->set(x);
    }

    // Update trust-region information;
    // performs a hard update on the objective function.
    TRflag_   = 0;
    TR_nfval_ = 0;
    TR_ngrad_ = 0;
    Real fold = algo_state.value;
    Real fnew = 0.0;
    algo_state.iter++;
    trustRegion_->update(x,fnew,state->searchSize,TR_nfval_,TR_ngrad_,TRflag_,
                         s,algo_state.snorm,fold,*(state->gradientVec),algo_state.iter,pObj);
    algo_state.nfval += TR_nfval_;
    algo_state.ngrad += TR_ngrad_;
    algo_state.value  = fnew;

    // If the step is accepted, compute the new gradient and update secant storage.
    if ( TRflag_ == 0 || TRflag_ == 1 ) {
      // Perform line search (smoothing) to ensure decrease
      if ( con.isActivated() ) {
        // Compute new gradient
        obj.gradient(*gp_,x,tol); // MUST DO SOMETHING HERE WITH TOL
        algo_state.ngrad++;
        // Compute smoothed step
        Real alpha = 1.0;
        xnew_->set(x);
        xnew_->axpy(-alpha*alpha_init_,gp_->dual());
        con.project(*xnew_);
        // Compute new objective value
        if ( softUp_ ) {
          obj.update(*xnew_);
        }
        else {
          obj.update(*xnew_,true,algo_state.iter);
        }
        Real ftmp = obj.value(*xnew_,tol); // MUST DO SOMETHING HERE WITH TOL
        algo_state.nfval++;
        // Perform smoothing
        int cnt = 0;
        alpha = 1.0/alpha_init_;
        while ( (fnew-ftmp) <= 1.e-4*(fnew-fold) ) {
          xnew_->set(x);
          xnew_->axpy(-alpha*alpha_init_,gp_->dual());
          con.project(*xnew_);
          if ( softUp_ ) {
            obj.update(*xnew_,false,algo_state.iter);
          }
          else {
            obj.update(*xnew_,true,algo_state.iter);
          }
          ftmp = obj.value(*xnew_,tol); // MUST DO SOMETHING HERE WITH TOL
          algo_state.nfval++;
          if ( cnt >= max_fval_ ) {
            break;
          }
          alpha *= 0.5;
          cnt++;
        }
        // Store objective function and iteration information
        fnew = ftmp;
        x.set(*xnew_);
      }
      else {
        if ( softUp_ ) {
          pObj.update(x,true,algo_state.iter);
        }
      }

      // Store previous gradient for secant update
      if ( useSecantHessVec_ || useSecantPrecond_ ) {
        gp_->set(*(state->gradientVec));
      }

      // Update objective function and approximate model
      updateGradient(x,obj,con,algo_state);

      // Update secant information
      if ( useSecantHessVec_ || useSecantPrecond_ ) {
        if ( con.isActivated() ) { // Compute new constrained step
          xnew_->set(x);
          xnew_->axpy(-1.0,*xold_);
          secant_->update(*(state->gradientVec),*gp_,*xnew_,algo_state.snorm,algo_state.iter+1);
        }
        else {
          secant_->update(*(state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
        }
      }

      // Update algorithm state
      (algo_state.iterateVec)->set(x);
    }
    else { // Step was rejected
      if ( softUp_ ) {
        obj.update(x,true,algo_state.iter);
        fnew = pObj.value(x,tol);
        algo_state.nfval++;
        algo_state.value = fnew;
      }
    }
  }

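  /* Example driver loop (a minimal sketch).  Here parlist is a
     Teuchos::ParameterList as in the configuration sketch after the
     constructors; myObj, myBnd, x, s, g, gtol, and maxit denote a
     user-supplied ROL::Objective, ROL::BoundConstraint, vectors, and stopping
     data.  They are assumptions of this sketch, not names defined in this
     file.  In practice, ROL's Algorithm class together with a StatusTest
     usually drives these calls.

       ROL::TrustRegionStep<double> step(parlist);
       ROL::AlgorithmState<double>  algo_state;
       algo_state.iterateVec = x.clone();
       algo_state.iterateVec->set(x);
       step.initialize(x, s, g, myObj, myBnd, algo_state);
       while ( algo_state.gnorm > gtol && algo_state.iter < maxit ) {
         step.compute(s, x, myObj, myBnd, algo_state);
         step.update(x, s, myObj, myBnd, algo_state);
         std::cout << step.print(algo_state, algo_state.iter == 1);
       }
  */
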
  /** \brief Print iterate header.
  */
  std::string printHeader( void ) const {
    std::stringstream hist;
    hist << " ";
    hist << std::setw(6)  << std::left << "iter";
    hist << std::setw(15) << std::left << "value";
    hist << std::setw(15) << std::left << "gnorm";
    hist << std::setw(15) << std::left << "snorm";
    hist << std::setw(15) << std::left << "delta";
    hist << std::setw(10) << std::left << "#fval";
    hist << std::setw(10) << std::left << "#grad";
    hist << std::setw(10) << std::left << "tr_flag";
    if ( etr_ == TRUSTREGION_TRUNCATEDCG ) {
      hist << std::setw(10) << std::left << "iterCG";
      hist << std::setw(10) << std::left << "flagCG";
    }
    hist << "\n";
    return hist.str();
  }

  /** \brief Print step name.
  */
  std::string printName( void ) const {
    std::stringstream hist;
    hist << "\n" << ETrustRegionToString(etr_) << " Trust-Region solver";
    if ( useSecantPrecond_ || useSecantHessVec_ ) {
      if ( useSecantPrecond_ && !useSecantHessVec_ ) {
        hist << " with " << ESecantToString(esec_) << " preconditioning\n";
      }
      else if ( !useSecantPrecond_ && useSecantHessVec_ ) {
        hist << " with " << ESecantToString(esec_) << " Hessian approximation\n";
      }
      else {
        hist << " with " << ESecantToString(esec_) << " preconditioning and Hessian approximation\n";
      }
    }
    else {
      hist << "\n";
    }
    return hist.str();
  }

  /** \brief Print iterate status.
  */
  std::string print( AlgorithmState<Real> &algo_state, bool print_header = false ) const {
    const Teuchos::RCP<const StepState<Real> >& step_state = Step<Real>::getStepState();

    std::stringstream hist;
    hist << std::scientific << std::setprecision(6);
    if ( algo_state.iter == 0 ) {
      hist << printName();
    }
    if ( print_header ) {
      hist << printHeader();
    }
    if ( algo_state.iter == 0 ) {
      hist << " ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << " ";
      hist << std::setw(15) << std::left << step_state->searchSize;
      hist << "\n";
    }
    else {
      hist << " ";
      hist << std::setw(6)  << std::left << algo_state.iter;
      hist << std::setw(15) << std::left << algo_state.value;
      hist << std::setw(15) << std::left << algo_state.gnorm;
      hist << std::setw(15) << std::left << algo_state.snorm;
      hist << std::setw(15) << std::left << step_state->searchSize;
      hist << std::setw(10) << std::left << algo_state.nfval;
      hist << std::setw(10) << std::left << algo_state.ngrad;
      hist << std::setw(10) << std::left << TRflag_;
      if ( etr_ == TRUSTREGION_TRUNCATEDCG ) {
        hist << std::setw(10) << std::left << CGiter_;
        hist << std::setw(10) << std::left << CGflag_;
      }
      hist << "\n";
    }
    return hist.str();
  }

}; // class TrustRegionStep

} // namespace ROL

#endif