Logo ROOT   6.13/01
Reference Guide
IpoptMinimizer.cxx
Go to the documentation of this file.
1 #include <Math/IpoptMinimizer.h>
3 #include <Math/IFunction.h>
6 #include <TString.h>
7 
8 using namespace ROOT;
9 using namespace ROOT::Math;
10 using namespace Ipopt;
11 using namespace ROOT::Fit;
//_______________________________________________________________________
// Default constructor: creates the underlying Ipopt::IpoptApplication and
// the InternalTNLP adapter that bridges ROOT objective functions to Ipopt,
// and selects the limited-memory quasi-Newton Hessian approximation so the
// user does not need to supply second derivatives.
// NOTE(review): the signature line was lost in extraction; per the class
// documentation this is IpoptMinimizer::IpoptMinimizer() — confirm against
// IpoptMinimizer.h.
{
   fIpotApp = IpoptApplicationFactory();
   // fInternalTNLP is an Ipopt::SmartPtr, so the raw new is reference-counted.
   fInternalTNLP = new InternalTNLP(this);
   fIpotApp->Options()->SetStringValue("hessian_approximation", "limited-memory");
}
19 
//_______________________________________________________________________
// Constructor selecting a specific Ipopt linear solver (e.g. "ma27",
// "mumps") via the "linear_solver" option, alongside the limited-memory
// Hessian approximation.
// NOTE(review): the signature line was lost in extraction — presumably
// IpoptMinimizer::IpoptMinimizer(const char *type). fIpotApp is used here
// without being visibly created; the missing line may delegate to the
// default constructor — verify against IpoptMinimizer.h/.cxx.
{
   fIpotApp->Options()->SetStringValue("hessian_approximation", "limited-memory");
   fIpotApp->Options()->SetStringValue("linear_solver", type);
}
26 
//_______________________________________________________________________
// Destructor. Resetting the Ipopt::SmartPtr drops this object's reference
// to the IpoptApplication; the application (and, via its own SmartPtr,
// fInternalTNLP) is destroyed when the last reference goes away.
// NOTE(review): signature line lost in extraction — IpoptMinimizer::~IpoptMinimizer().
{
   fIpotApp = nullptr;
}
32 
33 //_______________________________________________________________________
34 IpoptMinimizer::IpoptMinimizer::InternalTNLP::InternalTNLP::InternalTNLP(IpoptMinimizer *minimizer)
35 {
36  fNNZerosJacobian = 0;
37  fNNZerosHessian = 0;
38  nlp_lower_bound_inf = -1e19;
39  nlp_upper_bound_inf = 1e19;
40  fMinimizer = minimizer;
41 }
42 
43 //_______________________________________________________________________
44 IpoptMinimizer::IpoptMinimizer::InternalTNLP::~InternalTNLP()
45 {
46 }
47 
48 //_______________________________________________________________________
49 bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::get_nlp_info(Index &n, Index &m, Index &nnz_jac_g, Index &nnz_h_lag,
50  IndexStyleEnum &index_style)
51 {
52  n = fMinimizer->NDim();
53  m = 0; // n - fMinimizer->NFree();//total variables with constraints
54  nnz_jac_g = fNNZerosJacobian;
55  nnz_h_lag = fNNZerosHessian;
56  // use the C style indexing (0-based)
57  index_style = TNLP::C_STYLE;
58 
59  return true;
60 }
61 
62 //_______________________________________________________________________
63 bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::get_bounds_info(Index n, Number *x_l, Number *x_u, Index /*m*/,
64  Number * /*g_l*/, Number * /*g_u*/)
65 {
66  // here, the n and m we gave IPOPT in get_nlp_info are passed back to us.
67  // If desired, we could assert to make sure they are what we think they are.
68  // TODO: print a meesage telling that information is not good whit the variables and constraints
69  R__ASSERT(n == (Index)fMinimizer->NDim());
70  // R__ASSERT(m == fMinimizer->NDim() - fMinimizer->NFree());
71  for (Index i = 0; i < n; i++) {
72  ParameterSettings varsettings;
73  if (fMinimizer->GetVariableSettings(i, varsettings)) {
74  if (varsettings.HasLowerLimit()) {
75  x_l[i] = varsettings.LowerLimit();
76  } else {
77  x_l[i] = nlp_lower_bound_inf;
78  }
79  if (varsettings.HasUpperLimit()) {
80  x_u[i] = varsettings.UpperLimit();
81  } else {
82  x_u[i] = nlp_upper_bound_inf;
83  }
84  } else {
85  MATH_ERROR_MSG("IpoptMinimizer::InternalTNLP::get_bounds_info", Form("Variable index = %d not found", i));
86  }
87  }
88  return true;
89 }
90 
91 //_______________________________________________________________________
92 bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::get_starting_point(Index n, bool /*init_x*/, Number *x,
93  bool /*init_z*/, Number * /*z_L*/,
94  Number * /*z_U*/, Index /*m*/,
95  bool /*init_lambda*/, Number * /*lambda*/)
96 {
97  R__ASSERT(n == (Index)fMinimizer->NDim());
98  for (Index i = 0; i < n; i++) {
99  ParameterSettings varsettings;
100  if (fMinimizer->GetVariableSettings(i, varsettings)) {
101  x[i] = varsettings.Value();
102  } else {
103  MATH_ERROR_MSG("IpoptMinimizer::InternalTNLP::get_starting_point", Form("Variable index = %d not found", i));
104  }
105  }
106  return true;
107 }
108 
109 //_______________________________________________________________________
110 bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::eval_f(Index n, const Number *x, bool /*new_x*/, Number &obj_value)
111 {
112 
113  auto fun = fMinimizer->ObjFunction();
114  R__ASSERT(n == (Index)fun->NDim());
115  obj_value = (*fun)(x);
116  return true;
117 }
118 
119 //_______________________________________________________________________
120 bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::eval_grad_f(Index n, const Number *x, bool /*new_x*/, Number *grad_f)
121 {
122  auto gfun = fMinimizer->GradObjFunction();
123  if (!gfun)
124  return false;
125  else {
126  R__ASSERT(n == (Index)gfun->NDim());
127  gfun->Gradient(x, grad_f);
128  }
129  return true;
130 }
131 
//_______________________________________________________________________
/// Ipopt callback evaluating the constraint functions g(x).
/// Constraints are not implemented by this minimizer (m == 0 in
/// get_nlp_info), so no values are produced.
/// \return always false — constraint evaluation is unsupported.
bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::eval_g(Index /*n*/, const Number * /*x*/, bool /*new_x*/,
                                                          Index /*m*/, Number * /*g*/)
{
   return false;
}
138 
//_______________________________________________________________________
/// Ipopt callback evaluating the constraint Jacobian (structure and
/// values). Constraints are not implemented (m == 0 in get_nlp_info), so
/// nothing is filled in.
/// \return always false — Jacobian evaluation is unsupported.
bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::eval_jac_g(Index /*n*/, const Number * /*x*/, bool /*new_x*/,
                                                              Index /*m*/, Index /*nele_jac*/, Index * /*iRow*/,
                                                              Index * /*jCol*/, Number * /*values*/)
{
   return false;
}
146 
//_______________________________________________________________________
/// Ipopt callback evaluating the Lagrangian Hessian. Not implemented: the
/// constructor sets hessian_approximation = "limited-memory", so Ipopt is
/// expected to approximate the Hessian itself instead of calling this.
/// \return always false — exact Hessian evaluation is unsupported.
bool IpoptMinimizer::IpoptMinimizer::InternalTNLP::eval_h(Index /*n*/, const Number * /*x*/, bool /*new_x*/,
                                                          Number /*obj_factor*/, Index /*m*/, const Number * /*lambda*/,
                                                          bool /*new_lambda*/, Index /*nele_hess*/, Index * /*iRow*/,
                                                          Index * /*jCol*/, Number * /*values*/)
{
   return false;
}
155 
156 //_______________________________________________________________________
157 void IpoptMinimizer::IpoptMinimizer::InternalTNLP::finalize_solution(SolverReturn /*status*/, Index n, const Number *x,
158  const Number *z_L, const Number *z_U, Index m,
159  const Number *g, const Number * /*lambda*/,
160  Number obj_value, const IpoptData * /*ip_data*/,
161  IpoptCalculatedQuantities * /*ip_cq*/)
162 {
163  // here is where we would store the solution to variables, or write to a file, etc
164  // so we could use the solution.
165 
166  // For this example, we write the solution to the console
167  std::cout << std::endl << std::endl << "Solution of the primal variables, x" << std::endl;
168  for (Index i = 0; i < n; i++) {
169  std::cout << "x[" << i << "] = " << x[i] << std::endl;
170  }
171 
172  std::cout << std::endl << std::endl << "Solution of the bound multipliers, z_L and z_U" << std::endl;
173  for (Index i = 0; i < n; i++) {
174  std::cout << "z_L[" << i << "] = " << z_L[i] << std::endl;
175  }
176  for (Index i = 0; i < n; i++) {
177  std::cout << "z_U[" << i << "] = " << z_U[i] << std::endl;
178  }
179 
180  std::cout << std::endl << std::endl << "Objective value" << std::endl;
181  std::cout << "f(x*) = " << obj_value << std::endl;
182 
183  std::cout << std::endl << "Final value of the constraints:" << std::endl;
184  for (Index i = 0; i < m; i++) {
185  std::cout << "g(" << i << ") = " << g[i] << std::endl;
186  }
187  fMinimizer->SetFinalValues(x);
188 
189  fMinimizer->SetMinValue(obj_value);
190 }
191 
{
   // Set the function to minimize.
   // NOTE(review): the signature line was lost in extraction; per the class
   // docs this is SetFunction(const ROOT::Math::IMultiGenFunction &func) —
   // confirm against IpoptMinimizer.h.
   // Ipopt needs gradients, so wrap the plain function in a
   // MultiNumGradFunction that differentiates numerically; no need to clone
   // the passed function here.
   ROOT::Math::MultiNumGradFunction gradFunc(func);
   // IGradientFunctionMultiDim gradFunc(func);
   // The function is cloned inside SetFunction, so gradFunc may die at
   // scope exit. Call the base-class method explicitly — calling
   // SetFunction unqualified would recurse back into this override.
   BasicMinimizer::SetFunction(gradFunc);
   // BasicMinimizer::SetFunction(func);
}
205 
//_______________________________________________________________________
// Set the number of structural non-zeros of the constraint Jacobian,
// forwarded to Ipopt through InternalTNLP::get_nlp_info.
// NOTE(review): signature line lost in extraction — per the class docs,
// SetNNZerosJacobian(UInt_t nzeros).
{
   fInternalTNLP->fNNZerosJacobian = nzeros;
}
211 
//_______________________________________________________________________
// Set the number of structural non-zeros of the Lagrangian Hessian,
// forwarded to Ipopt through InternalTNLP::get_nlp_info (unused while the
// limited-memory Hessian approximation is active).
// NOTE(review): signature line lost in extraction — per the class docs,
// SetNNZerosHessian(UInt_t nzeros).
{
   fInternalTNLP->fNNZerosHessian = nzeros;
}
217 
//_______________________________________________________________________
/// Forward a string-valued option directly to the underlying
/// Ipopt::IpoptApplication (e.g. var = "linear_solver", value = "ma27").
/// \param var   Ipopt option name.
/// \param value option value.
void IpoptMinimizer::SetOptionStringValue(const char *var, const char *value)
{
   fIpotApp->Options()->SetStringValue(var, value);
}
223 
224 //_______________________________________________________________________
226 {
227  ApplicationReturnStatus status;
228  status = fIpotApp->Initialize();
229  if (status != Solve_Succeeded) {
230  std::cout << std::endl << std::endl << "*** Error during initialization!" << std::endl;
231  return (int)status;
232  }
233 
234 
235  status = fIpotApp->OptimizeTNLP(fInternalTNLP);
236 
237  if (status == Solve_Succeeded) {
238  // Retrieve some statistics about the solve
239  Index iter_count = fIpotApp->Statistics()->IterationCount();
240  std::cout << std::endl << std::endl << "*** The problem solved in " << iter_count << " iterations!" << std::endl;
241 
242  Number final_obj = fIpotApp->Statistics()->FinalObjective();
243  std::cout << std::endl
244  << std::endl
245  << "*** The final value of the objective function is " << final_obj << '.' << std::endl;
246  return true;
247  } else {
248  return false;
249  }
250 }
Namespace for new ROOT classes and functions.
Definition: TFoamSampler.h:19
Class, describing value, limits and step size of the parameters Provides functionality also to set/re...
virtual void SetOptionStringValue(const char *var, const char *value)
double Value() const
copy constructor and assignment operators (leave them to the compiler)
virtual ~IpoptMinimizer()
Destructor.
Base Minimizer class, which defines the basic functionality of various minimizer implementations (apar...
IpoptMinimizer()
Default constructor.
virtual void SetNNZerosHessian(UInt_t nzeros)
Namespace for the fitting classes.
Definition: TFoamSampler.h:21
virtual unsigned int NDim() const
number of dimensions
#define MATH_ERROR_MSG(loc, str)
Definition: Error.h:50
you should not use this method at all Int_t Int_t Double_t Double_t Double_t Int_t Double_t Double_t Double_t Double_t Int_t m
Definition: TRolke.cxx:637
Documentation for the abstract class IBaseFunctionMultiDim.
Definition: IFunction.h:62
IpoptMinimizer class.
bool HasUpperLimit() const
check if parameter has upper limit
virtual bool Minimize()
method to perform the minimization
virtual void SetFunction(const ROOT::Math::IMultiGenFunction &func)
set the function to minimize
* x
Deprecated and error prone model selection interface.
Definition: TRolke.cxx:630
MultiNumGradFunction class to wrap a normal function in a gradient function using numerical gradient ...
double UpperLimit() const
return upper limit value
Ipopt::SmartPtr< Ipopt::IpoptApplication > fIpotApp
virtual void SetFunction(const ROOT::Math::IMultiGenFunction &func)
set the function to minimize
Ipopt::SmartPtr< InternalTNLP > fInternalTNLP
double LowerLimit() const
return lower limit value
virtual void SetNNZerosJacobian(UInt_t nzeros)
Internal class to create a TNLP object, required for Ipopt minimization in c++, every method is overl...
bool HasLowerLimit() const
check if parameter has lower limit