IpoptMinimizer.h
// @(#)root/ipopt:$Id$
// Author: Omar.Zapata@cern.ch Thu Dec 28 2:15:00 2017

/*************************************************************************
 * Copyright (C) 2017-2018, Omar Zapata                                  *
 * All rights reserved.                                                  *
 *                                                                       *
 * For the licensing terms see $ROOTSYS/LICENSE.                         *
 * For the list of contributors see $ROOTSYS/README/CREDITS.             *
 *************************************************************************/

#ifndef ROOT_Math_IpoptMinimizer
#define ROOT_Math_IpoptMinimizer

#include "Math/Minimizer.h"

#include "Math/IFunctionfwd.h"

#include "Math/IParamFunctionfwd.h"

#include "Math/BasicMinimizer.h"

#include <vector>
#include <map>
#include <string>

#define HAVE_CSTDDEF
#include <coin/IpTNLP.hpp>
#include <coin/IpSmartPtr.hpp>
#undef HAVE_CSTDDEF

#include <coin/IpIpoptApplication.hpp>
#include <coin/IpSolveStatistics.hpp>

namespace ROOT {

namespace Math {
/**
 enumeration specifying the types of Ipopt solvers
 @ingroup MultiMin
*/
enum EIpoptMinimizerSolver { /* ... enumerator list not shown in this listing ... */ };

//_____________________________________________________________________________________
/**
 * \class IpoptMinimizer
 * IpoptMinimizer class.
 * Minimizer implementation based on Ipopt (Interior Point OPTimizer), a software
 * package for large-scale nonlinear optimization. It is designed to find (local)
 * solutions of mathematical optimization problems.
 *
 * The following information is required by IPOPT:
 * - Problem dimensions
 *   -# number of variables
 *   -# number of constraints
 * - Problem bounds
 *   -# variable bounds
 *   -# constraint bounds
 * - Initial starting point
 *   -# initial values for the primal \f$ x\f$ variables
 *   -# initial values for the multipliers (only required for a warm start option)
 * - Problem structure
 *   -# number of nonzeros in the Jacobian of the constraints
 *   -# number of nonzeros in the Hessian of the Lagrangian function
 *   -# sparsity structure of the Jacobian of the constraints
 *   -# sparsity structure of the Hessian of the Lagrangian function
 * - Evaluation of problem functions, i.e. the information evaluated at a given point
 *   ( \f$ x, \lambda, \sigma_f\f$ coming from IPOPT)
 *   -# objective function, \f$ f(x)\f$
 *   -# gradient of the objective, \f$ \nabla f(x)\f$
 *   -# constraint function values, \f$ g(x)\f$
 *   -# Jacobian of the constraints, \f$ \nabla g(x)^T\f$
 *   -# Hessian of the Lagrangian function, \f$ \sigma_f \nabla^2 f(x) + \sum_{i=1}^m \lambda_i \nabla^2 g_i(x)\f$
 *      (not required if a quasi-Newton option is chosen to approximate the second derivatives)
 *
 * The problem dimensions and bounds are straightforward and come solely from the
 * problem definition. The initial starting point is used by the algorithm when it
 * begins iterating to solve the problem. If IPOPT has difficulty converging, or if
 * it converges to a locally infeasible point, adjusting the starting point may help.
 * Depending on the starting point, IPOPT may also converge to different local
 * solutions. A usage sketch is given below.
 *
 * See the <A HREF="https://projects.coin-or.org/Ipopt">Ipopt documentation</A>
 * for more info on the Ipopt minimization algorithms.
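 *
 * A minimal usage sketch (illustrative only; it relies on the generic
 * ROOT::Math::Minimizer interface, assumes "Math/Functor.h" is included,
 * and `rosenbrock` is a hypothetical user-defined function with signature
 * double rosenbrock(const double *x)):
 *
 * \code
 * ROOT::Math::Functor f(&rosenbrock, 2);
 * ROOT::Math::IpoptMinimizer minimizer;
 * minimizer.SetFunction(f);
 * minimizer.SetLimitedVariable(0, "x", 0.5, 0.01, -5., 5.);
 * minimizer.SetLimitedVariable(1, "y", 0.5, 0.01, -5., 5.);
 * minimizer.Minimize();
 * const double *xmin = minimizer.X();
 * \endcode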
 *
 * @ingroup MultiMin
 */
using Ipopt::Number;
using Ipopt::Index;
using Ipopt::SolverReturn;
using Ipopt::IpoptData;
using Ipopt::IpoptCalculatedQuantities;

class IpoptMinimizer : public ROOT::Math::BasicMinimizer {
private:
   Ipopt::SmartPtr<Ipopt::IpoptApplication> fIpotApp;

protected:
   /**
    * \class InternalTNLP
    * Internal class that implements a TNLP object, as required for Ipopt
    * minimization in C++. Every virtual method is overridden to pass the
    * problem information to the Ipopt solvers.
    * @ingroup MultiMin
    */
   class InternalTNLP : public Ipopt::TNLP {
      friend class IpoptMinimizer;

   public:
      InternalTNLP(IpoptMinimizer *minimizer);

      /** default destructor */
      virtual ~InternalTNLP();

      /**
       * Give IPOPT the information about the size of the problem (and hence, the size
       * of the arrays that it needs to allocate).
       * \param n (out), the number of variables in the problem (dimension of \f$ x\f$).
       * \param m (out), the number of constraints in the problem (dimension of \f$ g(x)\f$).
       * \param nnz_jac_g (out), the number of nonzero entries in the Jacobian.
       * \param nnz_h_lag (out), the number of nonzero entries in the Hessian.
       * \param index_style (out), the numbering style used for row/col entries in the
       * sparse matrix format (C_STYLE: 0-based, FORTRAN_STYLE: 1-based); defaults to C_STYLE.
       * \return true on success, false otherwise.
       */
      virtual bool get_nlp_info(Index &n, Index &m, Index &nnz_jac_g, Index &nnz_h_lag, IndexStyleEnum &index_style);
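      /* Illustrative only (not the actual InternalTNLP implementation): for a
         hypothetical 2-variable, unconstrained problem with a dense
         lower-triangular Hessian, an override could look like:

            bool get_nlp_info(Index &n, Index &m, Index &nnz_jac_g, Index &nnz_h_lag,
                              IndexStyleEnum &index_style)
            {
               n = 2;                 // two primal variables
               m = 0;                 // no constraints
               nnz_jac_g = 0;         // empty constraint Jacobian
               nnz_h_lag = 3;         // lower triangle of a dense 2x2 Hessian
               index_style = C_STYLE; // 0-based row/column indices
               return true;
            }
      */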
      /**
       * Give IPOPT the value of the bounds on the variables and constraints.
       * The values of n and m that you specified in get_nlp_info are passed to you for
       * debug checking. Setting a lower bound to a value less than or equal to the value
       * of the option nlp_lower_bound_inf will cause IPOPT to assume no lower bound.
       * Likewise, specifying an upper bound above or equal to the value of the option
       * nlp_upper_bound_inf will cause IPOPT to assume no upper bound. These options,
       * nlp_lower_bound_inf and nlp_upper_bound_inf, are set to \f$ -10^{19}\f$ and
       * \f$ 10^{19}\f$, respectively, by default.
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param x_l (out), the lower bounds \f$ x^L\f$ for \f$ x\f$.
       * \param x_u (out), the upper bounds \f$ x^U\f$ for \f$ x\f$.
       * \param m (in), the number of constraints in the problem (dimension of \f$ g(x)\f$).
       * \param g_l (out), the lower bounds \f$ g^L\f$ for \f$ g(x)\f$.
       * \param g_u (out), the upper bounds \f$ g^U\f$ for \f$ g(x)\f$.
       * \return true on success, false otherwise.
       */
      virtual bool get_bounds_info(Index n, Number *x_l, Number *x_u, Index m, Number *g_l, Number *g_u);
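      /* Illustrative only: bounding both hypothetical variables to [-5, 5],
         with no constraint rows (m == 0):

            bool get_bounds_info(Index n, Number *x_l, Number *x_u, Index m, Number *g_l, Number *g_u)
            {
               for (Index i = 0; i < n; i++) {
                  x_l[i] = -5.0; // lower bound for x[i]
                  x_u[i] = 5.0;  // upper bound for x[i]
               }
               // m == 0 here, so g_l and g_u need not be filled
               return true;
            }
      */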
      /**
       * Give IPOPT the starting point before it begins iterating.
       * The variables n and m are passed in for your convenience; they will have the
       * same values you specified in get_nlp_info.
       * Depending on the options that have been set, IPOPT may or may not require
       * initial values for the primal variables \f$ x\f$, the bound multipliers
       * \f$ z^L\f$ and \f$ z^U\f$, and the constraint multipliers \f$ \lambda\f$.
       * The boolean flags init_x, init_z, and init_lambda tell you whether or not you
       * should provide initial values for \f$ x\f$, \f$ z^L\f$ and \f$ z^U\f$, or
       * \f$ \lambda\f$, respectively. The default options only require an initial value
       * for the primal variables \f$ x\f$.
       * Note that the initial values for bound multiplier components corresponding to
       * "infinity" bounds ( \f$ x_L^{(i)}=-\infty\f$ or \f$ x_U^{(i)}=\infty\f$) are ignored.
       *
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param init_x (in), if true, this method must provide an initial value for \f$ x\f$.
       * \param x (out), the initial values for the primal variables, \f$ x\f$.
       * \param init_z (in), if true, this method must provide an initial value for the
       * bound multipliers \f$ z^L\f$ and \f$ z^U\f$.
       * \param z_L (out), the initial values for the bound multipliers, \f$ z^L\f$.
       * \param z_U (out), the initial values for the bound multipliers, \f$ z^U\f$.
       * \param m (in), the number of constraints in the problem (dimension of \f$ g(x)\f$).
       * \param init_lambda (in), if true, this method must provide an initial value for
       * the constraint multipliers, \f$ \lambda\f$.
       * \param lambda (out), the initial values for the constraint multipliers, \f$ \lambda\f$.
       * \return true on success, false otherwise.
       */
      virtual bool get_starting_point(Index n, bool init_x, Number *x, bool init_z, Number *z_L, Number *z_U, Index m,
                                      bool init_lambda, Number *lambda);
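      /* Illustrative only: providing just the primal starting point, which is
         all the default options require:

            bool get_starting_point(Index n, bool init_x, Number *x, bool init_z, Number *z_L,
                                    Number *z_U, Index m, bool init_lambda, Number *lambda)
            {
               assert(init_x && !init_z && !init_lambda); // defaults: only x is requested
               for (Index i = 0; i < n; i++)
                  x[i] = 0.5; // hypothetical initial guess
               return true;
            }
      */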
      /**
       * Return the value of the objective function at the point \f$ x\f$.
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param x (in), the values for the primal variables, \f$ x\f$, at which \f$ f(x)\f$
       * is to be evaluated.
       * \param new_x (in), false if any evaluation method was previously called with the
       * same values in x, true otherwise.
       * \param obj_value (out), the value of the objective function ( \f$ f(x)\f$).
       * The boolean variable new_x will be false if the last call to any of the
       * evaluation methods (eval_*) used the same \f$ x\f$ values. This can be helpful
       * when users have efficient implementations that calculate multiple outputs at
       * once. IPOPT internally caches results from the TNLP and generally, this flag
       * can be ignored.
       * The variable n is passed in for your convenience; it will have the same value
       * you specified in get_nlp_info.
       * \return true on success, false otherwise.
       */
      virtual bool eval_f(Index n, const Number *x, bool new_x, Number &obj_value);
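      /* Illustrative only: evaluating a hypothetical objective
         f(x) = (x0 - 1)^2 + 100 (x1 - x0^2)^2 (the Rosenbrock function):

            bool eval_f(Index n, const Number *x, bool new_x, Number &obj_value)
            {
               const Number a = x[0] - 1.0;
               const Number b = x[1] - x[0] * x[0];
               obj_value = a * a + 100.0 * b * b;
               return true;
            }
      */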

      /**
       * Return the gradient of the objective function at the point \f$ x\f$.
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param x (in), the values for the primal variables, \f$ x\f$, at which
       * \f$ \nabla f(x)\f$ is to be evaluated.
       * \param new_x (in), false if any evaluation method was previously called with the
       * same values in x, true otherwise.
       * \param grad_f (out), the array of values for the gradient of the objective
       * function ( \f$ \nabla f(x)\f$).
       * The gradient array is in the same order as the \f$ x\f$ variables (i.e., the
       * gradient of the objective with respect to x[2] should be put in grad_f[2]).
       * The boolean variable new_x will be false if the last call to any of the
       * evaluation methods (eval_*) used the same \f$ x\f$ values. This can be helpful
       * when users have efficient implementations that calculate multiple outputs at
       * once. IPOPT internally caches results from the TNLP and generally, this flag
       * can be ignored.
       * The variable n is passed in for your convenience; it will have the same value
       * you specified in get_nlp_info.
       * \return true on success, false otherwise.
       */
      virtual bool eval_grad_f(Index n, const Number *x, bool new_x, Number *grad_f);
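      /* Illustrative only: the analytic gradient of the same hypothetical
         Rosenbrock objective, stored in the same order as x:

            bool eval_grad_f(Index n, const Number *x, bool new_x, Number *grad_f)
            {
               const Number b = x[1] - x[0] * x[0];
               grad_f[0] = 2.0 * (x[0] - 1.0) - 400.0 * x[0] * b; // df/dx0
               grad_f[1] = 200.0 * b;                             // df/dx1
               return true;
            }
      */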
      /**
       * Return the value of the constraint function at the point \f$ x\f$.
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param x (in), the values for the primal variables, \f$ x\f$, at which the
       * constraint functions, \f$ g(x)\f$, are to be evaluated.
       * \param new_x (in), false if any evaluation method was previously called with the
       * same values in x, true otherwise.
       * \param m (in), the number of constraints in the problem (dimension of \f$ g(x)\f$).
       * \param g (out), the array of constraint function values, \f$ g(x)\f$.
       * The values returned in g should be only the \f$ g(x)\f$ values; do not add or
       * subtract the bound values \f$ g^L\f$ or \f$ g^U\f$.
       * The boolean variable new_x will be false if the last call to any of the
       * evaluation methods (eval_*) used the same \f$ x\f$ values. This can be helpful
       * when users have efficient implementations that calculate multiple outputs at
       * once. IPOPT internally caches results from the TNLP and generally, this flag
       * can be ignored.
       * The variables n and m are passed in for your convenience; they will have the
       * same values you specified in get_nlp_info.
       * \return true on success, false otherwise.
       */
      virtual bool eval_g(Index n, const Number *x, bool new_x, Index m, Number *g);
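      /* Illustrative only: a single hypothetical constraint g0(x) = x0 * x1
         (for the unconstrained sketches above, m == 0 and this body would be empty):

            bool eval_g(Index n, const Number *x, bool new_x, Index m, Number *g)
            {
               g[0] = x[0] * x[1]; // raw constraint value, without its bounds
               return true;
            }
      */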

      /**
       * Return either the sparsity structure of the Jacobian of the constraints, or the
       * values for the Jacobian of the constraints at the point \f$ x\f$.
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param x (in), the values for the primal variables, \f$ x\f$, at which the
       * constraint Jacobian, \f$ \nabla g(x)^T\f$, is to be evaluated.
       * \param new_x (in), false if any evaluation method was previously called with the
       * same values in x, true otherwise.
       * \param m (in), the number of constraints in the problem (dimension of \f$ g(x)\f$).
       * \param nele_jac (in), the number of nonzero elements in the Jacobian (dimension
       * of iRow, jCol, and values).
       * \param iRow (out), the row indices of entries in the Jacobian of the constraints.
       * \param jCol (out), the column indices of entries in the Jacobian of the constraints.
       * \param values (out), the values of the entries in the Jacobian of the constraints.
       * The Jacobian is the matrix of derivatives where the derivative of constraint
       * \f$ g^{(i)}\f$ with respect to variable \f$ x^{(j)}\f$ is placed in row \f$ i\f$
       * and column \f$ j\f$. See Appendix A of the Ipopt documentation for a discussion
       * of the sparse matrix format used in this method.
       *
       * If the iRow and jCol arguments are not NULL, then IPOPT wants you to fill in the
       * sparsity structure of the Jacobian (the row and column indices only). At this
       * time, the x argument and the values argument will be NULL.
       *
       * If the x argument and the values argument are not NULL, then IPOPT wants you to
       * fill in the values of the Jacobian as calculated from the array x (using the
       * same order as you used when specifying the sparsity structure). At this time,
       * the iRow and jCol arguments will be NULL.
       *
       * The boolean variable new_x will be false if the last call to any of the
       * evaluation methods (eval_*) used the same \f$ x\f$ values. This can be helpful
       * when users have efficient implementations that calculate multiple outputs at
       * once. IPOPT internally caches results from the TNLP and generally, this flag
       * can be ignored.
       *
       * The variables n, m, and nele_jac are passed in for your convenience. These
       * arguments will have the same values you specified in get_nlp_info.
       * \return true on success, false otherwise.
       */
      virtual bool eval_jac_g(Index n, const Number *x, bool new_x, Index m, Index nele_jac, Index *iRow, Index *jCol,
                              Number *values);
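      /* Illustrative only: the two-call protocol for a dense 1x2 Jacobian of
         the hypothetical constraint g0(x) = x0 * x1:

            bool eval_jac_g(Index n, const Number *x, bool new_x, Index m, Index nele_jac,
                            Index *iRow, Index *jCol, Number *values)
            {
               if (values == NULL) {
                  // first call: sparsity structure only
                  iRow[0] = 0; jCol[0] = 0;
                  iRow[1] = 0; jCol[1] = 1;
               } else {
                  // later calls: values, in the same order as the structure
                  values[0] = x[1]; // d g0 / d x0
                  values[1] = x[0]; // d g0 / d x1
               }
               return true;
            }
      */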
      /**
       * Return either the sparsity structure of the Hessian of the Lagrangian, or the
       * values of the Hessian of the Lagrangian
       * <a href="https://www.coin-or.org/Ipopt/documentation/node22.html#eq:IpoptLAG">(9)</a>
       * for the given values of \f$ x\f$, \f$ \sigma_f\f$, and \f$ \lambda\f$.
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param x (in), the values for the primal variables, \f$ x\f$, at which the
       * Hessian is to be evaluated.
       * \param new_x (in), false if any evaluation method was previously called with the
       * same values in x, true otherwise.
       * \param obj_factor (in), factor in front of the objective term in the Hessian, \f$ \sigma_f\f$.
       * \param m (in), the number of constraints in the problem (dimension of \f$ g(x)\f$).
       * \param lambda (in), the values for the constraint multipliers, \f$ \lambda\f$,
       * at which the Hessian is to be evaluated.
       * \param new_lambda (in), false if any evaluation method was previously called
       * with the same values in lambda, true otherwise.
       * \param nele_hess (in), the number of nonzero elements in the Hessian (dimension
       * of iRow, jCol, and values).
       * \param iRow (out), the row indices of entries in the Hessian.
       * \param jCol (out), the column indices of entries in the Hessian.
       * \param values (out), the values of the entries in the Hessian.
       *
       * The Hessian matrix that IPOPT uses is defined in
       * <a href="https://www.coin-or.org/Ipopt/documentation/node22.html#eq:IpoptLAG">(9)</a>.
       * See Appendix A of the Ipopt documentation for a discussion of the sparse
       * symmetric matrix format used in this method.
       *
       * If the iRow and jCol arguments are not NULL, then IPOPT wants you to fill in the
       * sparsity structure of the Hessian (the row and column indices for the lower or
       * upper triangular part only). In this case, the x, lambda, and values arrays will
       * be NULL.
       * \return true on success, false otherwise.
       */
      virtual bool eval_h(Index n, const Number *x, bool new_x, Number obj_factor, Index m, const Number *lambda,
                          bool new_lambda, Index nele_hess, Index *iRow, Index *jCol, Number *values);
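      /* Illustrative only: lower-triangular Hessian of the Lagrangian for the
         hypothetical Rosenbrock objective and constraint g0(x) = x0 * x1 used above:

            bool eval_h(Index n, const Number *x, bool new_x, Number obj_factor, Index m,
                        const Number *lambda, bool new_lambda, Index nele_hess,
                        Index *iRow, Index *jCol, Number *values)
            {
               if (values == NULL) {
                  Index idx = 0; // fill the lower triangle row by row
                  for (Index row = 0; row < 2; row++)
                     for (Index col = 0; col <= row; col++) {
                        iRow[idx] = row; jCol[idx] = col; idx++;
                     }
               } else {
                  values[0] = obj_factor * (2.0 - 400.0 * (x[1] - 3.0 * x[0] * x[0])); // (0,0)
                  values[1] = obj_factor * (-400.0 * x[0]) + lambda[0] * 1.0;          // (1,0)
                  values[2] = obj_factor * 200.0;                                      // (1,1)
               }
               return true;
            }
      */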
      /**
       * This method is called by IPOPT after the algorithm has finished (successfully
       * or even with most errors).
       * \param status (in), gives the status of the algorithm as specified in IpAlgTypes.hpp:
       * - SUCCESS: Algorithm terminated successfully at a locally optimal point,
       *   satisfying the convergence tolerances (which can be specified by options).
       * - MAXITER_EXCEEDED: Maximum number of iterations exceeded (can be specified by an option).
       * - CPUTIME_EXCEEDED: Maximum number of CPU seconds exceeded (can be specified by an option).
       * - STOP_AT_TINY_STEP: Algorithm proceeds with very little progress.
       * - STOP_AT_ACCEPTABLE_POINT: Algorithm stopped at a point that was converged,
       *   not to "desired" tolerances, but to "acceptable" tolerances (see the acceptable-... options).
       * - LOCAL_INFEASIBILITY: Algorithm converged to a point of local infeasibility.
       *   Problem may be infeasible.
       * - USER_REQUESTED_STOP: The user call-back function intermediate_callback
       *   returned false, i.e., the user code requested a premature termination of the optimization.
       * - DIVERGING_ITERATES: It seems that the iterates diverge.
       * - RESTORATION_FAILURE: Restoration phase failed, algorithm doesn't know how to proceed.
       * - ERROR_IN_STEP_COMPUTATION: An unrecoverable error occurred while IPOPT tried
       *   to compute the search direction.
       * - INVALID_NUMBER_DETECTED: Algorithm received an invalid number (such as NaN or
       *   Inf) from the NLP; see also the option check_derivatives_for_naninf.
       * - INTERNAL_ERROR: An unknown internal error occurred. Please contact the IPOPT
       *   authors through the mailing list.
       * \param n (in), the number of variables in the problem (dimension of \f$ x\f$).
       * \param x (in), the final values for the primal variables, \f$ x_*\f$.
       * \param z_L (in), the final values for the lower bound multipliers, \f$ z^L_*\f$.
       * \param z_U (in), the final values for the upper bound multipliers, \f$ z^U_*\f$.
       * \param m (in), the number of constraints in the problem (dimension of \f$ g(x)\f$).
       * \param g (in), the final values of the constraint functions, \f$ g(x_*)\f$.
       * \param lambda (in), the final values of the constraint multipliers, \f$ \lambda_*\f$.
       * \param obj_value (in), the final value of the objective, \f$ f(x_*)\f$.
       * \param ip_data (in), provided for expert users.
       * \param ip_cq (in), provided for expert users.
       * This method gives you the return status of the algorithm (SolverReturn) and the
       * values of the variables, the objective, and the constraint function values when
       * the algorithm exited.
       */
      virtual void finalize_solution(SolverReturn status, Index n, const Number *x, const Number *z_L,
                                     const Number *z_U, Index m, const Number *g, const Number *lambda,
                                     Number obj_value, const IpoptData *ip_data, IpoptCalculatedQuantities *ip_cq);
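      /* Illustrative only: a typical body inspects the return status and stores
         the solution for the owning minimizer, e.g.:

            void finalize_solution(SolverReturn status, Index n, const Number *x, const Number *z_L,
                                   const Number *z_U, Index m, const Number *g, const Number *lambda,
                                   Number obj_value, const IpoptData *ip_data,
                                   IpoptCalculatedQuantities *ip_cq)
            {
               if (status == Ipopt::SUCCESS) {
                  // copy x[0..n-1] and obj_value into the owning IpoptMinimizer
               }
            }
      */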

   private:
      /** @name Methods to block default compiler methods.
       * The compiler automatically generates the following three methods. Since the
       * default compiler implementation is generally not what you want (for all but
       * the most simple classes), we usually put the declarations of these methods in
       * the private section and never implement them. This prevents the compiler from
       * implementing an incorrect "default" behavior without us knowing it.
       * (See Scott Meyers' book "Effective C++".)
       */
      InternalTNLP(const InternalTNLP &);
      InternalTNLP &operator=(const InternalTNLP &);
   };

public:
   /**
      Default constructor
   */
   IpoptMinimizer();

   /**
      Constructor with a string giving the name of the algorithm
   */
   IpoptMinimizer(const char *type);

   /**
      Destructor
   */
   virtual ~IpoptMinimizer();

private:
   // usually copying is non-trivial, so we make it inaccessible

   /**
      Copy constructor
   */
   IpoptMinimizer(const IpoptMinimizer &);

   /**
      Assignment operator
   */
   IpoptMinimizer &operator=(const IpoptMinimizer &rhs)
   {
      if (this == &rhs) {
         return *this; // time-saving self-test
      }
      return *this;
   }

public:
   /// set the function to minimize
   virtual void SetFunction(const ROOT::Math::IMultiGenFunction &func);

   /// set the function to minimize
   virtual void SetFunction(const ROOT::Math::IMultiGradFunction &func);

   /// method to perform the minimization
   virtual bool Minimize();

   virtual void SetNNZerosJacobian(UInt_t nzeros);

   virtual void SetNNZerosHessian(UInt_t nzeros);

   virtual void SetOptionStringValue(const char *var, const char *value);
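   /* A sketch of configuring the underlying Ipopt application through this
      interface ("linear_solver" and "mumps" are standard Ipopt option
      names/values; whether a given solver applies depends on how Ipopt was built):

         ROOT::Math::IpoptMinimizer minimizer;
         minimizer.SetOptionStringValue("linear_solver", "mumps");
   */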

   /// return expected distance reached from the minimum
   virtual double Edm() const { return 0; } // not impl.

   /// return pointer to gradient values at the minimum
   // virtual const double *MinGradient() const;

   /// number of function calls to reach the minimum
   // virtual unsigned int NCalls() const;

   /// minimizer provides error and error matrix
   virtual bool ProvidesError() const { return false; }

   /// return errors at the minimum
   virtual const double *Errors() const { return 0; }

   /** return covariance matrix elements;
       if the variable is fixed the matrix is zero.
       The ordering of the variables is the same as in the errors.
   */
   virtual double CovMatrix(unsigned int, unsigned int) const { return 0; }

protected:
   Ipopt::SmartPtr<InternalTNLP> fInternalTNLP;
   ClassDef(IpoptMinimizer, 0)
};

} // end namespace Math

} // end namespace ROOT

#endif /* ROOT_Math_IpoptMinimizer */