#ifndef O2SCL_GSL_MMIN_CONF_H
#define O2SCL_GSL_MMIN_CONF_H

#include <gsl/gsl_blas.h>
#include <gsl/gsl_multimin.h>

#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/matrix.hpp>

#include <o2scl/mmin.h>
#include <o2scl/misc.h>
#include <o2scl/cblas.h>

#ifndef DOXYGEN_NO_O2NS
namespace o2scl {
#endif

  /** \brief Base minimization routines for mmin_conf and mmin_conp
   */
  template<class func_t = multi_funct,
           class vec_t = boost::numeric::ublas::vector<double>,
           class dfunc_t = grad_funct,
           class auto_grad_t =
           gradient<multi_funct,boost::numeric::ublas::vector<double> >,
           class def_auto_grad_t =
           gradient_gsl<multi_funct,boost::numeric::ublas::vector<double> > >
  class mmin_gsl_base : public mmin_base<func_t,dfunc_t,vec_t> {

#ifndef DOXYGEN_INTERNAL

  protected:

  typedef boost::numeric::ublas::vector<double> ubvector;

  /// User-specified function
  func_t *func;

  /// User-specified gradient
  dfunc_t *grad;

  /// If true, a gradient has been specified
  bool grad_given;

  /// Number of variables
  size_t dim;

  /// Automatic gradient object
  auto_grad_t *agrad;
  /// Take a step
  void take_step(const vec_t &x, const vec_t &px, double stepx,
                 double lambda, vec_t &x1x, vec_t &dx) {

    // Set dx = -stepx*lambda*px
    for(size_t i=0;i<this->dim;i++) dx[i]=0.0;
    o2scl_cblas::daxpy(-stepx*lambda,this->dim,px,dx);

    // Set x1x = x + dx
    for(size_t i=0;i<this->dim;i++) x1x[i]=x[i];
    o2scl_cblas::daxpy(1.0,this->dim,dx,x1x);

    return;
  }
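  // In vector form, take_step() computes x1x = x - stepx*lambda*px.
  // Callers pass lambda = dir/pnorm, so px is effectively normalized
  // and stepx is the actual step length along the descent direction.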
  /** \brief Line minimization

      Perform a line minimization in the region (stepa,fa) to
      (stepc,fc) to find an intermediate step satisfying
      fa > fb < fc, choosing an initial trial step by parabolic
      interpolation.
  */
  void intermediate_point(const vec_t &x, const vec_t &px,
                          double lambda, double pg, double stepa,
                          double stepc, double fa, double fc,
                          vec_t &x1x, vec_t &dx, vec_t &gradient,
                          double *stepx, double *f) {

    double stepb, fb;
    bool trial_failed;

    do {

      // Trial step from parabolic interpolation
      double u = fabs (pg * lambda * stepc);
      stepb = 0.5 * stepc * u / ((fc - fa) + u);

      take_step(x, px, stepb, lambda, x1x, dx);
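      // The trial step above is the minimizer of the parabola
      // through (0,fa) and (stepc,fc) with initial slope -u/stepc,
      // where u = |pg*lambda*stepc|:
      //   stepb = u*stepc/(2*((fc-fa)+u))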
      // Exit immediately if the trial point is identical to the
      // initial point (this check was added in GSL-1.15)
      bool vector_equal=true;
      for(size_t i=0;i<dim;i++) {
        if (x[i]!=x1x[i]) vector_equal=false;
      }
      if (vector_equal) {
        *stepx=0.0;
        *f=fa;
        // Evaluate the gradient
        if (grad_given) {
          (*grad)(dim,x1x,gradient);
        } else {
          (*agrad)(dim,x1x,gradient);
        }
        return;
      }
      // Evaluate the function at the trial point
      fb=(*func)(dim,x1x);

      trial_failed=false;
      if (fb >= fa && stepb > 0.0) {
        // Downhill step failed: reduce the step size and try again
        fc=fb;
        stepc=stepb;
        trial_failed=true;
      }

    } while (trial_failed);

    *stepx=stepb;
    *f=fb;

    // Evaluate the gradient at the accepted point
    if (grad_given) {
      (*grad)(dim,x1x,gradient);
    } else {
      (*agrad)(dim,x1x,gradient);
    }

    return;
  }
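  // The dispatch just above recurs throughout this header: when the
  // user supplied an analytic gradient via set_de() (grad_given is
  // true), *grad is called; otherwise the automatic gradient object
  // *agrad (finite differencing by default) stands in for it.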
  /** \brief Perform the minimization

      Starting from the bracket (stepa,fa), (stepb,fb), (stepc,fc),
      move along the direction xp to locate the line minimum,
      returning the new point, its function value, and the gradient
      norm there.
  */
  void min(const vec_t &x, const vec_t &xp, double lambda,
           double stepa, double stepb, double stepc, double fa,
           double fb, double fc, double xtol, vec_t &x1x,
           vec_t &dx1x, vec_t &x2x, vec_t &dx2x, vec_t &gradient,
           double *xstep, double *f, double *gnorm_u) {
    double u = stepb, v = stepa, w = stepc;
    double fu = fb, fv = fa, fw = fc;

    double old2 = fabs(w - v);
    double old1 = fabs(v - u);

    double stepm, fm, pg, gnorm1;

    for(size_t i=0;i<dim;i++) {
      x2x[i]=x1x[i];
      dx2x[i]=dx1x[i];
    }
    // Refine the bracket for at most nmaxiter iterations
    while (true) {

      // ...

      // Trial offset du from the parabola through (u,fu), (v,fv),
      // and (w,fw), measured relative to the current best point u
      double dw = w - u;
      double dv = v - u;
      double du = 0.0;
      double e1 = ((fv - fu) * dw * dw + (fu - fw) * dv * dv);
      double e2 = 2.0 * ((fv - fu) * dw + (fu - fw) * dv);

      if (e2 != 0.0) {
        du = e1 / e2;
      }

      if (du > 0.0 && du < (stepc - stepb) && fabs(du) < 0.5 * old2) {
        stepm = u + du;
      } else if (du < 0.0 && du > (stepa - stepb) &&
                 fabs(du) < 0.5 * old2) {
        stepm = u + du;
      } else if ((stepc - stepb) > (stepb - stepa)) {
        stepm = 0.38 * (stepc - stepb) + stepb;
      } else {
        stepm = stepb - 0.38 * (stepb - stepa);
      }

      take_step (x, xp, stepm, lambda, x1x, dx1x);
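      // The logic above mirrors a Brent-style line search: the
      // parabolic step du = e1/e2 is trusted only while it stays
      // inside the bracket and is less than half the second-to-last
      // update; otherwise the larger subinterval is bisected with
      // the golden-section-like factor 0.38.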
      fm=(*func)(dim,x1x);

      if (fm > fb) {

        // The trial point is worse: use it to tighten the bracket
        if (fm < fv) {
          // ... (replace v with stepm)
        } else if (fm < fw) {
          w = stepm;
          fw = fm;
        }
        // ... (shrink [stepa,stepc] around stepb)

      } else if (fm <= fb) {

        // The trial point improves the minimum: shift the bracket,
        // record the new best point, and re-evaluate the gradient
        old2 = old1;
        old1 = fabs(u - stepm);
        // ... (rotate u, v, w and fu, fv, fw)

        for(size_t i=0;i<dim;i++) {
          x2x[i]=x1x[i];
          dx2x[i]=dx1x[i];
        }

        if (grad_given) {
          (*grad)(dim,x1x,gradient);
        } else {
          (*agrad)(dim,x1x,gradient);
        }

        pg=o2scl_cblas::ddot(dim,xp,gradient);
        gnorm1=o2scl_cblas::dnrm2(dim,gradient);
        *f = fm;
        *xstep = stepm;
        *gnorm_u = gnorm1;

        // Convergence test on the directional derivative
        if (fabs (pg * lambda / gnorm1) < xtol) {
          // Success
          return;
        }
        // ... (otherwise update the bracket and loop again)
      }
    }
  }
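  // The convergence test in min() compares |pg*lambda|/gnorm1,
  // essentially the cosine of the angle between the search direction
  // and the gradient: the line minimum is accepted once the gradient
  // is nearly orthogonal to the search direction.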
  /// Set the function
  int base_set(func_t &ufunc, auto_grad_t &u_def_grad) {
    func=&ufunc;
    agrad=&u_def_grad;
    grad_given=false;
    return 0;
  }

  // ... (base_set_de(), base_allocate(), and base_free() follow)

  };
  /** \brief Multidimensional minimization by the Fletcher-Reeves
      conjugate gradient algorithm (GSL)
  */
  template<class func_t = multi_funct,
           class vec_t = boost::numeric::ublas::vector<double>,
           class dfunc_t = grad_funct,
           class auto_grad_t =
           gradient<multi_funct,boost::numeric::ublas::vector<double> >,
           class def_auto_grad_t =
           gradient_gsl<multi_funct,boost::numeric::ublas::vector<double> > >
  class mmin_conf :
    public mmin_gsl_base<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t> {
#ifndef DOXYGEN_INTERNAL

  protected:

  // ... (workspace vectors x1, x2, dx, dx1, p, g0, the gradient,
  // their norms pnorm and g0norm, the current step, and the
  // iteration counter)

  public:

  /// Perform an iteration
  virtual int iterate() {

    // ...

    double fa = it_min, fb, fc;
    double dir;
    double stepa = 0.0, stepb, stepc = step, g1norm;
    if (pnorm == 0.0 || g0norm == 0.0) {
      // Neither a search direction nor a gradient is available, so
      // no progress can be made from this point
      for(size_t i=0;i<this->dim;i++) dx[i]=0.0;
      O2SCL_CONV2_RET("Either pnorm or g0norm vanished ",
                      "in mmin_conf::iterate().",
                      exc_enoprog,this->err_nonconv);
    }

    // Determine which direction is downhill, +p or -p
    double pg=o2scl_cblas::ddot(this->dim,p,gradient);
    dir = (pg >= 0.0) ? +1.0 : -1.0;
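    // Since pg = p.g, choosing dir this way guarantees that the
    // trial step below, x1 = x - stepc*(dir/pnorm)*p, moves
    // downhill whichever way p happens to point.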
    // Take a trial step of size stepc along the normalized
    // search direction
    this->take_step (x, p, stepc, dir / pnorm, x1, dx);

    // Evaluate the function at the trial point
    fc=(*this->func)(this->dim,x1);
    if (fc < fa) {
      // Success: the full step reduced the function value, so
      // accept the new point and update the gradient
      // ...
      for(size_t i=0;i<this->dim;i++) {
        x[i]=x1[i];
      }
      if (this->grad_given) {
        (*this->grad)(this->dim,x1,gradient);
      } else {
        (*this->agrad)(this->dim,x1,gradient);
      }
      // ...
      return success;
    }

    // Otherwise, perform a line minimization in the bracket
    // (stepa,fa) to (stepc,fc)
    this->intermediate_point(x,p,dir / pnorm,pg,stepa,stepc,fa,fc,x1,
                             dx1,gradient,&stepb,&fb);

    if (stepb == 0.0) {
      O2SCL_CONV2_RET("Variable stepb is zero ",
                      "in mmin_conf::iterate().",
                      exc_enoprog,this->err_nonconv);
    }
    this->min(x,p,dir / pnorm,stepa,stepb,stepc,fa,fb,fc,tol,
              x1,dx1,x2,dx,gradient,&step,&it_min,&g1norm);

    for(size_t i=0;i<this->dim;i++) x[i]=x2[i];

    // Choose a new conjugate direction for the next step
    iter=(iter+1) % this->dim;

    if (iter==0) {
      // Periodic restart along the steepest-descent direction
      for(size_t i=0;i<this->dim;i++) p[i]=gradient[i];
      pnorm=g1norm;
    } else {
      // Fletcher-Reeves update: p' = g1 - beta*p
      double beta=-pow(g1norm/g0norm, 2.0);
      o2scl_cblas::dscal(-beta,this->dim,p);
      o2scl_cblas::daxpy(1.0,this->dim,gradient,p);
      pnorm=o2scl_cblas::dnrm2(this->dim,p);
    }

    g0norm=g1norm;
    for(size_t i=0;i<this->dim;i++) {
      g0[i]=gradient[i];
    }

    return success;
  }
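  // The coefficient above is the Fletcher-Reeves choice,
  // beta_FR = |g1|^2/|g0|^2, and the dscal/daxpy pair implements
  // p <- g1 + beta_FR*p, the standard conjugate direction update.
  // Restarting from the steepest-descent direction every dim
  // iterations is the usual safeguard against loss of conjugacy.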
  /// Set the function and initial guess
  virtual int set(vec_t &x, double u_step_size, double tol_u,
                  func_t &ufunc) {

    // ...
    for(size_t i=0;i<this->dim;i++) ugx[i]=x[i];
    // Evaluate the function and its automatic gradient at the
    // initial point
    it_min=ufunc(this->dim,x);
    this->agrad->set_function(ufunc);
    (*this->agrad)(this->dim,x,ugg);

    // Use the gradient as the initial search direction
    for(size_t i=0;i<this->dim;i++) {
      p[i]=ugg[i];
      g0[i]=ugg[i];
    }
    // ... (initialize pnorm and g0norm from the gradient norm)
  }
  /// Set the function, the gradient, and the initial guess
  virtual int set_de(vec_t &x, double u_step_size, double tol_u,
                     func_t &ufunc, dfunc_t &udfunc) {

    // ...
    for(size_t i=0;i<this->dim;i++) ugx[i]=x[i];

    // Evaluate the function and the user-specified gradient at the
    // initial point
    it_min=ufunc(this->dim,x);
    udfunc(this->dim,x,ugg);

    // Use the gradient as the initial search direction
    for(size_t i=0;i<this->dim;i++) {
      p[i]=ugg[i];
      g0[i]=ugg[i];
    }
    // ... (initialize pnorm and g0norm, as in set())
  }
  /** \brief Calculate the minimum min of func w.r.t. the array
      x of size nvar
  */
  virtual int mmin(size_t nn, vec_t &xx, double &fmin,
                   func_t &ufunc) {

    if (nn==0) {
      O2SCL_ERR2("Tried to min over zero variables ",
                 "in mmin_conf::mmin().",exc_einval);
    }

    // ...
    set(xx,step_size,lmin_tol,ufunc);

    // ... (call iterate() until the gradient norm falls below
    // tol_rel or this->ntrial iterations are exhausted)

    for(size_t i=0;i<nn;i++) xx[i]=ugx[i];
    fmin=it_min;
    // Call the error handler if the iteration limit was reached
    if (this->last_ntrial==this->ntrial) {
      std::string err="Exceeded max number of iterations, "+
        dtos(this->ntrial)+", in mmin_conf::mmin().";
      O2SCL_CONV_RET(err.c_str(),exc_emaxiter,this->err_nonconv);
    }

    return 0;
  }
  /** \brief Calculate the minimum min of func w.r.t. the array
      x of size nvar, using the user-specified gradient
  */
  virtual int mmin_de(size_t nn, vec_t &xx, double &fmin,
                      func_t &ufunc, dfunc_t &udfunc) {

    if (nn==0) {
      O2SCL_ERR2("Tried to min over zero variables ",
                 "in mmin_conf::mmin_de().",exc_einval);
    }

    // ...
    set_de(xx,step_size,lmin_tol,ufunc,udfunc);

    // ... (same iteration loop as in mmin())

    for(size_t i=0;i<nn;i++) xx[i]=ugx[i];
    fmin=it_min;
    // Call the error handler if the iteration limit was reached
    if (this->last_ntrial==this->ntrial) {
      std::string err="Exceeded max number of iterations, "+
        dtos(this->ntrial)+", in mmin_conf::mmin().";
      O2SCL_CONV_RET(err.c_str(),exc_emaxiter,this->err_nonconv);
    }

    return 0;
  }
  /// Return string denoting type ("mmin_conf")
  virtual const char *type() { return "mmin_conf"; }
#ifndef DOXYGEN_INTERNAL

  private:

  // Copy constructor (private, unimplemented)
  mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>
  (const mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>&);

#endif

  };
#ifndef DOXYGEN_NO_O2NS
}
#endif

#endif

Symbols referenced in the listing above:

gradient
Class for automatically computing gradients [abstract base].
int base_set_de(func_t &ufunc, dfunc_t &udfunc)
Set the function and the gradient.
std::function< double(size_t, const boost::numeric::ublas::vector< double > &)> multi_funct
Multi-dimensional function typedef.
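For instance (a hypothetical illustration, not part of the documentation), any callable with this signature converts to multi_funct:

o2scl::multi_funct f=[](size_t nv,
                        const boost::numeric::ublas::vector<double> &x) {
  // A simple paraboloid
  return x[0]*x[0]+x[1]*x[1];
};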
virtual int iterate()
Perform an iteration.
int base_allocate(size_t nn)
Allocate memory.
namespace o2scl
The main O2scl namespace.
virtual const char * type()
Return string denoting type ("mmin_conf").
#define O2SCL_CONV_RET(d, n, b)
Set a "convergence" error and return the error value.
virtual int mmin_de(size_t nn, vec_t &xx, double &fmin, func_t &ufunc, dfunc_t &udfunc)
Calculate the minimum min of func w.r.t. the array x of size nvar.
virtual int free()
Free the allocated memory.
bool grad_given
If true, a gradient has been specified.
const char * type()
Return string denoting type ("mmin_base")
void min(const vec_t &x, const vec_t &xp, double lambda, double stepa, double stepb, double stepc, double fa, double fb, double fc, double xtol, vec_t &x1x, vec_t &dx1x, vec_t &x2x, vec_t &dx2x, vec_t &gradient, double *xstep, double *f, double *gnorm_u)
Perform the minimization.
std::function< int(size_t, boost::numeric::ublas::vector< double > &, boost::numeric::ublas::vector< double > &)> grad_funct
Gradient function typedef.
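A matching analytic gradient can be wrapped the same way (again a hypothetical illustration):

o2scl::grad_funct g=[](size_t nv,
                       boost::numeric::ublas::vector<double> &x,
                       boost::numeric::ublas::vector<double> &grad) {
  // Gradient of the paraboloid above
  grad[0]=2.0*x[0];
  grad[1]=2.0*x[1];
  return 0;
};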
int base_set(func_t &ufunc, auto_grad_t &u_def_grad)
Set the function.
exc_einval
Invalid argument supplied by user.
int restart()
Reset the minimizer to use the current point as a new starting point.
void intermediate_point(const vec_t &x, const vec_t &px, double lambda, double pg, double stepa, double stepc, double fa, double fc, vec_t &x1x, vec_t &dx, vec_t &gradient, double *stepx, double *f)
Line minimization.
exc_emaxiter
Exceeded max number of iterations.
virtual int mmin(size_t nn, vec_t &xx, double &fmin, func_t &ufunc)
Calculate the minimum min of func w.r.t. the array x of size nvar.
func_t * func
User-specified function.
#define O2SCL_CONV2_RET(d, d2, n, b)
Set an error and return the error value, two-string version.
mmin_gsl_base
Base minimization routines for mmin_conf and mmin_conp.
exc_econtinue
Iteration has not converged.
int verbose
Output control.
int base_free()
Clear allocated memory.
int nmaxiter
Maximum iterations for line minimization (default 10)
int iter
Iteration number.
double dnrm2(const size_t N, const vec_t &X)
Compute the norm of the vector X.
bool err_nonconv
If true, call the error handler if the routine does not "converge".
exc_enoprog
Iteration is not making progress toward solution.
mmin_base
Multidimensional minimization [abstract base].
int print_iter(size_t nv, vec2_t &x, double y, int iter, double value, double limit, std::string comment)
Print out iteration information.
#define O2SCL_ERR2(d, d2, n)
Set an error, two-string version.
double deriv_h
Stepsize for finite-differencing.
std::string dtos(double x, int prec=6, bool auto_prec=false)
Convert a double to a string.
auto_grad_t * agrad
Automatic gradient object.
double ddot(const size_t N, const vec_t &X, const vec2_t &Y)
Compute the dot product $X \cdot Y = \sum_i X_i Y_i$.
gradient_gsl
Simple automatic computation of gradient by finite differencing.
int last_ntrial
The number of iterations in the most recent minimization.
vec_t ugx
Proposed minimum.
void dscal(const double alpha, const size_t N, vec_t &X)
Compute $X \leftarrow \alpha X$.
double lmin_tol
Tolerance for the line minimization.
void daxpy(const double alpha, const size_t N, const vec_t &X, vec2_t &Y)
Compute $Y \leftarrow \alpha X + Y$.
double tol_rel
Function value tolerance.
double step_size
Size of the initial step (default 0.01)
virtual int set_de(vec_t &x, double u_step_size, double tol_u, func_t &ufunc, dfunc_t &udfunc)
Set the function and initial guess.
def_auto_grad_t def_grad
Default automatic gradient object.
mmin_conf
Multidimensional minimization by the Fletcher-Reeves conjugate gradient algorithm (GSL).
dfunc_t * grad
User-specified gradient.
virtual int allocate(size_t n)
Allocate the memory.
void take_step(const vec_t &x, const vec_t &px, double stepx, double lambda, vec_t &x1x, vec_t &dx)
Take a step.
int ntrial
Maximum number of iterations.
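Putting the pieces together, a minimal usage sketch follows. It is not from the O2scl distribution: the quadratic test function and everything in main() are invented for illustration, assuming the headers above are installed in the usual locations.

#include <boost/numeric/ublas/vector.hpp>
#include <o2scl/multi_funct.h>
#include <o2scl/mmin_conf.h>

typedef boost::numeric::ublas::vector<double> ubvector;

// Hypothetical test function: a paraboloid with minimum at (1,2)
double quad(size_t nv, const ubvector &x) {
  return (x[0]-1.0)*(x[0]-1.0)+(x[1]-2.0)*(x[1]-2.0);
}

int main(void) {
  o2scl::mmin_conf<> mc;
  o2scl::multi_funct mf=quad;
  ubvector x(2);
  x[0]=0.0; x[1]=0.0;
  double fmin;
  // Minimize over two variables; since no analytic gradient is
  // given, the default finite-difference gradient is used
  mc.mmin(2,x,fmin,mf);
  // x should now be near (1,2) and fmin near zero
  return 0;
}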