alglibinternal.h   alglibinternal.h 
skipping to change at line 112 skipping to change at line 112
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS) // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
ae_int_t getrdfserializationcode(ae_state *_state);
ae_int_t getkdtreeserializationcode(ae_state *_state);
ae_int_t getmlpserializationcode(ae_state *_state);
void taskgenint1d(double a, void taskgenint1d(double a,
double b, double b,
ae_int_t n, ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_state *_state); ae_state *_state);
void taskgenint1dequidist(double a, void taskgenint1dequidist(double a,
double b, double b,
ae_int_t n, ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
skipping to change at line 152 skipping to change at line 155
void ivectorsetlengthatleast(/* Integer */ ae_vector* x, void ivectorsetlengthatleast(/* Integer */ ae_vector* x,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void rvectorsetlengthatleast(/* Real */ ae_vector* x, void rvectorsetlengthatleast(/* Real */ ae_vector* x,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void rmatrixsetlengthatleast(/* Real */ ae_matrix* x, void rmatrixsetlengthatleast(/* Real */ ae_matrix* x,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void rmatrixresize(/* Real */ ae_matrix* x,
ae_int_t m,
ae_int_t n,
ae_state *_state);
ae_bool isfinitevector(/* Real */ ae_vector* x, ae_bool isfinitevector(/* Real */ ae_vector* x,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
ae_bool isfinitecvector(/* Complex */ ae_vector* z, ae_bool isfinitecvector(/* Complex */ ae_vector* z,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
ae_bool apservisfinitematrix(/* Real */ ae_matrix* x, ae_bool apservisfinitematrix(/* Real */ ae_matrix* x,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
skipping to change at line 188 skipping to change at line 195
double safepythag2(double x, double y, ae_state *_state); double safepythag2(double x, double y, ae_state *_state);
double safepythag3(double x, double y, double z, ae_state *_state); double safepythag3(double x, double y, double z, ae_state *_state);
ae_int_t saferdiv(double x, double y, double* r, ae_state *_state); ae_int_t saferdiv(double x, double y, double* r, ae_state *_state);
double safeminposrv(double x, double y, double v, ae_state *_state); double safeminposrv(double x, double y, double v, ae_state *_state);
void apperiodicmap(double* x, void apperiodicmap(double* x,
double a, double a,
double b, double b,
double* k, double* k,
ae_state *_state); ae_state *_state);
double boundval(double x, double b1, double b2, ae_state *_state); double boundval(double x, double b1, double b2, ae_state *_state);
void alloccomplex(ae_serializer* s, ae_complex v, ae_state *_state);
void serializecomplex(ae_serializer* s, ae_complex v, ae_state *_state);
ae_complex unserializecomplex(ae_serializer* s, ae_state *_state);
void allocrealarray(ae_serializer* s,
/* Real */ ae_vector* v,
ae_int_t n,
ae_state *_state);
void serializerealarray(ae_serializer* s,
/* Real */ ae_vector* v,
ae_int_t n,
ae_state *_state);
void unserializerealarray(ae_serializer* s,
/* Real */ ae_vector* v,
ae_state *_state);
void allocintegerarray(ae_serializer* s,
/* Integer */ ae_vector* v,
ae_int_t n,
ae_state *_state);
void serializeintegerarray(ae_serializer* s,
/* Integer */ ae_vector* v,
ae_int_t n,
ae_state *_state);
void unserializeintegerarray(ae_serializer* s,
/* Integer */ ae_vector* v,
ae_state *_state);
void allocrealmatrix(ae_serializer* s,
/* Real */ ae_matrix* v,
ae_int_t n0,
ae_int_t n1,
ae_state *_state);
void serializerealmatrix(ae_serializer* s,
/* Real */ ae_matrix* v,
ae_int_t n0,
ae_int_t n1,
ae_state *_state);
void unserializerealmatrix(ae_serializer* s,
/* Real */ ae_matrix* v,
ae_state *_state);
void copyintegerarray(/* Integer */ ae_vector* src,
/* Integer */ ae_vector* dst,
ae_state *_state);
void copyrealarray(/* Real */ ae_vector* src,
/* Real */ ae_vector* dst,
ae_state *_state);
void copyrealmatrix(/* Real */ ae_matrix* src,
/* Real */ ae_matrix* dst,
ae_state *_state);
ae_int_t recsearch(/* Integer */ ae_vector* a,
ae_int_t nrec,
ae_int_t nheader,
ae_int_t i0,
ae_int_t i1,
/* Integer */ ae_vector* b,
ae_state *_state);
ae_bool _apbuffers_init(apbuffers* p, ae_state *_state, ae_bool make_automa tic); ae_bool _apbuffers_init(apbuffers* p, ae_state *_state, ae_bool make_automa tic);
ae_bool _apbuffers_init_copy(apbuffers* dst, apbuffers* src, ae_state *_sta te, ae_bool make_automatic); ae_bool _apbuffers_init_copy(apbuffers* dst, apbuffers* src, ae_state *_sta te, ae_bool make_automatic);
void _apbuffers_clear(apbuffers* p); void _apbuffers_clear(apbuffers* p);
void tagsort(/* Real */ ae_vector* a, void tagsort(/* Real */ ae_vector* a,
ae_int_t n, ae_int_t n,
/* Integer */ ae_vector* p1, /* Integer */ ae_vector* p1,
/* Integer */ ae_vector* p2, /* Integer */ ae_vector* p2,
ae_state *_state); ae_state *_state);
void tagsortbuf(/* Real */ ae_vector* a, void tagsortbuf(/* Real */ ae_vector* a,
ae_int_t n, ae_int_t n,
skipping to change at line 657 skipping to change at line 718
double* stp, double* stp,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void mcsrch(ae_int_t n, void mcsrch(ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
double* f, double* f,
/* Real */ ae_vector* g, /* Real */ ae_vector* g,
/* Real */ ae_vector* s, /* Real */ ae_vector* s,
double* stp, double* stp,
double stpmax, double stpmax,
double gtol,
ae_int_t* info, ae_int_t* info,
ae_int_t* nfev, ae_int_t* nfev,
/* Real */ ae_vector* wa, /* Real */ ae_vector* wa,
linminstate* state, linminstate* state,
ae_int_t* stage, ae_int_t* stage,
ae_state *_state); ae_state *_state);
void armijocreate(ae_int_t n, void armijocreate(ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
double f, double f,
/* Real */ ae_vector* s, /* Real */ ae_vector* s,
skipping to change at line 684 skipping to change at line 746
ae_int_t* info, ae_int_t* info,
double* stp, double* stp,
double* f, double* f,
ae_state *_state); ae_state *_state);
ae_bool _linminstate_init(linminstate* p, ae_state *_state, ae_bool make_au tomatic); ae_bool _linminstate_init(linminstate* p, ae_state *_state, ae_bool make_au tomatic);
ae_bool _linminstate_init_copy(linminstate* dst, linminstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _linminstate_init_copy(linminstate* dst, linminstate* src, ae_state *_state, ae_bool make_automatic);
void _linminstate_clear(linminstate* p); void _linminstate_clear(linminstate* p);
ae_bool _armijostate_init(armijostate* p, ae_state *_state, ae_bool make_au tomatic); ae_bool _armijostate_init(armijostate* p, ae_state *_state, ae_bool make_au tomatic);
ae_bool _armijostate_init_copy(armijostate* dst, armijostate* src, ae_state *_state, ae_bool make_automatic); ae_bool _armijostate_init_copy(armijostate* dst, armijostate* src, ae_state *_state, ae_bool make_automatic);
void _armijostate_clear(armijostate* p); void _armijostate_clear(armijostate* p);
void trimprepare(double f, double* threshold, ae_state *_state);
void trimfunction(double* f,
/* Real */ ae_vector* g,
ae_int_t n,
double threshold,
ae_state *_state);
void ftbasegeneratecomplexfftplan(ae_int_t n, void ftbasegeneratecomplexfftplan(ae_int_t n,
ftplan* plan, ftplan* plan,
ae_state *_state); ae_state *_state);
void ftbasegeneraterealfftplan(ae_int_t n, ftplan* plan, ae_state *_state); void ftbasegeneraterealfftplan(ae_int_t n, ftplan* plan, ae_state *_state);
void ftbasegeneraterealfhtplan(ae_int_t n, ftplan* plan, ae_state *_state); void ftbasegeneraterealfhtplan(ae_int_t n, ftplan* plan, ae_state *_state);
void ftbaseexecuteplan(/* Real */ ae_vector* a, void ftbaseexecuteplan(/* Real */ ae_vector* a,
ae_int_t aoffset, ae_int_t aoffset,
ae_int_t n, ae_int_t n,
ftplan* plan, ftplan* plan,
ae_state *_state); ae_state *_state);
 End of changes. 5 change blocks. 
0 lines changed or deleted 68 lines changed or added


 alglibmisc.h   alglibmisc.h 
skipping to change at line 44 skipping to change at line 44
ae_int_t s2; ae_int_t s2;
double v; double v;
ae_int_t magicv; ae_int_t magicv;
} hqrndstate; } hqrndstate;
typedef struct typedef struct
{ {
ae_int_t n; ae_int_t n;
ae_int_t nx; ae_int_t nx;
ae_int_t ny; ae_int_t ny;
ae_int_t normtype; ae_int_t normtype;
ae_int_t distmatrixtype;
ae_matrix xy; ae_matrix xy;
ae_vector tags; ae_vector tags;
ae_vector boxmin; ae_vector boxmin;
ae_vector boxmax; ae_vector boxmax;
ae_vector curboxmin;
ae_vector curboxmax;
double curdist;
ae_vector nodes; ae_vector nodes;
ae_vector splits; ae_vector splits;
ae_vector x; ae_vector x;
ae_int_t kneeded; ae_int_t kneeded;
double rneeded; double rneeded;
ae_bool selfmatch; ae_bool selfmatch;
double approxf; double approxf;
ae_int_t kcur; ae_int_t kcur;
ae_vector idx; ae_vector idx;
ae_vector r; ae_vector r;
ae_vector buf; ae_vector buf;
ae_vector curboxmin;
ae_vector curboxmax;
double curdist;
ae_int_t debugcounter; ae_int_t debugcounter;
} kdtree; } kdtree;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS C++ INTERFACE // THIS SECTION CONTAINS C++ INTERFACE
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
skipping to change at line 219 skipping to change at line 218
Random number generator: exponential distribution Random number generator: exponential distribution
State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). State structure must be initialized with HQRNDRandomize() or HQRNDSeed().
-- ALGLIB -- -- ALGLIB --
Copyright 11.08.2007 by Bochkanov Sergey Copyright 11.08.2007 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double hqrndexponential(const hqrndstate &state, const double lambdav); double hqrndexponential(const hqrndstate &state, const double lambdav);
/************************************************************************* /*************************************************************************
This function serializes data structure to string.
Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
and Windows-style (CR+LF) newlines
* although serializer uses spaces and CR+LF as separators, you can
replace any separator character by arbitrary combination of spaces,
tabs, Windows or Unix newlines. It allows flexible reformatting of
the string in case you want to include it into text or XML file.
But you should not insert separators into the middle of the "words"
nor you should change case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
and big endian machines, and so on. You can serialize structure on
32-bit machine and unserialize it on 64-bit one (or vice versa), or
serialize it on SPARC and unserialize on x86. You can also
serialize it in C++ version of ALGLIB and unserialize in C# one,
and vice versa.
*************************************************************************/
void kdtreeserialize(kdtree &obj, std::string &s_out);
/*************************************************************************
This function unserializes data structure from string.
*************************************************************************/
void kdtreeunserialize(std::string &s_in, kdtree &obj);
/*************************************************************************
KD-tree creation KD-tree creation
This subroutine creates KD-tree from set of X-values and optional Y-values This subroutine creates KD-tree from set of X-values and optional Y-values
INPUT PARAMETERS INPUT PARAMETERS
XY - dataset, array[0..N-1,0..NX+NY-1]. XY - dataset, array[0..N-1,0..NX+NY-1].
one row corresponds to one point. one row corresponds to one point.
first NX columns contain X-values, next NY (NY may be zero) first NX columns contain X-values, next NY (NY may be zero)
columns may contain associated Y-values columns may contain associated Y-values
N - number of points, N>=1 N - number of points, N>=1
skipping to change at line 659 skipping to change at line 685
ae_state *_state); ae_state *_state);
void kdtreequeryresultsxyi(kdtree* kdt, void kdtreequeryresultsxyi(kdtree* kdt,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
ae_state *_state); ae_state *_state);
void kdtreequeryresultstagsi(kdtree* kdt, void kdtreequeryresultstagsi(kdtree* kdt,
/* Integer */ ae_vector* tags, /* Integer */ ae_vector* tags,
ae_state *_state); ae_state *_state);
void kdtreequeryresultsdistancesi(kdtree* kdt, void kdtreequeryresultsdistancesi(kdtree* kdt,
/* Real */ ae_vector* r, /* Real */ ae_vector* r,
ae_state *_state); ae_state *_state);
void kdtreealloc(ae_serializer* s, kdtree* tree, ae_state *_state);
void kdtreeserialize(ae_serializer* s, kdtree* tree, ae_state *_state);
void kdtreeunserialize(ae_serializer* s, kdtree* tree, ae_state *_state);
ae_bool _kdtree_init(kdtree* p, ae_state *_state, ae_bool make_automatic); ae_bool _kdtree_init(kdtree* p, ae_state *_state, ae_bool make_automatic);
ae_bool _kdtree_init_copy(kdtree* dst, kdtree* src, ae_state *_state, ae_bo ol make_automatic); ae_bool _kdtree_init_copy(kdtree* dst, kdtree* src, ae_state *_state, ae_bo ol make_automatic);
void _kdtree_clear(kdtree* p); void _kdtree_clear(kdtree* p);
} }
#endif #endif
 End of changes. 5 change blocks. 
4 lines changed or deleted 33 lines changed or added


 ap.h   ap.h 
skipping to change at line 66 skipping to change at line 66
#define AE_SPARC 2 #define AE_SPARC 2
/* /*
* automatically determine compiler * automatically determine compiler
*/ */
#define AE_COMPILER AE_UNKNOWN #define AE_COMPILER AE_UNKNOWN
#ifdef __GNUC__ #ifdef __GNUC__
#undef AE_COMPILER #undef AE_COMPILER
#define AE_COMPILER AE_GNUC #define AE_COMPILER AE_GNUC
#endif #endif
#ifdef __SUNPRO_C #if defined(__SUNPRO_C)||defined(__SUNPRO_CC)
#undef AE_COMPILER #undef AE_COMPILER
#define AE_COMPILER AE_SUNC #define AE_COMPILER AE_SUNC
#endif #endif
#ifdef _MSC_VER #ifdef _MSC_VER
#undef AE_COMPILER #undef AE_COMPILER
#define AE_COMPILER AE_MSVC #define AE_COMPILER AE_MSVC
#endif #endif
/* /*
* if we work under C++ environment, define several conditions * if we work under C++ environment, define several conditions
*/ */
#ifdef AE_USE_CPP #ifdef AE_USE_CPP
#define AE_USE_CPP_BOOL #define AE_USE_CPP_BOOL
#define AE_USE_CPP_ERROR_HANDLING #define AE_USE_CPP_ERROR_HANDLING
#define AE_USE_CPP_SERIALIZATION
#endif #endif
/* /*
* define ae_int32_t, ae_int64_t, ae_int_t, ae_bool, ae_complex, ae_error_t ype and ae_datatype * define ae_int32_t, ae_int64_t, ae_int_t, ae_bool, ae_complex, ae_error_t ype and ae_datatype
*/ */
#if defined(AE_HAVE_STDINT) #if defined(AE_HAVE_STDINT)
#include <stdint.h> #include <stdint.h>
#endif #endif
#if defined(AE_INT32_T) #if defined(AE_INT32_T)
skipping to change at line 163 skipping to change at line 164
#ifdef AE_USE_CPP #ifdef AE_USE_CPP
} // end of namespace declaration, subsequent includes must be out of names pace } // end of namespace declaration, subsequent includes must be out of names pace
#endif #endif
#if AE_COMPILER==AE_MSVC #if AE_COMPILER==AE_MSVC
#include <emmintrin.h> #include <emmintrin.h>
#define AE_HAS_SSE2_INTRINSICS #define AE_HAS_SSE2_INTRINSICS
#endif #endif
#if (AE_COMPILER==AE_GNUC)||(AE_COMPILER==AE_SUNC) #if AE_COMPILER==AE_GNUC
#include <xmmintrin.h> #include <xmmintrin.h>
#define AE_HAS_SSE2_INTRINSICS #define AE_HAS_SSE2_INTRINSICS
#endif #endif
#if AE_COMPILER==AE_SUNC
#include <xmmintrin.h>
#include <emmintrin.h>
#define AE_HAS_SSE2_INTRINSICS
#endif
#ifdef AE_USE_CPP #ifdef AE_USE_CPP
namespace alglib_impl { // namespace declaration continued namespace alglib_impl { // namespace declaration continued
#endif #endif
#endif #endif
#endif #endif
typedef struct { double x, y; } ae_complex; typedef struct { double x, y; } ae_complex;
typedef enum typedef enum
skipping to change at line 316 skipping to change at line 323
} ae_dyn_block; } ae_dyn_block;
/************************************************************************ /************************************************************************
frame marker frame marker
************************************************************************/ ************************************************************************/
typedef struct ae_frame typedef struct ae_frame
{ {
ae_dyn_block db_marker; ae_dyn_block db_marker;
} ae_frame; } ae_frame;
/************************************************************************
ALGLIB environment state
************************************************************************/
typedef struct typedef struct
{ {
ae_int_t endianness; ae_int_t endianness;
double v_nan; double v_nan;
double v_posinf; double v_posinf;
double v_neginf; double v_neginf;
ae_dyn_block * volatile p_top_block; ae_dyn_block * volatile p_top_block;
ae_dyn_block last_block; ae_dyn_block last_block;
#ifndef AE_USE_CPP_ERROR_HANDLING #ifndef AE_USE_CPP_ERROR_HANDLING
jmp_buf * volatile break_jump; jmp_buf * volatile break_jump;
#endif #endif
ae_error_type volatile last_error; ae_error_type volatile last_error;
const char* volatile error_msg; const char* volatile error_msg;
} ae_state; } ae_state;
/************************************************************************
Serializer
************************************************************************/
typedef struct
{
ae_int_t mode;
ae_int_t entries_needed;
ae_int_t entries_saved;
ae_int_t bytes_asked;
ae_int_t bytes_written;
#ifdef AE_USE_CPP_SERIALIZATION
std::string *out_cppstr;
#endif
char *out_str;
const char *in_str;
} ae_serializer;
typedef void(*ae_deallocator)(void*); typedef void(*ae_deallocator)(void*);
typedef struct ae_vector typedef struct ae_vector
{ {
ae_int_t cnt; ae_int_t cnt;
ae_datatype datatype; ae_datatype datatype;
ae_dyn_block data; ae_dyn_block data;
union union
{ {
void *p_ptr; void *p_ptr;
skipping to change at line 422 skipping to change at line 450
ae_bool x_is_symmetric(x_matrix *a); ae_bool x_is_symmetric(x_matrix *a);
ae_bool x_is_hermitian(x_matrix *a); ae_bool x_is_hermitian(x_matrix *a);
ae_bool x_force_symmetric(x_matrix *a); ae_bool x_force_symmetric(x_matrix *a);
ae_bool x_force_hermitian(x_matrix *a); ae_bool x_force_hermitian(x_matrix *a);
ae_bool ae_is_symmetric(ae_matrix *a); ae_bool ae_is_symmetric(ae_matrix *a);
ae_bool ae_is_hermitian(ae_matrix *a); ae_bool ae_is_hermitian(ae_matrix *a);
ae_bool ae_force_symmetric(ae_matrix *a); ae_bool ae_force_symmetric(ae_matrix *a);
ae_bool ae_force_hermitian(ae_matrix *a); ae_bool ae_force_hermitian(ae_matrix *a);
void ae_serializer_init(ae_serializer *serializer);
void ae_serializer_clear(ae_serializer *serializer);
void ae_serializer_alloc_start(ae_serializer *serializer);
void ae_serializer_alloc_entry(ae_serializer *serializer);
ae_int_t ae_serializer_get_alloc_size(ae_serializer *serializer);
#ifdef AE_USE_CPP_SERIALIZATION
void ae_serializer_sstart_str(ae_serializer *serializer, std::string *buf);
void ae_serializer_ustart_str(ae_serializer *serializer, const std::string
*buf);
#endif
void ae_serializer_sstart_str(ae_serializer *serializer, char *buf);
void ae_serializer_ustart_str(ae_serializer *serializer, const char *buf);
void ae_serializer_serialize_bool(ae_serializer *serializer, ae_bool v, ae_
state *state);
void ae_serializer_serialize_int(ae_serializer *serializer, ae_int_t v, ae_
state *state);
void ae_serializer_serialize_double(ae_serializer *serializer, double v, ae
_state *state);
void ae_serializer_unserialize_bool(ae_serializer *serializer, ae_bool *v,
ae_state *state);
void ae_serializer_unserialize_int(ae_serializer *serializer, ae_int_t *v,
ae_state *state);
void ae_serializer_unserialize_double(ae_serializer *serializer, double *v,
ae_state *state);
void ae_serializer_stop(ae_serializer *serializer);
/************************************************************************ /************************************************************************
Service functions Service functions
************************************************************************/ ************************************************************************/
void ae_assert(ae_bool cond, const char *msg, ae_state *state); void ae_assert(ae_bool cond, const char *msg, ae_state *state);
ae_int_t ae_cpuid(); ae_int_t ae_cpuid();
/************************************************************************ /************************************************************************
Real math functions: Real math functions:
* IEEE-compliant floating point comparisons * IEEE-compliant floating point comparisons
* standard functions * standard functions
 End of changes. 7 change blocks. 
2 lines changed or deleted 60 lines changed or added


 dataanalysis.h   dataanalysis.h 
skipping to change at line 99 skipping to change at line 99
double avgerror; double avgerror;
double avgrelerror; double avgrelerror;
double cvrmserror; double cvrmserror;
double cvavgerror; double cvavgerror;
double cvavgrelerror; double cvavgrelerror;
ae_int_t ncvdefects; ae_int_t ncvdefects;
ae_vector cvdefects; ae_vector cvdefects;
} lrreport; } lrreport;
typedef struct typedef struct
{ {
ae_int_t hlnetworktype;
ae_int_t hlnormtype;
ae_vector hllayersizes;
ae_vector hlconnections;
ae_vector hlneurons;
ae_vector structinfo; ae_vector structinfo;
ae_vector weights; ae_vector weights;
ae_vector columnmeans; ae_vector columnmeans;
ae_vector columnsigmas; ae_vector columnsigmas;
ae_vector neurons; ae_vector neurons;
ae_vector dfdnet; ae_vector dfdnet;
ae_vector derror; ae_vector derror;
ae_vector x; ae_vector x;
ae_vector y; ae_vector y;
ae_matrix chunks; ae_matrix chunks;
ae_vector nwbuf; ae_vector nwbuf;
ae_vector integerbuf;
} multilayerperceptron; } multilayerperceptron;
typedef struct typedef struct
{ {
ae_vector w; ae_vector w;
} logitmodel; } logitmodel;
typedef struct typedef struct
{ {
ae_bool brackt; ae_bool brackt;
ae_bool stage1; ae_bool stage1;
ae_int_t infoc; ae_int_t infoc;
skipping to change at line 150 skipping to change at line 156
double width1; double width1;
double xtrapf; double xtrapf;
} logitmcstate; } logitmcstate;
typedef struct typedef struct
{ {
ae_int_t ngrad; ae_int_t ngrad;
ae_int_t nhess; ae_int_t nhess;
} mnlreport; } mnlreport;
typedef struct typedef struct
{ {
ae_int_t n;
ae_vector states;
ae_int_t npairs;
ae_matrix data;
ae_matrix ec;
ae_matrix bndl;
ae_matrix bndu;
ae_matrix c;
ae_vector ct;
ae_int_t ccnt;
ae_vector pw;
ae_matrix priorp;
double regterm;
minbleicstate bs;
ae_int_t repinneriterationscount;
ae_int_t repouteriterationscount;
ae_int_t repnfev;
ae_int_t repterminationtype;
minbleicreport br;
ae_vector tmpp;
ae_vector effectivew;
ae_vector effectivebndl;
ae_vector effectivebndu;
ae_matrix effectivec;
ae_vector effectivect;
ae_vector h;
ae_matrix p;
} mcpdstate;
typedef struct
{
ae_int_t inneriterationscount;
ae_int_t outeriterationscount;
ae_int_t nfev;
ae_int_t terminationtype;
} mcpdreport;
typedef struct
{
ae_int_t ngrad; ae_int_t ngrad;
ae_int_t nhess; ae_int_t nhess;
ae_int_t ncholesky; ae_int_t ncholesky;
} mlpreport; } mlpreport;
typedef struct typedef struct
{ {
double relclserror; double relclserror;
double avgce; double avgce;
double rmserror; double rmserror;
double avgerror; double avgerror;
skipping to change at line 407 skipping to change at line 450
mnlreport(); mnlreport();
mnlreport(const mnlreport &rhs); mnlreport(const mnlreport &rhs);
mnlreport& operator=(const mnlreport &rhs); mnlreport& operator=(const mnlreport &rhs);
virtual ~mnlreport(); virtual ~mnlreport();
ae_int_t &ngrad; ae_int_t &ngrad;
ae_int_t &nhess; ae_int_t &nhess;
}; };
/************************************************************************* /*************************************************************************
This structure is a MCPD (Markov Chains for Population Data) solver.
You should use ALGLIB functions in order to work with this object.
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
class _mcpdstate_owner
{
public:
_mcpdstate_owner();
_mcpdstate_owner(const _mcpdstate_owner &rhs);
_mcpdstate_owner& operator=(const _mcpdstate_owner &rhs);
virtual ~_mcpdstate_owner();
alglib_impl::mcpdstate* c_ptr();
alglib_impl::mcpdstate* c_ptr() const;
protected:
alglib_impl::mcpdstate *p_struct;
};
class mcpdstate : public _mcpdstate_owner
{
public:
mcpdstate();
mcpdstate(const mcpdstate &rhs);
mcpdstate& operator=(const mcpdstate &rhs);
virtual ~mcpdstate();
};
/*************************************************************************
This structure is a MCPD training report:
InnerIterationsCount - number of inner iterations of the
underlying optimization algorithm
OuterIterationsCount - number of outer iterations of the
underlying optimization algorithm
NFEV - number of merit function evaluations
TerminationType - termination type
(same as for MinBLEIC optimizer, positive
values denote success, negative ones -
failure)
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
class _mcpdreport_owner
{
public:
_mcpdreport_owner();
_mcpdreport_owner(const _mcpdreport_owner &rhs);
_mcpdreport_owner& operator=(const _mcpdreport_owner &rhs);
virtual ~_mcpdreport_owner();
alglib_impl::mcpdreport* c_ptr();
alglib_impl::mcpdreport* c_ptr() const;
protected:
alglib_impl::mcpdreport *p_struct;
};
class mcpdreport : public _mcpdreport_owner
{
public:
mcpdreport();
mcpdreport(const mcpdreport &rhs);
mcpdreport& operator=(const mcpdreport &rhs);
virtual ~mcpdreport();
ae_int_t &inneriterationscount;
ae_int_t &outeriterationscount;
ae_int_t &nfev;
ae_int_t &terminationtype;
};
/*************************************************************************
Training report: Training report:
* NGrad - number of gradient calculations * NGrad - number of gradient calculations
* NHess - number of Hessian calculations * NHess - number of Hessian calculations
* NCholesky - number of Cholesky decompositions * NCholesky - number of Cholesky decompositions
*************************************************************************/ *************************************************************************/
class _mlpreport_owner class _mlpreport_owner
{ {
public: public:
_mlpreport_owner(); _mlpreport_owner();
_mlpreport_owner(const _mlpreport_owner &rhs); _mlpreport_owner(const _mlpreport_owner &rhs);
skipping to change at line 548 skipping to change at line 662
Note: Note:
content of all arrays is changed by subroutine; content of all arrays is changed by subroutine;
it doesn't allocate temporaries. it doesn't allocate temporaries.
-- ALGLIB -- -- ALGLIB --
Copyright 11.12.2008 by Bochkanov Sergey Copyright 11.12.2008 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_ array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_a rray &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int _t &info, double &threshold, double &rms, double &cvrms); void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_ array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_a rray &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int _t &info, double &threshold, double &rms, double &cvrms);
/************************************************************************* /*************************************************************************
This function serializes data structure to string.
Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
and Windows-style (CR+LF) newlines
* although serializer uses spaces and CR+LF as separators, you can
replace any separator character by arbitrary combination of spaces,
tabs, Windows or Unix newlines. It allows flexible reformatting of
the string in case you want to include it into text or XML file.
But you should not insert separators into the middle of the "words"
nor you should change case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
and big endian machines, and so on. You can serialize structure on
32-bit machine and unserialize it on 64-bit one (or vice versa), or
serialize it on SPARC and unserialize on x86. You can also
serialize it in C++ version of ALGLIB and unserialize in C# one,
and vice versa.
*************************************************************************/
void dfserialize(decisionforest &obj, std::string &s_out);
/*************************************************************************
This function unserializes data structure from string.
*************************************************************************/
void dfunserialize(std::string &s_in, decisionforest &obj);
/*************************************************************************
This subroutine builds random decision forest. This subroutine builds random decision forest.
INPUT PARAMETERS: INPUT PARAMETERS:
XY - training set XY - training set
NPoints - training set size, NPoints>=1 NPoints - training set size, NPoints>=1
NVars - number of independent variables, NVars>=1 NVars - number of independent variables, NVars>=1
NClasses - task type: NClasses - task type:
* NClasses=1 - regression task with one * NClasses=1 - regression task with one
dependent variable dependent variable
* NClasses>1 - classification task with * NClasses>1 - classification task with
skipping to change at line 583 skipping to change at line 724
DF - model built DF - model built
Rep - training report, contains error on a training set Rep - training report, contains error on a training set
and out-of-bag estimates of generalization error. and out-of-bag estimates of generalization error.
-- ALGLIB -- -- ALGLIB --
Copyright 19.02.2009 by Bochkanov Sergey Copyright 19.02.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void dfbuildrandomdecisionforest(const real_2d_array &xy, const ae_int_t np oints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees , const double r, ae_int_t &info, decisionforest &df, dfreport &rep); void dfbuildrandomdecisionforest(const real_2d_array &xy, const ae_int_t np oints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees , const double r, ae_int_t &info, decisionforest &df, dfreport &rep);
/************************************************************************* /*************************************************************************
This subroutine builds random decision forest.
This function gives ability to tune number of variables used when choosing
best split.
INPUT PARAMETERS:
XY - training set
NPoints - training set size, NPoints>=1
NVars - number of independent variables, NVars>=1
NClasses - task type:
* NClasses=1 - regression task with one
dependent variable
* NClasses>1 - classification task with
NClasses classes.
NTrees - number of trees in a forest, NTrees>=1.
recommended values: 50-100.
NRndVars - number of variables used when choosing best split
R - percent of a training set used to build
individual trees. 0<R<=1.
recommended values: 0.1 <= R <= 0.66.
OUTPUT PARAMETERS:
Info - return code:
* -2, if there is a point with class number
outside of [0..NClasses-1].
* -1, if incorrect parameters was passed
(NPoints<1, NVars<1, NClasses<1, NTrees<1, R<=0
or R>1).
* 1, if task has been solved
DF - model built
Rep - training report, contains error on a training set
and out-of-bag estimates of generalization error.
-- ALGLIB --
Copyright 19.02.2009 by Bochkanov Sergey
*************************************************************************/
void dfbuildrandomdecisionforestx1(const real_2d_array &xy, const ae_int_t
npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntre
es, const ae_int_t nrndvars, const double r, ae_int_t &info, decisionforest
&df, dfreport &rep);
/*************************************************************************
Processing
INPUT PARAMETERS: INPUT PARAMETERS:
DF - decision forest model DF - decision forest model
X - input vector, array[0..NVars-1]. X - input vector, array[0..NVars-1].
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
Y - result. Regression estimate when solving regression task, Y - result. Regression estimate when solving regression task,
vector of posterior probabilities for classification task. vector of posterior probabilities for classification task.
skipping to change at line 720 skipping to change at line 899
Restarts - number of restarts, Restarts>=1 Restarts - number of restarts, Restarts>=1
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
Info - return code: Info - return code:
* -3, if task is degenerate (number of distinct points is * -3, if task is degenerate (number of distinct points is
less than K) less than K)
            * -1, if incorrect NPoints/NFeatures/K/Restarts was passed
* 1, if subroutine finished successfully * 1, if subroutine finished successfully
C - array[0..NVars-1,0..K-1].matrix whose columns store C - array[0..NVars-1,0..K-1].matrix whose columns store
cluster's centers cluster's centers
    XYC         -   array[NPoints], which contains cluster indexes - the
                    indexes of the clusters that dataset points belong to.
-- ALGLIB -- -- ALGLIB --
Copyright 21.03.2009 by Bochkanov Sergey Copyright 21.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc); void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc);
/************************************************************************* /*************************************************************************
Multiclass Fisher LDA Multiclass Fisher LDA
Subroutine finds coefficients of linear combination which optimally separates
skipping to change at line 986 skipping to change at line 1164
RESULT: RESULT:
average relative error. average relative error.
-- ALGLIB -- -- ALGLIB --
Copyright 30.08.2008 by Bochkanov Sergey Copyright 30.08.2008 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double lravgrelerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints); double lravgrelerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
/************************************************************************* /*************************************************************************
This function serializes data structure to string.
Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
and Windows-style (CR+LF) newlines
* although serializer uses spaces and CR+LF as separators, you can
replace any separator character by arbitrary combination of spaces,
tabs, Windows or Unix newlines. It allows flexible reformatting of
the string in case you want to include it into text or XML file.
But you should not insert separators into the middle of the "words"
nor you should change case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
and big endian machines, and so on. You can serialize structure on
32-bit machine and unserialize it on 64-bit one (or vice versa), or
serialize it on SPARC and unserialize on x86. You can also
serialize it in C++ version of ALGLIB and unserialize in C# one,
and vice versa.
*************************************************************************/
void mlpserialize(multilayerperceptron &obj, std::string &s_out);
/*************************************************************************
This function unserializes data structure from string.
*************************************************************************/
void mlpunserialize(std::string &s_in, multilayerperceptron &obj);
/*************************************************************************
Creates neural network with NIn inputs, NOut outputs, without hidden Creates neural network with NIn inputs, NOut outputs, without hidden
layers, with linear output layer. Network weights are filled with small layers, with linear output layer. Network weights are filled with small
random values. random values.
-- ALGLIB -- -- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mlpcreate0(const ae_int_t nin, const ae_int_t nout, multilayerperceptr on &network); void mlpcreate0(const ae_int_t nin, const ae_int_t nout, multilayerperceptr on &network);
/************************************************************************* /*************************************************************************
skipping to change at line 1135 skipping to change at line 1340
/************************************************************************* /*************************************************************************
Tells whether network is SOFTMAX-normalized (i.e. classifier) or not. Tells whether network is SOFTMAX-normalized (i.e. classifier) or not.
-- ALGLIB -- -- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
bool mlpissoftmax(const multilayerperceptron &network); bool mlpissoftmax(const multilayerperceptron &network);
/************************************************************************* /*************************************************************************
This function returns total number of layers (including input, hidden and
output layers).
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpgetlayerscount(const multilayerperceptron &network);
/*************************************************************************
This function returns size of K-th layer.
K=0 corresponds to input layer, K=CNT-1 corresponds to output layer.
Size of the output layer is always equal to the number of outputs, although
when we have softmax-normalized network, last neuron doesn't have any
connections - it is just zero.
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpgetlayersize(const multilayerperceptron &network, const ae_int_
t k);
/*************************************************************************
This function returns offset/scaling coefficients for I-th input of the
network.
INPUT PARAMETERS:
Network - network
I - input index
OUTPUT PARAMETERS:
Mean - mean term
Sigma - sigma term, guaranteed to be nonzero.
I-th input is passed through linear transformation
IN[i] = (IN[i]-Mean)/Sigma
before feeding to the network
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpgetinputscaling(const multilayerperceptron &network, const ae_int_t
i, double &mean, double &sigma);
/*************************************************************************
This function returns offset/scaling coefficients for I-th output of the
network.
INPUT PARAMETERS:
Network - network
I - input index
OUTPUT PARAMETERS:
Mean - mean term
Sigma - sigma term, guaranteed to be nonzero.
I-th output is passed through linear transformation
OUT[i] = OUT[i]*Sigma+Mean
before returning it to user. In case we have SOFTMAX-normalized network,
we return (Mean,Sigma)=(0.0,1.0).
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpgetoutputscaling(const multilayerperceptron &network, const ae_int_
t i, double &mean, double &sigma);
/*************************************************************************
This function returns information about Ith neuron of Kth layer
INPUT PARAMETERS:
Network - network
K - layer index
I - neuron index (within layer)
OUTPUT PARAMETERS:
FKind - activation function type (used by MLPActivationFunction
())
this value is zero for input or linear neurons
Threshold - also called offset, bias
zero for input neurons
NOTE: this function throws exception if layer or neuron with given index
do not exists.
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpgetneuroninfo(const multilayerperceptron &network, const ae_int_t k
, const ae_int_t i, ae_int_t &fkind, double &threshold);
/*************************************************************************
This function returns information about connection from I0-th neuron of
K0-th layer to I1-th neuron of K1-th layer.
INPUT PARAMETERS:
Network - network
K0 - layer index
I0 - neuron index (within layer)
K1 - layer index
I1 - neuron index (within layer)
RESULT:
connection weight (zero for non-existent connections)
This function:
1. throws exception if layer or neuron with given index do not exists.
2. returns zero if neurons exist, but there is no connection between them
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
double mlpgetweight(const multilayerperceptron &network, const ae_int_t k0,
const ae_int_t i0, const ae_int_t k1, const ae_int_t i1);
/*************************************************************************
This function sets offset/scaling coefficients for I-th input of the
network.
INPUT PARAMETERS:
Network - network
I - input index
Mean - mean term
Sigma - sigma term (if zero, will be replaced by 1.0)
NOTE: I-th input is passed through linear transformation
IN[i] = (IN[i]-Mean)/Sigma
before feeding to the network. This function sets Mean and Sigma.
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetinputscaling(const multilayerperceptron &network, const ae_int_t
i, const double mean, const double sigma);
/*************************************************************************
This function sets offset/scaling coefficients for I-th output of the
network.
INPUT PARAMETERS:
Network - network
I - input index
Mean - mean term
Sigma - sigma term (if zero, will be replaced by 1.0)
OUTPUT PARAMETERS:
NOTE: I-th output is passed through linear transformation
OUT[i] = OUT[i]*Sigma+Mean
before returning it to user. This function sets Sigma/Mean. In case we
have SOFTMAX-normalized network, you can not set (Sigma,Mean) to anything
other than (0.0,1.0) - this function will throw an exception.
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetoutputscaling(const multilayerperceptron &network, const ae_int_
t i, const double mean, const double sigma);
/*************************************************************************
This function modifies information about Ith neuron of Kth layer
INPUT PARAMETERS:
Network - network
K - layer index
I - neuron index (within layer)
FKind - activation function type (used by MLPActivationFunction
())
this value must be zero for input neurons
(you can not set activation function for input neurons)
Threshold - also called offset, bias
this value must be zero for input neurons
(you can not set threshold for input neurons)
NOTES:
1. this function throws exception if layer or neuron with given index do
not exists.
2. this function also throws exception when you try to set non-linear
activation function for input neurons (any kind of network) or for outpu
t
neurons of classifier network.
3. this function throws exception when you try to set non-zero threshold fo
r
input neurons (any kind of network).
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetneuroninfo(const multilayerperceptron &network, const ae_int_t k
, const ae_int_t i, const ae_int_t fkind, const double threshold);
/*************************************************************************
This function modifies information about connection from I0-th neuron of
K0-th layer to I1-th neuron of K1-th layer.
INPUT PARAMETERS:
Network - network
K0 - layer index
I0 - neuron index (within layer)
K1 - layer index
I1 - neuron index (within layer)
W - connection weight (must be zero for non-existent
connections)
This function:
1. throws exception if layer or neuron with given index do not exists.
2. throws exception if you try to set non-zero weight for non-existent
connection
-- ALGLIB --
Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetweight(const multilayerperceptron &network, const ae_int_t k0, c
onst ae_int_t i0, const ae_int_t k1, const ae_int_t i1, const double w);
/*************************************************************************
Neural network activation function
INPUT PARAMETERS:
NET - neuron input
K - function index (zero for linear function)
OUTPUT PARAMETERS:
F - function
DF - its derivative
D2F - its second derivative
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpactivationfunction(const double net, const ae_int_t k, double &f, d
ouble &df, double &d2f);
/*************************************************************************
Processing
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network Network - neural network
X - input vector, array[0..NIn-1]. X - input vector, array[0..NIn-1].
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
Y - result. Regression estimate when solving regression task, Y - result. Regression estimate when solving regression task,
vector of posterior probabilities for classification task. vector of posterior probabilities for classification task.
skipping to change at line 1588 skipping to change at line 2014
/************************************************************************* /*************************************************************************
Classification error on test set = MNLRelClsError*NPoints Classification error on test set = MNLRelClsError*NPoints
-- ALGLIB -- -- ALGLIB --
Copyright 10.09.2008 by Bochkanov Sergey Copyright 10.09.2008 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
ae_int_t mnlclserror(const logitmodel &lm, const real_2d_array &xy, const a e_int_t npoints); ae_int_t mnlclserror(const logitmodel &lm, const real_2d_array &xy, const a e_int_t npoints);
/************************************************************************* /*************************************************************************
DESCRIPTION:
This function creates MCPD (Markov Chains for Population Data) solver.
This solver can be used to find transition matrix P for N-dimensional
prediction problem where transition from X[i] to X[i+1] is modelled as
X[i+1] = P*X[i]
where X[i] and X[i+1] are N-dimensional population vectors (components of
each X are non-negative), and P is a N*N transition matrix (elements of P
are non-negative, each column sums to 1.0).
Such models arise when:
* there is some population of individuals
* individuals can have different states
* individuals can transit from one state to another
* population size is constant, i.e. there is no new individuals and no one
leaves population
* you want to model transitions of individuals from one state into another
USAGE:
Here we give very brief outline of the MCPD. We strongly recommend you to
read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide
on data analysis which is available at http://www.alglib.net/dataanalysis/
1. User initializes algorithm state with MCPDCreate() call
2. User adds one or more tracks - sequences of states which describe
evolution of a system being modelled from different starting conditions
3. User may add optional boundary, equality and/or linear constraints on
the coefficients of P by calling one of the following functions:
* MCPDSetEC() to set equality constraints
* MCPDSetBC() to set bound constraints
* MCPDSetLC() to set linear constraints
4. Optionally, user may set custom weights for prediction errors (by
default, algorithm assigns non-equal, automatically chosen weights for
errors in the prediction of different components of X). It can be done
with a call of MCPDSetPredictionWeights() function.
5. User calls MCPDSolve() function which takes algorithm state and
pointer (delegate, etc.) to callback function which calculates F/G.
6. User calls MCPDResults() to get solution
INPUT PARAMETERS:
N - problem dimension, N>=1
OUTPUT PARAMETERS:
State - structure stores algorithm state
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdcreate(const ae_int_t n, mcpdstate &s);
/*************************************************************************
DESCRIPTION:
This function is a specialized version of MCPDCreate() function, and we
recommend you to read comments for this function for general information
about MCPD solver.
This function creates MCPD (Markov Chains for Population Data) solver
for "Entry-state" model, i.e. model where transition from X[i] to X[i+1]
is modelled as
X[i+1] = P*X[i]
where
X[i] and X[i+1] are N-dimensional state vectors
P is a N*N transition matrix
and one selected component of X[] is called "entry" state and is treated
in a special way:
system state always transits from "entry" state to some another state
system state can not transit from any state into "entry" state
Such conditions basically mean that row of P which corresponds to "entry"
state is zero.
Such models arise when:
* there is some population of individuals
* individuals can have different states
* individuals can transit from one state to another
* population size is NOT constant - at every moment of time there is some
(unpredictable) amount of "new" individuals, which can transit into one
of the states at the next turn, but still no one leaves population
* you want to model transitions of individuals from one state into another
* but you do NOT want to predict amount of "new" individuals because it
  does not depend on individuals already present (hence system can not
transit INTO entry state - it can only transit FROM it).
This model is discussed in more details in the ALGLIB User Guide (see
http://www.alglib.net/dataanalysis/ for more data).
INPUT PARAMETERS:
N - problem dimension, N>=2
EntryState- index of entry state, in 0..N-1
OUTPUT PARAMETERS:
State - structure stores algorithm state
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdcreateentry(const ae_int_t n, const ae_int_t entrystate, mcpdstate
&s);
/*************************************************************************
DESCRIPTION:
This function is a specialized version of MCPDCreate() function, and we
recommend you to read comments for this function for general information
about MCPD solver.
This function creates MCPD (Markov Chains for Population Data) solver
for "Exit-state" model, i.e. model where transition from X[i] to X[i+1]
is modelled as
X[i+1] = P*X[i]
where
X[i] and X[i+1] are N-dimensional state vectors
P is a N*N transition matrix
and one selected component of X[] is called "exit" state and is treated
in a special way:
system state can transit from any state into "exit" state
system state can not transit from "exit" state into any other state
transition operator discards "exit" state (makes it zero at each turn)
Such conditions basically mean that column of P which corresponds to
"exit" state is zero. Multiplication by such P may decrease sum of vector
components.
Such models arise when:
* there is some population of individuals
* individuals can have different states
* individuals can transit from one state to another
* population size is NOT constant - individuals can move into "exit" state
and leave population at the next turn, but there are no new individuals
* amount of individuals which leave population can be predicted
* you want to model transitions of individuals from one state into another
(including transitions into the "exit" state)
This model is discussed in more details in the ALGLIB User Guide (see
http://www.alglib.net/dataanalysis/ for more data).
INPUT PARAMETERS:
N - problem dimension, N>=2
ExitState- index of exit state, in 0..N-1
OUTPUT PARAMETERS:
State - structure stores algorithm state
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdcreateexit(const ae_int_t n, const ae_int_t exitstate, mcpdstate &
s);
/*************************************************************************
DESCRIPTION:
This function is a specialized version of MCPDCreate() function, and we
recommend you to read comments for this function for general information
about MCPD solver.
This function creates MCPD (Markov Chains for Population Data) solver
for "Entry-Exit-states" model, i.e. model where transition from X[i] to
X[i+1] is modelled as
X[i+1] = P*X[i]
where
X[i] and X[i+1] are N-dimensional state vectors
P is a N*N transition matrix
one selected component of X[] is called "entry" state and is treated in a
special way:
system state always transits from "entry" state to some another state
system state can not transit from any state into "entry" state
and another one component of X[] is called "exit" state and is treated in
a special way too:
system state can transit from any state into "exit" state
system state can not transit from "exit" state into any other state
transition operator discards "exit" state (makes it zero at each turn)
Such conditions basically mean that:
row of P which corresponds to "entry" state is zero
column of P which corresponds to "exit" state is zero
Multiplication by such P may decrease sum of vector components.
Such models arise when:
* there is some population of individuals
* individuals can have different states
* individuals can transit from one state to another
* population size is NOT constant
* at every moment of time there is some (unpredictable) amount of "new"
individuals, which can transit into one of the states at the next turn
* some individuals can move (predictably) into "exit" state and leave
population at the next turn
* you want to model transitions of individuals from one state into another,
including transitions from the "entry" state and into the "exit" state.
* but you do NOT want to predict amount of "new" individuals because it
  does not depend on individuals already present (hence system can not
transit INTO entry state - it can only transit FROM it).
This model is discussed in more details in the ALGLIB User Guide (see
http://www.alglib.net/dataanalysis/ for more data).
INPUT PARAMETERS:
N - problem dimension, N>=2
EntryState- index of entry state, in 0..N-1
ExitState- index of exit state, in 0..N-1
OUTPUT PARAMETERS:
State - structure stores algorithm state
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdcreateentryexit(const ae_int_t n, const ae_int_t entrystate, const
ae_int_t exitstate, mcpdstate &s);
/*************************************************************************
This function is used to add a track - sequence of system states at the
different moments of its evolution.
You may add one or several tracks to the MCPD solver. In case you have
several tracks, they won't overwrite each other. For example, if you pass
two tracks, A1-A2-A3 (system at t=A+1, t=A+2 and t=A+3) and B1-B2-B3, then
solver will try to model transitions from t=A+1 to t=A+2, t=A+2 to t=A+3,
t=B+1 to t=B+2, t=B+2 to t=B+3. But it WON'T mix these two tracks - i.e. it
won't try to model transition from t=A+3 to t=B+1.
INPUT PARAMETERS:
S - solver
XY - track, array[K,N]:
* I-th row is a state at t=I
* elements of XY must be non-negative (exception will be
thrown on negative elements)
K - number of points in a track
* if given, only leading K rows of XY are used
* if not given, automatically determined from size of XY
NOTES:
1. Track may contain either proportional or population data:
* with proportional data all rows of XY must sum to 1.0, i.e. we have
proportions instead of absolute population values
* with population data rows of XY contain population counts and generall
y
do not sum to 1.0 (although they still must be non-negative)
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy, const ae_int
_t k);
void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy);
/*************************************************************************
This function is used to add equality constraints on the elements of the
transition matrix P.
MCPD solver has four types of constraints which can be placed on P:
* user-specified equality constraints (optional)
* user-specified bound constraints (optional)
* user-specified general linear constraints (optional)
* basic constraints (always present):
* non-negativity: P[i,j]>=0
* consistency: every column of P sums to 1.0
Final constraints which are passed to the underlying optimizer are
calculated as intersection of all present constraints. For example, you
may specify boundary constraint on P[0,0] and equality one:
0.1<=P[0,0]<=0.9
P[0,0]=0.5
Such combination of constraints will be silently reduced to their
intersection, which is P[0,0]=0.5.
This function can be used to place equality constraints on arbitrary
subset of elements of P. Set of constraints is specified by EC, which may
contain either NAN's or finite numbers from [0,1]. NAN denotes absence of
constraint, finite number denotes equality constraint on specific element
of P.
You can also use MCPDAddEC() function which allows to ADD equality
constraint for one element of P without changing constraints for other
elements.
These functions (MCPDSetEC and MCPDAddEC) interact as follows:
* there is internal matrix of equality constraints which is stored in the
MCPD solver
* MCPDSetEC() replaces this matrix by another one (SET)
* MCPDAddEC() modifies one element of this matrix and leaves other ones
unchanged (ADD)
* thus MCPDAddEC() call preserves all modifications done by previous
calls, while MCPDSetEC() completely discards all changes done to the
equality constraints.
INPUT PARAMETERS:
S - solver
EC - equality constraints, array[N,N]. Elements of EC can be
either NAN's or finite numbers from [0,1]. NAN denotes
absence of constraints, while finite value denotes
equality constraint on the corresponding element of P.
NOTES:
1. infinite values of EC will lead to exception being thrown. Values less
than 0.0 or greater than 1.0 will lead to error code being returned after
call to MCPDSolve().
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdsetec(const mcpdstate &s, const real_2d_array &ec);
/*************************************************************************
This function is used to add equality constraints on the elements of the
transition matrix P.
MCPD solver has four types of constraints which can be placed on P:
* user-specified equality constraints (optional)
* user-specified bound constraints (optional)
* user-specified general linear constraints (optional)
* basic constraints (always present):
* non-negativity: P[i,j]>=0
* consistency: every column of P sums to 1.0
Final constraints which are passed to the underlying optimizer are
calculated as intersection of all present constraints. For example, you
may specify boundary constraint on P[0,0] and equality one:
0.1<=P[0,0]<=0.9
P[0,0]=0.5
Such combination of constraints will be silently reduced to their
intersection, which is P[0,0]=0.5.
This function can be used to ADD equality constraint for one element of P
without changing constraints for other elements.
You can also use MCPDSetEC() function which allows you to specify
arbitrary set of equality constraints in one call.
These functions (MCPDSetEC and MCPDAddEC) interact as follows:
* there is internal matrix of equality constraints which is stored in the
MCPD solver
* MCPDSetEC() replaces this matrix by another one (SET)
* MCPDAddEC() modifies one element of this matrix and leaves other ones
unchanged (ADD)
* thus MCPDAddEC() call preserves all modifications done by previous
calls, while MCPDSetEC() completely discards all changes done to the
equality constraints.
INPUT PARAMETERS:
S - solver
I - row index of element being constrained
J - column index of element being constrained
C - value (constraint for P[I,J]). Can be either NAN (no
constraint) or finite value from [0,1].
NOTES:
1. infinite values of C will lead to exception being thrown. Values less
than 0.0 or greater than 1.0 will lead to error code being returned after
call to MCPDSolve().
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdaddec(const mcpdstate &s, const ae_int_t i, const ae_int_t j, cons
t double c);
/*************************************************************************
This function is used to add bound constraints on the elements of the
transition matrix P.
MCPD solver has four types of constraints which can be placed on P:
* user-specified equality constraints (optional)
* user-specified bound constraints (optional)
* user-specified general linear constraints (optional)
* basic constraints (always present):
* non-negativity: P[i,j]>=0
* consistency: every column of P sums to 1.0
Final constraints which are passed to the underlying optimizer are
calculated as intersection of all present constraints. For example, you
may specify boundary constraint on P[0,0] and equality one:
0.1<=P[0,0]<=0.9
P[0,0]=0.5
Such combination of constraints will be silently reduced to their
intersection, which is P[0,0]=0.5.
This function can be used to place bound constraints on arbitrary
subset of elements of P. Set of constraints is specified by BndL/BndU
matrices, which may contain arbitrary combination of finite numbers or
infinities (like -INF<x<=0.5 or 0.1<=x<+INF).
You can also use MCPDAddBC() function which allows to ADD bound constraint
for one element of P without changing constraints for other elements.
These functions (MCPDSetBC and MCPDAddBC) interact as follows:
* there is internal matrix of bound constraints which is stored in the
MCPD solver
* MCPDSetBC() replaces this matrix by another one (SET)
* MCPDAddBC() modifies one element of this matrix and leaves other ones
unchanged (ADD)
* thus MCPDAddBC() call preserves all modifications done by previous
calls, while MCPDSetBC() completely discards all changes done to the
equality constraints.
INPUT PARAMETERS:
S - solver
BndL - lower bounds constraints, array[N,N]. Elements of BndL can
be finite numbers or -INF.
BndU - upper bounds constraints, array[N,N]. Elements of BndU can
be finite numbers or +INF.
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdsetbc(const mcpdstate &s, const real_2d_array &bndl, const real_2d
_array &bndu);
/*************************************************************************
This function is used to add bound constraints on the elements of the
transition matrix P.
MCPD solver has four types of constraints which can be placed on P:
* user-specified equality constraints (optional)
* user-specified bound constraints (optional)
* user-specified general linear constraints (optional)
* basic constraints (always present):
* non-negativity: P[i,j]>=0
* consistency: every column of P sums to 1.0
Final constraints which are passed to the underlying optimizer are
calculated as intersection of all present constraints. For example, you
may specify boundary constraint on P[0,0] and equality one:
0.1<=P[0,0]<=0.9
P[0,0]=0.5
Such combination of constraints will be silently reduced to their
intersection, which is P[0,0]=0.5.
This function can be used to ADD bound constraint for one element of P
without changing constraints for other elements.
You can also use MCPDSetBC() function which allows to place bound
constraints on arbitrary subset of elements of P. Set of constraints is
specified by BndL/BndU matrices, which may contain arbitrary combination
of finite numbers or infinities (like -INF<x<=0.5 or 0.1<=x<+INF).
These functions (MCPDSetBC and MCPDAddBC) interact as follows:
* there is internal matrix of bound constraints which is stored in the
MCPD solver
* MCPDSetBC() replaces this matrix by another one (SET)
* MCPDAddBC() modifies one element of this matrix and leaves other ones
unchanged (ADD)
* thus MCPDAddBC() call preserves all modifications done by previous
calls, while MCPDSetBC() completely discards all changes done to the
equality constraints.
INPUT PARAMETERS:
S - solver
I - row index of element being constrained
J - column index of element being constrained
BndL - lower bound
BndU - upper bound
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdaddbc(const mcpdstate &s, const ae_int_t i, const ae_int_t j, cons
t double bndl, const double bndu);
/*************************************************************************
This function is used to set linear equality/inequality constraints on the
elements of the transition matrix P.
This function can be used to set one or several general linear constraints
on the elements of P. Two types of constraints are supported:
* equality constraints
* inequality constraints (both less-or-equal and greater-or-equal)
Coefficients of constraints are specified by matrix C (one of the
parameters). One row of C corresponds to one constraint. Because
transition matrix P has N*N elements, we need N*N columns to store all
coefficients (they are stored row by row), and one more column to store
right part - hence C has N*N+1 columns. Constraint kind is stored in the
CT array.
Thus, I-th linear constraint is
P[0,0]*C[I,0] + P[0,1]*C[I,1] + .. + P[0,N-1]*C[I,N-1] +
+ P[1,0]*C[I,N] + P[1,1]*C[I,N+1] + ... +
+ P[N-1,N-1]*C[I,N*N-1] ?=? C[I,N*N]
where ?=? can be either "=" (CT[i]=0), "<=" (CT[i]<0) or ">=" (CT[i]>0).
Your constraint may involve only some subset of P (less than N*N elements).
For example it can be something like
P[0,0] + P[0,1] = 0.5
In this case you still should pass matrix with N*N+1 columns, but all its
elements (except for C[0,0], C[0,1] and C[0,N*N-1]) will be zero.
INPUT PARAMETERS:
S - solver
C - array[K,N*N+1] - coefficients of constraints
(see above for complete description)
CT - array[K] - constraint types
(see above for complete description)
K - number of equality/inequality constraints, K>=0:
* if given, only leading K elements of C/CT are used
* if not given, automatically determined from sizes of C/CT
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d
_array &ct, const ae_int_t k);
void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d
_array &ct);
/*************************************************************************
This function allows to tune amount of Tikhonov regularization being
applied to your problem.
By default, regularizing term is equal to r*||P-prior_P||^2, where r is a
small non-zero value, P is transition matrix, prior_P is identity matrix,
||X||^2 is a sum of squared elements of X.
This function allows you to change coefficient r. You can also change
prior values with MCPDSetPrior() function.
INPUT PARAMETERS:
S - solver
V - regularization coefficient, finite non-negative value. It
is not recommended to specify zero value unless you are
pretty sure that you want it.
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdsettikhonovregularizer(const mcpdstate &s, const double v);
/*************************************************************************
This function allows to set prior values used for regularization of your
problem.
By default, regularizing term is equal to r*||P-prior_P||^2, where r is a
small non-zero value, P is transition matrix, prior_P is identity matrix,
||X||^2 is a sum of squared elements of X.
This function allows you to change prior values prior_P. You can also
change r with MCPDSetTikhonovRegularizer() function.
INPUT PARAMETERS:
S - solver
PP - array[N,N], matrix of prior values:
1. elements must be real numbers from [0,1]
2. columns must sum to 1.0.
First property is checked (exception is thrown otherwise),
while second one is not checked/enforced.
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdsetprior(const mcpdstate &s, const real_2d_array &pp);
/*************************************************************************
This function is used to change prediction weights
MCPD solver scales prediction errors as follows
Error(P) = ||W*(y-P*x)||^2
where
x is a system state at time t
y is a system state at time t+1
P is a transition matrix
W is a diagonal scaling matrix
By default, weights are chosen in order to minimize relative prediction
error instead of absolute one. For example, if one component of state is
about 0.5 in magnitude and another one is about 0.05, then algorithm will
make corresponding weights equal to 2.0 and 20.0.
INPUT PARAMETERS:
S - solver
PW - array[N], weights:
* must be non-negative values (exception will be thrown oth
erwise)
* zero values will be replaced by automatically chosen valu
es
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdsetpredictionweights(const mcpdstate &s, const real_1d_array &pw);
/*************************************************************************
This function is used to start solution of the MCPD problem.
After return from this function, you can use MCPDResults() to get solution
and completion code.
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdsolve(const mcpdstate &s);
/*************************************************************************
MCPD results
INPUT PARAMETERS:
State - algorithm state
OUTPUT PARAMETERS:
P - array[N,N], transition matrix
Rep - optimization report. You should check Rep.TerminationType
in order to distinguish successful termination from
unsuccessful one. Speaking short, positive values denote
success, negative ones are failures.
More information about fields of this structure can be
found in the comments on MCPDReport datatype.
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdresults(const mcpdstate &s, real_2d_array &p, mcpdreport &rep);
/*************************************************************************
Neural network training using modified Levenberg-Marquardt with exact Neural network training using modified Levenberg-Marquardt with exact
Hessian calculation and regularization. Subroutine trains neural network Hessian calculation and regularization. Subroutine trains neural network
with restarts from random positions. Algorithm is well suited for small with restarts from random positions. Algorithm is well suited for small
and medium scale problems (hundreds of weights). and medium scale problems (hundreds of weights).
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network with initialized geometry Network - neural network with initialized geometry
XY - training set XY - training set
NPoints - training set size NPoints - training set size
Decay - weight decay constant, >=0.001 Decay - weight decay constant, >=0.001
skipping to change at line 2232 skipping to change at line 3266
void dfbuildrandomdecisionforest(/* Real */ ae_matrix* xy, void dfbuildrandomdecisionforest(/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_int_t npoints,
ae_int_t nvars, ae_int_t nvars,
ae_int_t nclasses, ae_int_t nclasses,
ae_int_t ntrees, ae_int_t ntrees,
double r, double r,
ae_int_t* info, ae_int_t* info,
decisionforest* df, decisionforest* df,
dfreport* rep, dfreport* rep,
ae_state *_state); ae_state *_state);
void dfbuildrandomdecisionforestx1(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nvars,
ae_int_t nclasses,
ae_int_t ntrees,
ae_int_t nrndvars,
double r,
ae_int_t* info,
decisionforest* df,
dfreport* rep,
ae_state *_state);
void dfbuildinternal(/* Real */ ae_matrix* xy, void dfbuildinternal(/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_int_t npoints,
ae_int_t nvars, ae_int_t nvars,
ae_int_t nclasses, ae_int_t nclasses,
ae_int_t ntrees, ae_int_t ntrees,
ae_int_t samplesize, ae_int_t samplesize,
ae_int_t nfeatures, ae_int_t nfeatures,
ae_int_t flags, ae_int_t flags,
ae_int_t* info, ae_int_t* info,
decisionforest* df, decisionforest* df,
skipping to change at line 2273 skipping to change at line 3318
ae_state *_state); ae_state *_state);
double dfavgerror(decisionforest* df, double dfavgerror(decisionforest* df,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
ae_int_t npoints, ae_int_t npoints,
ae_state *_state); ae_state *_state);
double dfavgrelerror(decisionforest* df, double dfavgrelerror(decisionforest* df,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
ae_int_t npoints, ae_int_t npoints,
ae_state *_state); ae_state *_state);
void dfcopy(decisionforest* df1, decisionforest* df2, ae_state *_state); void dfcopy(decisionforest* df1, decisionforest* df2, ae_state *_state);
void dfalloc(ae_serializer* s, decisionforest* forest, ae_state *_state);
void dfserialize(ae_serializer* s,
decisionforest* forest,
ae_state *_state);
void dfunserialize(ae_serializer* s,
decisionforest* forest,
ae_state *_state);
ae_bool _decisionforest_init(decisionforest* p, ae_state *_state, ae_bool m ake_automatic); ae_bool _decisionforest_init(decisionforest* p, ae_state *_state, ae_bool m ake_automatic);
ae_bool _decisionforest_init_copy(decisionforest* dst, decisionforest* src, ae_state *_state, ae_bool make_automatic); ae_bool _decisionforest_init_copy(decisionforest* dst, decisionforest* src, ae_state *_state, ae_bool make_automatic);
void _decisionforest_clear(decisionforest* p); void _decisionforest_clear(decisionforest* p);
ae_bool _dfreport_init(dfreport* p, ae_state *_state, ae_bool make_automati c); ae_bool _dfreport_init(dfreport* p, ae_state *_state, ae_bool make_automati c);
ae_bool _dfreport_init_copy(dfreport* dst, dfreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _dfreport_init_copy(dfreport* dst, dfreport* src, ae_state *_state, ae_bool make_automatic);
void _dfreport_clear(dfreport* p); void _dfreport_clear(dfreport* p);
ae_bool _dfinternalbuffers_init(dfinternalbuffers* p, ae_state *_state, ae_ bool make_automatic); ae_bool _dfinternalbuffers_init(dfinternalbuffers* p, ae_state *_state, ae_ bool make_automatic);
ae_bool _dfinternalbuffers_init_copy(dfinternalbuffers* dst, dfinternalbuff ers* src, ae_state *_state, ae_bool make_automatic); ae_bool _dfinternalbuffers_init_copy(dfinternalbuffers* dst, dfinternalbuff ers* src, ae_state *_state, ae_bool make_automatic);
void _dfinternalbuffers_clear(dfinternalbuffers* p); void _dfinternalbuffers_clear(dfinternalbuffers* p);
void kmeansgenerate(/* Real */ ae_matrix* xy, void kmeansgenerate(/* Real */ ae_matrix* xy,
skipping to change at line 2458 skipping to change at line 3510
ae_state *_state); ae_state *_state);
void mlpcreatec2(ae_int_t nin, void mlpcreatec2(ae_int_t nin,
ae_int_t nhid1, ae_int_t nhid1,
ae_int_t nhid2, ae_int_t nhid2,
ae_int_t nout, ae_int_t nout,
multilayerperceptron* network, multilayerperceptron* network,
ae_state *_state); ae_state *_state);
void mlpcopy(multilayerperceptron* network1, void mlpcopy(multilayerperceptron* network1,
multilayerperceptron* network2, multilayerperceptron* network2,
ae_state *_state); ae_state *_state);
void mlpserialize(multilayerperceptron* network, void mlpserializeold(multilayerperceptron* network,
/* Real */ ae_vector* ra, /* Real */ ae_vector* ra,
ae_int_t* rlen, ae_int_t* rlen,
ae_state *_state); ae_state *_state);
void mlpunserialize(/* Real */ ae_vector* ra, void mlpunserializeold(/* Real */ ae_vector* ra,
multilayerperceptron* network, multilayerperceptron* network,
ae_state *_state); ae_state *_state);
void mlprandomize(multilayerperceptron* network, ae_state *_state); void mlprandomize(multilayerperceptron* network, ae_state *_state);
void mlprandomizefull(multilayerperceptron* network, ae_state *_state); void mlprandomizefull(multilayerperceptron* network, ae_state *_state);
void mlpinitpreprocessor(multilayerperceptron* network, void mlpinitpreprocessor(multilayerperceptron* network,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
ae_int_t ssize, ae_int_t ssize,
ae_state *_state); ae_state *_state);
void mlpproperties(multilayerperceptron* network, void mlpproperties(multilayerperceptron* network,
ae_int_t* nin, ae_int_t* nin,
ae_int_t* nout, ae_int_t* nout,
ae_int_t* wcount, ae_int_t* wcount,
ae_state *_state); ae_state *_state);
ae_bool mlpissoftmax(multilayerperceptron* network, ae_state *_state); ae_bool mlpissoftmax(multilayerperceptron* network, ae_state *_state);
ae_int_t mlpgetlayerscount(multilayerperceptron* network,
ae_state *_state);
ae_int_t mlpgetlayersize(multilayerperceptron* network,
ae_int_t k,
ae_state *_state);
void mlpgetinputscaling(multilayerperceptron* network,
ae_int_t i,
double* mean,
double* sigma,
ae_state *_state);
void mlpgetoutputscaling(multilayerperceptron* network,
ae_int_t i,
double* mean,
double* sigma,
ae_state *_state);
void mlpgetneuroninfo(multilayerperceptron* network,
ae_int_t k,
ae_int_t i,
ae_int_t* fkind,
double* threshold,
ae_state *_state);
double mlpgetweight(multilayerperceptron* network,
ae_int_t k0,
ae_int_t i0,
ae_int_t k1,
ae_int_t i1,
ae_state *_state);
void mlpsetinputscaling(multilayerperceptron* network,
ae_int_t i,
double mean,
double sigma,
ae_state *_state);
void mlpsetoutputscaling(multilayerperceptron* network,
ae_int_t i,
double mean,
double sigma,
ae_state *_state);
void mlpsetneuroninfo(multilayerperceptron* network,
ae_int_t k,
ae_int_t i,
ae_int_t fkind,
double threshold,
ae_state *_state);
void mlpsetweight(multilayerperceptron* network,
ae_int_t k0,
ae_int_t i0,
ae_int_t k1,
ae_int_t i1,
double w,
ae_state *_state);
void mlpactivationfunction(double net,
ae_int_t k,
double* f,
double* df,
double* d2f,
ae_state *_state);
void mlpprocess(multilayerperceptron* network, void mlpprocess(multilayerperceptron* network,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_state *_state); ae_state *_state);
void mlpprocessi(multilayerperceptron* network, void mlpprocessi(multilayerperceptron* network,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_state *_state); ae_state *_state);
double mlperror(multilayerperceptron* network, double mlperror(multilayerperceptron* network,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
skipping to change at line 2564 skipping to change at line 3672
ae_state *_state); ae_state *_state);
void mlpinternalprocessvector(/* Integer */ ae_vector* structinfo, void mlpinternalprocessvector(/* Integer */ ae_vector* structinfo,
/* Real */ ae_vector* weights, /* Real */ ae_vector* weights,
/* Real */ ae_vector* columnmeans, /* Real */ ae_vector* columnmeans,
/* Real */ ae_vector* columnsigmas, /* Real */ ae_vector* columnsigmas,
/* Real */ ae_vector* neurons, /* Real */ ae_vector* neurons,
/* Real */ ae_vector* dfdnet, /* Real */ ae_vector* dfdnet,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_state *_state); ae_state *_state);
void mlpalloc(ae_serializer* s,
multilayerperceptron* network,
ae_state *_state);
void mlpserialize(ae_serializer* s,
multilayerperceptron* network,
ae_state *_state);
void mlpunserialize(ae_serializer* s,
multilayerperceptron* network,
ae_state *_state);
ae_bool _multilayerperceptron_init(multilayerperceptron* p, ae_state *_stat e, ae_bool make_automatic); ae_bool _multilayerperceptron_init(multilayerperceptron* p, ae_state *_stat e, ae_bool make_automatic);
ae_bool _multilayerperceptron_init_copy(multilayerperceptron* dst, multilay erperceptron* src, ae_state *_state, ae_bool make_automatic); ae_bool _multilayerperceptron_init_copy(multilayerperceptron* dst, multilay erperceptron* src, ae_state *_state, ae_bool make_automatic);
void _multilayerperceptron_clear(multilayerperceptron* p); void _multilayerperceptron_clear(multilayerperceptron* p);
void mnltrainh(/* Real */ ae_matrix* xy, void mnltrainh(/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_int_t npoints,
ae_int_t nvars, ae_int_t nvars,
ae_int_t nclasses, ae_int_t nclasses,
ae_int_t* info, ae_int_t* info,
logitmodel* lm, logitmodel* lm,
mnlreport* rep, mnlreport* rep,
skipping to change at line 2627 skipping to change at line 3744
ae_state *_state); ae_state *_state);
ae_bool _logitmodel_init(logitmodel* p, ae_state *_state, ae_bool make_auto matic); ae_bool _logitmodel_init(logitmodel* p, ae_state *_state, ae_bool make_auto matic);
ae_bool _logitmodel_init_copy(logitmodel* dst, logitmodel* src, ae_state *_ state, ae_bool make_automatic); ae_bool _logitmodel_init_copy(logitmodel* dst, logitmodel* src, ae_state *_ state, ae_bool make_automatic);
void _logitmodel_clear(logitmodel* p); void _logitmodel_clear(logitmodel* p);
ae_bool _logitmcstate_init(logitmcstate* p, ae_state *_state, ae_bool make_ automatic); ae_bool _logitmcstate_init(logitmcstate* p, ae_state *_state, ae_bool make_ automatic);
ae_bool _logitmcstate_init_copy(logitmcstate* dst, logitmcstate* src, ae_st ate *_state, ae_bool make_automatic); ae_bool _logitmcstate_init_copy(logitmcstate* dst, logitmcstate* src, ae_st ate *_state, ae_bool make_automatic);
void _logitmcstate_clear(logitmcstate* p); void _logitmcstate_clear(logitmcstate* p);
ae_bool _mnlreport_init(mnlreport* p, ae_state *_state, ae_bool make_automa tic); ae_bool _mnlreport_init(mnlreport* p, ae_state *_state, ae_bool make_automa tic);
ae_bool _mnlreport_init_copy(mnlreport* dst, mnlreport* src, ae_state *_sta te, ae_bool make_automatic); ae_bool _mnlreport_init_copy(mnlreport* dst, mnlreport* src, ae_state *_sta te, ae_bool make_automatic);
void _mnlreport_clear(mnlreport* p); void _mnlreport_clear(mnlreport* p);
void mcpdcreate(ae_int_t n, mcpdstate* s, ae_state *_state);
void mcpdcreateentry(ae_int_t n,
ae_int_t entrystate,
mcpdstate* s,
ae_state *_state);
void mcpdcreateexit(ae_int_t n,
ae_int_t exitstate,
mcpdstate* s,
ae_state *_state);
void mcpdcreateentryexit(ae_int_t n,
ae_int_t entrystate,
ae_int_t exitstate,
mcpdstate* s,
ae_state *_state);
void mcpdaddtrack(mcpdstate* s,
/* Real */ ae_matrix* xy,
ae_int_t k,
ae_state *_state);
void mcpdsetec(mcpdstate* s,
/* Real */ ae_matrix* ec,
ae_state *_state);
void mcpdaddec(mcpdstate* s,
ae_int_t i,
ae_int_t j,
double c,
ae_state *_state);
void mcpdsetbc(mcpdstate* s,
/* Real */ ae_matrix* bndl,
/* Real */ ae_matrix* bndu,
ae_state *_state);
void mcpdaddbc(mcpdstate* s,
ae_int_t i,
ae_int_t j,
double bndl,
double bndu,
ae_state *_state);
void mcpdsetlc(mcpdstate* s,
/* Real */ ae_matrix* c,
/* Integer */ ae_vector* ct,
ae_int_t k,
ae_state *_state);
void mcpdsettikhonovregularizer(mcpdstate* s, double v, ae_state *_state);
void mcpdsetprior(mcpdstate* s,
/* Real */ ae_matrix* pp,
ae_state *_state);
void mcpdsetpredictionweights(mcpdstate* s,
/* Real */ ae_vector* pw,
ae_state *_state);
void mcpdsolve(mcpdstate* s, ae_state *_state);
void mcpdresults(mcpdstate* s,
/* Real */ ae_matrix* p,
mcpdreport* rep,
ae_state *_state);
ae_bool _mcpdstate_init(mcpdstate* p, ae_state *_state, ae_bool make_automa
tic);
ae_bool _mcpdstate_init_copy(mcpdstate* dst, mcpdstate* src, ae_state *_sta
te, ae_bool make_automatic);
void _mcpdstate_clear(mcpdstate* p);
ae_bool _mcpdreport_init(mcpdreport* p, ae_state *_state, ae_bool make_auto
matic);
ae_bool _mcpdreport_init_copy(mcpdreport* dst, mcpdreport* src, ae_state *_
state, ae_bool make_automatic);
void _mcpdreport_clear(mcpdreport* p);
void mlptrainlm(multilayerperceptron* network, void mlptrainlm(multilayerperceptron* network,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
ae_int_t npoints, ae_int_t npoints,
double decay, double decay,
ae_int_t restarts, ae_int_t restarts,
ae_int_t* info, ae_int_t* info,
mlpreport* rep, mlpreport* rep,
ae_state *_state); ae_state *_state);
void mlptrainlbfgs(multilayerperceptron* network, void mlptrainlbfgs(multilayerperceptron* network,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
 End of changes. 17 change blocks. 
4 lines changed or deleted 1213 lines changed or added


 optimization.h   optimization.h 
skipping to change at line 23 skipping to change at line 23
GNU General Public License for more details. GNU General Public License for more details.
A copy of the GNU General Public License is available at A copy of the GNU General Public License is available at
http://www.fsf.org/licensing/licenses http://www.fsf.org/licensing/licenses
>>> END OF LICENSE >>> >>> END OF LICENSE >>>
*************************************************************************/ *************************************************************************/
#ifndef _optimization_pkg_h #ifndef _optimization_pkg_h
#define _optimization_pkg_h #define _optimization_pkg_h
#include "ap.h" #include "ap.h"
#include "alglibinternal.h" #include "alglibinternal.h"
#include "linalg.h"
#include "alglibmisc.h" #include "alglibmisc.h"
#include "linalg.h"
#include "solvers.h" #include "solvers.h"
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES) // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
typedef struct typedef struct
{ {
ae_int_t n; ae_int_t n;
double epsg;
double epsf;
double epsx;
ae_int_t maxits;
double stpmax;
double suggestedstep;
ae_bool xrep;
ae_bool drep;
ae_int_t cgtype;
ae_int_t prectype;
ae_vector diagh;
ae_vector diaghl2;
ae_matrix vcorr;
ae_int_t vcnt;
ae_vector s;
double diffstep;
ae_int_t nfev;
ae_int_t mcstage;
ae_int_t k;
ae_vector xk;
ae_vector dk;
ae_vector xn;
ae_vector dn;
ae_vector d;
double fold;
double stp;
double curstpmax;
ae_vector yk;
double laststep;
double lastscaledstep;
ae_int_t mcinfo;
ae_bool innerresetneeded;
ae_bool terminationneeded;
double trimthreshold;
ae_int_t rstimer;
ae_vector x;
double f;
ae_vector g;
ae_bool needf;
ae_bool needfg;
ae_bool xupdated;
ae_bool algpowerup;
ae_bool lsstart;
ae_bool lsend;
rcommstate rstate;
ae_int_t repiterationscount;
ae_int_t repnfev;
ae_int_t repterminationtype;
ae_int_t debugrestartscount;
linminstate lstate;
double fbase;
double fm2;
double fm1;
double fp1;
double fp2;
double betahs;
double betady;
ae_vector work0;
ae_vector work1;
} mincgstate;
typedef struct
{
ae_int_t iterationscount;
ae_int_t nfev;
ae_int_t terminationtype;
} mincgreport;
typedef struct
{
ae_int_t nmain;
ae_int_t nslack;
double innerepsg;
double innerepsf;
double innerepsx;
double outerepsx;
double outerepsi;
ae_int_t maxits;
ae_bool xrep;
double stpmax;
double diffstep;
ae_int_t prectype;
ae_vector diaghoriginal;
ae_vector diagh;
ae_vector x;
double f;
ae_vector g;
ae_bool needf;
ae_bool needfg;
ae_bool xupdated;
rcommstate rstate;
ae_int_t repinneriterationscount;
ae_int_t repouteriterationscount;
ae_int_t repnfev;
ae_int_t repterminationtype;
double repdebugeqerr;
double repdebugfs;
double repdebugff;
double repdebugdx;
ae_vector xcur;
ae_vector xprev;
ae_vector xstart;
ae_int_t itsleft;
ae_vector xend;
ae_vector lastg;
double trimthreshold;
ae_matrix ceoriginal;
ae_matrix ceeffective;
ae_matrix cecurrent;
ae_vector ct;
ae_int_t cecnt;
ae_int_t cedim;
ae_vector xe;
ae_vector hasbndl;
ae_vector hasbndu;
ae_vector bndloriginal;
ae_vector bnduoriginal;
ae_vector bndleffective;
ae_vector bndueffective;
ae_vector activeconstraints;
ae_vector constrainedvalues;
ae_vector transforms;
ae_vector seffective;
ae_vector soriginal;
ae_vector w;
ae_vector tmp0;
ae_vector tmp1;
ae_vector tmp2;
ae_vector r;
ae_matrix lmmatrix;
double v0;
double v1;
double v2;
double t;
double errfeas;
double gnorm;
double mpgnorm;
double mba;
ae_int_t variabletofreeze;
double valuetofreeze;
double fbase;
double fm2;
double fm1;
double fp1;
double fp2;
double xm1;
double xp1;
mincgstate cgstate;
mincgreport cgrep;
ae_int_t optdim;
} minbleicstate;
typedef struct
{
ae_int_t inneriterationscount;
ae_int_t outeriterationscount;
ae_int_t nfev;
ae_int_t terminationtype;
double debugeqerr;
double debugfs;
double debugff;
double debugdx;
} minbleicreport;
typedef struct
{
ae_int_t n;
ae_int_t m; ae_int_t m;
double epsg; double epsg;
double epsf; double epsf;
double epsx; double epsx;
ae_int_t maxits; ae_int_t maxits;
ae_bool xrep; ae_bool xrep;
double stpmax; double stpmax;
ae_vector s; ae_vector s;
double diffstep;
ae_int_t nfev; ae_int_t nfev;
ae_int_t mcstage; ae_int_t mcstage;
ae_int_t k; ae_int_t k;
ae_int_t q; ae_int_t q;
ae_int_t p; ae_int_t p;
ae_vector rho; ae_vector rho;
ae_matrix yk; ae_matrix yk;
ae_matrix sk; ae_matrix sk;
ae_vector theta; ae_vector theta;
ae_vector d; ae_vector d;
double stp; double stp;
ae_vector work; ae_vector work;
double fold; double fold;
double trimthreshold;
ae_int_t prectype; ae_int_t prectype;
double gammak; double gammak;
ae_matrix denseh; ae_matrix denseh;
ae_vector diagh; ae_vector diagh;
double fbase;
double fm2;
double fm1;
double fp1;
double fp2;
ae_vector autobuf; ae_vector autobuf;
ae_vector x; ae_vector x;
double f; double f;
ae_vector g; ae_vector g;
ae_bool needf;
ae_bool needfg; ae_bool needfg;
ae_bool xupdated; ae_bool xupdated;
rcommstate rstate; rcommstate rstate;
ae_int_t repiterationscount; ae_int_t repiterationscount;
ae_int_t repnfev; ae_int_t repnfev;
ae_int_t repterminationtype; ae_int_t repterminationtype;
linminstate lstate; linminstate lstate;
} minlbfgsstate; } minlbfgsstate;
typedef struct typedef struct
{ {
skipping to change at line 256 skipping to change at line 427
double betahs; double betahs;
double betady; double betady;
} minasastate; } minasastate;
typedef struct typedef struct
{ {
ae_int_t iterationscount; ae_int_t iterationscount;
ae_int_t nfev; ae_int_t nfev;
ae_int_t terminationtype; ae_int_t terminationtype;
ae_int_t activeconstraints; ae_int_t activeconstraints;
} minasareport; } minasareport;
typedef struct
}
/////////////////////////////////////////////////////////////////////////
//
// THIS SECTION CONTAINS C++ INTERFACE
//
/////////////////////////////////////////////////////////////////////////
namespace alglib
{ {
ae_int_t n;
double epsg; /*************************************************************************
double epsf; This object stores state of the nonlinear CG optimizer.
double epsx;
ae_int_t maxits; You should use ALGLIB functions to work with this object.
double stpmax; *************************************************************************/
double suggestedstep; class _mincgstate_owner
ae_bool xrep;
ae_bool drep;
ae_int_t cgtype;
ae_int_t prectype;
ae_vector diagh;
ae_vector diaghl2;
ae_matrix vcorr;
ae_int_t vcnt;
ae_vector s;
ae_int_t nfev;
ae_int_t mcstage;
ae_int_t k;
ae_vector xk;
ae_vector dk;
ae_vector xn;
ae_vector dn;
ae_vector d;
double fold;
double stp;
double curstpmax;
ae_vector yk;
double laststep;
double lastscaledstep;
ae_int_t mcinfo;
ae_bool innerresetneeded;
ae_bool terminationneeded;
ae_int_t rstimer;
ae_vector x;
double f;
ae_vector g;
ae_bool needfg;
ae_bool xupdated;
ae_bool algpowerup;
ae_bool lsstart;
ae_bool lsend;
rcommstate rstate;
ae_int_t repiterationscount;
ae_int_t repnfev;
ae_int_t repterminationtype;
ae_int_t debugrestartscount;
linminstate lstate;
double betahs;
double betady;
ae_vector work0;
ae_vector work1;
} mincgstate;
typedef struct
{ {
ae_int_t iterationscount; public:
ae_int_t nfev; _mincgstate_owner();
ae_int_t terminationtype; _mincgstate_owner(const _mincgstate_owner &rhs);
} mincgreport; _mincgstate_owner& operator=(const _mincgstate_owner &rhs);
typedef struct virtual ~_mincgstate_owner();
alglib_impl::mincgstate* c_ptr();
alglib_impl::mincgstate* c_ptr() const;
protected:
alglib_impl::mincgstate *p_struct;
};
class mincgstate : public _mincgstate_owner
{ {
ae_int_t nmain; public:
ae_int_t nslack; mincgstate();
double innerepsg; mincgstate(const mincgstate &rhs);
double innerepsf; mincgstate& operator=(const mincgstate &rhs);
double innerepsx; virtual ~mincgstate();
double outerepsx; ae_bool &needf;
double outerepsi; ae_bool &needfg;
ae_int_t maxits; ae_bool &xupdated;
ae_bool xrep; double &f;
double stpmax; real_1d_array g;
ae_int_t prectype; real_1d_array x;
ae_vector diaghoriginal;
ae_vector diagh; };
ae_vector x;
double f; /*************************************************************************
ae_vector g;
ae_bool needfg; *************************************************************************/
ae_bool xupdated; class _mincgreport_owner
rcommstate rstate;
ae_int_t repinneriterationscount;
ae_int_t repouteriterationscount;
ae_int_t repnfev;
ae_int_t repterminationtype;
double repdebugeqerr;
double repdebugfs;
double repdebugff;
double repdebugdx;
ae_vector xcur;
ae_vector xprev;
ae_vector xstart;
ae_int_t itsleft;
ae_vector xend;
ae_vector lastg;
ae_matrix ceoriginal;
ae_matrix ceeffective;
ae_matrix cecurrent;
ae_vector ct;
ae_int_t cecnt;
ae_int_t cedim;
ae_vector xe;
ae_vector hasbndl;
ae_vector hasbndu;
ae_vector bndloriginal;
ae_vector bnduoriginal;
ae_vector bndleffective;
ae_vector bndueffective;
ae_vector activeconstraints;
ae_vector constrainedvalues;
ae_vector transforms;
ae_vector seffective;
ae_vector soriginal;
ae_vector w;
ae_vector tmp0;
ae_vector tmp1;
ae_vector tmp2;
ae_vector r;
ae_matrix lmmatrix;
double v0;
double v1;
double v2;
double t;
double errfeas;
double gnorm;
double mpgnorm;
double mba;
ae_int_t variabletofreeze;
double valuetofreeze;
mincgstate cgstate;
mincgreport cgrep;
ae_int_t optdim;
} minbleicstate;
typedef struct
{ {
ae_int_t inneriterationscount; public:
ae_int_t outeriterationscount; _mincgreport_owner();
ae_int_t nfev; _mincgreport_owner(const _mincgreport_owner &rhs);
ae_int_t terminationtype; _mincgreport_owner& operator=(const _mincgreport_owner &rhs);
double debugeqerr; virtual ~_mincgreport_owner();
double debugfs; alglib_impl::mincgreport* c_ptr();
double debugff; alglib_impl::mincgreport* c_ptr() const;
double debugdx; protected:
} minbleicreport; alglib_impl::mincgreport *p_struct;
};
class mincgreport : public _mincgreport_owner
{
public:
mincgreport();
mincgreport(const mincgreport &rhs);
mincgreport& operator=(const mincgreport &rhs);
virtual ~mincgreport();
ae_int_t &iterationscount;
ae_int_t &nfev;
ae_int_t &terminationtype;
} };
///////////////////////////////////////////////////////////////////////// /*************************************************************************
// This object stores nonlinear optimizer state.
// THIS SECTION CONTAINS C++ INTERFACE You should use functions provided by MinBLEIC subpackage to work with this
// object
///////////////////////////////////////////////////////////////////////// *************************************************************************/
namespace alglib class _minbleicstate_owner
{
public:
_minbleicstate_owner();
_minbleicstate_owner(const _minbleicstate_owner &rhs);
_minbleicstate_owner& operator=(const _minbleicstate_owner &rhs);
virtual ~_minbleicstate_owner();
alglib_impl::minbleicstate* c_ptr();
alglib_impl::minbleicstate* c_ptr() const;
protected:
alglib_impl::minbleicstate *p_struct;
};
class minbleicstate : public _minbleicstate_owner
{
public:
minbleicstate();
minbleicstate(const minbleicstate &rhs);
minbleicstate& operator=(const minbleicstate &rhs);
virtual ~minbleicstate();
ae_bool &needf;
ae_bool &needfg;
ae_bool &xupdated;
double &f;
real_1d_array g;
real_1d_array x;
};
/*************************************************************************
This structure stores optimization report:
* InnerIterationsCount number of inner iterations
* OuterIterationsCount number of outer iterations
* NFEV number of gradient evaluations
* TerminationType termination type (see below)
TERMINATION CODES
TerminationType field contains completion code, which can be:
-10 unsupported combination of algorithm settings:
1) StpMax is set to non-zero value,
AND 2) non-default preconditioner is used.
You can't use both features at the same moment,
so you have to choose one of them (and to turn
off another one).
-3 inconsistent constraints. Feasible point is
either nonexistent or too hard to find. Try to
restart optimizer with better initial
approximation
4 conditions on constraints are fulfilled
with error less than or equal to EpsC
5 MaxIts steps was taken
7 stopping conditions are too stringent,
further improvement is impossible,
X contains best point found so far.
ADDITIONAL FIELDS
There are additional fields which can be used for debugging:
* DebugEqErr error in the equality constraints (2-norm)
* DebugFS f, calculated at projection of initial point
to the feasible set
* DebugFF f, calculated at the final point
* DebugDX |X_start-X_final|
*************************************************************************/
class _minbleicreport_owner
{
public:
_minbleicreport_owner();
_minbleicreport_owner(const _minbleicreport_owner &rhs);
_minbleicreport_owner& operator=(const _minbleicreport_owner &rhs);
virtual ~_minbleicreport_owner();
alglib_impl::minbleicreport* c_ptr();
alglib_impl::minbleicreport* c_ptr() const;
protected:
alglib_impl::minbleicreport *p_struct;
};
class minbleicreport : public _minbleicreport_owner
{ {
public:
minbleicreport();
minbleicreport(const minbleicreport &rhs);
minbleicreport& operator=(const minbleicreport &rhs);
virtual ~minbleicreport();
ae_int_t &inneriterationscount;
ae_int_t &outeriterationscount;
ae_int_t &nfev;
ae_int_t &terminationtype;
double &debugeqerr;
double &debugfs;
double &debugff;
double &debugdx;
};
/************************************************************************* /*************************************************************************
*************************************************************************/ *************************************************************************/
class _minlbfgsstate_owner class _minlbfgsstate_owner
{ {
public: public:
_minlbfgsstate_owner(); _minlbfgsstate_owner();
_minlbfgsstate_owner(const _minlbfgsstate_owner &rhs); _minlbfgsstate_owner(const _minlbfgsstate_owner &rhs);
_minlbfgsstate_owner& operator=(const _minlbfgsstate_owner &rhs); _minlbfgsstate_owner& operator=(const _minlbfgsstate_owner &rhs);
skipping to change at line 434 skipping to change at line 620
protected: protected:
alglib_impl::minlbfgsstate *p_struct; alglib_impl::minlbfgsstate *p_struct;
}; };
class minlbfgsstate : public _minlbfgsstate_owner class minlbfgsstate : public _minlbfgsstate_owner
{ {
public: public:
minlbfgsstate(); minlbfgsstate();
minlbfgsstate(const minlbfgsstate &rhs); minlbfgsstate(const minlbfgsstate &rhs);
minlbfgsstate& operator=(const minlbfgsstate &rhs); minlbfgsstate& operator=(const minlbfgsstate &rhs);
virtual ~minlbfgsstate(); virtual ~minlbfgsstate();
ae_bool &needf;
ae_bool &needfg; ae_bool &needfg;
ae_bool &xupdated; ae_bool &xupdated;
double &f; double &f;
real_1d_array g; real_1d_array g;
real_1d_array x; real_1d_array x;
}; };
/************************************************************************* /*************************************************************************
skipping to change at line 699 skipping to change at line 886
minasareport& operator=(const minasareport &rhs); minasareport& operator=(const minasareport &rhs);
virtual ~minasareport(); virtual ~minasareport();
ae_int_t &iterationscount; ae_int_t &iterationscount;
ae_int_t &nfev; ae_int_t &nfev;
ae_int_t &terminationtype; ae_int_t &terminationtype;
ae_int_t &activeconstraints; ae_int_t &activeconstraints;
}; };
/************************************************************************* /*************************************************************************
This object stores state of the nonlinear CG optimizer. NONLINEAR CONJUGATE GRADIENT METHOD
You should use ALGLIB functions to work with this object. DESCRIPTION:
*************************************************************************/ The subroutine minimizes function F(x) of N arguments by using one of the
class _mincgstate_owner nonlinear conjugate gradient methods.
{
public:
_mincgstate_owner();
_mincgstate_owner(const _mincgstate_owner &rhs);
_mincgstate_owner& operator=(const _mincgstate_owner &rhs);
virtual ~_mincgstate_owner();
alglib_impl::mincgstate* c_ptr();
alglib_impl::mincgstate* c_ptr() const;
protected:
alglib_impl::mincgstate *p_struct;
};
class mincgstate : public _mincgstate_owner
{
public:
mincgstate();
mincgstate(const mincgstate &rhs);
mincgstate& operator=(const mincgstate &rhs);
virtual ~mincgstate();
ae_bool &needfg;
ae_bool &xupdated;
double &f;
real_1d_array g;
real_1d_array x;
}; These CG methods are globally convergent (even on non-convex functions) as
long as grad(f) is Lipschitz continuous in a some neighborhood of the
L = { x : f(x)<=f(x0) }.
/************************************************************************* REQUIREMENTS:
Algorithm will request following information during its operation:
* function value F and its gradient G (simultaneously) at given point X
*************************************************************************/ USAGE:
class _mincgreport_owner 1. User initializes algorithm state with MinCGCreate() call
{ 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and
public: other functions
_mincgreport_owner(); 3. User calls MinCGOptimize() function which takes algorithm state and
_mincgreport_owner(const _mincgreport_owner &rhs); pointer (delegate, etc.) to callback function which calculates F/G.
_mincgreport_owner& operator=(const _mincgreport_owner &rhs); 4. User calls MinCGResults() to get solution
virtual ~_mincgreport_owner(); 5. Optionally, user may call MinCGRestartFrom() to solve another problem
alglib_impl::mincgreport* c_ptr(); with same N but another starting point and/or another function.
alglib_impl::mincgreport* c_ptr() const; MinCGRestartFrom() allows to reuse already initialized structure.
protected:
alglib_impl::mincgreport *p_struct;
};
class mincgreport : public _mincgreport_owner
{
public:
mincgreport();
mincgreport(const mincgreport &rhs);
mincgreport& operator=(const mincgreport &rhs);
virtual ~mincgreport();
ae_int_t &iterationscount;
ae_int_t &nfev;
ae_int_t &terminationtype;
};
/*************************************************************************
This object stores nonlinear optimizer state.
You should use functions provided by MinBLEIC subpackage to work with this
object
*************************************************************************/
class _minbleicstate_owner
{
public:
_minbleicstate_owner();
_minbleicstate_owner(const _minbleicstate_owner &rhs);
_minbleicstate_owner& operator=(const _minbleicstate_owner &rhs);
virtual ~_minbleicstate_owner();
alglib_impl::minbleicstate* c_ptr();
alglib_impl::minbleicstate* c_ptr() const;
protected:
alglib_impl::minbleicstate *p_struct;
};
class minbleicstate : public _minbleicstate_owner
{
public:
minbleicstate();
minbleicstate(const minbleicstate &rhs);
minbleicstate& operator=(const minbleicstate &rhs);
virtual ~minbleicstate();
ae_bool &needfg;
ae_bool &xupdated;
double &f;
real_1d_array g;
real_1d_array x;
};
/*************************************************************************
This structure stores optimization report:
* InnerIterationsCount number of inner iterations
* OuterIterationsCount number of outer iterations
* NFEV number of gradient evaluations
* TerminationType termination type (see below)
TERMINATION CODES
TerminationType field contains completion code, which can be: INPUT PARAMETERS:
-10 unsupported combination of algorithm settings: N - problem dimension, N>0:
1) StpMax is set to non-zero value, * if given, only leading N elements of X are used
AND 2) non-default preconditioner is used. * if not given, automatically determined from size of X
You can't use both features at the same moment, X - starting point, array[0..N-1].
so you have to choose one of them (and to turn
off another one).
-3 inconsistent constraints. Feasible point is
either nonexistent or too hard to find. Try to
restart optimizer with better initial
approximation
4 conditions on constraints are fulfilled
with error less than or equal to EpsC
5 MaxIts steps was taken
7 stopping conditions are too stringent,
further improvement is impossible,
X contains best point found so far.
ADDITIONAL FIELDS OUTPUT PARAMETERS:
State - structure which stores algorithm state
There are additional fields which can be used for debugging: -- ALGLIB --
* DebugEqErr error in the equality constraints (2-norm) Copyright 25.03.2010 by Bochkanov Sergey
* DebugFS f, calculated at projection of initial point
to the feasible set
* DebugFF f, calculated at the final point
* DebugDX |X_start-X_final|
*************************************************************************/ *************************************************************************/
class _minbleicreport_owner void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &stat
{ e);
public: void mincgcreate(const real_1d_array &x, mincgstate &state);
_minbleicreport_owner();
_minbleicreport_owner(const _minbleicreport_owner &rhs);
_minbleicreport_owner& operator=(const _minbleicreport_owner &rhs);
virtual ~_minbleicreport_owner();
alglib_impl::minbleicreport* c_ptr();
alglib_impl::minbleicreport* c_ptr() const;
protected:
alglib_impl::minbleicreport *p_struct;
};
class minbleicreport : public _minbleicreport_owner
{
public:
minbleicreport();
minbleicreport(const minbleicreport &rhs);
minbleicreport& operator=(const minbleicreport &rhs);
virtual ~minbleicreport();
ae_int_t &inneriterationscount;
ae_int_t &outeriterationscount;
ae_int_t &nfev;
ae_int_t &terminationtype;
double &debugeqerr;
double &debugfs;
double &debugff;
double &debugdx;
};
/************************************************************************* /*************************************************************************
LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION The subroutine is finite difference variant of MinCGCreate(). It uses
finite differences in order to differentiate target function.
DESCRIPTION:
The subroutine minimizes function F(x) of N arguments by using a quasi-
Newton method (LBFGS scheme) which is optimized to use a minimum amount
of memory.
The subroutine generates the approximation of an inverse Hessian matrix by
using information about the last M steps of the algorithm (instead of N).
It lessens a required amount of memory from a value of order N^2 to a
value of order 2*N*M.
REQUIREMENTS:
Algorithm will request following information during its operation:
* function value F and its gradient G (simultaneously) at given point X
USAGE: Description below contains information which is specific to this function
1. User initializes algorithm state with MinLBFGSCreate() call only. We recommend to read comments on MinCGCreate() in order to get more
2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax() information about creation of CG optimizer.
and other functions
3. User calls MinLBFGSOptimize() function which takes algorithm state and
pointer (delegate, etc.) to callback function which calculates F/G.
4. User calls MinLBFGSResults() to get solution
5. Optionally user may call MinLBFGSRestartFrom() to solve another problem
with same N/M but another starting point and/or another function.
MinLBFGSRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS: INPUT PARAMETERS:
N - problem dimension. N>0 N - problem dimension, N>0:
M - number of corrections in the BFGS scheme of Hessian * if given, only leading N elements of X are used
approximation update. Recommended value: 3<=M<=7. The smal * if not given, automatically determined from size of X
ler X - starting point, array[0..N-1].
value causes worse convergence, the bigger will not cause DiffStep- differentiation step, >0
a
considerably better convergence, but will cause a fall in
the
performance. M<=N.
X - initial solution approximation, array[0..N-1].
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NOTES: NOTES:
1. you may tune stopping conditions with MinLBFGSSetCond() function 1. algorithm uses 4-point central formula for differentiation.
2. if target function contains exp() or other fast growing functions, and 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
optimization algorithm makes too large steps which leads to overflow, S[] is scaling vector which can be set by MinCGSetScale() call.
use MinLBFGSSetStpMax() function to bound algorithm's steps. However, 3. we recommend you to use moderate values of differentiation step. Too
L-BFGS rarely needs such a tuning. large step will result in too large truncation errors, while too small
step will result in too large numerical errors. 1.0E-6 can be good
value to start with.
4. Numerical differentiation is very inefficient - one gradient
calculation needs 4*N function evaluations. This function will work for
any N - either small (1...10), moderate (10...100) or large (100...).
However, performance penalty will be too severe for any N's except for
small ones.
We should also say that code which relies on numerical differentiation
is less robust and precise. L-BFGS needs exact gradient values.
Imprecise gradient may slow down convergence, especially on highly
nonlinear problems.
Thus we recommend to use this function for fast prototyping on small-
dimensional problems only, and to implement analytical gradient as soon
as possible.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 16.05.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double di
&x, minlbfgsstate &state); ffstep, mincgstate &state);
void minlbfgscreate(const ae_int_t m, const real_1d_array &x, minlbfgsstate void mincgcreatef(const real_1d_array &x, const double diffstep, mincgstate
&state); &state);
/************************************************************************* /*************************************************************************
This function sets stopping conditions for L-BFGS optimization algorithm. This function sets stopping conditions for CG optimization algorithm.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
EpsG - >=0 EpsG - >=0
The subroutine finishes its work if the condition The subroutine finishes its work if the condition
||G||<EpsG is satisfied, where ||.|| means Euclidian norm, |v|<EpsG is satisfied, where:
G - gradient. * |.| means Euclidian norm
* v - scaled gradient vector, v[i]=g[i]*s[i]
* g - gradient
* s - scaling coefficients set by MinCGSetScale()
EpsF - >=0 EpsF - >=0
The subroutine finishes its work if on k+1-th iteration The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied. is satisfied.
EpsX - >=0 EpsX - >=0
The subroutine finishes its work if on k+1-th iteration The subroutine finishes its work if on k+1-th iteration
the condition |X(k+1)-X(k)| <= EpsX is fulfilled. the condition |v|<=EpsX is fulfilled, where:
* |.| means Euclidian norm
* v - scaled step vector, v[i]=dx[i]/s[i]
* dx - ste pvector, dx=X(k+1)-X(k)
* s - scaling coefficients set by MinCGSetScale()
MaxIts - maximum number of iterations. If MaxIts=0, the number of MaxIts - maximum number of iterations. If MaxIts=0, the number of
iterations is unlimited. iterations is unlimited.
Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
automatic stopping criterion selection (small EpsX). automatic stopping criterion selection (small EpsX).
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const d void mincgsetcond(const mincgstate &state, const double epsg, const double
ouble epsf, const double epsx, const ae_int_t maxits); epsf, const double epsx, const ae_int_t maxits);
/*************************************************************************
This function turns on/off reporting.
INPUT PARAMETERS:
State - structure which stores algorithm state
NeedXRep- whether iteration reports are needed or not
If NeedXRep is True, algorithm will call rep() callback function if it is
provided to MinLBFGSOptimize().
-- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep);
/*************************************************************************
This function sets maximum step length
INPUT PARAMETERS:
State - structure which stores algorithm state
StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if
you don't want to limit step length.
Use this subroutine when you optimize target function which contains exp()
or other fast growing functions, and optimization algorithm makes too
large steps which leads to overflow. This function allows us to reject
steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d.
-- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax);
/************************************************************************* /*************************************************************************
This function sets scaling coefficients for LBFGS optimizer. This function sets scaling coefficients for CG optimizer.
ALGLIB optimizers use scaling matrices to test stopping conditions (step ALGLIB optimizers use scaling matrices to test stopping conditions (step
size and gradient are scaled before comparison with tolerances). Scale of size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of: the I-th variable is a translation invariant measure of:
a) "how large" the variable is a) "how large" the variable is
b) how large the step should be to make significant changes in the function b) how large the step should be to make significant changes in the function
In most optimizers (and in the LBFGS too) scaling is NOT a form of Scaling is also used by finite difference variant of CG optimizer - step
along I-th axis is equal to DiffStep*S[I].
In most optimizers (and in the CG too) scaling is NOT a form of
preconditioning. It just affects stopping conditions. You should set preconditioning. It just affects stopping conditions. You should set
preconditioner by separate call to one of the MinLBFGSSetPrec...() preconditioner by separate call to one of the MinCGSetPrec...() functions.
functions.
There is special preconditioning mode, however, which uses scaling There is special preconditioning mode, however, which uses scaling
coefficients to form diagonal preconditioning matrix. You can turn this coefficients to form diagonal preconditioning matrix. You can turn this
mode on, if you want. But you should understand that scaling is not the mode on, if you want. But you should understand that scaling is not the
same thing as preconditioning - these are two different, although related same thing as preconditioning - these are two different, although related
forms of tuning solver. forms of tuning solver.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure stores algorithm state State - structure stores algorithm state
S - array[N], non-zero scaling coefficients S - array[N], non-zero scaling coefficients
S[i] may be negative, sign doesn't matter. S[i] may be negative, sign doesn't matter.
-- ALGLIB -- -- ALGLIB --
Copyright 14.01.2011 by Bochkanov Sergey Copyright 14.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s); void mincgsetscale(const mincgstate &state, const real_1d_array &s);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: default preconditioner (simple This function turns on/off reporting.
scaling, same for all elements of X) is used.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NeedXRep- whether iteration reports are needed or not
NOTE: you can change preconditioner "on the fly", during algorithm If NeedXRep is True, algorithm will call rep() callback function if it is
iterations. provided to MinCGOptimize().
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgssetprecdefault(const minlbfgsstate &state); void mincgsetxrep(const mincgstate &state, const bool needxrep);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: Cholesky factorization of approximate This function sets CG algorithm.
Hessian is used.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
P - triangular preconditioner, Cholesky factorization of CGType - algorithm type:
the approximate Hessian. array[0..N-1,0..N-1], * -1 automatic selection of the best algorithm
(if larger, only leading N elements are used). * 0 DY (Dai and Yuan) algorithm
IsUpper - whether upper or lower triangle of P is given * 1 Hybrid DY-HS algorithm
(other triangle is not referenced)
After call to this function preconditioner is changed to P (P is copied -- ALGLIB --
into the internal buffer). Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype);
/*************************************************************************
This function sets maximum step length
INPUT PARAMETERS:
State - structure which stores algorithm state
StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
want to limit step length.
Use this subroutine when you optimize target function which contains exp()
or other fast growing functions, and optimization algorithm makes too
large steps which leads to overflow. This function allows us to reject
steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d.
-- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void mincgsetstpmax(const mincgstate &state, const double stpmax);
/*************************************************************************
This function allows to suggest initial step length to the CG algorithm.
Suggested step length is used as starting point for the line search. It
can be useful when you have badly scaled problem, i.e. when ||grad||
(which is used as initial estimate for the first step) is many orders of
magnitude different from the desired step.
Line search may fail on such problems without good estimate of initial
step length. Imagine, for example, problem with ||grad||=10^50 and desired
step equal to 0.1 Line search function will use 10^50 as initial step,
then it will decrease step length by 2 (up to 20 attempts) and will get
10^44, which is still too large.
This function allows us to tell than line search should be started from
some moderate step length, like 1.0, so algorithm will be able to detect
desired step length in a several searches.
Default behavior (when no step is suggested) is to use preconditioner, if
it is available, to generate initial estimate of step length.
This function influences only first iteration of algorithm. It should be
called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call.
Suggested step is ignored if you have preconditioner.
INPUT PARAMETERS:
State - structure used to store algorithm state.
Stp - initial estimate of the step length.
Can be zero (no estimate).
-- ALGLIB --
Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/
void mincgsuggeststep(const mincgstate &state, const double stp);
/*************************************************************************
Modification of the preconditioner: preconditioning is turned off.
INPUT PARAMETERS:
State - structure which stores algorithm state
NOTE: you can change preconditioner "on the fly", during algorithm NOTE: you can change preconditioner "on the fly", during algorithm
iterations. iterations.
NOTE 2: P should be nonsingular. Exception will be thrown otherwise.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_arra y &p, const bool isupper); void mincgsetprecdefault(const mincgstate &state);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: diagonal of approximate Hessian is Modification of the preconditioner: diagonal of approximate Hessian is
used. used.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
D - diagonal of the approximate Hessian, array[0..N-1], D - diagonal of the approximate Hessian, array[0..N-1],
(if larger, only leading N elements are used). (if larger, only leading N elements are used).
NOTE: you can change preconditioner "on the fly", during algorithm NOTE: you can change preconditioner "on the fly", during algorithm
iterations. iterations.
NOTE 2: D[i] should be positive. Exception will be thrown otherwise. NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d); void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: scale-based diagonal preconditioning. Modification of the preconditioner: scale-based diagonal preconditioning.
This preconditioning mode can be useful when you don't have approximate This preconditioning mode can be useful when you don't have approximate
diagonal of Hessian, but you know that your variables are badly scaled diagonal of Hessian, but you know that your variables are badly scaled
(for example, one variable is in [1,10], and another in [1000,100000]), (for example, one variable is in [1,10], and another in [1000,100000]),
and most part of the ill-conditioning comes from different scales of vars. and most part of the ill-conditioning comes from different scales of vars.
In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
can greatly improve convergence. can greatly improve convergence.
IMPORTANT: you should set scale of your variables with MinLBFGSSetScale() IMPORTANT: you should set scale of your variables with MinCGSetScale() call
call (before or after MinLBFGSSetPrecScale() call). Without knowledge of (before or after MinCGSetPrecScale() call). Without knowledge of the scale
the scale of your variables scale-based preconditioner will be just unit of your variables scale-based preconditioner will be just unit matrix.
matrix.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NOTE: you can change preconditioner "on the fly", during algorithm
iterations.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgssetprecscale(const minlbfgsstate &state); void mincgsetprecscale(const mincgstate &state);
/************************************************************************* /*************************************************************************
This function provides reverse communication interface This function provides reverse communication interface
Reverse communication interface is not documented or recommended to use. Reverse communication interface is not documented or recommended to use.
See below for functions which provide better documented API See below for functions which provide better documented API
*************************************************************************/ *************************************************************************/
bool minlbfgsiteration(const minlbfgsstate &state); bool mincgiteration(const mincgstate &state);
/************************************************************************* /*************************************************************************
This family of functions is used to launch iterations of nonlinear optimizer This family of functions is used to launch iterations of nonlinear optimizer
These functions accept following parameters: These functions accept following parameters:
state - algorithm state state - algorithm state
func - callback which calculates function (or merit function)
value func at given point x
grad - callback which calculates function (or merit function) grad - callback which calculates function (or merit function)
value func and gradient grad at given point x value func and gradient grad at given point x
rep - optional callback which is called after each iteration rep - optional callback which is called after each iteration
can be NULL can be NULL
ptr - optional pointer which is passed to func/grad/hess/jac/rep ptr - optional pointer which is passed to func/grad/hess/jac/rep
can be NULL can be NULL
NOTES:
1. This function has two different implementations: one which uses exact
(analytical) user-supplied gradient, and one which uses function value
only and numerically differentiates function in order to obtain
gradient.
Depending on the specific function used to create optimizer object
(either MinCGCreate() for analytical gradient or MinCGCreateF() for
numerical differentiation) you should choose appropriate variant of
MinCGOptimize() - one which accepts function AND gradient or one which
accepts function ONLY.
Be careful to choose variant of MinCGOptimize() which corresponds to
your optimization scheme! Table below lists different combinations of
callback (function/gradient) passed to MinCGOptimize() and specific
function used to create optimizer.
| USER PASSED TO MinCGOptimize()
CREATED WITH | function only | function and gradient
------------------------------------------------------------
MinCGCreateF() | work FAIL
MinCGCreate() | FAIL work
Here "FAIL" denotes inappropriate combinations of optimizer creation
function and MinCGOptimize() version. Attempts to use such combination
(for example, to create optimizer with MinCGCreateF() and to pass
gradient information to MinCGOptimize()) will lead to exception being
thrown. Either you did not pass gradient when it WAS needed or you
passed gradient when it was NOT needed.
-- ALGLIB -- -- ALGLIB --
Copyright 20.03.2009 by Bochkanov Sergey Copyright 20.04.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgsoptimize(minlbfgsstate &state, void mincgoptimize(mincgstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void mincgoptimize(mincgstate &state,
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL); void *ptr = NULL);
/************************************************************************* /*************************************************************************
L-BFGS algorithm results Conjugate gradient results
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state State - algorithm state
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
X - array[0..N-1], solution X - array[0..N-1], solution
Rep - optimization report: Rep - optimization report:
* Rep.TerminationType completion code: * Rep.TerminationType completion code:
* -2 rounding errors prevent further improvement.
X contains best point found.
* -1 incorrect parameters were specified
* 1 relative function improvement is no more than * 1 relative function improvement is no more than
EpsF. EpsF.
* 2 relative step is no more than EpsX. * 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG * 4 gradient norm is no more than EpsG
* 5 MaxIts steps was taken * 5 MaxIts steps was taken
* 7 stopping conditions are too stringent, * 7 stopping conditions are too stringent,
further improvement is impossible further improvement is impossible,
we return best X found so far
* 8 terminated by user
* Rep.IterationsCount contains iterations count * Rep.IterationsCount contains iterations count
* NFEV contains number of function calculations * NFEV contains number of function calculations
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 20.04.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep); void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep);
/************************************************************************* /*************************************************************************
L-BFGS algorithm results Conjugate gradient results
Buffered implementation of MinLBFGSResults which uses pre-allocated buffer Buffered implementation of MinCGResults(), which uses pre-allocated buffer
to store X[]. If buffer size is too small, it resizes buffer. It is to store X[]. If buffer size is too small, it resizes buffer. It is
intended to be used in the inner cycles of performance critical algorithms intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored. where array reallocation penalty is too large to be ignored.
-- ALGLIB -- -- ALGLIB --
Copyright 20.08.2010 by Bochkanov Sergey Copyright 20.04.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep); void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep);
/************************************************************************* /*************************************************************************
This subroutine restarts LBFGS algorithm from new point. All optimization This subroutine restarts CG algorithm from new point. All optimization
parameters are left unchanged. parameters are left unchanged.
This function allows to solve multiple optimization problems (which This function allows to solve multiple optimization problems (which
must have same number of dimensions) without object reallocation penalty. must have same number of dimensions) without object reallocation penalty.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure used to store algorithm state State - structure used to store algorithm state.
X - new starting point. X - new starting point.
-- ALGLIB -- -- ALGLIB --
Copyright 30.07.2010 by Bochkanov Sergey Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x); void mincgrestartfrom(const mincgstate &state, const real_1d_array &x);
/************************************************************************* /*************************************************************************
CONSTRAINED QUADRATIC PROGRAMMING BOUND CONSTRAINED OPTIMIZATION
WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS
The subroutine creates QP optimizer. After initial creation, it contains DESCRIPTION:
default optimization problem with zero quadratic and linear terms and no The subroutine minimizes function F(x) of N arguments subject to any
constraints. You should set quadratic/linear terms with calls to functions combination of:
provided by MinQP subpackage. * bound constraints
* linear inequality constraints
* linear equality constraints
INPUT PARAMETERS: REQUIREMENTS:
N - problem size * user must provide function value and gradient
* starting point X0 must be feasible or
not too far away from the feasible set
* grad(f) must be Lipschitz continuous on a level set:
L = { x : f(x)<=f(x0) }
* function must be defined everywhere on the feasible set F
OUTPUT PARAMETERS: USAGE:
State - optimizer with zero quadratic/linear terms
and no constraints
-- ALGLIB -- Constrained optimization is far more complex than the unconstrained one.
Copyright 11.01.2011 by Bochkanov Sergey Here we give very brief outline of the BLEIC optimizer. We strongly recommend
*************************************************************************/
void minqpcreate(const ae_int_t n, minqpstate &state); you to read examples in the ALGLIB Reference Manual and to read ALGLIB User
Guide
on optimization, which is available at http://www.alglib.net/optimization/
/************************************************************************* 1. User initializes algorithm state with MinBLEICCreate() call
This function sets linear term for QP solver.
By default, linear term is zero. 2. User adds boundary and/or linear constraints by calling
MinBLEICSetBC() and MinBLEICSetLC() functions.
INPUT PARAMETERS: 3. User sets stopping conditions for underlying unconstrained solver
State - structure which stores algorithm state with MinBLEICSetInnerCond() call.
B - linear term, array[N]. This function controls accuracy of underlying optimization algorithm.
-- ALGLIB -- 4. User sets stopping conditions for outer iteration by calling
Copyright 11.01.2011 by Bochkanov Sergey MinBLEICSetOuterCond() function.
*************************************************************************/ This function controls handling of boundary and inequality constraints.
void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b);
/************************************************************************* 5. Additionally, user may set limit on number of internal iterations
This function sets quadratic term for QP solver. by MinBLEICSetMaxIts() call.
This function allows to prevent algorithm from looping forever.
By default quadratic term is zero. 6. User calls MinBLEICOptimize() function which takes algorithm state and
pointer (delegate, etc.) to callback function which calculates F/G.
IMPORTANT: this solver minimizes following function: 7. User calls MinBLEICResults() to get solution
f(x) = 0.5*x'*A*x + b'*x.
Note that quadratic term has 0.5 before it. So if you want to minimize 8. Optionally user may call MinBLEICRestartFrom() to solve another problem
f(x) = x^2 + x with same N but another starting point.
you should rewrite your problem as follows: MinBLEICRestartFrom() allows to reuse already initialized structure.
f(x) = 0.5*(2*x^2) + x
and your matrix A will be equal to [[2.0]], not to [[1.0]]
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state N - problem dimension, N>0:
A - matrix, array[N,N] * if given, only leading N elements of X are used
IsUpper - (optional) storage type: * if not given, automatically determined from size ofX
* if True, symmetric matrix A is given by its upper X - starting point, array[N]:
triangle, and the lower triangle isn't used * it is better to set X to a feasible point
* if False, symmetric matrix A is given by its lower * but X can be infeasible, in which case algorithm will try
triangle, and the upper triangle isn't used * but X can be infeasible, in which case algorithm will try
* if not given, both lower and upper triangles must be approximation.
filled.
OUTPUT PARAMETERS:
State - structure stores algorithm state
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate
const bool isupper); &state);
void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a) void minbleiccreate(const real_1d_array &x, minbleicstate &state);
;
/************************************************************************* /*************************************************************************
This function sets starting point for QP solver. It is useful to have The subroutine is finite difference variant of MinBLEICCreate(). It uses
good initial approximation to the solution, because it will increase finite differences in order to differentiate target function.
speed of convergence and identification of active constraints.
Description below contains information which is specific to this function
only. We recommend to read comments on MinBLEICCreate() in order to get
more information about creation of BLEIC optimizer.
INPUT PARAMETERS: INPUT PARAMETERS:
N - problem dimension, N>0:
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
X - starting point, array[0..N-1].
DiffStep- differentiation step, >0
OUTPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
X - starting point, array[N].
NOTES:
1. algorithm uses 4-point central formula for differentiation.
2. differentiation step along I-th axis is equal to DiffStep*S[I] where
S[] is scaling vector which can be set by MinBLEICSetScale() call.
3. we recommend you to use moderate values of differentiation step. Too
large step will result in too large truncation errors, while too small
step will result in too large numerical errors. 1.0E-6 can be good
value to start with.
4. Numerical differentiation is very inefficient - one gradient
calculation needs 4*N function evaluations. This function will work for
any N - either small (1...10), moderate (10...100) or large (100...).
However, performance penalty will be too severe for any N's except for
small ones.
We should also say that code which relies on numerical differentiation
is less robust and precise. CG needs exact gradient values. Imprecise
gradient may slow down convergence, especially on highly nonlinear
problems.
Thus we recommend to use this function for fast prototyping on small-
dimensional problems only, and to implement analytical gradient as soon
as possible.
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 16.05.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x) void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double
; diffstep, minbleicstate &state);
void minbleiccreatef(const real_1d_array &x, const double diffstep, minblei
cstate &state);
/************************************************************************* /*************************************************************************
This function sets origin for QP solver. By default, following QP program This function sets boundary constraints for BLEIC optimizer.
is solved:
min(0.5*x'*A*x+b'*x)
This function allows to solve different problem: Boundary constraints are inactive by default (after initial creation).
They are preserved after algorithm restart with MinBLEICRestartFrom().
min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure stores algorithm state
XOrigin - origin, array[N]. BndL - lower bounds, array[N].
If some (all) variables are unbounded, you may specify
very small number or -INF.
BndU - upper bounds, array[N].
If some (all) variables are unbounded, you may specify
very large number or +INF.
NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
variable will be "frozen" at X[i]=BndL[i]=BndU[i].
NOTE 2: this solver has following useful properties:
* bound constraints are always satisfied exactly
* function is evaluated only INSIDE area specified by bound constraints,
even when numerical differentiation is used (algorithm adjusts nodes
according to boundary constraints)
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin); void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, c onst real_1d_array &bndu);
/************************************************************************* /*************************************************************************
This function tells solver to use Cholesky-based algorithm. This function sets linear constraints for BLEIC optimizer.
Cholesky-based algorithm can be used when:
* problem is convex
* there is no constraints or only boundary constraints are present
This algorithm has O(N^3) complexity for unconstrained problem and is up Linear constraints are inactive by default (after initial creation).
to several times slower on bound constrained problems (these additional They are preserved after algorithm restart with MinBLEICRestartFrom().
iterations are needed to identify active constraints).
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure previously allocated with MinBLEICCreate call.
C - linear constraints, array[K,N+1].
Each row of C represents one constraint, either equality
or inequality (see below):
* first N elements correspond to coefficients,
* last element corresponds to the right part.
All elements of C (including right part) must be finite.
CT - type of constraints, array[K]:
* if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
* if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
* if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
K - number of equality/inequality constraints, K>=0:
* if given, only leading K elements of C/CT are used
* if not given, automatically determined from sizes of C/CT
NOTE 1: linear (non-bound) constraints are satisfied only approximately:
* there always exists some minor violation (about Epsilon in magnitude)
due to rounding errors
* numerical differentiation, if used, may lead to function evaluations
outside of the feasible area, because algorithm does NOT change
numerical differentiation formula according to linear constraints.
If you want constraints to be satisfied exactly, try to reformulate your
problem in such manner that all constraints will become boundary ones
(this kind of constraints is always satisfied exactly, both in the final
solution and in all intermediate points).
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpsetalgocholesky(const minqpstate &state); void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, cons
t integer_1d_array &ct, const ae_int_t k);
void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, cons
t integer_1d_array &ct);
/************************************************************************* /*************************************************************************
This function sets boundary constraints for QP solver This function sets stopping conditions for the underlying nonlinear CG
optimizer. It controls overall accuracy of solution. These conditions
Boundary constraints are inactive by default (after initial creation). should be strict enough in order for algorithm to converge.
After being set, they are preserved until explicitly turned off with
another SetBC() call.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure stores algorithm state State - structure which stores algorithm state
BndL - lower bounds, array[N]. EpsG - >=0
If some (all) variables are unbounded, you may specify The subroutine finishes its work if the condition
very small number or -INF (latter is recommended because |v|<EpsG is satisfied, where:
it will allow solver to use better algorithm). * |.| means Euclidian norm
BndU - upper bounds, array[N]. * v - scaled gradient vector, v[i]=g[i]*s[i]
If some (all) variables are unbounded, you may specify * g - gradient
very large number or +INF (latter is recommended because * s - scaling coefficients set by MinBLEICSetScale()
it will allow solver to use better algorithm). EpsF - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied.
EpsX - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |v|<=EpsX is fulfilled, where:
* |.| means Euclidian norm
* v - scaled step vector, v[i]=dx[i]/s[i]
* dx - ste pvector, dx=X(k+1)-X(k)
* s - scaling coefficients set by MinBLEICSetScale()
NOTE: it is possible to specify BndL[i]=BndU[i]. In this case I-th Passing EpsG=0, EpsF=0 and EpsX=0 (simultaneously) will lead to
variable will be "frozen" at X[i]=BndL[i]=BndU[i]. automatic stopping criterion selection.
These conditions are used to terminate inner iterations. However, you
need to tune termination conditions for outer iterations too.
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const r eal_1d_array &bndu); void minbleicsetinnercond(const minbleicstate &state, const double epsg, co nst double epsf, const double epsx);
/************************************************************************* /*************************************************************************
This function solves quadratic programming problem. This function sets stopping conditions for outer iteration of BLEIC algo.
You should call it after setting solver options with MinQPSet...() calls.
These conditions control accuracy of constraint handling and amount of
infeasibility allowed in the solution.
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state State - structure which stores algorithm state
EpsX - >0, stopping condition on outer iteration step length
EpsI - >0, stopping condition on infeasibility
You should use MinQPResults() function to access results after calls Both EpsX and EpsI must be non-zero.
to this function.
MEANING OF EpsX
EpsX is a stopping condition for outer iterations. Algorithm will stop
when solution of the current modified subproblem will be within EpsX
(using 2-norm) of the previous solution.
MEANING OF EpsI
EpsI controls feasibility properties - algorithm won't stop until all
inequality constraints will be satisfied with error (distance from current
point to the feasible area) at most EpsI.
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpoptimize(const minqpstate &state); void minbleicsetoutercond(const minbleicstate &state, const double epsx, co nst double epsi);
/************************************************************************* /*************************************************************************
QP solver results This function sets scaling coefficients for BLEIC optimizer.
INPUT PARAMETERS: ALGLIB optimizers use scaling matrices to test stopping conditions (step
State - algorithm state size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of:
a) "how large" the variable is
b) how large the step should be to make significant changes in the function
OUTPUT PARAMETERS: Scaling is also used by finite difference variant of the optimizer - step
X - array[0..N-1], solution along I-th axis is equal to DiffStep*S[I].
Rep - optimization report. You should check Rep.TerminationType,
which contains completion code, and you may check another In most optimizers (and in the BLEIC too) scaling is NOT a form of
fields which contain another information about algorithm preconditioning. It just affects stopping conditions. You should set
functioning. preconditioner by separate call to one of the MinBLEICSetPrec...()
functions.
There is a special preconditioning mode, however, which uses scaling
coefficients to form diagonal preconditioning matrix. You can turn this
mode on, if you want. But you should understand that scaling is not the
same thing as preconditioning - these are two different, although related
forms of tuning solver.
INPUT PARAMETERS:
State - structure stores algorithm state
S - array[N], non-zero scaling coefficients
S[i] may be negative, sign doesn't matter.
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 14.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &r ep); void minbleicsetscale(const minbleicstate &state, const real_1d_array &s);
/************************************************************************* /*************************************************************************
QP results Modification of the preconditioner: preconditioning is turned off.
Buffered implementation of MinQPResults() which uses pre-allocated buffer INPUT PARAMETERS:
to store X[]. If buffer size is too small, it resizes buffer. It is State - structure which stores algorithm state
intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored.
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep); void minbleicsetprecdefault(const minbleicstate &state);
/************************************************************************* /*************************************************************************
IMPROVED LEVENBERG-MARQUARDT METHOD FOR Modification of the preconditioner: diagonal of approximate Hessian is
NON-LINEAR LEAST SQUARES OPTIMIZATION used.
DESCRIPTION: INPUT PARAMETERS:
This function is used to find minimum of function which is represented as State - structure which stores algorithm state
sum of squares: D - diagonal of the approximate Hessian, array[0..N-1],
F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) (if larger, only leading N elements are used).
using value of function vector f[] and Jacobian of f[].
REQUIREMENTS: NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
This algorithm will request following information during its operation:
* function vector f[] at given point X NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
* function vector f[] and Jacobian of f[] (simultaneously) at given point
There are several overloaded versions of MinLMOptimize() function which -- ALGLIB --
correspond to different LM-like optimization algorithms provided by this Copyright 13.10.2010 by Bochkanov Sergey
unit. You should choose version which accepts fvec() and jac() callbacks. *************************************************************************/
First one is used to calculate f[] at given point, second one calculates void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d
f[] and Jacobian df[i]/dx[j]. );
You can try to initialize MinLMState structure with VJ function and then /*************************************************************************
use incorrect version of MinLMOptimize() (for example, version which Modification of the preconditioner: scale-based diagonal preconditioning.
works with general form function and does not provide Jacobian), but it
will lead to exception being thrown after first attempt to calculate
Jacobian.
USAGE: This preconditioning mode can be useful when you don't have approximate
1. User initializes algorithm state with MinLMCreateVJ() call diagonal of Hessian, but you know that your variables are badly scaled
2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and (for example, one variable is in [1,10], and another in [1000,100000]),
other functions and most part of the ill-conditioning comes from different scales of vars.
3. User calls MinLMOptimize() function which takes algorithm state and
callback functions.
4. User calls MinLMResults() to get solution
5. Optionally, user may call MinLMRestartFrom() to solve another problem
with same N/M but another starting point and/or another function.
MinLMRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS: In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
N - dimension, N>1 can greatly improve convergence.
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
M - number of functions f[i]
X - initial solution, array[0..N-1]
OUTPUT PARAMETERS: IMPRTANT: you should set scale of your variables with MinBLEICSetScale()
State - structure which stores algorithm state call (before or after MinBLEICSetPrecScale() call). Without knowledge of
the scale of your variables scale-based preconditioner will be just unit
matrix.
NOTES: INPUT PARAMETERS:
1. you may tune stopping conditions with MinLMSetCond() function State - structure which stores algorithm state
2. if target function contains exp() or other fast growing functions, and
optimization algorithm makes too large steps which leads to overflow,
use MinLMSetStpMax() function to bound algorithm's steps.
-- ALGLIB -- -- ALGLIB --
Copyright 30.03.2009 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array void minbleicsetprecscale(const minbleicstate &state);
&x, minlmstate &state);
void minlmcreatevj(const ae_int_t m, const real_1d_array &x, minlmstate &st
ate);
/************************************************************************* /*************************************************************************
IMPROVED LEVENBERG-MARQUARDT METHOD FOR This function allows to stop algorithm after specified number of inner
NON-LINEAR LEAST SQUARES OPTIMIZATION iterations.
DESCRIPTION:
This function is used to find minimum of function which is represented as
sum of squares:
F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
using value of function vector f[] only. Finite differences are used to
calculate Jacobian.
REQUIREMENTS:
This algorithm will request following information during its operation:
* function vector f[] at given point X
There are several overloaded versions of MinLMOptimize() function which
correspond to different LM-like optimization algorithms provided by this
unit. You should choose version which accepts fvec() callback.
You can try to initialize MinLMState structure with VJ function and then
use incorrect version of MinLMOptimize() (for example, version which
works with general form function and does not accept function vector), but
it will lead to exception being thrown after first attempt to calculate
Jacobian.
USAGE:
1. User initializes algorithm state with MinLMCreateV() call
2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
other functions
3. User calls MinLMOptimize() function which takes algorithm state and
callback functions.
4. User calls MinLMResults() to get solution
5. Optionally, user may call MinLMRestartFrom() to solve another problem
with same N/M but another starting point and/or another function.
MinLMRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS:
N - dimension, N>1
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
M - number of functions f[i]
X - initial solution, array[0..N-1]
DiffStep- differentiation step, >0
OUTPUT PARAMETERS:
State - structure which stores algorithm state
See also MinLMIteration, MinLMResults.
NOTES:
1. you may tune stopping conditions with MinLMSetCond() function
2. if target function contains exp() or other fast growing functions, and
optimization algorithm makes too large steps which leads to overflow,
use MinLMSetStpMax() function to bound algorithm's steps.
-- ALGLIB --
Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/
void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &
x, const double diffstep, minlmstate &state);
void minlmcreatev(const ae_int_t m, const real_1d_array &x, const double di
ffstep, minlmstate &state);
/*************************************************************************
LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION
DESCRIPTION:
This function is used to find minimum of general form (not "sum-of-
-squares") function
F = F(x[0], ..., x[n-1])
using its gradient and Hessian. Levenberg-Marquardt modification with
L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization
after each Levenberg-Marquardt step is used.
REQUIREMENTS:
This algorithm will request following information during its operation:
* function value F at given point X
* F and gradient G (simultaneously) at given point X
* F, G and Hessian H (simultaneously) at given point X
There are several overloaded versions of MinLMOptimize() function which
correspond to different LM-like optimization algorithms provided by this
unit. You should choose version which accepts func(), grad() and hess()
function pointers. First pointer is used to calculate F at given point,
second one calculates F(x) and grad F(x), third one calculates F(x),
grad F(x), hess F(x).
You can try to initialize MinLMState structure with FGH-function and then
use incorrect version of MinLMOptimize() (for example, version which does
not provide Hessian matrix), but it will lead to exception being thrown
after first attempt to calculate Hessian.
USAGE:
1. User initializes algorithm state with MinLMCreateFGH() call
2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
other functions
3. User calls MinLMOptimize() function which takes algorithm state and
pointers (delegates, etc.) to callback functions.
4. User calls MinLMResults() to get solution
5. Optionally, user may call MinLMRestartFrom() to solve another problem
with same N but another starting point and/or another function.
MinLMRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS:
N - dimension, N>1
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
X - initial solution, array[0..N-1]
OUTPUT PARAMETERS:
State - structure which stores algorithm state
NOTES:
1. you may tune stopping conditions with MinLMSetCond() function
2. if target function contains exp() or other fast growing functions, and
optimization algorithm makes too large steps which leads to overflow,
use MinLMSetStpMax() function to bound algorithm's steps.
-- ALGLIB --
Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/
void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &s
tate);
void minlmcreatefgh(const real_1d_array &x, minlmstate &state);
/*************************************************************************
This function sets stopping conditions for Levenberg-Marquardt optimization
algorithm.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
EpsG - >=0 MaxIts - maximum number of inner iterations.
The subroutine finishes its work if the condition If MaxIts=0, the number of iterations is unlimited.
|v|<EpsG is satisfied, where:
* |.| means Euclidian norm
* v - scaled gradient vector, v[i]=g[i]*s[i]
* g - gradient
* s - scaling coefficients set by MinLMSetScale()
EpsF - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied.
EpsX - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |v|<=EpsX is fulfilled, where:
* |.| means Euclidian norm
* v - scaled step vector, v[i]=dx[i]/s[i]
* dx - ste pvector, dx=X(k+1)-X(k)
* s - scaling coefficients set by MinLMSetScale()
MaxIts - maximum number of iterations. If MaxIts=0, the number of
iterations is unlimited. Only Levenberg-Marquardt
iterations are counted (L-BFGS/CG iterations are NOT
counted because their cost is very low compared to that of
LM).
Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
automatic stopping criterion selection (small EpsX).
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmsetcond(const minlmstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits); void minbleicsetmaxits(const minbleicstate &state, const ae_int_t maxits);
/************************************************************************* /*************************************************************************
This function turns on/off reporting. This function turns on/off reporting.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NeedXRep- whether iteration reports are needed or not NeedXRep- whether iteration reports are needed or not
If NeedXRep is True, algorithm will call rep() callback function if it is If NeedXRep is True, algorithm will call rep() callback function if it is
provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS provided to MinBLEICOptimize().
iterations are reported.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmsetxrep(const minlmstate &state, const bool needxrep); void minbleicsetxrep(const minbleicstate &state, const bool needxrep);
/************************************************************************* /*************************************************************************
This function sets maximum step length This function sets maximum step length
IMPORTANT: this feature is hard to combine with preconditioning. You can't
set upper limit on step length, when you solve optimization problem with
linear (non-boundary) constraints AND preconditioner turned on.
When non-boundary constraints are present, you have to either a) use
preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH!
In this case algorithm will terminate with appropriate error code.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
want to limit step length. want to limit step length.
Use this subroutine when you optimize target function which contains exp() Use this subroutine when you optimize target function which contains exp()
or other fast growing functions, and optimization algorithm makes too or other fast growing functions, and optimization algorithm makes too
large steps which leads to overflow. This function allows us to reject large steps which lead to overflow. This function allows us to reject
steps that are too large (and therefore expose us to the possible steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d. overflow) without actually calculating function value at the x+stp*d.
NOTE: non-zero StpMax leads to moderate performance degradation because
intermediate step of preconditioned L-BFGS optimization is incompatible
with limits on step size.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmsetstpmax(const minlmstate &state, const double stpmax); void minbleicsetstpmax(const minbleicstate &state, const double stpmax);
/************************************************************************* /*************************************************************************
This function sets scaling coefficients for LM optimizer. This function provides reverse communication interface
Reverse communication interface is not documented or recommended to use.
See below for functions which provide better documented API
*************************************************************************/
bool minbleiciteration(const minbleicstate &state);
ALGLIB optimizers use scaling matrices to test stopping conditions (step /*************************************************************************
size and gradient are scaled before comparison with tolerances). Scale of This family of functions is used to launcn iterations of nonlinear optimize
the I-th variable is a translation invariant measure of: r
a) "how large" the variable is
b) how large the step should be to make significant changes in the function
Generally, scale is NOT considered to be a form of preconditioner. But LM These functions accept following parameters:
optimizer is unique in that it uses scaling matrix both in the stopping state - algorithm state
condition tests and as Marquardt damping factor. func - callback which calculates function (or merit function)
value func at given point x
grad - callback which calculates function (or merit function)
value func and gradient grad at given point x
rep - optional callback which is called after each iteration
can be NULL
ptr - optional pointer which is passed to func/grad/hess/jac/rep
can be NULL
Proper scaling is very important for the algorithm performance. It is less NOTES:
important for the quality of results, but still has some influence (it is
easier to converge when variables are properly scaled, so premature
stopping is possible when very badly scalled variables are combined with
relaxed stopping conditions).
INPUT PARAMETERS: 1. This function has two different implementations: one which uses exact
State - structure stores algorithm state (analytical) user-supplied gradient, and one which uses function value
S - array[N], non-zero scaling coefficients only and numerically differentiates function in order to obtain
S[i] may be negative, sign doesn't matter. gradient.
Depending on the specific function used to create optimizer object
(either MinBLEICCreate() for analytical gradient or MinBLEICCreateF()
for numerical differentiation) you should choose appropriate variant of
MinBLEICOptimize() - one which accepts function AND gradient or one
which accepts function ONLY.
Be careful to choose variant of MinBLEICOptimize() which corresponds to
your optimization scheme! Table below lists different combinations of
callback (function/gradient) passed to MinBLEICOptimize() and specific
function used to create optimizer.
| USER PASSED TO MinBLEICOptimize()
CREATED WITH | function only | function and gradient
------------------------------------------------------------
MinBLEICCreateF() | work FAIL
MinBLEICCreate() | FAIL work
Here "FAIL" denotes inappropriate combinations of optimizer creation
function and MinBLEICOptimize() version. Attemps to use such
combination (for example, to create optimizer with MinBLEICCreateF()
and to pass gradient information to MinCGOptimize()) will lead to
exception being thrown. Either you did not pass gradient when it WAS
needed or you passed gradient when it was NOT needed.
-- ALGLIB -- -- ALGLIB --
Copyright 14.01.2011 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmsetscale(const minlmstate &state, const real_1d_array &s); void minbleicoptimize(minbleicstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minbleicoptimize(minbleicstate &state,
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad,
void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
/************************************************************************* /*************************************************************************
This function sets boundary constraints for LM optimizer BLEIC results
Boundary constraints are inactive by default (after initial creation).
They are preserved until explicitly turned off with another SetBC() call.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure stores algorithm state State - algorithm state
BndL - lower bounds, array[N].
If some (all) variables are unbounded, you may specify
very small number or -INF (latter is recommended because
it will allow solver to use better algorithm).
BndU - upper bounds, array[N].
If some (all) variables are unbounded, you may specify
very large number or +INF (latter is recommended because
it will allow solver to use better algorithm).
NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
variable will be "frozen" at X[i]=BndL[i]=BndU[i].
NOTE 2: this solver has following useful properties: OUTPUT PARAMETERS:
* bound constraints are always satisfied exactly X - array[0..N-1], solution
* function is evaluated only INSIDE area specified by bound constraints Rep - optimization report. You should check Rep.TerminationType
in order to distinguish successful termination from
unsuccessful one.
More information about fields of this structure can be
found in the comments on MinBLEICReport datatype.
-- ALGLIB -- -- ALGLIB --
Copyright 14.01.2011 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const r eal_1d_array &bndu); void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleic report &rep);
/************************************************************************* /*************************************************************************
This function is used to change acceleration settings BLEIC results
You can choose between three acceleration strategies:
* AccType=0, no acceleration.
* AccType=1, secant updates are used to update quadratic model after each
iteration. After fixed number of iterations (or after model breakdown)
we recalculate quadratic model using analytic Jacobian or finite
differences. Number of secant-based iterations depends on optimization
settings: about 3 iterations - when we have analytic Jacobian, up to 2*N
iterations - when we use finite differences to calculate Jacobian.
AccType=1 is recommended when Jacobian calculation cost is prohibitive
high (several Mx1 function vector calculations followed by several NxN
Cholesky factorizations are faster than calculation of one M*N Jacobian).
It should also be used when we have no Jacobian, because finite difference
approximation takes too much time to compute.
Table below list optimization protocols (XYZ protocol corresponds to
MinLMCreateXYZ) and acceleration types they support (and use by default).
ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
protocol 0 1 comment Buffered implementation of MinBLEICResults() which uses pre-allocated buffe
V + + r
VJ + + to store X[]. If buffer size is too small, it resizes buffer. It is
FGH + intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored.
DAFAULT VALUES: -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbl
eicreport &rep);
protocol 0 1 comment /*************************************************************************
V x without acceleration it is so slooooooooow This subroutine restarts algorithm from new point.
VJ x All optimization parameters (including constraints) are left unchanged.
FGH x
NOTE: this function should be called before optimization. Attempt to call This function allows to solve multiple optimization problems (which
it during algorithm iterations may result in unexpected behavior. must have same number of dimensions) without object reallocation penalty.
NOTE: attempt to call this function with unsupported protocol/acceleration INPUT PARAMETERS:
combination will result in exception being thrown. State - structure previously allocated with MinBLEICCreate call.
X - new starting point.
-- ALGLIB -- -- ALGLIB --
Copyright 14.10.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmsetacctype(const minlmstate &state, const ae_int_t acctype); void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x );
/************************************************************************* /*************************************************************************
This function provides reverse communication interface LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION
Reverse communication interface is not documented or recommended to use.
See below for functions which provide better documented API
*************************************************************************/
bool minlmiteration(const minlmstate &state);
/************************************************************************* DESCRIPTION:
This family of functions is used to launcn iterations of nonlinear optimize The subroutine minimizes function F(x) of N arguments by using a quasi-
r Newton method (LBFGS scheme) which is optimized to use a minimum amount
of memory.
The subroutine generates the approximation of an inverse Hessian matrix by
using information about the last M steps of the algorithm (instead of N).
It lessens a required amount of memory from a value of order N^2 to a
value of order 2*N*M.
These functions accept following parameters: REQUIREMENTS:
state - algorithm state Algorithm will request following information during its operation:
func - callback which calculates function (or merit function) * function value F and its gradient G (simultaneously) at given point X
value func at given point x
grad - callback which calculates function (or merit function)
value func and gradient grad at given point x
hess - callback which calculates function (or merit function)
value func, gradient grad and Hessian hess at given point x
fvec - callback which calculates function vector fi[]
at given point x
jac - callback which calculates function vector fi[]
and Jacobian jac at given point x
rep - optional callback which is called after each iteration
can be NULL
ptr - optional pointer which is passed to func/grad/hess/jac/rep
can be NULL
NOTES: USAGE:
1. User initializes algorithm state with MinLBFGSCreate() call
2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax()
and other functions
3. User calls MinLBFGSOptimize() function which takes algorithm state and
pointer (delegate, etc.) to callback function which calculates F/G.
4. User calls MinLBFGSResults() to get solution
5. Optionally user may call MinLBFGSRestartFrom() to solve another problem
with same N/M but another starting point and/or another function.
MinLBFGSRestartFrom() allows to reuse already initialized structure.
1. Depending on function used to create state structure, this algorithm INPUT PARAMETERS:
may accept Jacobian and/or Hessian and/or gradient. According to the N - problem dimension. N>0
said above, there ase several versions of this function, which accept M - number of corrections in the BFGS scheme of Hessian
different sets of callbacks. approximation update. Recommended value: 3<=M<=7. The smal
ler
value causes worse convergence, the bigger will not cause
a
considerably better convergence, but will cause a fall in
the
performance. M<=N.
X - initial solution approximation, array[0..N-1].
This flexibility opens way to subtle errors - you may create state with OUTPUT PARAMETERS:
MinLMCreateFGH() (optimization using Hessian), but call function which State - structure which stores algorithm state
does not accept Hessian. So when algorithm will request Hessian, there
will be no callback to call. In this case exception will be thrown.
Be careful to avoid such errors because there is no way to find them at NOTES:
compile time - you can see them at runtime only. 1. you may tune stopping conditions with MinLBFGSSetCond() function
2. if target function contains exp() or other fast growing functions, and
optimization algorithm makes too large steps which leads to overflow,
use MinLBFGSSetStpMax() function to bound algorithm's steps. However,
L-BFGS rarely needs such a tuning.
-- ALGLIB -- -- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmoptimize(minlmstate &state, void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array
void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), &x, minlbfgsstate &state);
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, void minlbfgscreate(const ae_int_t m, const real_1d_array &x, minlbfgsstate
void *ptr = NULL); &state);
void minlmoptimize(minlmstate &state,
void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &
jac, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlmoptimize(minlmstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad,
void *ptr),
void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad,
real_2d_array &hess, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlmoptimize(minlmstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &
jac, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlmoptimize(minlmstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad,
void *ptr),
void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &
jac, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
/************************************************************************* /*************************************************************************
Levenberg-Marquardt algorithm results The subroutine is finite difference variant of MinLBFGSCreate(). It uses
finite differences in order to differentiate target function.
Description below contains information which is specific to this function
only. We recommend to read comments on MinLBFGSCreate() in order to get
more information about creation of LBFGS optimizer.
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state N - problem dimension, N>0:
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
M - number of corrections in the BFGS scheme of Hessian
approximation update. Recommended value: 3<=M<=7. The smal
ler
value causes worse convergence, the bigger will not cause
a
considerably better convergence, but will cause a fall in
the
performance. M<=N.
X - starting point, array[0..N-1].
DiffStep- differentiation step, >0
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
X - array[0..N-1], solution State - structure which stores algorithm state
Rep - optimization report;
see comments for this structure for more info. NOTES:
1. algorithm uses 4-point central formula for differentiation.
2. differentiation step along I-th axis is equal to DiffStep*S[I] where
S[] is scaling vector which can be set by MinLBFGSSetScale() call.
3. we recommend you to use moderate values of differentiation step. Too
large step will result in too large truncation errors, while too small
step will result in too large numerical errors. 1.0E-6 can be good
value to start with.
4. Numerical differentiation is very inefficient - one gradient
calculation needs 4*N function evaluations. This function will work for
any N - either small (1...10), moderate (10...100) or large (100...).
However, performance penalty will be too severe for any N's except for
small ones.
We should also say that code which relies on numerical differentiation
is less robust and precise. LBFGS needs exact gradient values.
Imprecise gradient may slow down convergence, especially on highly
nonlinear problems.
Thus we recommend to use this function for fast prototyping on small-
dimensional problems only, and to implement analytical gradient as soon
as possible.
-- ALGLIB -- -- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey Copyright 16.05.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &r void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_arra
ep); y &x, const double diffstep, minlbfgsstate &state);
void minlbfgscreatef(const ae_int_t m, const real_1d_array &x, const double
diffstep, minlbfgsstate &state);
/************************************************************************* /*************************************************************************
Levenberg-Marquardt algorithm results This function sets stopping conditions for L-BFGS optimization algorithm.
Buffered implementation of MinLMResults(), which uses pre-allocated buffer INPUT PARAMETERS:
to store X[]. If buffer size is too small, it resizes buffer. It is State - structure which stores algorithm state
intended to be used in the inner cycles of performance critical algorithms EpsG - >=0
where array reallocation penalty is too large to be ignored. The subroutine finishes its work if the condition
|v|<EpsG is satisfied, where:
* |.| means Euclidian norm
* v - scaled gradient vector, v[i]=g[i]*s[i]
* g - gradient
* s - scaling coefficients set by MinLBFGSSetScale()
EpsF - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied.
EpsX - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |v|<=EpsX is fulfilled, where:
* |.| means Euclidian norm
* v - scaled step vector, v[i]=dx[i]/s[i]
* dx - ste pvector, dx=X(k+1)-X(k)
* s - scaling coefficients set by MinLBFGSSetScale()
MaxIts - maximum number of iterations. If MaxIts=0, the number of
iterations is unlimited.
Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
automatic stopping criterion selection (small EpsX).
-- ALGLIB -- -- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport &rep); void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const d ouble epsf, const double epsx, const ae_int_t maxits);
/************************************************************************* /*************************************************************************
This subroutine restarts LM algorithm from new point. All optimization This function turns on/off reporting.
parameters are left unchanged.
This function allows to solve multiple optimization problems (which
must have same number of dimensions) without object reallocation penalty.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure used for reverse communication previously State - structure which stores algorithm state
allocated with MinLMCreateXXX call. NeedXRep- whether iteration reports are needed or not
X - new starting point.
If NeedXRep is True, algorithm will call rep() callback function if it is
provided to MinLBFGSOptimize().
-- ALGLIB -- -- ALGLIB --
Copyright 30.07.2010 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmrestartfrom(const minlmstate &state, const real_1d_array &x); void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep);
/************************************************************************* /*************************************************************************
This is obsolete function. This function sets maximum step length
Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). INPUT PARAMETERS:
State - structure which stores algorithm state
StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if
you don't want to limit step length.
Use this subroutine when you optimize target function which contains exp()
or other fast growing functions, and optimization algorithm makes too
large steps which leads to overflow. This function allows us to reject
steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d.
-- ALGLIB -- -- ALGLIB --
Copyright 30.03.2009 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax);
&x, minlmstate &state);
void minlmcreatevgj(const ae_int_t m, const real_1d_array &x, minlmstate &s
tate);
/************************************************************************* /*************************************************************************
This is obsolete function. This function sets scaling coefficients for LBFGS optimizer.
Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ().
-- ALGLIB -- ALGLIB optimizers use scaling matrices to test stopping conditions (step
Copyright 30.03.2009 by Bochkanov Sergey size and gradient are scaled before comparison with tolerances). Scale of
*************************************************************************/ the I-th variable is a translation invariant measure of:
void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array a) "how large" the variable is
&x, minlmstate &state); b) how large the step should be to make significant changes in the function
void minlmcreatefgj(const ae_int_t m, const real_1d_array &x, minlmstate &s
tate);
/*************************************************************************
This function is considered obsolete since ALGLIB 3.1.0 and is present for
backward compatibility only. We recommend to use MinLMCreateVJ, which
provides similar, but more consistent and feature-rich interface.
-- ALGLIB -- Scaling is also used by finite difference variant of the optimizer - step
Copyright 30.03.2009 by Bochkanov Sergey along I-th axis is equal to DiffStep*S[I].
*************************************************************************/
void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array
&x, minlmstate &state);
void minlmcreatefj(const ae_int_t m, const real_1d_array &x, minlmstate &st
ate);
/************************************************************************* In most optimizers (and in the LBFGS too) scaling is NOT a form of
Obsolete function, use MinLBFGSSetPrecDefault() instead. preconditioning. It just affects stopping conditions. You should set
preconditioner by separate call to one of the MinLBFGSSetPrec...()
functions.
-- ALGLIB -- There is special preconditioning mode, however, which uses scaling
Copyright 13.10.2010 by Bochkanov Sergey coefficients to form diagonal preconditioning matrix. You can turn this
*************************************************************************/ mode on, if you want. But you should understand that scaling is not the
void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state); same thing as preconditioning - these are two different, although related
forms of tuning solver.
/************************************************************************* INPUT PARAMETERS:
Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead. State - structure stores algorithm state
S - array[N], non-zero scaling coefficients
S[i] may be negative, sign doesn't matter.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 14.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const re al_2d_array &p, const bool isupper); void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s);
/************************************************************************* /*************************************************************************
NONLINEAR BOUND CONSTRAINED OPTIMIZATION USING Modification of the preconditioner: default preconditioner (simple
MODIFIED ACTIVE SET ALGORITHM scaling, same for all elements of X) is used.
WILLIAM W. HAGER AND HONGCHAO ZHANG
DESCRIPTION:
The subroutine minimizes function F(x) of N arguments with bound
constraints: BndL[i] <= x[i] <= BndU[i]
This method is globally convergent as long as grad(f) is Lipschitz
continuous on a level set: L = { x : f(x)<=f(x0) }.
REQUIREMENTS:
Algorithm will request following information during its operation:
* function value F and its gradient G (simultaneously) at given point X
USAGE:
1. User initializes algorithm state with MinASACreate() call
2. User tunes solver parameters with MinASASetCond() MinASASetStpMax() and
other functions
3. User calls MinASAOptimize() function which takes algorithm state and
pointer (delegate, etc.) to callback function which calculates F/G.
4. User calls MinASAResults() to get solution
5. Optionally, user may call MinASARestartFrom() to solve another problem
with same N but another starting point and/or another function.
MinASARestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS: INPUT PARAMETERS:
N - problem dimension, N>0: State - structure which stores algorithm state
* if given, only leading N elements of X are used
* if not given, automatically determined from sizes of
X/BndL/BndU.
X - starting point, array[0..N-1].
BndL - lower bounds, array[0..N-1].
all elements MUST be specified, i.e. all variables are
bounded. However, if some (all) variables are unbounded,
you may specify very small number as bound: -1000, -1.0E6
or -1.0E300, or something like that.
BndU - upper bounds, array[0..N-1].
all elements MUST be specified, i.e. all variables are
bounded. However, if some (all) variables are unbounded,
you may specify very large number as bound: +1000, +1.0E6
or +1.0E300, or something like that.
OUTPUT PARAMETERS:
State - structure stores algorithm state
NOTES:
1. you may tune stopping conditions with MinASASetCond() function NOTE: you can change preconditioner "on the fly", during algorithm
2. if target function contains exp() or other fast growing functions, and iterations.
optimization algorithm makes too large steps which leads to overflow,
use MinASASetStpMax() function to bound algorithm's steps.
3. this function does NOT support infinite/NaN values in X, BndL, BndU.
-- ALGLIB -- -- ALGLIB --
Copyright 25.03.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_a void minlbfgssetprecdefault(const minlbfgsstate &state);
rray &bndl, const real_1d_array &bndu, minasastate &state);
void minasacreate(const real_1d_array &x, const real_1d_array &bndl, const
real_1d_array &bndu, minasastate &state);
/************************************************************************* /*************************************************************************
This function sets stopping conditions for the ASA optimization algorithm. Modification of the preconditioner: Cholesky factorization of approximate
Hessian is used.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
EpsG - >=0 P - triangular preconditioner, Cholesky factorization of
The subroutine finishes its work if the condition the approximate Hessian. array[0..N-1,0..N-1],
||G||<EpsG is satisfied, where ||.|| means Euclidian norm, (if larger, only leading N elements are used).
G - gradient. IsUpper - whether upper or lower triangle of P is given
EpsF - >=0 (other triangle is not referenced)
The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied.
EpsX - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |X(k+1)-X(k)| <= EpsX is fulfilled.
MaxIts - maximum number of iterations. If MaxIts=0, the number of
iterations is unlimited.
Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to After call to this function preconditioner is changed to P (P is copied
automatic stopping criterion selection (small EpsX). into the internal buffer).
NOTE: you can change preconditioner "on the fly", during algorithm
iterations.
NOTE 2: P should be nonsingular. Exception will be thrown otherwise.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasasetcond(const minasastate &state, const double epsg, const doubl e epsf, const double epsx, const ae_int_t maxits); void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_arra y &p, const bool isupper);
/************************************************************************* /*************************************************************************
This function turns on/off reporting. Modification of the preconditioner: diagonal of approximate Hessian is
used.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NeedXRep- whether iteration reports are needed or not D - diagonal of the approximate Hessian, array[0..N-1],
(if larger, only leading N elements are used).
If NeedXRep is True, algorithm will call rep() callback function if it is NOTE: you can change preconditioner "on the fly", during algorithm
provided to MinASAOptimize(). iterations.
NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasasetxrep(const minasastate &state, const bool needxrep); void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d );
/************************************************************************* /*************************************************************************
This function sets optimization algorithm. Modification of the preconditioner: scale-based diagonal preconditioning.
INPUT PARAMETERS: This preconditioning mode can be useful when you don't have approximate
State - structure which stores algorithm stat diagonal of Hessian, but you know that your variables are badly scaled
UAType - algorithm type: (for example, one variable is in [1,10], and another in [1000,100000]),
* -1 automatic selection of the best algorithm and most part of the ill-conditioning comes from different scales of vars.
* 0 DY (Dai and Yuan) algorithm
* 1 Hybrid DY-HS algorithm
-- ALGLIB -- In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
Copyright 02.04.2010 by Bochkanov Sergey can greatly improve convergence.
*************************************************************************/
void minasasetalgorithm(const minasastate &state, const ae_int_t algotype);
/************************************************************************* IMPRTANT: you should set scale of your variables with MinLBFGSSetScale()
This function sets maximum step length call (before or after MinLBFGSSetPrecScale() call). Without knowledge of
the scale of your variables scale-based preconditioner will be just unit
matrix.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
want to limit step length (zero by default).
Use this subroutine when you optimize target function which contains exp()
or other fast growing functions, and optimization algorithm makes too
large steps which leads to overflow. This function allows us to reject
steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasasetstpmax(const minasastate &state, const double stpmax); void minlbfgssetprecscale(const minlbfgsstate &state);
/************************************************************************* /*************************************************************************
This function provides reverse communication interface This function provides reverse communication interface
Reverse communication interface is not documented or recommended to use. Reverse communication interface is not documented or recommended to use.
See below for functions which provide better documented API See below for functions which provide better documented API
*************************************************************************/ *************************************************************************/
bool minasaiteration(const minasastate &state); bool minlbfgsiteration(const minlbfgsstate &state);
/************************************************************************* /*************************************************************************
This family of functions is used to launcn iterations of nonlinear optimize r This family of functions is used to launcn iterations of nonlinear optimize r
These functions accept following parameters: These functions accept following parameters:
state - algorithm state state - algorithm state
func - callback which calculates function (or merit function)
value func at given point x
grad - callback which calculates function (or merit function) grad - callback which calculates function (or merit function)
value func and gradient grad at given point x value func and gradient grad at given point x
rep - optional callback which is called after each iteration rep - optional callback which is called after each iteration
can be NULL can be NULL
ptr - optional pointer which is passed to func/grad/hess/jac/rep ptr - optional pointer which is passed to func/grad/hess/jac/rep
can be NULL can be NULL
NOTES:
1. This function has two different implementations: one which uses exact
(analytical) user-supplied gradient, and one which uses function value
only and numerically differentiates function in order to obtain
gradient.
Depending on the specific function used to create optimizer object
(either MinLBFGSCreate() for analytical gradient or MinLBFGSCreateF()
for numerical differentiation) you should choose appropriate variant of
MinLBFGSOptimize() - one which accepts function AND gradient or one
which accepts function ONLY.
Be careful to choose variant of MinLBFGSOptimize() which corresponds to
your optimization scheme! Table below lists different combinations of
callback (function/gradient) passed to MinLBFGSOptimize() and specific
function used to create optimizer.
| USER PASSED TO MinLBFGSOptimize()
CREATED WITH | function only | function and gradient
------------------------------------------------------------
MinLBFGSCreateF() | work FAIL
MinLBFGSCreate() | FAIL work
Here "FAIL" denotes inappropriate combinations of optimizer creation
function and MinLBFGSOptimize() version. Attemps to use such
combination (for example, to create optimizer with MinLBFGSCreateF() and
to pass gradient information to MinCGOptimize()) will lead to exception
being thrown. Either you did not pass gradient when it WAS needed or
you passed gradient when it was NOT needed.
-- ALGLIB -- -- ALGLIB --
Copyright 20.03.2009 by Bochkanov Sergey Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasaoptimize(minasastate &state, void minlbfgsoptimize(minlbfgsstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlbfgsoptimize(minlbfgsstate &state,
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL); void *ptr = NULL);
/************************************************************************* /*************************************************************************
ASA results L-BFGS algorithm results
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state State - algorithm state
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
X - array[0..N-1], solution X - array[0..N-1], solution
Rep - optimization report: Rep - optimization report:
* Rep.TerminationType completetion code: * Rep.TerminationType completetion code:
* -2 rounding errors prevent further improvement. * -2 rounding errors prevent further improvement.
X contains best point found. X contains best point found.
* -1 incorrect parameters were specified * -1 incorrect parameters were specified
* 1 relative function improvement is no more than * 1 relative function improvement is no more than
EpsF. EpsF.
* 2 relative step is no more than EpsX. * 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG * 4 gradient norm is no more than EpsG
* 5 MaxIts steps was taken * 5 MaxIts steps was taken
* 7 stopping conditions are too stringent, * 7 stopping conditions are too stringent,
further improvement is impossible further improvement is impossible
* Rep.IterationsCount contains iterations count * Rep.IterationsCount contains iterations count
* NFEV countains number of function calculations * NFEV countains number of function calculations
* ActiveConstraints contains number of active constraints
-- ALGLIB -- -- ALGLIB --
Copyright 20.03.2009 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep); void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgs report &rep);
/************************************************************************* /*************************************************************************
ASA results L-BFGS algorithm results
Buffered implementation of MinASAResults() which uses pre-allocated buffer Buffered implementation of MinLBFGSResults which uses pre-allocated buffer
to store X[]. If buffer size is too small, it resizes buffer. It is to store X[]. If buffer size is too small, it resizes buffer. It is
intended to be used in the inner cycles of performance critical algorithms intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored. where array reallocation penalty is too large to be ignored.
-- ALGLIB -- -- ALGLIB --
Copyright 20.03.2009 by Bochkanov Sergey Copyright 20.08.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasarep ort &rep); void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlb fgsreport &rep);
/************************************************************************* /*************************************************************************
This subroutine restarts CG algorithm from new point. All optimization This subroutine restarts LBFGS algorithm from new point. All optimization
parameters are left unchanged. parameters are left unchanged.
This function allows to solve multiple optimization problems (which This function allows to solve multiple optimization problems (which
must have same number of dimensions) without object reallocation penalty. must have same number of dimensions) without object reallocation penalty.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure previously allocated with MinCGCreate call. State - structure used to store algorithm state
X - new starting point. X - new starting point.
BndL - new lower bounds
BndU - new upper bounds
-- ALGLIB -- -- ALGLIB --
Copyright 30.07.2010 by Bochkanov Sergey Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minasarestartfrom(const minasastate &state, const real_1d_array &x, co nst real_1d_array &bndl, const real_1d_array &bndu); void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x );
/************************************************************************* /*************************************************************************
NONLINEAR CONJUGATE GRADIENT METHOD CONSTRAINED QUADRATIC PROGRAMMING
DESCRIPTION:
The subroutine minimizes function F(x) of N arguments by using one of the
nonlinear conjugate gradient methods.
These CG methods are globally convergent (even on non-convex functions) as
long as grad(f) is Lipschitz continuous in a some neighborhood of the
L = { x : f(x)<=f(x0) }.
REQUIREMENTS:
Algorithm will request following information during its operation:
* function value F and its gradient G (simultaneously) at given point X
USAGE: The subroutine creates QP optimizer. After initial creation, it contains
1. User initializes algorithm state with MinCGCreate() call default optimization problem with zero quadratic and linear terms and no
2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and constraints. You should set quadratic/linear terms with calls to functions
other functions provided by MinQP subpackage.
3. User calls MinCGOptimize() function which takes algorithm state and
pointer (delegate, etc.) to callback function which calculates F/G.
4. User calls MinCGResults() to get solution
5. Optionally, user may call MinCGRestartFrom() to solve another problem
with same N but another starting point and/or another function.
MinCGRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS: INPUT PARAMETERS:
N - problem dimension, N>0: N - problem size
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
X - starting point, array[0..N-1].
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
State - structure which stores algorithm state State - optimizer with zero quadratic/linear terms
and no constraints
-- ALGLIB -- -- ALGLIB --
Copyright 25.03.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &stat void minqpcreate(const ae_int_t n, minqpstate &state);
e);
void mincgcreate(const real_1d_array &x, mincgstate &state);
/************************************************************************* /*************************************************************************
This function sets stopping conditions for CG optimization algorithm. This function sets linear term for QP solver.
By default, linear term is zero.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
EpsG - >=0 B - linear term, array[N].
The subroutine finishes its work if the condition
|v|<EpsG is satisfied, where:
* |.| means Euclidian norm
* v - scaled gradient vector, v[i]=g[i]*s[i]
* g - gradient
* s - scaling coefficients set by MinCGSetScale()
EpsF - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied.
EpsX - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |v|<=EpsX is fulfilled, where:
* |.| means Euclidian norm
* v - scaled step vector, v[i]=dx[i]/s[i]
* dx - ste pvector, dx=X(k+1)-X(k)
* s - scaling coefficients set by MinCGSetScale()
MaxIts - maximum number of iterations. If MaxIts=0, the number of
iterations is unlimited.
Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
automatic stopping criterion selection (small EpsX).
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits); void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b);
/************************************************************************* /*************************************************************************
This function sets scaling coefficients for CG optimizer. This function sets quadratic term for QP solver.
ALGLIB optimizers use scaling matrices to test stopping conditions (step
size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of:
a) "how large" the variable is
b) how large the step should be to make significant changes in the function
In most optimizers (and in the CG too) scaling is NOT a form of By default quadratic term is zero.
preconditioning. It just affects stopping conditions. You should set
preconditioner by separate call to one of the MinCGSetPrec...() functions.
There is special preconditioning mode, however, which uses scaling IMPORTANT: this solver minimizes following function:
coefficients to form diagonal preconditioning matrix. You can turn this f(x) = 0.5*x'*A*x + b'*x.
mode on, if you want. But you should understand that scaling is not the Note that quadratic term has 0.5 before it. So if you want to minimize
same thing as preconditioning - these are two different, although related f(x) = x^2 + x
forms of tuning solver. you should rewrite your problem as follows:
f(x) = 0.5*(2*x^2) + x
and your matrix A will be equal to [[2.0]], not to [[1.0]]
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure stores algorithm state State - structure which stores algorithm state
S - array[N], non-zero scaling coefficients A - matrix, array[N,N]
S[i] may be negative, sign doesn't matter. IsUpper - (optional) storage type:
* if True, symmetric matrix A is given by its upper
triangle, and the lower triangle isn
* if False, symmetric matrix A is given by its lower
triangle, and the upper triangle isn
* if not given, both lower and upper triangles must be
filled.
-- ALGLIB -- -- ALGLIB --
Copyright 14.01.2011 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetscale(const mincgstate &state, const real_1d_array &s); void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a,
const bool isupper);
void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a)
;
/************************************************************************* /*************************************************************************
This function turns on/off reporting. This function sets starting point for QP solver. It is useful to have
good initial approximation to the solution, because it will increase
speed of convergence and identification of active constraints.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NeedXRep- whether iteration reports are needed or not X - starting point, array[N].
If NeedXRep is True, algorithm will call rep() callback function if it is
provided to MinCGOptimize().
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetxrep(const mincgstate &state, const bool needxrep); void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x) ;
/************************************************************************* /*************************************************************************
This function sets CG algorithm. This function sets origin for QP solver. By default, following QP program
is solved:
min(0.5*x'*A*x+b'*x)
This function allows to solve different problem:
min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
CGType - algorithm type: XOrigin - origin, array[N].
* -1 automatic selection of the best algorithm
* 0 DY (Dai and Yuan) algorithm
* 1 Hybrid DY-HS algorithm
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype); void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin);
/************************************************************************* /*************************************************************************
This function sets maximum step length This function tells solver to use Cholesky-based algorithm.
Cholesky-based algorithm can be used when:
* problem is convex
* there is no constraints or only boundary constraints are present
This algorithm has O(N^3) complexity for unconstrained problem and is up
to several times slower on bound constrained problems (these additional
iterations are needed to identify active constraints).
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
want to limit step length.
Use this subroutine when you optimize target function which contains exp()
or other fast growing functions, and optimization algorithm makes too
large steps which leads to overflow. This function allows us to reject
steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetstpmax(const mincgstate &state, const double stpmax); void minqpsetalgocholesky(const minqpstate &state);
/************************************************************************* /*************************************************************************
This function allows to suggest initial step length to the CG algorithm. This function sets boundary constraints for QP solver
Suggested step length is used as starting point for the line search. It
can be useful when you have badly scaled problem, i.e. when ||grad||
(which is used as initial estimate for the first step) is many orders of
magnitude different from the desired step.
Line search may fail on such problems without good estimate of initial
step length. Imagine, for example, problem with ||grad||=10^50 and desired
step equal to 0.1 Line search function will use 10^50 as initial step,
then it will decrease step length by 2 (up to 20 attempts) and will get
10^44, which is still too large.
This function allows us to tell that line search should be started from
some moderate step length, like 1.0, so algorithm will be able to detect
desired step length in a several searches.
Default behavior (when no step is suggested) is to use preconditioner, if
it is available, to generate initial estimate of step length.
This function influences only first iteration of algorithm. It should be Boundary constraints are inactive by default (after initial creation).
called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call. After being set, they are preserved until explicitly turned off with
Suggested step is ignored if you have preconditioner. another SetBC() call.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure used to store algorithm state. State - structure stores algorithm state
Stp - initial estimate of the step length. BndL - lower bounds, array[N].
Can be zero (no estimate). If some (all) variables are unbounded, you may specify
very small number or -INF (latter is recommended because
it will allow solver to use better algorithm).
BndU - upper bounds, array[N].
If some (all) variables are unbounded, you may specify
very large number or +INF (latter is recommended because
it will allow solver to use better algorithm).
NOTE: it is possible to specify BndL[i]=BndU[i]. In this case I-th
variable will be "frozen" at X[i]=BndL[i]=BndU[i].
-- ALGLIB -- -- ALGLIB --
Copyright 30.07.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsuggeststep(const mincgstate &state, const double stp); void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const r eal_1d_array &bndu);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: preconditioning is turned off. This function solves quadratic programming problem.
You should call it after setting solver options with MinQPSet...() calls.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - algorithm state
NOTE: you can change preconditioner "on the fly", during algorithm You should use MinQPResults() function to access results after calls
iterations. to this function.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetprecdefault(const mincgstate &state); void minqpoptimize(const minqpstate &state);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: diagonal of approximate Hessian is QP solver results
used.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - algorithm state
D - diagonal of the approximate Hessian, array[0..N-1],
(if larger, only leading N elements are used).
NOTE: you can change preconditioner "on the fly", during algorithm OUTPUT PARAMETERS:
iterations. X - array[0..N-1], solution
Rep - optimization report. You should check Rep.TerminationType,
which contains completion code, and you may check another
fields which contain another information about algorithm
functioning.
NOTE 2: D[i] should be positive. Exception will be thrown otherwise. -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &r
ep);
NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. /*************************************************************************
QP results
Buffered implementation of MinQPResults() which uses pre-allocated buffer
to store X[]. If buffer size is too small, it resizes buffer. It is
intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d); void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: scale-based diagonal preconditioning. IMPROVED LEVENBERG-MARQUARDT METHOD FOR
NON-LINEAR LEAST SQUARES OPTIMIZATION
This preconditioning mode can be useful when you don't have approximate DESCRIPTION:
diagonal of Hessian, but you know that your variables are badly scaled This function is used to find minimum of function which is represented as
(for example, one variable is in [1,10], and another in [1000,100000]), sum of squares:
and most part of the ill-conditioning comes from different scales of vars. F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
using value of function vector f[] and Jacobian of f[].
In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), REQUIREMENTS:
can greatly improve convergence. This algorithm will request following information during its operation:
IMPRTANT: you should set scale of your variables with MinCGSetScale() call * function vector f[] at given point X
(before or after MinCGSetPrecScale() call). Without knowledge of the scale * function vector f[] and Jacobian of f[] (simultaneously) at given point
of your variables scale-based preconditioner will be just unit matrix.
There are several overloaded versions of MinLMOptimize() function which
correspond to different LM-like optimization algorithms provided by this
unit. You should choose version which accepts fvec() and jac() callbacks.
First one is used to calculate f[] at given point, second one calculates
f[] and Jacobian df[i]/dx[j].
You can try to initialize MinLMState structure with VJ function and then
use incorrect version of MinLMOptimize() (for example, version which
works with general form function and does not provide Jacobian), but it
will lead to exception being thrown after first attempt to calculate
Jacobian.
USAGE:
1. User initializes algorithm state with MinLMCreateVJ() call
2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
other functions
3. User calls MinLMOptimize() function which takes algorithm state and
callback functions.
4. User calls MinLMResults() to get solution
5. Optionally, user may call MinLMRestartFrom() to solve another problem
with same N/M but another starting point and/or another function.
MinLMRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS: INPUT PARAMETERS:
N - dimension, N>1
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
M - number of functions f[i]
X - initial solution, array[0..N-1]
OUTPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NOTE: you can change preconditioner "on the fly", during algorithm NOTES:
iterations. 1. you may tune stopping conditions with MinLMSetCond() function
2. if target function contains exp() or other fast growing functions, and
optimization algorithm makes too large steps which leads to overflow,
use MinLMSetStpMax() function to bound algorithm's steps.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgsetprecscale(const mincgstate &state); void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array
&x, minlmstate &state);
void minlmcreatevj(const ae_int_t m, const real_1d_array &x, minlmstate &st
ate);
/************************************************************************* /*************************************************************************
This function provides reverse communication interface IMPROVED LEVENBERG-MARQUARDT METHOD FOR
Reverse communication interface is not documented or recommended to use. NON-LINEAR LEAST SQUARES OPTIMIZATION
See below for functions which provide better documented API
*************************************************************************/
bool mincgiteration(const mincgstate &state);
/************************************************************************* DESCRIPTION:
This family of functions is used to launch iterations of nonlinear optimize This function is used to find minimum of function which is represented as
r sum of squares:
F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
using value of function vector f[] only. Finite differences are used to
calculate Jacobian.
These functions accept following parameters: REQUIREMENTS:
state - algorithm state This algorithm will request following information during its operation:
grad - callback which calculates function (or merit function) * function vector f[] at given point X
value func and gradient grad at given point x
rep - optional callback which is called after each iteration
can be NULL
ptr - optional pointer which is passed to func/grad/hess/jac/rep
can be NULL
-- ALGLIB -- There are several overloaded versions of MinLMOptimize() function which
Copyright 20.04.2009 by Bochkanov Sergey correspond to different LM-like optimization algorithms provided by this
unit. You should choose version which accepts fvec() callback.
*************************************************************************/ You can try to initialize MinLMState structure with VJ function and then
void mincgoptimize(mincgstate &state, use incorrect version of MinLMOptimize() (for example, version which
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, works with general form function and does not accept function vector), but
void *ptr), it will lead to exception being thrown after first attempt to calculate
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, Jacobian.
void *ptr = NULL);
/************************************************************************* USAGE:
Conjugate gradient results 1. User initializes algorithm state with MinLMCreateV() call
2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
other functions
3. User calls MinLMOptimize() function which takes algorithm state and
callback functions.
4. User calls MinLMResults() to get solution
5. Optionally, user may call MinLMRestartFrom() to solve another problem
with same N/M but another starting point and/or another function.
MinLMRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state N - dimension, N>1
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
M - number of functions f[i]
X - initial solution, array[0..N-1]
DiffStep- differentiation step, >0
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
X - array[0..N-1], solution State - structure which stores algorithm state
Rep - optimization report:
* Rep.TerminationType completion code: See also MinLMIteration, MinLMResults.
* 1 relative function improvement is no more than
EpsF. NOTES:
* 2 relative step is no more than EpsX. 1. you may tune stopping conditions with MinLMSetCond() function
* 4 gradient norm is no more than EpsG 2. if target function contains exp() or other fast growing functions, and
* 5 MaxIts steps was taken optimization algorithm makes too large steps which leads to overflow,
* 7 stopping conditions are too stringent, use MinLMSetStpMax() function to bound algorithm's steps.
further improvement is impossible,
we return best X found so far
* 8 terminated by user
* Rep.IterationsCount contains iterations count
* NFEV contains number of function calculations
-- ALGLIB -- -- ALGLIB --
Copyright 20.04.2009 by Bochkanov Sergey Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &r void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &
ep); x, const double diffstep, minlmstate &state);
void minlmcreatev(const ae_int_t m, const real_1d_array &x, const double di
ffstep, minlmstate &state);
/************************************************************************* /*************************************************************************
Conjugate gradient results LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION
Buffered implementation of MinCGResults(), which uses pre-allocated buffer DESCRIPTION:
to store X[]. If buffer size is too small, it resizes buffer. It is This function is used to find minimum of general form (not "sum-of-
intended to be used in the inner cycles of performance critical algorithms -squares") function
where array reallocation penalty is too large to be ignored. F = F(x[0], ..., x[n-1])
using its gradient and Hessian. Levenberg-Marquardt modification with
L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization
after each Levenberg-Marquardt step is used.
REQUIREMENTS:
This algorithm will request following information during its operation:
* function value F at given point X
* F and gradient G (simultaneously) at given point X
* F, G and Hessian H (simultaneously) at given point X
There are several overloaded versions of MinLMOptimize() function which
correspond to different LM-like optimization algorithms provided by this
unit. You should choose version which accepts func(), grad() and hess()
function pointers. First pointer is used to calculate F at given point,
second one calculates F(x) and grad F(x), third one calculates F(x),
grad F(x), hess F(x).
You can try to initialize MinLMState structure with FGH-function and then
use incorrect version of MinLMOptimize() (for example, version which does
not provide Hessian matrix), but it will lead to exception being thrown
after first attempt to calculate Hessian.
USAGE:
1. User initializes algorithm state with MinLMCreateFGH() call
2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
other functions
3. User calls MinLMOptimize() function which takes algorithm state and
pointers (delegates, etc.) to callback functions.
4. User calls MinLMResults() to get solution
5. Optionally, user may call MinLMRestartFrom() to solve another problem
with same N but another starting point and/or another function.
MinLMRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS:
N - dimension, N>1
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
X - initial solution, array[0..N-1]
OUTPUT PARAMETERS:
State - structure which stores algorithm state
NOTES:
1. you may tune stopping conditions with MinLMSetCond() function
2. if target function contains exp() or other fast growing functions, and
optimization algorithm makes too large steps which leads to overflow,
use MinLMSetStpMax() function to bound algorithm's steps.
-- ALGLIB -- -- ALGLIB --
Copyright 20.04.2009 by Bochkanov Sergey Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &s
&rep); tate);
void minlmcreatefgh(const real_1d_array &x, minlmstate &state);
/************************************************************************* /*************************************************************************
This subroutine restarts CG algorithm from new point. All optimization This function sets stopping conditions for Levenberg-Marquardt optimization
parameters are left unchanged. algorithm.
This function allows to solve multiple optimization problems (which
must have same number of dimensions) without object reallocation penalty.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure used to store algorithm state. State - structure which stores algorithm state
X - new starting point. EpsG - >=0
The subroutine finishes its work if the condition
|v|<EpsG is satisfied, where:
* |.| means Euclidian norm
* v - scaled gradient vector, v[i]=g[i]*s[i]
* g - gradient
* s - scaling coefficients set by MinLMSetScale()
EpsF - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied.
EpsX - >=0
The subroutine finishes its work if on k+1-th iteration
the condition |v|<=EpsX is fulfilled, where:
* |.| means Euclidian norm
* v - scaled step vector, v[i]=dx[i]/s[i]
* dx - step vector, dx=X(k+1)-X(k)
* s - scaling coefficients set by MinLMSetScale()
MaxIts - maximum number of iterations. If MaxIts=0, the number of
iterations is unlimited. Only Levenberg-Marquardt
iterations are counted (L-BFGS/CG iterations are NOT
counted because their cost is very low compared to that of
LM).
Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
automatic stopping criterion selection (small EpsX).
-- ALGLIB -- -- ALGLIB --
Copyright 30.07.2010 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mincgrestartfrom(const mincgstate &state, const real_1d_array &x); void minlmsetcond(const minlmstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
/************************************************************************* /*************************************************************************
BOUND CONSTRAINED OPTIMIZATION This function turns on/off reporting.
WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS
DESCRIPTION: INPUT PARAMETERS:
The subroutine minimizes function F(x) of N arguments subject to any State - structure which stores algorithm state
combination of: NeedXRep- whether iteration reports are needed or not
* bound constraints
* linear inequality constraints
* linear equality constraints
REQUIREMENTS: If NeedXRep is True, algorithm will call rep() callback function if it is
* user must provide function value and gradient provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS
* starting point X0 must be feasible or iterations are reported.
not too far away from the feasible set
* grad(f) must be Lipschitz continuous on a level set:
L = { x : f(x)<=f(x0) }
* function must be defined everywhere on the feasible set F
USAGE: -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
void minlmsetxrep(const minlmstate &state, const bool needxrep);
Constrained optimization is far more complex than the unconstrained one. /*************************************************************************
Here we give very brief outline of the BLEIC optimizer. We strongly recomme This function sets maximum step length
nd
you to read examples in the ALGLIB Reference Manual and to read ALGLIB User
Guide
on optimization, which is available at http://www.alglib.net/optimization/
1. User initializes algorithm state with MinBLEICCreate() call INPUT PARAMETERS:
State - structure which stores algorithm state
StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
want to limit step length.
2. USer adds boundary and/or linear constraints by calling Use this subroutine when you optimize target function which contains exp()
MinBLEICSetBC() and MinBLEICSetLC() functions. or other fast growing functions, and optimization algorithm makes too
large steps which leads to overflow. This function allows us to reject
steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d.
3. User sets stopping conditions for underlying unconstrained solver NOTE: non-zero StpMax leads to moderate performance degradation because
with MinBLEICSetInnerCond() call. intermediate step of preconditioned L-BFGS optimization is incompatible
This function controls accuracy of underlying optimization algorithm. with limits on step size.
4. User sets stopping conditions for outer iteration by calling -- ALGLIB --
MinBLEICSetOuterCond() function. Copyright 02.04.2010 by Bochkanov Sergey
This function controls handling of boundary and inequality constraints. *************************************************************************/
void minlmsetstpmax(const minlmstate &state, const double stpmax);
5. Additionally, user may set limit on number of internal iterations /*************************************************************************
by MinBLEICSetMaxIts() call. This function sets scaling coefficients for LM optimizer.
This function allows to prevent algorithm from looping forever.
6. User calls MinBLEICOptimize() function which takes algorithm state and ALGLIB optimizers use scaling matrices to test stopping conditions (step
pointer (delegate, etc.) to callback function which calculates F/G. size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of:
a) "how large" the variable is
b) how large the step should be to make significant changes in the function
7. User calls MinBLEICResults() to get solution Generally, scale is NOT considered to be a form of preconditioner. But LM
optimizer is unique in that it uses scaling matrix both in the stopping
condition tests and as Marquardt damping factor.
8. Optionally user may call MinBLEICRestartFrom() to solve another problem Proper scaling is very important for the algorithm performance. It is less
with same N but another starting point. important for the quality of results, but still has some influence (it is
MinBLEICRestartFrom() allows to reuse already initialized structure. easier to converge when variables are properly scaled, so premature
stopping is possible when very badly scaled variables are combined with
relaxed stopping conditions).
INPUT PARAMETERS: INPUT PARAMETERS:
N - problem dimension, N>0:
* if given, only leading N elements of X are used
* if not given, automatically determined from size of X
X - starting point, array[N]:
* it is better to set X to a feasible point
* but X can be infeasible, in which case algorithm will try
to find feasible point first, using X as initial
approximation.
OUTPUT PARAMETERS:
State - structure stores algorithm state State - structure stores algorithm state
S - array[N], non-zero scaling coefficients
S[i] may be negative, sign doesn't matter.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 14.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate void minlmsetscale(const minlmstate &state, const real_1d_array &s);
&state);
void minbleiccreate(const real_1d_array &x, minbleicstate &state);
/************************************************************************* /*************************************************************************
This function sets boundary constraints for BLEIC optimizer. This function sets boundary constraints for LM optimizer
Boundary constraints are inactive by default (after initial creation). Boundary constraints are inactive by default (after initial creation).
They are preserved after algorithm restart with MinBLEICRestartFrom(). They are preserved until explicitly turned off with another SetBC() call.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure stores algorithm state State - structure stores algorithm state
BndL - lower bounds, array[N]. BndL - lower bounds, array[N].
If some (all) variables are unbounded, you may specify If some (all) variables are unbounded, you may specify
very small number or -INF. very small number or -INF (latter is recommended because
it will allow solver to use better algorithm).
BndU - upper bounds, array[N]. BndU - upper bounds, array[N].
If some (all) variables are unbounded, you may specify If some (all) variables are unbounded, you may specify
very large number or +INF. very large number or +INF (latter is recommended because
it will allow solver to use better algorithm).
NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
variable will be "frozen" at X[i]=BndL[i]=BndU[i]. variable will be "frozen" at X[i]=BndL[i]=BndU[i].
NOTE 2: this solver has following useful properties: NOTE 2: this solver has following useful properties:
* bound constraints are always satisfied exactly * bound constraints are always satisfied exactly
* function is evaluated only INSIDE area specified by bound constraints * function is evaluated only INSIDE area specified by bound constraints
or at its boundary
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 14.01.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, c onst real_1d_array &bndu); void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const r eal_1d_array &bndu);
/************************************************************************* /*************************************************************************
This function sets linear constraints for BLEIC optimizer. This function is used to change acceleration settings
Linear constraints are inactive by default (after initial creation). You can choose between three acceleration strategies:
They are preserved after algorithm restart with MinBLEICRestartFrom(). * AccType=0, no acceleration.
* AccType=1, secant updates are used to update quadratic model after each
iteration. After fixed number of iterations (or after model breakdown)
we recalculate quadratic model using analytic Jacobian or finite
differences. Number of secant-based iterations depends on optimization
settings: about 3 iterations - when we have analytic Jacobian, up to 2*N
iterations - when we use finite differences to calculate Jacobian.
INPUT PARAMETERS: AccType=1 is recommended when Jacobian calculation cost is prohibitively
State - structure previously allocated with MinBLEICCreate call. high (several Mx1 function vector calculations followed by several NxN
C - linear constraints, array[K,N+1]. Cholesky factorizations are faster than calculation of one M*N Jacobian).
Each row of C represents one constraint, either equality It should also be used when we have no Jacobian, because finite difference
or inequality (see below): approximation takes too much time to compute.
* first N elements correspond to coefficients,
* last element corresponds to the right part. Table below list optimization protocols (XYZ protocol corresponds to
All elements of C (including right part) must be finite. MinLMCreateXYZ) and acceleration types they support (and use by default).
CT - type of constraints, array[K]:
* if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
* if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
* if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] protocol 0 1 comment
K - number of equality/inequality constraints, K>=0: V + +
* if given, only leading K elements of C/CT are used VJ + +
* if not given, automatically determined from sizes of C/CT FGH +
DEFAULT VALUES:
protocol 0 1 comment
V x without acceleration it is so slooooooooow
VJ x
FGH x
NOTE: this function should be called before optimization. Attempt to call
it during algorithm iterations may result in unexpected behavior.
NOTE: attempt to call this function with unsupported protocol/acceleration
combination will result in exception being thrown.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 14.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, cons void minlmsetacctype(const minlmstate &state, const ae_int_t acctype);
t integer_1d_array &ct, const ae_int_t k);
void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, cons
t integer_1d_array &ct);
/************************************************************************* /*************************************************************************
This function sets stopping conditions for the underlying nonlinear CG This function provides reverse communication interface
optimizer. It controls overall accuracy of solution. These conditions Reverse communication interface is not documented or recommended to use.
should be strict enough in order for algorithm to converge. See below for functions which provide better documented API
*************************************************************************/
bool minlmiteration(const minlmstate &state);
INPUT PARAMETERS: /*************************************************************************
State - structure which stores algorithm state This family of functions is used to launch iterations of nonlinear optimize
EpsG - >=0 r
The subroutine finishes its work if the condition
|v|<EpsG is satisfied, where: These functions accept following parameters:
* |.| means Euclidean norm state - algorithm state
* v - scaled gradient vector, v[i]=g[i]*s[i] func - callback which calculates function (or merit function)
* g - gradient value func at given point x
* s - scaling coefficients set by MinBLEICSetScale() grad - callback which calculates function (or merit function)
EpsF - >=0 value func and gradient grad at given point x
The subroutine finishes its work if on k+1-th iteration hess - callback which calculates function (or merit function)
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} value func, gradient grad and Hessian hess at given point x
is satisfied. fvec - callback which calculates function vector fi[]
EpsX - >=0 at given point x
The subroutine finishes its work if on k+1-th iteration jac - callback which calculates function vector fi[]
the condition |v|<=EpsX is fulfilled, where: and Jacobian jac at given point x
* |.| means Euclidean norm rep - optional callback which is called after each iteration
* v - scaled step vector, v[i]=dx[i]/s[i] can be NULL
* dx - step vector, dx=X(k+1)-X(k) ptr - optional pointer which is passed to func/grad/hess/jac/rep
* s - scaling coefficients set by MinBLEICSetScale() can be NULL
NOTES:
1. Depending on function used to create state structure, this algorithm
may accept Jacobian and/or Hessian and/or gradient. According to the
said above, there ase several versions of this function, which accept
different sets of callbacks.
This flexibility opens way to subtle errors - you may create state with
MinLMCreateFGH() (optimization using Hessian), but call function which
does not accept Hessian. So when algorithm will request Hessian, there
will be no callback to call. In this case exception will be thrown.
Be careful to avoid such errors because there is no way to find them at
compile time - you can see them at runtime only.
-- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
void minlmoptimize(minlmstate &state,
void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlmoptimize(minlmstate &state,
void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &
jac, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlmoptimize(minlmstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad,
void *ptr),
void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad,
real_2d_array &hess, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlmoptimize(minlmstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &
jac, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
void minlmoptimize(minlmstate &state,
void (*func)(const real_1d_array &x, double &func, void *ptr),
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad,
void *ptr),
void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &
jac, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL);
Passing EpsG=0, EpsF=0 and EpsX=0 (simultaneously) will lead to /*************************************************************************
automatic stopping criterion selection. Levenberg-Marquardt algorithm results
These conditions are used to terminate inner iterations. However, you INPUT PARAMETERS:
need to tune termination conditions for outer iterations too. State - algorithm state
OUTPUT PARAMETERS:
X - array[0..N-1], solution
Rep - optimization report;
see comments for this structure for more info.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetinnercond(const minbleicstate &state, const double epsg, co nst double epsf, const double epsx); void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &r ep);
/************************************************************************* /*************************************************************************
This function sets stopping conditions for outer iteration of BLEIC algo. Levenberg-Marquardt algorithm results
These conditions control accuracy of constraint handling and amount of
infeasibility allowed in the solution.
INPUT PARAMETERS:
State - structure which stores algorithm state
EpsX - >0, stopping condition on outer iteration step length
EpsI - >0, stopping condition on infeasibility
Both EpsX and EpsI must be non-zero. Buffered implementation of MinLMResults(), which uses pre-allocated buffer
to store X[]. If buffer size is too small, it resizes buffer. It is
intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored.
MEANING OF EpsX -- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport
&rep);
EpsX is a stopping condition for outer iterations. Algorithm will stop /*************************************************************************
when solution of the current modified subproblem will be within EpsX This subroutine restarts LM algorithm from new point. All optimization
(using 2-norm) of the previous solution. parameters are left unchanged.
MEANING OF EpsI This function allows to solve multiple optimization problems (which
must have same number of dimensions) without object reallocation penalty.
EpsI controls feasibility properties - algorithm won't stop until all INPUT PARAMETERS:
inequality constraints will be satisfied with error (distance from current State - structure used for reverse communication previously
point to the feasible area) at most EpsI. allocated with MinLMCreateXXX call.
X - new starting point.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetoutercond(const minbleicstate &state, const double epsx, co nst double epsi); void minlmrestartfrom(const minlmstate &state, const real_1d_array &x);
/************************************************************************* /*************************************************************************
This is obsolete function which was used by previous version of the BLEIC This is obsolete function.
optimizer. It does nothing in the current version of BLEIC.
Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ().
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetbarrierwidth(const minbleicstate &state, const double mu); void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array
&x, minlmstate &state);
void minlmcreatevgj(const ae_int_t m, const real_1d_array &x, minlmstate &s
tate);
/************************************************************************* /*************************************************************************
This is obsolete function which was used by previous version of the BLEIC This is obsolete function.
optimizer. It does nothing in the current version of BLEIC.
Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ().
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetbarrierdecay(const minbleicstate &state, const double mudec void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array
ay); &x, minlmstate &state);
void minlmcreatefgj(const ae_int_t m, const real_1d_array &x, minlmstate &s
tate);
/************************************************************************* /*************************************************************************
This function sets scaling coefficients for BLEIC optimizer. This function is considered obsolete since ALGLIB 3.1.0 and is present for
backward compatibility only. We recommend to use MinLMCreateVJ, which
ALGLIB optimizers use scaling matrices to test stopping conditions (step provides similar, but more consistent and feature-rich interface.
size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of:
a) "how large" the variable is
b) how large the step should be to make significant changes in the function
In most optimizers (and in the BLEIC too) scaling is NOT a form of
preconditioning. It just affects stopping conditions. You should set
preconditioner by separate call to one of the MinBLEICSetPrec...()
functions.
There is a special preconditioning mode, however, which uses scaling
coefficients to form diagonal preconditioning matrix. You can turn this
mode on, if you want. But you should understand that scaling is not the
same thing as preconditioning - these are two different, although related
forms of tuning solver.
INPUT PARAMETERS:
State - structure stores algorithm state
S - array[N], non-zero scaling coefficients
S[i] may be negative, sign doesn't matter.
-- ALGLIB -- -- ALGLIB --
Copyright 14.01.2011 by Bochkanov Sergey Copyright 30.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetscale(const minbleicstate &state, const real_1d_array &s); void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array
&x, minlmstate &state);
void minlmcreatefj(const ae_int_t m, const real_1d_array &x, minlmstate &st
ate);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: preconditioning is turned off. Obsolete function, use MinLBFGSSetPrecDefault() instead.
INPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetprecdefault(const minbleicstate &state); void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: diagonal of approximate Hessian is Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead.
used.
INPUT PARAMETERS:
State - structure which stores algorithm state
D - diagonal of the approximate Hessian, array[0..N-1],
(if larger, only leading N elements are used).
NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d ); void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const re al_2d_array &p, const bool isupper);
/************************************************************************* /*************************************************************************
Modification of the preconditioner: scale-based diagonal preconditioning. This is obsolete function which was used by previous version of the BLEIC
optimizer. It does nothing in the current version of BLEIC.
This preconditioning mode can be useful when you don't have approximate
diagonal of Hessian, but you know that your variables are badly scaled
(for example, one variable is in [1,10], and another in [1000,100000]),
and most part of the ill-conditioning comes from different scales of vars.
In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
can greatly improve convergence.
IMPORTANT: you should set scale of your variables with MinBLEICSetScale()
call (before or after MinBLEICSetPrecScale() call). Without knowledge of
the scale of your variables scale-based preconditioner will be just unit
matrix.
INPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetprecscale(const minbleicstate &state); void minbleicsetbarrierwidth(const minbleicstate &state, const double mu);
/************************************************************************* /*************************************************************************
This function allows to stop algorithm after specified number of inner This is obsolete function which was used by previous version of the BLEIC
iterations. optimizer. It does nothing in the current version of BLEIC.
INPUT PARAMETERS:
State - structure which stores algorithm state
MaxIts - maximum number of inner iterations.
If MaxIts=0, the number of iterations is unlimited.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetmaxits(const minbleicstate &state, const ae_int_t maxits); void minbleicsetbarrierdecay(const minbleicstate &state, const double mudec ay);
/************************************************************************* /*************************************************************************
This function turns on/off reporting. Obsolete optimization algorithm.
Was replaced by MinBLEIC subpackage.
INPUT PARAMETERS: -- ALGLIB --
State - structure which stores algorithm state Copyright 25.03.2010 by Bochkanov Sergey
NeedXRep- whether iteration reports are needed or not *************************************************************************/
void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_a
rray &bndl, const real_1d_array &bndu, minasastate &state);
void minasacreate(const real_1d_array &x, const real_1d_array &bndl, const
real_1d_array &bndu, minasastate &state);
If NeedXRep is True, algorithm will call rep() callback function if it is /*************************************************************************
provided to MinBLEICOptimize(). Obsolete optimization algorithm.
Was replaced by MinBLEIC subpackage.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetxrep(const minbleicstate &state, const bool needxrep); void minasasetcond(const minasastate &state, const double epsg, const doubl e epsf, const double epsx, const ae_int_t maxits);
/************************************************************************* /*************************************************************************
This function sets maximum step length Obsolete optimization algorithm.
Was replaced by MinBLEIC subpackage.
IMPORTANT: this feature is hard to combine with preconditioning. You can't -- ALGLIB --
set upper limit on step length, when you solve optimization problem with Copyright 02.04.2010 by Bochkanov Sergey
linear (non-boundary) constraints AND preconditioner turned on. *************************************************************************/
void minasasetxrep(const minasastate &state, const bool needxrep);
When non-boundary constraints are present, you have to either a) use /*************************************************************************
preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH! Obsolete optimization algorithm.
In this case algorithm will terminate with appropriate error code. Was replaced by MinBLEIC subpackage.
INPUT PARAMETERS: -- ALGLIB --
State - structure which stores algorithm state Copyright 02.04.2010 by Bochkanov Sergey
StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't *************************************************************************/
want to limit step length. void minasasetalgorithm(const minasastate &state, const ae_int_t algotype);
Use this subroutine when you optimize target function which contains exp() /*************************************************************************
or other fast growing functions, and optimization algorithm makes too Obsolete optimization algorithm.
large steps which lead to overflow. This function allows us to reject Was replaced by MinBLEIC subpackage.
steps that are too large (and therefore expose us to the possible
overflow) without actually calculating function value at the x+stp*d.
-- ALGLIB -- -- ALGLIB --
Copyright 02.04.2010 by Bochkanov Sergey Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetstpmax(const minbleicstate &state, const double stpmax); void minasasetstpmax(const minasastate &state, const double stpmax);
/************************************************************************* /*************************************************************************
This function provides reverse communication interface This function provides reverse communication interface
Reverse communication interface is not documented or recommended to use. Reverse communication interface is not documented or recommended to use.
See below for functions which provide better documented API See below for functions which provide better documented API
*************************************************************************/ *************************************************************************/
bool minbleiciteration(const minbleicstate &state); bool minasaiteration(const minasastate &state);
/************************************************************************* /*************************************************************************
This family of functions is used to launch iterations of nonlinear optimize r This family of functions is used to launch iterations of nonlinear optimize r
These functions accept following parameters: These functions accept following parameters:
state - algorithm state state - algorithm state
grad - callback which calculates function (or merit function) grad - callback which calculates function (or merit function)
value func and gradient grad at given point x value func and gradient grad at given point x
rep - optional callback which is called after each iteration rep - optional callback which is called after each iteration
can be NULL can be NULL
ptr - optional pointer which is passed to func/grad/hess/jac/rep ptr - optional pointer which is passed to func/grad/hess/jac/rep
can be NULL can be NULL
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicoptimize(minbleicstate &state, void minasaoptimize(minasastate &state,
void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
void *ptr = NULL); void *ptr = NULL);
/************************************************************************* /*************************************************************************
BLEIC results Obsolete optimization algorithm.
Was replaced by MinBLEIC subpackage.
INPUT PARAMETERS:
State - algorithm state
OUTPUT PARAMETERS:
X - array[0..N-1], solution
Rep - optimization report. You should check Rep.TerminationType
in order to distinguish successful termination from
unsuccessful one.
More information about fields of this structure can be
found in the comments on MinBLEICReport datatype.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleic report &rep); void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep);
/************************************************************************* /*************************************************************************
BLEIC results Obsolete optimization algorithm.
Was replaced by MinBLEIC subpackage.
Buffered implementation of MinBLEICResults() which uses pre-allocated buffe
r
to store X[]. If buffer size is too small, it resizes buffer. It is
intended to be used in the inner cycles of performance critical algorithms
where array reallocation penalty is too large to be ignored.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbl void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasarep
eicreport &rep); ort &rep);
/*************************************************************************
This subroutine restarts algorithm from new point.
All optimization parameters (including constraints) are left unchanged.
This function allows to solve multiple optimization problems (which
must have same number of dimensions) without object reallocation penalty.
INPUT PARAMETERS: /*************************************************************************
State - structure previously allocated with MinBLEICCreate call. Obsolete optimization algorithm.
X - new starting point. Was replaced by MinBLEIC subpackage.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x ); void minasarestartfrom(const minasastate &state, const real_1d_array &x, co nst real_1d_array &bndl, const real_1d_array &bndu);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS) // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
void mincgcreate(ae_int_t n,
/* Real */ ae_vector* x,
mincgstate* state,
ae_state *_state);
void mincgcreatef(ae_int_t n,
/* Real */ ae_vector* x,
double diffstep,
mincgstate* state,
ae_state *_state);
void mincgsetcond(mincgstate* state,
double epsg,
double epsf,
double epsx,
ae_int_t maxits,
ae_state *_state);
void mincgsetscale(mincgstate* state,
/* Real */ ae_vector* s,
ae_state *_state);
void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state);
void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state);
void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state);
void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state);
void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state);
void mincgsetprecdefault(mincgstate* state, ae_state *_state);
void mincgsetprecdiag(mincgstate* state,
/* Real */ ae_vector* d,
ae_state *_state);
void mincgsetprecscale(mincgstate* state, ae_state *_state);
ae_bool mincgiteration(mincgstate* state, ae_state *_state);
void mincgresults(mincgstate* state,
/* Real */ ae_vector* x,
mincgreport* rep,
ae_state *_state);
void mincgresultsbuf(mincgstate* state,
/* Real */ ae_vector* x,
mincgreport* rep,
ae_state *_state);
void mincgrestartfrom(mincgstate* state,
/* Real */ ae_vector* x,
ae_state *_state);
void mincgsetprecdiagfast(mincgstate* state,
/* Real */ ae_vector* d,
ae_state *_state);
void mincgsetpreclowrankfast(mincgstate* state,
/* Real */ ae_vector* d1,
/* Real */ ae_vector* c,
/* Real */ ae_matrix* v,
ae_int_t vcnt,
ae_state *_state);
void mincgsetprecvarpart(mincgstate* state,
/* Real */ ae_vector* d2,
ae_state *_state);
ae_bool _mincgstate_init(mincgstate* p, ae_state *_state, ae_bool make_auto
matic);
ae_bool _mincgstate_init_copy(mincgstate* dst, mincgstate* src, ae_state *_
state, ae_bool make_automatic);
void _mincgstate_clear(mincgstate* p);
ae_bool _mincgreport_init(mincgreport* p, ae_state *_state, ae_bool make_au
tomatic);
ae_bool _mincgreport_init_copy(mincgreport* dst, mincgreport* src, ae_state
*_state, ae_bool make_automatic);
void _mincgreport_clear(mincgreport* p);
void minbleiccreate(ae_int_t n,
/* Real */ ae_vector* x,
minbleicstate* state,
ae_state *_state);
void minbleiccreatef(ae_int_t n,
/* Real */ ae_vector* x,
double diffstep,
minbleicstate* state,
ae_state *_state);
void minbleicsetbc(minbleicstate* state,
/* Real */ ae_vector* bndl,
/* Real */ ae_vector* bndu,
ae_state *_state);
void minbleicsetlc(minbleicstate* state,
/* Real */ ae_matrix* c,
/* Integer */ ae_vector* ct,
ae_int_t k,
ae_state *_state);
void minbleicsetinnercond(minbleicstate* state,
double epsg,
double epsf,
double epsx,
ae_state *_state);
void minbleicsetoutercond(minbleicstate* state,
double epsx,
double epsi,
ae_state *_state);
void minbleicsetscale(minbleicstate* state,
/* Real */ ae_vector* s,
ae_state *_state);
void minbleicsetprecdefault(minbleicstate* state, ae_state *_state);
void minbleicsetprecdiag(minbleicstate* state,
/* Real */ ae_vector* d,
ae_state *_state);
void minbleicsetprecscale(minbleicstate* state, ae_state *_state);
void minbleicsetmaxits(minbleicstate* state,
ae_int_t maxits,
ae_state *_state);
void minbleicsetxrep(minbleicstate* state,
ae_bool needxrep,
ae_state *_state);
void minbleicsetstpmax(minbleicstate* state,
double stpmax,
ae_state *_state);
ae_bool minbleiciteration(minbleicstate* state, ae_state *_state);
void minbleicresults(minbleicstate* state,
/* Real */ ae_vector* x,
minbleicreport* rep,
ae_state *_state);
void minbleicresultsbuf(minbleicstate* state,
/* Real */ ae_vector* x,
minbleicreport* rep,
ae_state *_state);
void minbleicrestartfrom(minbleicstate* state,
/* Real */ ae_vector* x,
ae_state *_state);
ae_bool _minbleicstate_init(minbleicstate* p, ae_state *_state, ae_bool mak
e_automatic);
ae_bool _minbleicstate_init_copy(minbleicstate* dst, minbleicstate* src, ae
_state *_state, ae_bool make_automatic);
void _minbleicstate_clear(minbleicstate* p);
ae_bool _minbleicreport_init(minbleicreport* p, ae_state *_state, ae_bool m
ake_automatic);
ae_bool _minbleicreport_init_copy(minbleicreport* dst, minbleicreport* src,
ae_state *_state, ae_bool make_automatic);
void _minbleicreport_clear(minbleicreport* p);
void minlbfgscreate(ae_int_t n, void minlbfgscreate(ae_int_t n,
ae_int_t m, ae_int_t m,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minlbfgsstate* state, minlbfgsstate* state,
ae_state *_state); ae_state *_state);
void minlbfgscreatef(ae_int_t n,
ae_int_t m,
/* Real */ ae_vector* x,
double diffstep,
minlbfgsstate* state,
ae_state *_state);
void minlbfgssetcond(minlbfgsstate* state, void minlbfgssetcond(minlbfgsstate* state,
double epsg, double epsg,
double epsf, double epsf,
double epsx, double epsx,
ae_int_t maxits, ae_int_t maxits,
ae_state *_state); ae_state *_state);
void minlbfgssetxrep(minlbfgsstate* state, void minlbfgssetxrep(minlbfgsstate* state,
ae_bool needxrep, ae_bool needxrep,
ae_state *_state); ae_state *_state);
void minlbfgssetstpmax(minlbfgsstate* state, void minlbfgssetstpmax(minlbfgsstate* state,
double stpmax, double stpmax,
ae_state *_state); ae_state *_state);
void minlbfgssetscale(minlbfgsstate* state, void minlbfgssetscale(minlbfgsstate* state,
/* Real */ ae_vector* s, /* Real */ ae_vector* s,
ae_state *_state); ae_state *_state);
void minlbfgscreatex(ae_int_t n, void minlbfgscreatex(ae_int_t n,
ae_int_t m, ae_int_t m,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
ae_int_t flags, ae_int_t flags,
double diffstep,
minlbfgsstate* state, minlbfgsstate* state,
ae_state *_state); ae_state *_state);
void minlbfgssetprecdefault(minlbfgsstate* state, ae_state *_state); void minlbfgssetprecdefault(minlbfgsstate* state, ae_state *_state);
void minlbfgssetpreccholesky(minlbfgsstate* state, void minlbfgssetpreccholesky(minlbfgsstate* state,
/* Real */ ae_matrix* p, /* Real */ ae_matrix* p,
ae_bool isupper, ae_bool isupper,
ae_state *_state); ae_state *_state);
void minlbfgssetprecdiag(minlbfgsstate* state, void minlbfgssetprecdiag(minlbfgsstate* state,
/* Real */ ae_vector* d, /* Real */ ae_vector* d,
ae_state *_state); ae_state *_state);
skipping to change at line 3058 skipping to change at line 3371
void _minlmstate_clear(minlmstate* p); void _minlmstate_clear(minlmstate* p);
ae_bool _minlmreport_init(minlmreport* p, ae_state *_state, ae_bool make_au tomatic); ae_bool _minlmreport_init(minlmreport* p, ae_state *_state, ae_bool make_au tomatic);
ae_bool _minlmreport_init_copy(minlmreport* dst, minlmreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _minlmreport_init_copy(minlmreport* dst, minlmreport* src, ae_state *_state, ae_bool make_automatic);
void _minlmreport_clear(minlmreport* p); void _minlmreport_clear(minlmreport* p);
void minlbfgssetdefaultpreconditioner(minlbfgsstate* state, void minlbfgssetdefaultpreconditioner(minlbfgsstate* state,
ae_state *_state); ae_state *_state);
void minlbfgssetcholeskypreconditioner(minlbfgsstate* state, void minlbfgssetcholeskypreconditioner(minlbfgsstate* state,
/* Real */ ae_matrix* p, /* Real */ ae_matrix* p,
ae_bool isupper, ae_bool isupper,
ae_state *_state); ae_state *_state);
void minbleicsetbarrierwidth(minbleicstate* state,
double mu,
ae_state *_state);
void minbleicsetbarrierdecay(minbleicstate* state,
double mudecay,
ae_state *_state);
void minasacreate(ae_int_t n, void minasacreate(ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* bndl, /* Real */ ae_vector* bndl,
/* Real */ ae_vector* bndu, /* Real */ ae_vector* bndu,
minasastate* state, minasastate* state,
ae_state *_state); ae_state *_state);
void minasasetcond(minasastate* state, void minasasetcond(minasastate* state,
double epsg, double epsg,
double epsf, double epsf,
double epsx, double epsx,
/* ... unchanged declarations elided by the diff tool (around source lines 3095/3414) ... */
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* bndl, /* Real */ ae_vector* bndl,
/* Real */ ae_vector* bndu, /* Real */ ae_vector* bndu,
ae_state *_state); ae_state *_state);
ae_bool _minasastate_init(minasastate* p, ae_state *_state, ae_bool make_au tomatic); ae_bool _minasastate_init(minasastate* p, ae_state *_state, ae_bool make_au tomatic);
ae_bool _minasastate_init_copy(minasastate* dst, minasastate* src, ae_state *_state, ae_bool make_automatic); ae_bool _minasastate_init_copy(minasastate* dst, minasastate* src, ae_state *_state, ae_bool make_automatic);
void _minasastate_clear(minasastate* p); void _minasastate_clear(minasastate* p);
ae_bool _minasareport_init(minasareport* p, ae_state *_state, ae_bool make_ automatic); ae_bool _minasareport_init(minasareport* p, ae_state *_state, ae_bool make_ automatic);
ae_bool _minasareport_init_copy(minasareport* dst, minasareport* src, ae_st ate *_state, ae_bool make_automatic); ae_bool _minasareport_init_copy(minasareport* dst, minasareport* src, ae_st ate *_state, ae_bool make_automatic);
void _minasareport_clear(minasareport* p); void _minasareport_clear(minasareport* p);
void mincgcreate(ae_int_t n,
/* Real */ ae_vector* x,
mincgstate* state,
ae_state *_state);
void mincgsetcond(mincgstate* state,
double epsg,
double epsf,
double epsx,
ae_int_t maxits,
ae_state *_state);
void mincgsetscale(mincgstate* state,
/* Real */ ae_vector* s,
ae_state *_state);
void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state);
void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state);
void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state);
void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state);
void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state);
void mincgsetprecdefault(mincgstate* state, ae_state *_state);
void mincgsetprecdiag(mincgstate* state,
/* Real */ ae_vector* d,
ae_state *_state);
void mincgsetprecscale(mincgstate* state, ae_state *_state);
ae_bool mincgiteration(mincgstate* state, ae_state *_state);
void mincgresults(mincgstate* state,
/* Real */ ae_vector* x,
mincgreport* rep,
ae_state *_state);
void mincgresultsbuf(mincgstate* state,
/* Real */ ae_vector* x,
mincgreport* rep,
ae_state *_state);
void mincgrestartfrom(mincgstate* state,
/* Real */ ae_vector* x,
ae_state *_state);
void mincgsetprecdiagfast(mincgstate* state,
/* Real */ ae_vector* d,
ae_state *_state);
void mincgsetpreclowrankfast(mincgstate* state,
/* Real */ ae_vector* d1,
/* Real */ ae_vector* c,
/* Real */ ae_matrix* v,
ae_int_t vcnt,
ae_state *_state);
void mincgsetprecvarpart(mincgstate* state,
/* Real */ ae_vector* d2,
ae_state *_state);
ae_bool _mincgstate_init(mincgstate* p, ae_state *_state, ae_bool make_auto
matic);
ae_bool _mincgstate_init_copy(mincgstate* dst, mincgstate* src, ae_state *_
state, ae_bool make_automatic);
void _mincgstate_clear(mincgstate* p);
ae_bool _mincgreport_init(mincgreport* p, ae_state *_state, ae_bool make_au
tomatic);
ae_bool _mincgreport_init_copy(mincgreport* dst, mincgreport* src, ae_state
*_state, ae_bool make_automatic);
void _mincgreport_clear(mincgreport* p);
/*
 * BLEIC (bound- and linearly-constrained) optimizer: construction,
 * box/linear constraints, inner/outer stopping conditions, barrier
 * parameters, scaling, preconditioners, reverse-communication
 * iteration, results retrieval, restart, and state/report lifecycle.
 * NOTE(review): identifiers that the HTML diff split across lines
 * (e.g. "mak"/"e_automatic") have been rejoined; verify against the
 * upstream ALGLIB header.
 */
void minbleiccreate(ae_int_t n,
     /* Real */ ae_vector* x,
     minbleicstate* state,
     ae_state *_state);
void minbleicsetbc(minbleicstate* state,
     /* Real */ ae_vector* bndl,
     /* Real */ ae_vector* bndu,
     ae_state *_state);
void minbleicsetlc(minbleicstate* state,
     /* Real */ ae_matrix* c,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state);
void minbleicsetinnercond(minbleicstate* state,
     double epsg,
     double epsf,
     double epsx,
     ae_state *_state);
void minbleicsetoutercond(minbleicstate* state,
     double epsx,
     double epsi,
     ae_state *_state);
void minbleicsetbarrierwidth(minbleicstate* state,
     double mu,
     ae_state *_state);
void minbleicsetbarrierdecay(minbleicstate* state,
     double mudecay,
     ae_state *_state);
void minbleicsetscale(minbleicstate* state,
     /* Real */ ae_vector* s,
     ae_state *_state);
void minbleicsetprecdefault(minbleicstate* state, ae_state *_state);
void minbleicsetprecdiag(minbleicstate* state,
     /* Real */ ae_vector* d,
     ae_state *_state);
void minbleicsetprecscale(minbleicstate* state, ae_state *_state);
void minbleicsetmaxits(minbleicstate* state,
     ae_int_t maxits,
     ae_state *_state);
void minbleicsetxrep(minbleicstate* state,
     ae_bool needxrep,
     ae_state *_state);
void minbleicsetstpmax(minbleicstate* state,
     double stpmax,
     ae_state *_state);
ae_bool minbleiciteration(minbleicstate* state, ae_state *_state);
void minbleicresults(minbleicstate* state,
     /* Real */ ae_vector* x,
     minbleicreport* rep,
     ae_state *_state);
void minbleicresultsbuf(minbleicstate* state,
     /* Real */ ae_vector* x,
     minbleicreport* rep,
     ae_state *_state);
void minbleicrestartfrom(minbleicstate* state,
     /* Real */ ae_vector* x,
     ae_state *_state);
ae_bool _minbleicstate_init(minbleicstate* p, ae_state *_state, ae_bool make_automatic);
ae_bool _minbleicstate_init_copy(minbleicstate* dst, minbleicstate* src, ae_state *_state, ae_bool make_automatic);
void _minbleicstate_clear(minbleicstate* p);
ae_bool _minbleicreport_init(minbleicreport* p, ae_state *_state, ae_bool make_automatic);
ae_bool _minbleicreport_init_copy(minbleicreport* dst, minbleicreport* src, ae_state *_state, ae_bool make_automatic);
void _minbleicreport_clear(minbleicreport* p);
} }
#endif #endif
 End of changes. 393 change blocks. 
1710 lines changed or deleted 1921 lines changed or added

This html diff was produced by rfcdiff 1.41. The latest version is available from http://tools.ietf.org/tools/rfcdiff/