alglibinternal.h

skipping to change at line 43
    ae_vector ia1;
    ae_vector ia2;
    ae_vector ia3;
    ae_vector ra0;
    ae_vector ra1;
    ae_vector ra2;
    ae_vector ra3;
} apbuffers;
typedef struct
{
+    ae_bool val;
+} sboolean;
+typedef struct
+{
+    ae_vector val;
+} sbooleanarray;
+typedef struct
+{
+    ae_int_t val;
+} sinteger;
+typedef struct
+{
+    ae_vector val;
+} sintegerarray;
+typedef struct
+{
+    double val;
+} sreal;
+typedef struct
+{
+    ae_vector val;
+} srealarray;
+typedef struct
+{
+    ae_complex val;
+} scomplex;
+typedef struct
+{
+    ae_vector val;
+} scomplexarray;
+typedef struct
+{
    ae_bool brackt;
    ae_bool stage1;
    ae_int_t infoc;
    double dg;
    double dgm;
    double dginit;
    double dgtest;
    double dgx;
    double dgxm;
    double dgy;
skipping to change at line 198 (old) / line 230 (new)
    ae_state *_state);
double safepythag2(double x, double y, ae_state *_state);
double safepythag3(double x, double y, double z, ae_state *_state);
ae_int_t saferdiv(double x, double y, double* r, ae_state *_state);
double safeminposrv(double x, double y, double v, ae_state *_state);
void apperiodicmap(double* x,
    double a,
    double b,
    double* k,
    ae_state *_state);
+double randomnormal(ae_state *_state);
double boundval(double x, double b1, double b2, ae_state *_state);
void alloccomplex(ae_serializer* s, ae_complex v, ae_state *_state);
void serializecomplex(ae_serializer* s, ae_complex v, ae_state *_state);
ae_complex unserializecomplex(ae_serializer* s, ae_state *_state);
void allocrealarray(ae_serializer* s,
    /* Real */ ae_vector* v,
    ae_int_t n,
    ae_state *_state);
void serializerealarray(ae_serializer* s,
    /* Real */ ae_vector* v,
skipping to change at line 253 (old) / line 286 (new)
void copyrealmatrix(/* Real */ ae_matrix* src,
    /* Real */ ae_matrix* dst,
    ae_state *_state);
ae_int_t recsearch(/* Integer */ ae_vector* a,
    ae_int_t nrec,
    ae_int_t nheader,
    ae_int_t i0,
    ae_int_t i1,
    /* Integer */ ae_vector* b,
    ae_state *_state);
-ae_bool _apbuffers_init(apbuffers* p, ae_state *_state, ae_bool make_automatic);
-ae_bool _apbuffers_init_copy(apbuffers* dst, apbuffers* src, ae_state *_state, ae_bool make_automatic);
-void _apbuffers_clear(apbuffers* p);
+ae_bool _apbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _apbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _apbuffers_clear(void* _p);
+void _apbuffers_destroy(void* _p);
+ae_bool _sboolean_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _sboolean_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _sboolean_clear(void* _p);
+void _sboolean_destroy(void* _p);
+ae_bool _sbooleanarray_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _sbooleanarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _sbooleanarray_clear(void* _p);
+void _sbooleanarray_destroy(void* _p);
+ae_bool _sinteger_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _sinteger_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _sinteger_clear(void* _p);
+void _sinteger_destroy(void* _p);
+ae_bool _sintegerarray_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _sintegerarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _sintegerarray_clear(void* _p);
+void _sintegerarray_destroy(void* _p);
+ae_bool _sreal_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _sreal_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _sreal_clear(void* _p);
+void _sreal_destroy(void* _p);
+ae_bool _srealarray_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _srealarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _srealarray_clear(void* _p);
+void _srealarray_destroy(void* _p);
+ae_bool _scomplex_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _scomplex_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _scomplex_clear(void* _p);
+void _scomplex_destroy(void* _p);
+ae_bool _scomplexarray_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _scomplexarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _scomplexarray_clear(void* _p);
+void _scomplexarray_destroy(void* _p);
void tagsort(/* Real */ ae_vector* a,
    ae_int_t n,
    /* Integer */ ae_vector* p1,
    /* Integer */ ae_vector* p2,
    ae_state *_state);
void tagsortbuf(/* Real */ ae_vector* a,
    ae_int_t n,
    /* Integer */ ae_vector* p1,
    /* Integer */ ae_vector* p2,
    apbuffers* buf,
skipping to change at line 466 (old) / line 532 (new)
    ae_int_t optypea,
    /* Complex */ ae_matrix* b,
    ae_int_t ib,
    ae_int_t jb,
    ae_int_t optypeb,
    ae_complex beta,
    /* Complex */ ae_matrix* c,
    ae_int_t ic,
    ae_int_t jc,
    ae_state *_state);
-void hermitianmatrixvectormultiply(/* Complex */ ae_matrix* a,
-    ae_bool isupper,
-    ae_int_t i1,
-    ae_int_t i2,
-    /* Complex */ ae_vector* x,
-    ae_complex alpha,
-    /* Complex */ ae_vector* y,
-    ae_state *_state);
-void hermitianrank2update(/* Complex */ ae_matrix* a,
-    ae_bool isupper,
-    ae_int_t i1,
-    ae_int_t i2,
-    /* Complex */ ae_vector* x,
-    /* Complex */ ae_vector* y,
-    /* Complex */ ae_vector* t,
-    ae_complex alpha,
-    ae_state *_state);
-void generatereflection(/* Real */ ae_vector* x,
-    ae_int_t n,
-    double* tau,
-    ae_state *_state);
-void applyreflectionfromtheleft(/* Real */ ae_matrix* c,
-    double tau,
-    /* Real */ ae_vector* v,
-    ae_int_t m1,
-    ae_int_t m2,
-    ae_int_t n1,
-    ae_int_t n2,
-    /* Real */ ae_vector* work,
-    ae_state *_state);
-void applyreflectionfromtheright(/* Real */ ae_matrix* c,
-    double tau,
-    /* Real */ ae_vector* v,
-    ae_int_t m1,
-    ae_int_t m2,
-    ae_int_t n1,
-    ae_int_t n2,
-    /* Real */ ae_vector* work,
-    ae_state *_state);
-void complexgeneratereflection(/* Complex */ ae_vector* x,
-    ae_int_t n,
-    ae_complex* tau,
-    ae_state *_state);
-void complexapplyreflectionfromtheleft(/* Complex */ ae_matrix* c,
-    ae_complex tau,
-    /* Complex */ ae_vector* v,
-    ae_int_t m1,
-    ae_int_t m2,
-    ae_int_t n1,
-    ae_int_t n2,
-    /* Complex */ ae_vector* work,
-    ae_state *_state);
-void complexapplyreflectionfromtheright(/* Complex */ ae_matrix* c,
-    ae_complex tau,
-    /* Complex */ ae_vector* v,
-    ae_int_t m1,
-    ae_int_t m2,
-    ae_int_t n1,
-    ae_int_t n2,
-    /* Complex */ ae_vector* work,
-    ae_state *_state);
-void symmetricmatrixvectormultiply(/* Real */ ae_matrix* a,
-    ae_bool isupper,
-    ae_int_t i1,
-    ae_int_t i2,
-    /* Real */ ae_vector* x,
-    double alpha,
-    /* Real */ ae_vector* y,
-    ae_state *_state);
-void symmetricrank2update(/* Real */ ae_matrix* a,
-    ae_bool isupper,
-    ae_int_t i1,
-    ae_int_t i2,
-    /* Real */ ae_vector* x,
-    /* Real */ ae_vector* y,
-    /* Real */ ae_vector* t,
-    double alpha,
-    ae_state *_state);
double vectornorm2(/* Real */ ae_vector* x,
    ae_int_t i1,
    ae_int_t i2,
    ae_state *_state);
ae_int_t vectoridxabsmax(/* Real */ ae_vector* x,
    ae_int_t i1,
    ae_int_t i2,
    ae_state *_state);
ae_int_t columnidxabsmax(/* Real */ ae_matrix* x,
    ae_int_t i1,
skipping to change at line 635 (old) / line 623 (new)
    ae_bool transb,
    double alpha,
    /* Real */ ae_matrix* c,
    ae_int_t ci1,
    ae_int_t ci2,
    ae_int_t cj1,
    ae_int_t cj2,
    double beta,
    /* Real */ ae_vector* work,
    ae_state *_state);
+void hermitianmatrixvectormultiply(/* Complex */ ae_matrix* a,
+    ae_bool isupper,
+    ae_int_t i1,
+    ae_int_t i2,
+    /* Complex */ ae_vector* x,
+    ae_complex alpha,
+    /* Complex */ ae_vector* y,
+    ae_state *_state);
+void hermitianrank2update(/* Complex */ ae_matrix* a,
+    ae_bool isupper,
+    ae_int_t i1,
+    ae_int_t i2,
+    /* Complex */ ae_vector* x,
+    /* Complex */ ae_vector* y,
+    /* Complex */ ae_vector* t,
+    ae_complex alpha,
+    ae_state *_state);
+void generatereflection(/* Real */ ae_vector* x,
+    ae_int_t n,
+    double* tau,
+    ae_state *_state);
+void applyreflectionfromtheleft(/* Real */ ae_matrix* c,
+    double tau,
+    /* Real */ ae_vector* v,
+    ae_int_t m1,
+    ae_int_t m2,
+    ae_int_t n1,
+    ae_int_t n2,
+    /* Real */ ae_vector* work,
+    ae_state *_state);
+void applyreflectionfromtheright(/* Real */ ae_matrix* c,
+    double tau,
+    /* Real */ ae_vector* v,
+    ae_int_t m1,
+    ae_int_t m2,
+    ae_int_t n1,
+    ae_int_t n2,
+    /* Real */ ae_vector* work,
+    ae_state *_state);
+void complexgeneratereflection(/* Complex */ ae_vector* x,
+    ae_int_t n,
+    ae_complex* tau,
+    ae_state *_state);
+void complexapplyreflectionfromtheleft(/* Complex */ ae_matrix* c,
+    ae_complex tau,
+    /* Complex */ ae_vector* v,
+    ae_int_t m1,
+    ae_int_t m2,
+    ae_int_t n1,
+    ae_int_t n2,
+    /* Complex */ ae_vector* work,
+    ae_state *_state);
+void complexapplyreflectionfromtheright(/* Complex */ ae_matrix* c,
+    ae_complex tau,
+    /* Complex */ ae_vector* v,
+    ae_int_t m1,
+    ae_int_t m2,
+    ae_int_t n1,
+    ae_int_t n2,
+    /* Complex */ ae_vector* work,
+    ae_state *_state);
+void symmetricmatrixvectormultiply(/* Real */ ae_matrix* a,
+    ae_bool isupper,
+    ae_int_t i1,
+    ae_int_t i2,
+    /* Real */ ae_vector* x,
+    double alpha,
+    /* Real */ ae_vector* y,
+    ae_state *_state);
+void symmetricrank2update(/* Real */ ae_matrix* a,
+    ae_bool isupper,
+    ae_int_t i1,
+    ae_int_t i2,
+    /* Real */ ae_vector* x,
+    /* Real */ ae_vector* y,
+    /* Real */ ae_vector* t,
+    double alpha,
+    ae_state *_state);
void applyrotationsfromtheleft(ae_bool isforward,
    ae_int_t m1,
    ae_int_t m2,
    ae_int_t n1,
    ae_int_t n2,
    /* Real */ ae_vector* c,
    /* Real */ ae_vector* s,
    /* Real */ ae_matrix* a,
    /* Real */ ae_vector* work,
    ae_state *_state);
skipping to change at line 757 (old) / line 823 (new)
    double stpmax,
    ae_int_t fmax,
    armijostate* state,
    ae_state *_state);
ae_bool armijoiteration(armijostate* state, ae_state *_state);
void armijoresults(armijostate* state,
    ae_int_t* info,
    double* stp,
    double* f,
    ae_state *_state);
-ae_bool _linminstate_init(linminstate* p, ae_state *_state, ae_bool make_automatic);
-ae_bool _linminstate_init_copy(linminstate* dst, linminstate* src, ae_state *_state, ae_bool make_automatic);
-void _linminstate_clear(linminstate* p);
-ae_bool _armijostate_init(armijostate* p, ae_state *_state, ae_bool make_automatic);
-ae_bool _armijostate_init_copy(armijostate* dst, armijostate* src, ae_state *_state, ae_bool make_automatic);
-void _armijostate_clear(armijostate* p);
+ae_bool _linminstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _linminstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _linminstate_clear(void* _p);
+void _linminstate_destroy(void* _p);
+ae_bool _armijostate_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _armijostate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _armijostate_clear(void* _p);
+void _armijostate_destroy(void* _p);
void ftbasegeneratecomplexfftplan(ae_int_t n,
    ftplan* plan,
    ae_state *_state);
void ftbasegeneraterealfftplan(ae_int_t n, ftplan* plan, ae_state *_state);
void ftbasegeneraterealfhtplan(ae_int_t n, ftplan* plan, ae_state *_state);
void ftbaseexecuteplan(/* Real */ ae_vector* a,
    ae_int_t aoffset,
    ae_int_t n,
    ftplan* plan,
    ae_state *_state);
skipping to change at line 788 (old) / line 856 (new)
    ae_state *_state);
void ftbasefactorize(ae_int_t n,
    ae_int_t tasktype,
    ae_int_t* n1,
    ae_int_t* n2,
    ae_state *_state);
ae_bool ftbaseissmooth(ae_int_t n, ae_state *_state);
ae_int_t ftbasefindsmooth(ae_int_t n, ae_state *_state);
ae_int_t ftbasefindsmootheven(ae_int_t n, ae_state *_state);
double ftbasegetflopestimate(ae_int_t n, ae_state *_state);
-ae_bool _ftplan_init(ftplan* p, ae_state *_state, ae_bool make_automatic);
-ae_bool _ftplan_init_copy(ftplan* dst, ftplan* src, ae_state *_state, ae_bool make_automatic);
-void _ftplan_clear(ftplan* p);
+ae_bool _ftplan_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _ftplan_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _ftplan_clear(void* _p);
+void _ftplan_destroy(void* _p);
double nulog1p(double x, ae_state *_state);
double nuexpm1(double x, ae_state *_state);
double nucosm1(double x, ae_state *_state);
}
#endif
 End of changes. 7 change blocks. 97 lines changed or deleted, 178 lines changed or added.


alglibmisc.h

skipping to change at line 674
    double lambdav,
    ae_state *_state);
double hqrnddiscrete(hqrndstate* state,
    /* Real */ ae_vector* x,
    ae_int_t n,
    ae_state *_state);
double hqrndcontinuous(hqrndstate* state,
    /* Real */ ae_vector* x,
    ae_int_t n,
    ae_state *_state);
-ae_bool _hqrndstate_init(hqrndstate* p, ae_state *_state, ae_bool make_automatic);
-ae_bool _hqrndstate_init_copy(hqrndstate* dst, hqrndstate* src, ae_state *_state, ae_bool make_automatic);
-void _hqrndstate_clear(hqrndstate* p);
+ae_bool _hqrndstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _hqrndstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _hqrndstate_clear(void* _p);
+void _hqrndstate_destroy(void* _p);
void kdtreebuild(/* Real */ ae_matrix* xy,
    ae_int_t n,
    ae_int_t nx,
    ae_int_t ny,
    ae_int_t normtype,
    kdtree* kdt,
    ae_state *_state);
void kdtreebuildtagged(/* Real */ ae_matrix* xy,
    /* Integer */ ae_vector* tags,
    ae_int_t n,
skipping to change at line 735 (old) / line 736 (new)
    ae_state *_state);
void kdtreequeryresultstagsi(kdtree* kdt,
    /* Integer */ ae_vector* tags,
    ae_state *_state);
void kdtreequeryresultsdistancesi(kdtree* kdt,
    /* Real */ ae_vector* r,
    ae_state *_state);
void kdtreealloc(ae_serializer* s, kdtree* tree, ae_state *_state);
void kdtreeserialize(ae_serializer* s, kdtree* tree, ae_state *_state);
void kdtreeunserialize(ae_serializer* s, kdtree* tree, ae_state *_state);
-ae_bool _kdtree_init(kdtree* p, ae_state *_state, ae_bool make_automatic);
-ae_bool _kdtree_init_copy(kdtree* dst, kdtree* src, ae_state *_state, ae_bool make_automatic);
-void _kdtree_clear(kdtree* p);
+ae_bool _kdtree_init(void* _p, ae_state *_state, ae_bool make_automatic);
+ae_bool _kdtree_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
+void _kdtree_clear(void* _p);
+void _kdtree_destroy(void* _p);
}
#endif
 End of changes. 2 change blocks. 9 lines changed or deleted, 11 lines changed or added.


ap.h

skipping to change at line 48
/////////////////////////////////////////////////////////////////////////
//
// THIS SECTION CONTAINS DECLARATIONS FOR BASIC FUNCTIONALITY
// LIKE MEMORY MANAGEMENT FOR VECTORS/MATRICES WHICH IS SHARED
// BETWEEN C++ AND PURE C LIBRARIES
//
/////////////////////////////////////////////////////////////////////////
namespace alglib_impl
{
+#include <stdlib.h>
+#include <string.h>
+#include <setjmp.h>
+#include <math.h>
+#include <stddef.h>
/*
 * definitions
 */
#define AE_UNKNOWN 0
#define AE_MSVC 1
#define AE_GNUC 2
#define AE_SUNC 3
#define AE_INTEL 1
#define AE_SPARC 2
+#define AE_WINDOWS 1
+#define AE_POSIX 2
+#define AE_LOCK_ALIGNMENT 16
+/*
+ * in case no OS is defined, use AE_UNKNOWN
+ */
+#ifndef AE_OS
+#define AE_OS AE_UNKNOWN
+#endif
/*
 * automatically determine compiler
 */
#define AE_COMPILER AE_UNKNOWN
#ifdef __GNUC__
#undef AE_COMPILER
#define AE_COMPILER AE_GNUC
#endif
#if defined(__SUNPRO_C)||defined(__SUNPRO_CC)
skipping to change at line 85 (old) / line 102 (new)
/*
 * if we work under C++ environment, define several conditions
 */
#ifdef AE_USE_CPP
#define AE_USE_CPP_BOOL
#define AE_USE_CPP_ERROR_HANDLING
#define AE_USE_CPP_SERIALIZATION
#endif
/*
+ * Include SMP headers
+ */
+#if AE_OS==AE_WINDOWS
+#include <windows.h>
+#include <process.h>
+#elif AE_OS==AE_POSIX
+#include <time.h>
+#include <unistd.h>
+#include <pthread.h>
+#endif
+/*
 * define ae_int32_t, ae_int64_t, ae_int_t, ae_bool, ae_complex, ae_error_type and ae_datatype
 */
#if defined(AE_HAVE_STDINT)
#include <stdint.h>
#endif
#if defined(AE_INT32_T)
typedef AE_INT32_T ae_int32_t;
#endif
#if defined(AE_HAVE_STDINT) && !defined(AE_INT32_T)
skipping to change at line 326 (old) / line 355 (new)
frame marker
************************************************************************/
typedef struct ae_frame
{
    ae_dyn_block db_marker;
} ae_frame;
/************************************************************************
ALGLIB environment state
************************************************************************/
-typedef struct
+typedef struct ae_state
{
+    /*
+     * endianness type: AE_LITTLE_ENDIAN or AE_BIG_ENDIAN
+     */
    ae_int_t endianness;
+    /*
+     * double value for NAN
+     */
    double v_nan;
+    /*
+     * double value for +INF
+     */
    double v_posinf;
+    /*
+     * double value for -INF
+     */
    double v_neginf;
+    /*
+     * pointer to the top block in a stack of frames
+     * which hold dynamically allocated objects
+     */
    ae_dyn_block * volatile p_top_block;
    ae_dyn_block last_block;
+    /*
+     * jmp_buf for cases when C-style exception handling is used
+     */
#ifndef AE_USE_CPP_ERROR_HANDLING
    jmp_buf * volatile break_jump;
#endif
+    /*
+     * ae_error_type of the last error (filled when exception is thrown)
+     */
    ae_error_type volatile last_error;
+    /*
+     * human-readable message (filled when exception is thrown)
+     */
    const char* volatile error_msg;
+    /*
+     * threading information:
+     * a) current thread pool
+     * b) current worker thread
+     * c) parent task (the one we are solving right now)
+     * d) thread exception handler (function which must be called
+     *    by ae_assert before raising exception).
+     *
+     * NOTE: we use void* to store pointers in order to avoid explicit
+     *       dependency on smp.h
+     */
+    void *worker_thread;
+    void *parent_task;
+    void (*thread_exception_handler)(void*);
} ae_state;
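Taken together with the state/frame functions declared later in this header (ae_state_init, ae_state_clear, ae_state_set_break_jump, ae_frame_make, ae_frame_leave), the intended usage pattern looks roughly like this; a sketch only, built from the declarations shown in this file:

    #include <setjmp.h>
    #include <stdio.h>

    void compute_something(void)
    {
        alglib_impl::ae_state st;
        alglib_impl::ae_frame frame;
    #ifndef AE_USE_CPP_ERROR_HANDLING
        jmp_buf jb;
    #endif
        alglib_impl::ae_state_init(&st);
    #ifndef AE_USE_CPP_ERROR_HANDLING
        if( setjmp(jb) )
        {
            /* ae_break() lands here; last_error/error_msg describe the failure */
            printf("ALGLIB error: %s\n", st.error_msg);
            alglib_impl::ae_state_clear(&st);
            return;
        }
        alglib_impl::ae_state_set_break_jump(&st, &jb);
    #endif
        alglib_impl::ae_frame_make(&st, &frame);
        /* ... create objects with make_automatic=ae_true so the frame owns them ... */
        alglib_impl::ae_frame_leave(&st);
        alglib_impl::ae_state_clear(&st);
    }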
/************************************************************************
Serializer
************************************************************************/
typedef struct
{
    ae_int_t mode;
    ae_int_t entries_needed;
    ae_int_t entries_saved;
skipping to change at line 395 (old) / line 470 (new)
    {
        void *p_ptr;
        void **pp_void;
        ae_bool **pp_bool;
        ae_int_t **pp_int;
        double **pp_double;
        ae_complex **pp_complex;
    } ptr;
} ae_matrix;
+typedef struct ae_smart_ptr
+{
+    /* pointer to subscriber; all changes in ptr are translated to subscriber */
+    void **subscriber;
+    /* pointer to object */
+    void *ptr;
+    /* whether smart pointer owns ptr */
+    ae_bool is_owner;
+    /* destructor function for pointer; clears all dynamically allocated memory */
+    void (*destroy)(void*);
+    /* frame entry; used to ensure automatic deallocation of smart pointer in case of exception/exit */
+    ae_dyn_block frame_entry;
+} ae_smart_ptr;
+/*************************************************************************
+Lock.
+
+This structure provides OS-independent non-reentrant lock:
+* under Windows/Posix systems it uses system-provided locks
+* under Boost it uses OS-independent lock provided by Boost package
+* when no OS is defined, it uses a "fake lock" (just a stub which is not thread-safe):
+  a) "fake lock" can be in locked or free mode
+  b) "fake lock" can be used only from one thread - the one which created it
+  c) when a thread acquires a free lock, it immediately returns
+  d) when a thread acquires a busy lock, the program is terminated
+     (because the lock is already acquired and no one else can free it)
+*************************************************************************/
+typedef struct
+{
+#if AE_OS==AE_WINDOWS
+    volatile ae_int_t * volatile p_lock;
+    char buf[sizeof(ae_int_t)+AE_LOCK_ALIGNMENT];
+#elif AE_OS==AE_POSIX
+    pthread_mutex_t mutex;
+#else
+    ae_bool is_locked;
+#endif
+} ae_lock;
+/*************************************************************************
+Shared pool: data structure used to provide thread-safe access to pool of
+temporary variables.
+*************************************************************************/
+typedef struct ae_shared_pool_entry
+{
+    void * volatile obj;
+    void * volatile next_entry;
+} ae_shared_pool_entry;
+typedef struct ae_shared_pool
+{
+    /* lock object which protects pool */
+    ae_lock pool_lock;
+    /* seed object (used to create new instances of temporaries) */
+    void * volatile seed_object;
+    /*
+     * list of recycled OBJECTS:
+     * 1. entries in this list store pointers to recycled objects
+     * 2. every time we retrieve an object, we retrieve the first entry from
+     *    this list, move it to recycled_entries and return its obj field
+     *    to the caller.
+     */
+    ae_shared_pool_entry * volatile recycled_objects;
+    /*
+     * list of recycled ENTRIES:
+     * 1. this list holds entries which are not used to store recycled objects;
+     *    every time a recycled object is retrieved, its entry is moved to this list.
+     * 2. every time an object is recycled, we try to fetch an entry for it from
+     *    this list before allocating it with malloc()
+     */
+    ae_shared_pool_entry * volatile recycled_entries;
+    /* enumeration pointer, points to current recycled object */
+    ae_shared_pool_entry * volatile enumeration_counter;
+    /* size of object; this field is used when we call malloc() for new objects */
+    ae_int_t size_of_object;
+    /* initializer function; accepts pointer to malloc'ed object, initializes its fields */
+    ae_bool (*init)(void* dst, ae_state* state, ae_bool make_automatic);
+    /* copy constructor; accepts pointer to malloc'ed, but not initialized object */
+    ae_bool (*init_copy)(void* dst, void* src, ae_state* state, ae_bool make_automatic);
+    /* destructor function */
+    void (*destroy)(void* ptr);
+    /* frame entry; contains pointer to the pool object itself */
+    ae_dyn_block frame_entry;
+} ae_shared_pool;
ae_int_t ae_misalignment(const void *ptr, size_t alignment);
void* ae_align(void *ptr, size_t alignment);
void* aligned_malloc(size_t size, size_t alignment);
void aligned_free(void *block);
void* ae_malloc(size_t size, ae_state *state);
void ae_free(void *p);
ae_int_t ae_sizeof(ae_datatype datatype);
+void ae_touch_ptr(void *p);
void ae_state_init(ae_state *state);
void ae_state_clear(ae_state *state);
#ifndef AE_USE_CPP_ERROR_HANDLING
void ae_state_set_break_jump(ae_state *state, jmp_buf *buf);
#endif
void ae_break(ae_state *state, ae_error_type error_type, const char *msg);
void ae_frame_make(ae_state *state, ae_frame *tmp);
void ae_frame_leave(ae_state *state);
skipping to change at line 425 (old) / line 598 (new)
ae_bool ae_db_malloc(ae_dyn_block *block, ae_int_t size, ae_state *state, ae_bool make_automatic);
ae_bool ae_db_realloc(ae_dyn_block *block, ae_int_t size, ae_state *state);
void ae_db_free(ae_dyn_block *block);
void ae_db_swap(ae_dyn_block *block1, ae_dyn_block *block2);
ae_bool ae_vector_init(ae_vector *dst, ae_int_t size, ae_datatype datatype, ae_state *state, ae_bool make_automatic);
ae_bool ae_vector_init_copy(ae_vector *dst, ae_vector *src, ae_state *state, ae_bool make_automatic);
void ae_vector_init_from_x(ae_vector *dst, x_vector *src, ae_state *state, ae_bool make_automatic);
ae_bool ae_vector_set_length(ae_vector *dst, ae_int_t newsize, ae_state *state);
void ae_vector_clear(ae_vector *dst);
+void ae_vector_destroy(ae_vector *dst);
void ae_swap_vectors(ae_vector *vec1, ae_vector *vec2);
ae_bool ae_matrix_init(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_datatype datatype, ae_state *state, ae_bool make_automatic);
ae_bool ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state, ae_bool make_automatic);
void ae_matrix_init_from_x(ae_matrix *dst, x_matrix *src, ae_state *state, ae_bool make_automatic);
ae_bool ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state);
void ae_matrix_clear(ae_matrix *dst);
+void ae_matrix_destroy(ae_matrix *dst);
void ae_swap_matrices(ae_matrix *mat1, ae_matrix *mat2);
+ae_bool ae_smart_ptr_init(ae_smart_ptr *dst, void **subscriber, ae_state *state, ae_bool make_automatic);
+void ae_smart_ptr_clear(void *_dst); /* accepts ae_smart_ptr* */
+void ae_smart_ptr_destroy(void *_dst);
+void ae_smart_ptr_assign(ae_smart_ptr *dst, void *new_ptr, ae_bool is_owner, void (*destroy)(void*));
+void ae_smart_ptr_release(ae_smart_ptr *dst);
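A sketch of the ownership protocol implied by these declarations, pairing ae_smart_ptr with the void*-based _apbuffers_* functions from alglibinternal.h; an illustration only, assuming ae_smart_ptr_clear releases an owned object through its destroy callback, as the is_owner/destroy fields above suggest:

    void smart_ptr_demo(alglib_impl::ae_state *st)
    {
        void *view = NULL;                 /* subscriber mirrors the held pointer */
        alglib_impl::ae_smart_ptr sp;
        alglib_impl::ae_smart_ptr_init(&sp, &view, st, ae_false);

        alglib_impl::apbuffers *obj =
            (alglib_impl::apbuffers*)alglib_impl::ae_malloc(sizeof(*obj), st);
        alglib_impl::_apbuffers_init(obj, st, ae_false);

        /* pointer takes ownership; the destroy callback runs on clear */
        alglib_impl::ae_smart_ptr_assign(&sp, obj, ae_true,
                                         alglib_impl::_apbuffers_destroy);
        /* ... view now equals obj ... */
        alglib_impl::ae_smart_ptr_clear(&sp);   /* destroys and releases obj */
    }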
+void ae_init_lock(ae_lock *lock);
+void ae_acquire_lock(ae_lock *lock);
+void ae_release_lock(ae_lock *lock);
+void ae_free_lock(ae_lock *lock);
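These four calls form the usual init/acquire/release/free cycle. A minimal sketch (hypothetical names) guarding a shared counter:

    static alglib_impl::ae_lock counter_lock;  /* ae_init_lock(&counter_lock) once
                                                  at startup, ae_free_lock() at exit */
    static int counter = 0;

    void increment_counter(void)
    {
        alglib_impl::ae_acquire_lock(&counter_lock);
        counter++;                              /* critical section */
        alglib_impl::ae_release_lock(&counter_lock);
    }

Note the "non-reentrant" caveat in the ae_lock comment above: acquiring the same lock twice from one thread is not supported.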
+ae_bool ae_shared_pool_init(void *_dst, ae_state *state, ae_bool make_automatic);
+ae_bool ae_shared_pool_init_copy(void *_dst, void *_src, ae_state *state, ae_bool make_automatic);
+void ae_shared_pool_clear(void *dst);
+void ae_shared_pool_destroy(void *dst);
+void ae_shared_pool_set_seed(
+    ae_shared_pool *dst,
+    void *seed_object,
+    ae_int_t size_of_object,
+    ae_bool (*init)(void* dst, ae_state* state, ae_bool make_automatic),
+    ae_bool (*init_copy)(void* dst, void* src, ae_state* state, ae_bool make_automatic),
+    void (*destroy)(void* ptr),
+    ae_state *state);
+void ae_shared_pool_retrieve(
+    ae_shared_pool *pool,
+    ae_smart_ptr *pptr,
+    ae_state *state);
+void ae_shared_pool_recycle(
+    ae_shared_pool *pool,
+    ae_smart_ptr *pptr,
+    ae_state *state);
+void ae_shared_pool_clear_recycled(
+    ae_shared_pool *pool,
+    ae_state *state);
+void ae_shared_pool_first_recycled(
+    ae_shared_pool *pool,
+    ae_smart_ptr *pptr,
+    ae_state *state);
+void ae_shared_pool_next_recycled(
+    ae_shared_pool *pool,
+    ae_smart_ptr *pptr,
+    ae_state *state);
+void ae_shared_pool_reset(
+    ae_shared_pool *pool,
+    ae_state *state);
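A sketch of how the seed/retrieve/recycle pieces fit together, again using apbuffers as the pooled type; the init/init_copy/destroy callbacks are exactly the void*-based functions that alglibinternal.h now exports, and the exact release semantics of ae_shared_pool_destroy() are my assumption:

    void pool_demo(alglib_impl::ae_state *st)
    {
        alglib_impl::ae_shared_pool pool;
        alglib_impl::apbuffers seed, *buf = NULL;
        alglib_impl::ae_smart_ptr aptr;

        alglib_impl::ae_shared_pool_init(&pool, st, ae_false);
        alglib_impl::_apbuffers_init(&seed, st, ae_false);
        alglib_impl::ae_shared_pool_set_seed(&pool, &seed,
            (alglib_impl::ae_int_t)sizeof(seed),
            alglib_impl::_apbuffers_init,
            alglib_impl::_apbuffers_init_copy,
            alglib_impl::_apbuffers_destroy,
            st);

        /* worker side: borrow a temporary, use it, give it back */
        alglib_impl::ae_smart_ptr_init(&aptr, (void**)&buf, st, ae_false);
        alglib_impl::ae_shared_pool_retrieve(&pool, &aptr, st);
        /* ... use buf->ra0 etc. as thread-local scratch ... */
        alglib_impl::ae_shared_pool_recycle(&pool, &aptr, st);

        alglib_impl::ae_shared_pool_destroy(&pool); /* presumably also frees
                                                       recycled objects */
    }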
void ae_x_set_vector(x_vector *dst, ae_vector *src, ae_state *state);
void ae_x_set_matrix(x_matrix *dst, ae_matrix *src, ae_state *state);
void ae_x_attach_to_vector(x_vector *dst, ae_vector *src);
void ae_x_attach_to_matrix(x_matrix *dst, ae_matrix *src);
void x_vector_clear(x_vector *dst);
ae_bool x_is_symmetric(x_matrix *a);
ae_bool x_is_hermitian(x_matrix *a);
ae_bool x_force_symmetric(x_matrix *a);
skipping to change at line 625 (old) / line 846 (new)
{
    int stage;
    ae_vector ia;
    ae_vector ba;
    ae_vector ra;
    ae_vector ca;
} rcommstate;
ae_bool _rcommstate_init(rcommstate* p, ae_state *_state, ae_bool make_automatic);
ae_bool _rcommstate_init_copy(rcommstate* dst, rcommstate* src, ae_state *_state, ae_bool make_automatic);
void _rcommstate_clear(rcommstate* p);
+void _rcommstate_destroy(rcommstate* p);
#ifdef AE_USE_ALLOC_COUNTER
extern ae_int64_t _alloc_counter;
#endif
/************************************************************************
debug functions (must be turned on by preprocessor definitions):
* tickcount(), which is a wrapper around GetTickCount()
* flushconsole(), flushes console
* ae_debugrng(), returns random number generated with high-quality random number generator
* ae_set_seed(), sets seed of the debug RNG (NON-THREAD-SAFE!!!)
* ae_get_seed(), returns two seed values of the debug RNG (NON-THREAD-SAFE!!!)
************************************************************************/
#ifdef AE_DEBUG4WINDOWS
#include <windows.h>
#include <stdio.h>
#define tickcount(s) GetTickCount()
#define flushconsole(s) fflush(stdout)
#endif
+#ifdef AE_DEBUG4POSIX
+#define tickcount(s) PosixGetTickCount()
+#define flushconsole(s) fflush(stdout)
+int PosixGetTickCount();
+#endif
#ifdef AE_DEBUGRNG
ae_int_t ae_debugrng();
void ae_set_seed(ae_int_t s0, ae_int_t s1);
void ae_get_seed(ae_int_t *s0, ae_int_t *s1);
#endif
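For orientation, a sketch of how the timing pair is meant to be used, assuming AE_DEBUG4WINDOWS (or the new AE_DEBUG4POSIX) is defined so the macros above expand to working implementations; note that the macro argument is ignored:

    #include <stdio.h>

    void timed_section(void)
    {
        int t0 = (int)tickcount(0);
        /* ... code being profiled ... */
        printf("elapsed: %d ms\n", (int)tickcount(0) - t0);
        flushconsole(0);
    }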
}
/////////////////////////////////////////////////////////////////////////
//
 End of changes. 20 change blocks. 1 line changed or deleted, 247 lines changed or added.


dataanalysis.h

skipping to change at line 47
typedef struct
{
    double relclserror;
    double avgce;
    double rmserror;
    double avgerror;
    double avgrelerror;
} cvreport;
typedef struct
{
+    ae_int_t npoints;
+    ae_int_t nfeatures;
+    ae_int_t disttype;
+    ae_matrix xy;
+    ae_matrix d;
+    ae_int_t ahcalgo;
+    ae_int_t kmeansrestarts;
+    ae_int_t kmeansmaxits;
+} clusterizerstate;
+typedef struct
+{
+    ae_int_t npoints;
+    ae_vector p;
+    ae_matrix z;
+    ae_matrix pz;
+    ae_matrix pm;
+    ae_vector mergedist;
+} ahcreport;
+typedef struct
+{
+    ae_int_t npoints;
+    ae_int_t nfeatures;
+    ae_int_t terminationtype;
+    ae_int_t k;
+    ae_matrix c;
+    ae_vector cidx;
+} kmeansreport;
+typedef struct
+{
    ae_int_t nvars;
    ae_int_t nclasses;
    ae_int_t ntrees;
    ae_int_t bufsize;
    ae_vector trees;
} decisionforest;
typedef struct
{
    double relclserror;
    double avgce;
skipping to change at line 113 (old) / line 142 (new)
    ae_vector hlneurons;
    ae_vector structinfo;
    ae_vector weights;
    ae_vector columnmeans;
    ae_vector columnsigmas;
    ae_vector neurons;
    ae_vector dfdnet;
    ae_vector derror;
    ae_vector x;
    ae_vector y;
+    ae_matrix xy;
+    ae_vector xyrow;
    ae_matrix chunks;
    ae_vector nwbuf;
    ae_vector integerbuf;
} multilayerperceptron;
typedef struct
{
+    double relclserror;
+    double avgce;
+    double rmserror;
+    double avgerror;
+    double avgrelerror;
+} modelerrors;
+typedef struct
+{
    ae_vector w;
} logitmodel;
typedef struct
{
    ae_bool brackt;
    ae_bool stage1;
    ae_int_t infoc;
    double dg;
    double dgm;
    double dginit;
skipping to change at line 193 (old) / line 232 (new)
} mcpdstate;
typedef struct
{
    ae_int_t inneriterationscount;
    ae_int_t outeriterationscount;
    ae_int_t nfev;
    ae_int_t terminationtype;
} mcpdreport;
typedef struct
{
+    ae_int_t ensemblesize;
+    ae_vector weights;
+    ae_vector columnmeans;
+    ae_vector columnsigmas;
+    multilayerperceptron network;
+    ae_vector y;
+} mlpensemble;
+typedef struct
+{
+    double relclserror;
+    double avgce;
+    double rmserror;
+    double avgerror;
+    double avgrelerror;
    ae_int_t ngrad;
    ae_int_t nhess;
    ae_int_t ncholesky;
} mlpreport;
typedef struct
{
    double relclserror;
    double avgce;
    double rmserror;
    double avgerror;
    double avgrelerror;
} mlpcvreport;
typedef struct
{
-    ae_int_t ensemblesize;
-    ae_vector weights;
-    ae_vector columnmeans;
-    ae_vector columnsigmas;
+    ae_int_t nin;
+    ae_int_t nout;
+    ae_bool rcpar;
+    ae_int_t lbfgsfactor;
+    double decay;
+    double wstep;
+    ae_int_t maxits;
+    ae_int_t datatype;
+    ae_int_t npoints;
+    ae_matrix densexy;
+    sparsematrix sparsexy;
+    multilayerperceptron tnetwork;
+    minlbfgsstate tstate;
+    ae_vector wbest;
+    ae_vector wfinal;
+    ae_int_t ngradbatch;
+    ae_vector subset;
+    ae_int_t subsetsize;
+    ae_vector valsubset;
+    ae_int_t valsubsetsize;
+} mlptrainer;
+typedef struct
+{
    multilayerperceptron network;
+    multilayerperceptron tnetwork;
+    minlbfgsstate state;
+    mlpreport rep;
+    ae_vector subset;
+    ae_int_t subsetsize;
+    ae_vector xyrow;
    ae_vector y;
-} mlpensemble;
+    ae_int_t ngrad;
+    ae_vector bufwbest;
+    ae_vector bufwfinal;
+} mlpparallelizationcv;
}
/////////////////////////////////////////////////////////////////////////
//
// THIS SECTION CONTAINS C++ INTERFACE
//
/////////////////////////////////////////////////////////////////////////
namespace alglib
{
/*************************************************************************
+This structure is a clusterization engine.
+
+You should not try to access its fields directly.
+Use ALGLIB functions in order to work with this object.
+
+  -- ALGLIB --
+     Copyright 10.07.2012 by Bochkanov Sergey
+*************************************************************************/
+class _clusterizerstate_owner
+{
+public:
+    _clusterizerstate_owner();
+    _clusterizerstate_owner(const _clusterizerstate_owner &rhs);
+    _clusterizerstate_owner& operator=(const _clusterizerstate_owner &rhs);
+    virtual ~_clusterizerstate_owner();
+    alglib_impl::clusterizerstate* c_ptr();
+    alglib_impl::clusterizerstate* c_ptr() const;
+protected:
+    alglib_impl::clusterizerstate *p_struct;
+};
+class clusterizerstate : public _clusterizerstate_owner
+{
+public:
+    clusterizerstate();
+    clusterizerstate(const clusterizerstate &rhs);
+    clusterizerstate& operator=(const clusterizerstate &rhs);
+    virtual ~clusterizerstate();
+};
+/*************************************************************************
+This structure is used to store results of the agglomerative hierarchical
+clustering (AHC).
+
+Following information is returned:
+* NPoints contains number of points in the original dataset
+* Z contains information about merges performed (see below). Z contains
+  indexes from the original (unsorted) dataset and it can be used when you
+  need to know what points were merged. However, it is not convenient when
+  you want to build a dendrogram (see below).
+* if you want to build a dendrogram, you can use Z, but it is not a good
+  option, because Z contains indexes from the unsorted dataset. A dendrogram
+  built from such a dataset is likely to have intersections. So, you have to
+  reorder your points before building a dendrogram.
+  The permutation which reorders the points is returned in P. Another
+  representation of the merges, which is more convenient for dendrogram
+  construction, is returned in PM.
+* more information on the format of Z, P and PM can be found below and in
+  the examples from ALGLIB Reference Manual.
+
+FORMAL DESCRIPTION OF FIELDS:
+    NPoints         number of points
+    Z               array[NPoints-1,2], contains indexes of clusters
+                    linked in pairs to form the clustering tree. I-th row
+                    corresponds to I-th merge:
+                    * Z[I,0] - index of the first cluster to merge
+                    * Z[I,1] - index of the second cluster to merge
+                    * Z[I,0]<Z[I,1]
+                    * clusters are numbered from 0 to 2*NPoints-2, with
+                      indexes from 0 to NPoints-1 corresponding to points
+                      of the original dataset, and indexes from NPoints to
+                      2*NPoints-2 corresponding to clusters generated by
+                      subsequent merges (I-th row of Z creates cluster
+                      with index NPoints+I).
+                    IMPORTANT: indexes in Z[] are indexes in the ORIGINAL,
+                    unsorted dataset. In addition to Z the algorithm outputs
+                    a permutation which rearranges points in such a way that
+                    subsequent merges are performed on adjacent points (such
+                    an order is needed if you want to build a dendrogram).
+                    However, indexes in Z are related to the original,
+                    unrearranged sequence of points.
+    P               array[NPoints], permutation which reorders points for
+                    dendrogram construction. P[i] contains the index of the
+                    position where we should move the I-th point of the
+                    original dataset in order to apply merges PZ/PM.
+    PZ              same as Z, but for the permutation of points given by P.
+                    The only thing which changed are the indexes of the
+                    original points; the indexes of clusters remained the same.
+    MergeDist       array[NPoints-1], contains distances between clusters
+                    being merged (MergeDist[i] corresponds to the merge
+                    stored in Z[i,...]).
+    PM              array[NPoints-1,6], another representation of the merges,
+                    which is suited for dendrogram construction. It deals
+                    with rearranged points (permutation P is applied) and
+                    represents merges in a form which is different from the
+                    one used by Z.
+                    For each I from 0 to NPoints-2, the I-th row of PM
+                    represents a merge performed on two clusters C0 and C1.
+                    Here:
+                    * C0 contains points with indexes PM[I,0]...PM[I,1]
+                    * C1 contains points with indexes PM[I,2]...PM[I,3]
+                    * indexes stored in PM are given for the dataset sorted
+                      according to permutation P
+                    * PM[I,1]=PM[I,2]-1 (only adjacent clusters are merged)
+                    * PM[I,0]<=PM[I,1], PM[I,2]<=PM[I,3], i.e. both
+                      clusters contain at least one point
+                    * heights of the "subdendrograms" corresponding to C0/C1
+                      are stored in PM[I,4] and PM[I,5]. Subdendrograms
+                      corresponding to single-point clusters have height=0.
+                      The dendrogram of the merge result has height
+                      H=max(H0,H1)+1.
+
+NOTE: there is one-to-one correspondence between merges described by Z and
+      PM. The I-th row of Z describes the same merge of clusters as the
+      I-th row of PM, with the "left" cluster from Z corresponding to the
+      "left" one from PM.
+
+  -- ALGLIB --
+     Copyright 10.07.2012 by Bochkanov Sergey
+*************************************************************************/
+class _ahcreport_owner
+{
+public:
+    _ahcreport_owner();
+    _ahcreport_owner(const _ahcreport_owner &rhs);
+    _ahcreport_owner& operator=(const _ahcreport_owner &rhs);
+    virtual ~_ahcreport_owner();
+    alglib_impl::ahcreport* c_ptr();
+    alglib_impl::ahcreport* c_ptr() const;
+protected:
+    alglib_impl::ahcreport *p_struct;
+};
+class ahcreport : public _ahcreport_owner
+{
+public:
+    ahcreport();
+    ahcreport(const ahcreport &rhs);
+    ahcreport& operator=(const ahcreport &rhs);
+    virtual ~ahcreport();
+    ae_int_t &npoints;
+    integer_1d_array p;
+    integer_2d_array z;
+    integer_2d_array pz;
+    integer_2d_array pm;
+    real_1d_array mergedist;
+};
+/*************************************************************************
+This structure is used to store results of the k-means++ clustering
+algorithm.
+
+Following information is always returned:
+* NPoints contains number of points in the original dataset
+* TerminationType contains completion code, negative on failure, positive
+  on success
+* K contains number of clusters
+
+For positive TerminationType we return:
+* NFeatures contains number of variables in the original dataset
+* C, which contains centers found by the algorithm
+* CIdx, which maps points of the original dataset to clusters
+
+FORMAL DESCRIPTION OF FIELDS:
+    NPoints         number of points, >=0
+    NFeatures       number of variables, >=1
+    TerminationType completion code:
+                    * -5 if distance type is anything different from
+                         Euclidean metric
+                    * -3 for degenerate dataset: a) less than K distinct
+                         points, b) K=0 for non-empty dataset.
+                    * +1 for successful completion
+    K               number of clusters
+    C               array[K,NFeatures], rows of the array store centers
+    CIdx            array[NPoints], which contains cluster indexes
+
+  -- ALGLIB --
+     Copyright 27.11.2012 by Bochkanov Sergey
+*************************************************************************/
+class _kmeansreport_owner
+{
+public:
+    _kmeansreport_owner();
+    _kmeansreport_owner(const _kmeansreport_owner &rhs);
+    _kmeansreport_owner& operator=(const _kmeansreport_owner &rhs);
+    virtual ~_kmeansreport_owner();
+    alglib_impl::kmeansreport* c_ptr();
+    alglib_impl::kmeansreport* c_ptr() const;
+protected:
+    alglib_impl::kmeansreport *p_struct;
+};
+class kmeansreport : public _kmeansreport_owner
+{
+public:
+    kmeansreport();
+    kmeansreport(const kmeansreport &rhs);
+    kmeansreport& operator=(const kmeansreport &rhs);
+    virtual ~kmeansreport();
+    ae_int_t &npoints;
+    ae_int_t &nfeatures;
+    ae_int_t &terminationtype;
+    ae_int_t &k;
+    real_2d_array c;
+    integer_1d_array cidx;
+};
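A matching sketch for consuming a kmeansreport, with rep assumed to have been filled by one of the clustering functions; per the documented convention, C and CIdx are meaningful only for positive TerminationType:

    #include <cstdio>

    void print_kmeans_result(const alglib::kmeansreport &rep)
    {
        if( rep.terminationtype <= 0 )
            return;                     /* failure: only the codes above are set */
        for(alglib::ae_int_t j = 0; j < rep.k; j++)
            std::printf("center %d starts with %.4f\n", (int)j, rep.c[j][0]);
        /* rep.cidx[i] gives the cluster index assigned to point i */
    }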
+/*************************************************************************
*************************************************************************/
class _decisionforest_owner
{
public:
    _decisionforest_owner();
    _decisionforest_owner(const _decisionforest_owner &rhs);
    _decisionforest_owner& operator=(const _decisionforest_owner &rhs);
    virtual ~_decisionforest_owner();
    alglib_impl::decisionforest* c_ptr();
skipping to change at line 384 (old) / line 670 (new)
{
public:
    multilayerperceptron();
    multilayerperceptron(const multilayerperceptron &rhs);
    multilayerperceptron& operator=(const multilayerperceptron &rhs);
    virtual ~multilayerperceptron();
};
/*************************************************************************
+Model's errors:
+    * RelCLSError   -   fraction of misclassified cases.
+    * AvgCE         -   average cross-entropy
+    * RMSError      -   root-mean-square error
+    * AvgError      -   average error
+    * AvgRelError   -   average relative error
+
+NOTE 1: RelCLSError/AvgCE are zero on regression problems.
+
+NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain
+        errors in prediction of posterior probabilities
+*************************************************************************/
+class _modelerrors_owner
+{
+public:
+    _modelerrors_owner();
+    _modelerrors_owner(const _modelerrors_owner &rhs);
+    _modelerrors_owner& operator=(const _modelerrors_owner &rhs);
+    virtual ~_modelerrors_owner();
+    alglib_impl::modelerrors* c_ptr();
+    alglib_impl::modelerrors* c_ptr() const;
+protected:
+    alglib_impl::modelerrors *p_struct;
+};
+class modelerrors : public _modelerrors_owner
+{
+public:
+    modelerrors();
+    modelerrors(const modelerrors &rhs);
+    modelerrors& operator=(const modelerrors &rhs);
+    virtual ~modelerrors();
+    double &relclserror;
+    double &avgce;
+    double &rmserror;
+    double &avgerror;
+    double &avgrelerror;
+};
+/*************************************************************************
*************************************************************************/
class _logitmodel_owner
{
public:
    _logitmodel_owner();
    _logitmodel_owner(const _logitmodel_owner &rhs);
    _logitmodel_owner& operator=(const _logitmodel_owner &rhs);
    virtual ~_logitmodel_owner();
    alglib_impl::logitmodel* c_ptr();
skipping to change at line 509 (old) / line 835 (new)
    mcpdreport& operator=(const mcpdreport &rhs);
    virtual ~mcpdreport();
    ae_int_t &inneriterationscount;
    ae_int_t &outeriterationscount;
    ae_int_t &nfev;
    ae_int_t &terminationtype;
};
/*************************************************************************
+Neural networks ensemble
+*************************************************************************/
+class _mlpensemble_owner
+{
+public:
+    _mlpensemble_owner();
+    _mlpensemble_owner(const _mlpensemble_owner &rhs);
+    _mlpensemble_owner& operator=(const _mlpensemble_owner &rhs);
+    virtual ~_mlpensemble_owner();
+    alglib_impl::mlpensemble* c_ptr();
+    alglib_impl::mlpensemble* c_ptr() const;
+protected:
+    alglib_impl::mlpensemble *p_struct;
+};
+class mlpensemble : public _mlpensemble_owner
+{
+public:
+    mlpensemble();
+    mlpensemble(const mlpensemble &rhs);
+    mlpensemble& operator=(const mlpensemble &rhs);
+    virtual ~mlpensemble();
+};
+/*************************************************************************
Training report:
-    * NGrad     - number of gradient calculations
-    * NHess     - number of Hessian calculations
-    * NCholesky - number of Cholesky decompositions
+    * RelCLSError   -   fraction of misclassified cases.
+    * AvgCE         -   average cross-entropy
+    * RMSError      -   root-mean-square error
+    * AvgError      -   average error
+    * AvgRelError   -   average relative error
+    * NGrad         -   number of gradient calculations
+    * NHess         -   number of Hessian calculations
+    * NCholesky     -   number of Cholesky decompositions
+
+NOTE 1: RelCLSError/AvgCE are zero on regression problems.
+
+NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain
+        errors in prediction of posterior probabilities
*************************************************************************/
class _mlpreport_owner
{
public:
    _mlpreport_owner();
    _mlpreport_owner(const _mlpreport_owner &rhs);
    _mlpreport_owner& operator=(const _mlpreport_owner &rhs);
    virtual ~_mlpreport_owner();
    alglib_impl::mlpreport* c_ptr();
    alglib_impl::mlpreport* c_ptr() const;
protected:
    alglib_impl::mlpreport *p_struct;
};
class mlpreport : public _mlpreport_owner
{
public:
    mlpreport();
    mlpreport(const mlpreport &rhs);
    mlpreport& operator=(const mlpreport &rhs);
    virtual ~mlpreport();
+    double &relclserror;
+    double &avgce;
+    double &rmserror;
+    double &avgerror;
+    double &avgrelerror;
    ae_int_t &ngrad;
    ae_int_t &nhess;
    ae_int_t &ncholesky;
};
/*************************************************************************
Cross-validation estimates of generalization error
*************************************************************************/
class _mlpcvreport_owner
skipping to change at line 570 (old) / line 936 (new)
    virtual ~mlpcvreport();
    double &relclserror;
    double &avgce;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
};
/*************************************************************************
-Neural networks ensemble
+Trainer object for neural network.
+
+You should not try to access fields of this object directly - use ALGLIB
+functions to work with this object.
*************************************************************************/
-class _mlpensemble_owner
+class _mlptrainer_owner
{
public:
-    _mlpensemble_owner();
-    _mlpensemble_owner(const _mlpensemble_owner &rhs);
-    _mlpensemble_owner& operator=(const _mlpensemble_owner &rhs);
-    virtual ~_mlpensemble_owner();
-    alglib_impl::mlpensemble* c_ptr();
-    alglib_impl::mlpensemble* c_ptr() const;
+    _mlptrainer_owner();
+    _mlptrainer_owner(const _mlptrainer_owner &rhs);
+    _mlptrainer_owner& operator=(const _mlptrainer_owner &rhs);
+    virtual ~_mlptrainer_owner();
+    alglib_impl::mlptrainer* c_ptr();
+    alglib_impl::mlptrainer* c_ptr() const;
protected:
-    alglib_impl::mlpensemble *p_struct;
+    alglib_impl::mlptrainer *p_struct;
};
-class mlpensemble : public _mlpensemble_owner
+class mlptrainer : public _mlptrainer_owner
{
public:
-    mlpensemble();
-    mlpensemble(const mlpensemble &rhs);
-    mlpensemble& operator=(const mlpensemble &rhs);
-    virtual ~mlpensemble();
+    mlptrainer();
+    mlptrainer(const mlptrainer &rhs);
+    mlptrainer& operator=(const mlptrainer &rhs);
+    virtual ~mlptrainer();
};
/*************************************************************************
Optimal binary classification

Algorithm finds optimal (=with minimal cross-entropy) binary partition.
Internal subroutine.

INPUT PARAMETERS:
skipping to change at line 650 (old) / line 1019 (new)
Note: Note:
content of all arrays is changed by subroutine; content of all arrays is changed by subroutine;
it doesn't allocate temporaries. it doesn't allocate temporaries.
-- ALGLIB -- -- ALGLIB --
Copyright 11.12.2008 by Bochkanov Sergey Copyright 11.12.2008 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_ array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_a rray &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int _t &info, double &threshold, double &rms, double &cvrms); void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_ array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_a rray &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int _t &info, double &threshold, double &rms, double &cvrms);
/*************************************************************************
This function initializes clusterizer object. Newly initialized object is
empty, i.e. it does not contain dataset. You should use it as follows:
1. creation
2. dataset is added with ClusterizerSetPoints()
3. additional parameters are set
4. clusterization is performed with one of the clustering functions
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizercreate(clusterizerstate &s);
/*************************************************************************
This function adds dataset to the clusterizer structure.
This function overrides all previous calls of ClusterizerSetPoints() or
ClusterizerSetDistances().
INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate()
XY - array[NPoints,NFeatures], dataset
NPoints - number of points, >=0
NFeatures- number of features, >=1
DistType- distance function:
* 0 Chebyshev distance (L-inf norm)
* 1 city block distance (L1 norm)
* 2 Euclidean distance (L2 norm)
* 10 Pearson correlation:
dist(a,b) = 1-corr(a,b)
* 11 Absolute Pearson correlation:
dist(a,b) = 1-|corr(a,b)|
* 12 Uncentered Pearson correlation (cosine of the angle):
dist(a,b) = a'*b/(|a|*|b|)
* 13 Absolute uncentered Pearson correlation
dist(a,b) = |a'*b|/(|a|*|b|)
* 20 Spearman rank correlation:
dist(a,b) = 1-rankcorr(a,b)
* 21 Absolute Spearman rank correlation
dist(a,b) = 1-|rankcorr(a,b)|
NOTE 1: different distance functions have different performance penalty:
* Euclidean or Pearson correlation distances are the fastest ones
* Spearman correlation distance function is a bit slower
* city block and Chebyshev distances are order of magnitude slower
The reason behind the difference in performance is that correlation-based
distance functions are computed using optimized linear algebra kernels,
while Chebyshev and city block distance functions are computed using
simple nested loops with two branches at each iteration.
NOTE 2: different clustering algorithms have different limitations:
* agglomerative hierarchical clustering algorithms may be used with
any kind of distance metric
* k-means++ clustering algorithm may be used only with Euclidean
distance function
Thus, list of specific clustering algorithms you may use depends
on distance function you specify when you set your dataset.
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype);
void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t disttype);
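/*************************************************************************
A minimal, hedged usage sketch for the two functions above (it assumes
only the declarations shown in this header; error handling is omitted):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        // 5 points with 2 features each, in ALGLIB's string form
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
        clusterizerstate s;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);   // DistType=2: Euclidean (L2)
        return 0;
    }
*************************************************************************/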
/*************************************************************************
This function adds dataset given by distance matrix to the clusterizer
structure. It is important that dataset is not given explicitly - only
distance matrix is given.
This function overrides all previous calls of ClusterizerSetPoints() or
ClusterizerSetDistances().
INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate()
D - array[NPoints,NPoints], distance matrix given by its upper
or lower triangle (main diagonal is ignored because its
entries are expected to be zero).
NPoints - number of points
IsUpper - whether upper or lower triangle of D is given.
NOTE 1: different clustering algorithms have different limitations:
* agglomerative hierarchical clustering algorithms may be used with
any kind of distance metric, including one which is given by
distance matrix
* k-means++ clustering algorithm may be used only with Euclidean
distance function and explicitly given points - it can not be
used with dataset given by distance matrix
Thus, if you call this function, you will be unable to use k-means
clustering algorithm to process your problem.
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const ae_int_t npoints, const bool isupper);
void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const bool isupper);
/*************************************************************************
This function sets agglomerative hierarchical clustering algorithm
INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate()
Algo - algorithm type:
* 0 complete linkage (default algorithm)
* 1 single linkage
* 2 unweighted average linkage
* 3 weighted average linkage
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizersetahcalgo(const clusterizerstate &s, const ae_int_t algo);
/*************************************************************************
This function sets k-means++ properties: number of restarts and maximum
number of iterations per one run.
INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate()
Restarts- restarts count, >=1.
k-means++ algorithm performs several restarts and chooses
best set of centers (one with minimum squared distance).
MaxIts - maximum number of k-means iterations performed during one
run. >=0, zero value means that algorithm performs unlimited
number of iterations.
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizersetkmeanslimits(const clusterizerstate &s, const ae_int_t restarts, const ae_int_t maxits);
/*************************************************************************
This function performs agglomerative hierarchical clustering
INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate()
OUTPUT PARAMETERS:
Rep - clustering results; see description of AHCReport
structure for more information.
NOTE 1: hierarchical clustering algorithms require large amounts of memory.
In particular, this implementation needs sizeof(double)*NPoints^2
bytes, which are used to store distance matrix. In case we work
with user-supplied matrix, this amount is multiplied by 2 (we have
to store original matrix and to work with its copy).
For example, problem with 10000 points would require 800M of RAM,
even when working in a 1-dimensional space.
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizerrunahc(const clusterizerstate &s, ahcreport &rep);
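/*************************************************************************
A minimal sketch of a complete AHC run (it assumes only declarations from
this header; Rep.Z/PZ/PM are described with the AHCReport structure):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
        clusterizerstate s;
        ahcreport rep;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);   // Euclidean distance
        clusterizersetahcalgo(s, 1);      // single linkage
        clusterizerrunahc(s, rep);        // rep now holds the dendrogram
        return 0;
    }
*************************************************************************/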
/*************************************************************************
This function performs clustering by k-means++ algorithm.
You may change algorithm properties like number of restarts or iterations
limit by calling the ClusterizerSetKMeansLimits() function.
INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate()
K - number of clusters, K>=0.
K can be zero only when algorithm is called for empty
dataset, in this case completion code is set to
success (+1).
If K=0 and dataset size is non-zero, we can not
meaningfully assign points to some center (there are no
centers because K=0) and return -3 as completion code
(failure).
OUTPUT PARAMETERS:
Rep - clustering results; see description of KMeansReport
structure for more information.
NOTE 1: k-means clustering can be performed only for datasets with
Euclidean distance function. Algorithm will return negative
completion code in Rep.TerminationType in case dataset was added
to clusterizer with DistType other than Euclidean (or dataset was
specified by distance matrix instead of explicitly given points).
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep);
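/*************************************************************************
A minimal k-means++ sketch (it assumes only declarations from this header;
the rep.cidx/rep.c field names are taken from the KMeansReport structure
and should be checked against its definition):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
        clusterizerstate s;
        kmeansreport rep;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);        // k-means needs Euclidean
        clusterizersetkmeanslimits(s, 5, 0);   // 5 restarts, unlimited its
        clusterizerrunkmeans(s, 2, rep);       // ask for K=2 clusters
        // rep.cidx assigns points to clusters, rep.c stores the centers
        return 0;
    }
*************************************************************************/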
/*************************************************************************
This function returns distance matrix for dataset
INPUT PARAMETERS:
XY - array[NPoints,NFeatures], dataset
NPoints - number of points, >=0
NFeatures- number of features, >=1
DistType- distance function:
* 0 Chebyshev distance (L-inf norm)
* 1 city block distance (L1 norm)
* 2 Euclidean distance (L2 norm)
* 10 Pearson correlation:
dist(a,b) = 1-corr(a,b)
* 11 Absolute Pearson correlation:
dist(a,b) = 1-|corr(a,b)|
* 12 Uncentered Pearson correlation (cosine of the angle):
dist(a,b) = a'*b/(|a|*|b|)
* 13 Absolute uncentered Pearson correlation
dist(a,b) = |a'*b|/(|a|*|b|)
* 20 Spearman rank correlation:
dist(a,b) = 1-rankcorr(a,b)
* 21 Absolute Spearman rank correlation
dist(a,b) = 1-|rankcorr(a,b)|
OUTPUT PARAMETERS:
D - array[NPoints,NPoints], distance matrix
(full matrix is returned, with lower and upper triangles)
NOTES: different distance functions have different performance penalty:
* Euclidean or Pearson correlation distances are the fastest ones
* Spearman correlation distance function is a bit slower
* city block and Chebyshev distances are order of magnitude slower
The reason behind the difference in performance is that correlation-based
distance functions are computed using optimized linear algebra kernels,
while Chebyshev and city block distance functions are computed using
simple nested loops with two branches at each iteration.
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d);
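/*************************************************************************
A minimal sketch (it assumes only the declaration above): compute the
full 3x3 Euclidean distance matrix for three 2-feature points:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1,1],[1,2],[4,1]]";
        real_2d_array d;
        clusterizergetdistances(xy, 3, 2, 2, d);  // DistType=2: Euclidean
        return 0;
    }
*************************************************************************/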
/*************************************************************************
This function takes as input clusterization report Rep, desired clusters
count K, and builds top K clusters from hierarchical clusterization tree.
It returns assignment of points to clusters (array of cluster indexes).
INPUT PARAMETERS:
Rep - report from ClusterizerRunAHC() performed on XY
K - desired number of clusters, 1<=K<=NPoints.
K can be zero only when NPoints=0.
OUTPUT PARAMETERS:
CIdx - array[NPoints], I-th element contains cluster index (from
0 to K-1) for I-th point of the dataset.
CZ - array[K]. This array allows to convert cluster indexes
returned by this function to indexes used by Rep.Z. J-th
cluster returned by this function corresponds to CZ[J]-th
cluster stored in Rep.Z/PZ/PM.
It is guaranteed that CZ[I]<CZ[I+1].
NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
Although they were obtained by manipulation with top K nodes of
dendrogram (i.e. hierarchical decomposition of dataset), this
function does not return information about hierarchy. Each of the
clusters stands on its own.
NOTE: Cluster indexes returned by this function do not correspond to
indexes returned in Rep.Z/PZ/PM. Either you work with hierarchical
representation of the dataset (dendrogram), or you work with "flat"
representation returned by this function. Each of the representations
has its own cluster indexing system (the former uses [0..2*NPoints-2],
while the latter uses [0..K-1]), although it is possible to perform
conversion from one system to another by means of the CZ array returned
by this function, which allows you to convert indexes stored in CIdx
to the numeration system used by Rep.Z.
NOTE: this subroutine is optimized for moderate values of K. Say, for K=5
it will perform many times faster than for K=100. Its worst-case
performance is O(N*K), although in the average case it performs better
(up to O(N*log(K))).
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizergetkclusters(const ahcreport &rep, const ae_int_t k, integer_1d_array &cidx, integer_1d_array &cz);
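/*************************************************************************
A minimal sketch (it assumes only declarations from this header): run
AHC, then cut the dendrogram into K=2 flat clusters:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
        clusterizerstate s;
        ahcreport rep;
        integer_1d_array cidx, cz;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);
        clusterizerrunahc(s, rep);
        clusterizergetkclusters(rep, 2, cidx, cz);
        // cidx[i] in [0,1] is the flat cluster of point i;
        // cz maps these indexes back to Rep.Z numbering
        return 0;
    }
*************************************************************************/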
/*************************************************************************
This function accepts AHC report Rep, desired minimum intercluster
distance and returns top clusters from hierarchical clusterization tree
which are separated by distance R or HIGHER.
It returns assignment of points to clusters (array of cluster indexes).
There is one more function with similar name - ClusterizerSeparatedByCorr,
which returns clusters with intercluster correlation equal to R or LOWER
(note: higher for distance, lower for correlation).
INPUT PARAMETERS:
Rep - report from ClusterizerRunAHC() performed on XY
R - desired minimum intercluster distance, R>=0
OUTPUT PARAMETERS:
K - number of clusters, 1<=K<=NPoints
CIdx - array[NPoints], I-th element contains cluster index (from
0 to K-1) for I-th point of the dataset.
CZ - array[K]. This array allows to convert cluster indexes
returned by this function to indexes used by Rep.Z. J-th
cluster returned by this function corresponds to CZ[J]-th
cluster stored in Rep.Z/PZ/PM.
It is guaranteed that CZ[I]<CZ[I+1].
NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
Although they were obtained by manipulation with top K nodes of
dendrogram (i.e. hierarchical decomposition of dataset), this
function does not return information about hierarchy. Each of the
clusters stands on its own.
NOTE: Cluster indexes returned by this function do not correspond to
indexes returned in Rep.Z/PZ/PM. Either you work with hierarchical
representation of the dataset (dendrogram), or you work with "flat"
representation returned by this function. Each of the representations
has its own cluster indexing system (the former uses [0..2*NPoints-2],
while the latter uses [0..K-1]), although it is possible to perform
conversion from one system to another by means of the CZ array returned
by this function, which allows you to convert indexes stored in CIdx
to the numeration system used by Rep.Z.
NOTE: this subroutine is optimized for moderate values of K. Say, for K=5
it will perform many times faster than for K=100. Its worst-case
performance is O(N*K), although in the average case it performs better
(up to O(N*log(K))).
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizerseparatedbydist(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz);
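/*************************************************************************
A minimal sketch (it assumes only declarations from this header); note
that here the number of clusters K is an output, not an input:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
        clusterizerstate s;
        ahcreport rep;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);
        clusterizerrunahc(s, rep);
        ae_int_t k;
        integer_1d_array cidx, cz;
        clusterizerseparatedbydist(rep, 1.0, k, cidx, cz);
        return 0;
    }
*************************************************************************/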
/*************************************************************************
This function accepts AHC report Rep, desired maximum intercluster
correlation and returns top clusters from hierarchical clusterization tree
which are separated by correlation R or LOWER.
It returns assignment of points to clusters (array of cluster indexes).
There is one more function with similar name - ClusterizerSeparatedByDist,
which returns clusters with intercluster distance equal to R or HIGHER
(note: higher for distance, lower for correlation).
INPUT PARAMETERS:
Rep - report from ClusterizerRunAHC() performed on XY
R - desired maximum intercluster correlation, -1<=R<=+1
OUTPUT PARAMETERS:
K - number of clusters, 1<=K<=NPoints
CIdx - array[NPoints], I-th element contains cluster index (from
0 to K-1) for I-th point of the dataset.
CZ - array[K]. This array allows to convert cluster indexes
returned by this function to indexes used by Rep.Z. J-th
cluster returned by this function corresponds to CZ[J]-th
cluster stored in Rep.Z/PZ/PM.
It is guaranteed that CZ[I]<CZ[I+1].
NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
Although they were obtained by manipulation with top K nodes of
dendrogram (i.e. hierarchical decomposition of dataset), this
function does not return information about hierarchy. Each of the
clusters stands on its own.
NOTE: Cluster indexes returned by this function do not correspond to
indexes returned in Rep.Z/PZ/PM. Either you work with hierarchical
representation of the dataset (dendrogram), or you work with "flat"
representation returned by this function. Each of the representations
has its own cluster indexing system (the former uses [0..2*NPoints-2],
while the latter uses [0..K-1]), although it is possible to perform
conversion from one system to another by means of the CZ array returned
by this function, which allows you to convert indexes stored in CIdx
to the numeration system used by Rep.Z.
NOTE: this subroutine is optimized for moderate values of K. Say, for K=5
it will perform many times faster than for K=100. Its worst-case
performance is O(N*K), although in the average case it performs better
(up to O(N*log(K))).
-- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
void clusterizerseparatedbycorr(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz);
/*************************************************************************
k-means++ clusterization.
Backward compatibility function; we recommend using the CLUSTERING
subpackage as a better replacement.
-- ALGLIB --
Copyright 21.03.2009 by Bochkanov Sergey
*************************************************************************/
void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc);
/*************************************************************************
This function serializes data structure to string.
Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
and Windows-style (CR+LF) newlines
* although serializer uses spaces and CR+LF as separators, you can
replace any separator character by arbitrary combination of spaces,
tabs, Windows or Unix newlines. It allows flexible reformatting of
the string in case you want to include it into text or XML file.
But you should not insert separators into the middle of the "words"
nor should you change the case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
and big endian machines, and so on. You can serialize structure on
32-bit machine and unserialize it on 64-bit one (or vice versa), or
serialize it on SPARC and unserialize on x86. You can also
serialize it in C++ version of ALGLIB and unserialize in C# one,
and vice versa.
*************************************************************************/
void dfserialize(decisionforest &obj, std::string &s_out);
/*************************************************************************
This function unserializes data structure from string.
*************************************************************************/
void dfunserialize(std::string &s_in, decisionforest &obj);
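/*************************************************************************
A minimal round-trip sketch (it assumes only the two declarations above;
df is a forest trained elsewhere, e.g. by dfbuildrandomdecisionforest
below):

    #include "dataanalysis.h"
    #include <string>
    using namespace alglib;

    void roundtrip(decisionforest &df)
    {
        std::string s;
        dfserialize(df, s);     // portable string form (see notes above)
        decisionforest df2;
        dfunserialize(s, df2);  // df2 is an independent copy of df
    }
*************************************************************************/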
/*************************************************************************
This subroutine builds random decision forest.
INPUT PARAMETERS:
XY - training set
NPoints - training set size, NPoints>=1
NVars - number of independent variables, NVars>=1
NClasses - task type:
* NClasses=1 - regression task with one
dependent variable
* NClasses>1 - classification task with
NClasses classes.
NTrees - number of trees in a forest, NTrees>=1.
recommended values: 50-100.
R - percent of a training set used to build
individual trees. 0<R<=1.
recommended values: 0.1 <= R <= 0.66.
OUTPUT PARAMETERS:
Info - return code:
* -2, if there is a point with class number
outside of [0..NClasses-1].
* -1, if incorrect parameters were passed
(NPoints<1, NVars<1, NClasses<1, NTrees<1, R<=0
or R>1).
* 1, if task has been solved
DF - model built
Rep - training report, contains error on a training set
and out-of-bag estimates of generalization error.
-- ALGLIB --
Copyright 19.02.2009 by Bochkanov Sergey
*************************************************************************/
void dfbuildrandomdecisionforest(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const double r, ae_int_t &info, decisionforest &df, dfreport &rep);
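/*************************************************************************
A minimal sketch (it assumes only the declaration above): a 2-class
problem with 2 variables, class label stored in the last column:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        // rows: [var0, var1, class in 0..NClasses-1]
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        ae_int_t info;
        decisionforest df;
        dfreport rep;
        // 100 trees, each grown on R=0.5 of the training set
        dfbuildrandomdecisionforest(xy, 4, 2, 2, 100, 0.5, info, df, rep);
        // info=1 on success; rep holds training/out-of-bag errors
        return 0;
    }
*************************************************************************/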
/*************************************************************************
This subroutine builds random decision forest.
This function gives the ability to tune the number of variables used when
choosing the best split.
INPUT PARAMETERS:
XY - training set
NPoints - training set size, NPoints>=1
NVars - number of independent variables, NVars>=1
NClasses - task type:
* NClasses=1 - regression task with one
dependent variable
* NClasses>1 - classification task with
NClasses classes.
NTrees - number of trees in a forest, NTrees>=1.
recommended values: 50-100.
NRndVars - number of variables used when choosing best split
R - percent of a training set used to build
individual trees. 0<R<=1.
recommended values: 0.1 <= R <= 0.66.
OUTPUT PARAMETERS:
Info - return code:
* -2, if there is a point with class number
outside of [0..NClasses-1].
* -1, if incorrect parameters were passed
(NPoints<1, NVars<1, NClasses<1, NTrees<1, R<=0
or R>1).
* 1, if task has been solved
DF - model built
Rep - training report, contains error on a training set
and out-of-bag estimates of generalization error.
-- ALGLIB --
Copyright 19.02.2009 by Bochkanov Sergey
*************************************************************************/
void dfbuildrandomdecisionforestx1(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const ae_int_t nrndvars, const double r, ae_int_t &info, decisionforest &df, dfreport &rep);
/*************************************************************************
Processing
INPUT PARAMETERS:
DF - decision forest model
X - input vector, array[0..NVars-1].
OUTPUT PARAMETERS:
Y - result. Regression estimate when solving regression task,
vector of posterior probabilities for classification task.
See also DFProcessI.
-- ALGLIB --
Copyright 16.02.2009 by Bochkanov Sergey
*************************************************************************/
void dfprocess(const decisionforest &df, const real_1d_array &x, real_1d_array &y);
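/*************************************************************************
A minimal sketch (it assumes only the declaration above; df comes from
one of the dfbuild* functions, NVars=2, NClasses=2):

    #include "dataanalysis.h"
    using namespace alglib;

    void classify(const decisionforest &df)
    {
        real_1d_array x = "[0,1]";
        real_1d_array y;
        dfprocess(df, x, y);   // y[0], y[1]: posterior probabilities
    }
*************************************************************************/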
/*************************************************************************
'interactive' variant of DFProcess for languages like Python which support
constructs like "Y = DFProcessI(DF,X)" and interactive mode of interpreter.
This function allocates new array on each call, so it is significantly
slower than its 'non-interactive' counterpart, but it is more convenient
when you call it from command line.
-- ALGLIB --
Copyright 28.02.2010 by Bochkanov Sergey
*************************************************************************/
void dfprocessi(const decisionforest &df, const real_1d_array &x, real_1d_array &y);
/*************************************************************************
Relative classification error on the test set
INPUT PARAMETERS:
DF - decision forest model
XY - test set
NPoints - test set size
RESULT:
percent of incorrectly classified cases.
Zero if model solves regression task.
-- ALGLIB --
Copyright 16.02.2009 by Bochkanov Sergey
*************************************************************************/
double dfrelclserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Average cross-entropy (in bits per element) on the test set
skipping to change at line 1929
this algorithm uses BOTH previous points and current one, i.e.
new value of X[i] depends on BOTH previous point and X[i] itself.
-- ALGLIB --
Copyright 25.10.2011 by Bochkanov Sergey
*************************************************************************/
void filterlrma(real_1d_array &x, const ae_int_t n, const ae_int_t k);
void filterlrma(real_1d_array &x, const ae_int_t k);
/*************************************************************************
k-means++ clusterization
INPUT PARAMETERS:
XY - dataset, array [0..NPoints-1,0..NVars-1].
NPoints - dataset size, NPoints>=K
NVars - number of variables, NVars>=1
K - desired number of clusters, K>=1
Restarts - number of restarts, Restarts>=1
OUTPUT PARAMETERS:
Info - return code:
* -3, if task is degenerate (number of distinct points is
less than K)
* -1, if incorrect NPoints/NFeatures/K/Restarts was passed
* 1, if subroutine finished successfully
C - array[0..NVars-1,0..K-1], matrix whose columns store
cluster centers
XYC - array[NPoints], which contains cluster indexes
-- ALGLIB --
Copyright 21.03.2009 by Bochkanov Sergey
*************************************************************************/
void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc);
/*************************************************************************
Multiclass Fisher LDA
Subroutine finds coefficients of linear combination which optimally separates
training set on classes.
INPUT PARAMETERS:
XY - training set, array[0..NPoints-1,0..NVars].
First NVars columns store values of independent
variables, next column stores number of class (from 0
to NClasses-1) which dataset element belongs to. Fractional
skipping to change at line 2451
This function allocates new array on each call, so it is significantly
slower than its 'non-interactive' counterpart, but it is more convenient
when you call it from command line.
-- ALGLIB --
Copyright 21.09.2010 by Bochkanov Sergey
*************************************************************************/
void mlpprocessi(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y);
/*************************************************************************
Error of the neural network on dataset.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
SSize - points count.
RESULT:
sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
double mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize);
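/*************************************************************************
A minimal sketch of the two dataset layouts described above. The network
constructors mlpcreate1/mlpcreatec1 are assumed here (they are declared
elsewhere in this library, not in the excerpt above), and mlprelclserror
is declared later in this header:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        // regression net, NIn=2, NOut=1: rows are [in0, in1, out]
        multilayerperceptron net;
        mlpcreate1(2, 3, 1, net);                    // assumed ctor
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        double e = mlperror(net, xy, 4);             // SUM(sqr(...)/2)

        // classifier net, NIn=2, NClasses=2: rows are [in0, in1, class]
        multilayerperceptron cls;
        mlpcreatec1(2, 3, 2, cls);                   // assumed ctor
        double rel = mlprelclserror(cls, xy, 4);
        return 0;
    }
*************************************************************************/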
/************************************************************************* /*************************************************************************
Error of the neural network on dataset given by sparse matrix.
INPUT PARAMETERS:
Network - neural network
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
NPoints - points count, >=0
RESULT:
sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
double mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
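/*************************************************************************
A minimal sketch of building a CRS dataset for the sparse variants. The
sparsecreate/sparseset/sparseconverttocrs calls and mlpcreate1 are
assumed from other parts of this library (not shown in this excerpt):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 3, 1, net);          // assumed ctor (regression)
        sparsematrix xy;                   // 4 samples, NIn+NOut=3 cols
        sparsecreate(4, 3, xy);
        sparseset(xy, 1, 1, 1.0);  sparseset(xy, 1, 2, 1.0);
        sparseset(xy, 2, 0, 1.0);  sparseset(xy, 2, 2, 1.0);
        sparseset(xy, 3, 0, 1.0);  sparseset(xy, 3, 1, 1.0);
        sparseconverttocrs(xy);            // doc above requires CRS
        double e = mlperrorsparse(net, xy, 4);
        return 0;
    }
*************************************************************************/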
/*************************************************************************
Natural error function for neural network, internal subroutine.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize);
/*************************************************************************
Classification error
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize);
/*************************************************************************
Relative classification error on the test set.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
NPoints - points count.
RESULT:
Percent of incorrectly classified cases. Works both for classifier
networks and general purpose networks used as classifiers.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 25.12.2008 by Bochkanov Sergey
*************************************************************************/
double mlprelclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Relative classification error on the test set given by sparse matrix.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
NPoints - points count, >=0.
RESULT:
Percent of incorrectly classified cases. Works both for classifier
networks and general purpose networks used as classifiers.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/
double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/*************************************************************************
Average cross-entropy (in bits per element) on the test set.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
NPoints - points count.
RESULT:
CrossEntropy/(NPoints*LN(2)).
Zero if network solves regression task.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 08.01.2009 by Bochkanov Sergey
*************************************************************************/
double mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Average cross-entropy (in bits per element) on the test set given by
sparse matrix.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
NPoints - points count, >=0.
RESULT:
CrossEntropy/(NPoints*LN(2)).
Zero if network solves regression task.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 9.08.2012 by Bochkanov Sergey
*************************************************************************/
double mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/*************************************************************************
RMS error on the test set.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
NPoints - points count.
RESULT:
Root mean square error. Its meaning for regression task is obvious. As for
classification task, RMS error means error when estimating posterior
probabilities.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
double mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
RMS error on the test set given by sparse matrix.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
NPoints - points count, >=0.
RESULT:
Root mean square error. Its meaning for regression task is obvious. As for
classification task, RMS error means error when estimating posterior
probabilities.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/
double mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/*************************************************************************
Average error on the test set.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
NPoints - points count.
RESULT:
Its meaning for regression task is obvious. As for classification task, it
means average error when estimating posterior probabilities.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 11.03.2008 by Bochkanov Sergey
*************************************************************************/
double mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Average error on the test set given by sparse matrix.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
NPoints - points count, >=0.
RESULT:
Its meaning for regression task is obvious. As for classification task, it
means average error when estimating posterior probabilities.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/
double mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/*************************************************************************
Average relative error on the test set.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
NPoints - points count.
RESULT:
Its meaning for regression task is obvious. As for classification task, it
means average relative error when estimating posterior probability of
belonging to the correct class.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 11.03.2008 by Bochkanov Sergey
*************************************************************************/
double mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Average relative error on the test set given by sparse matrix.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
NPoints - points count, >=0.
RESULT:
Its meaning for regression task is obvious. As for classification task, it
means average relative error when estimating posterior probability of
belonging to the correct class.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/
double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/*************************************************************************
Gradient calculation
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
X - input vector, length of array must be at least NIn
DesiredY- desired outputs, length of array must be at least NOut
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
skipping to change at line 2992
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network, array[WCount]
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
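/*************************************************************************
A minimal sketch (mlpcreate1 is assumed from elsewhere in this library):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 3, 1, net);          // assumed ctor (regression)
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        double e;
        real_1d_array grad;                // resized to WCount if needed
        mlpgradbatch(net, xy, 4, e, grad);
        // reuse grad across calls to avoid reallocation (see note above)
        return 0;
    }
*************************************************************************/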
/*************************************************************************
Batch gradient calculation for a set of inputs/outputs given by sparse
matrices
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - set of inputs/outputs; one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SSize - number of elements in XY
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network, array[WCount]
-- ALGLIB --
Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
/*************************************************************************
Batch gradient calculation for a subset of dataset
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset; one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SetSize - real size of XY, SetSize>=0;
Idx - subset of SubsetSize elements, array[SubsetSize]:
* Idx[I] stores row index in the original dataset which is
given by XY. Gradient is calculated with respect to rows
whose indexes are stored in Idx[].
* Idx[] must store correct indexes; this function throws
an exception in case incorrect index (less than 0 or
larger than rows(XY)) is given
* Idx[] may store indexes in any order and even with
repetitions.
SubsetSize- number of elements in Idx[] array.
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network,
array[WCount]
NOTE: when SubsetSize<0, the gradient is calculated for the full dataset,
i.e. the call is equivalent to MLPGradBatch.
-- ALGLIB --
Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
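/*************************************************************************
Usage sketch (illustrative addition, not part of the original header):
restricting the gradient to rows 0 and 2 of a 3-row dataset through the
Idx parameter of mlpgradbatchsubset(). Assumes dataanalysis.h; the data
values are invented.

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate0(1, 1, net);

        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        integer_1d_array idx = "[0,2]"; // any order, repetitions allowed

        double e;
        real_1d_array grad;
        mlpgradbatchsubset(net, xy, 3, idx, 2, e, grad);
        return 0;
    }
*************************************************************************/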
/*************************************************************************
Batch gradient calculation for a set of inputs/outputs stored in a sparse
matrix, restricted to a subset of the dataset given by a set of indexes.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - set of inputs/outputs; one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SetSize - real size of XY, SetSize>=0;
Idx - subset of SubsetSize elements, array[SubsetSize]:
* Idx[I] stores row index in the original dataset which is
given by XY. Gradient is calculated with respect to rows
whose indexes are stored in Idx[].
* Idx[] must store correct indexes; this function throws
an exception in case incorrect index (less than 0 or
larger than rows(XY)) is given
* Idx[] may store indexes in any order and even with
repetitions.
SubsetSize- number of elements in Idx[] array.
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network,
array[WCount]
NOTE: when SubsetSize<0, the gradient is calculated for the full dataset,
i.e. the call is equivalent to MLPGradBatchSparse.
-- ALGLIB --
Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
/*************************************************************************
Batch gradient calculation for a set of inputs/outputs
(natural error function is used)
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - set of inputs/outputs; one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SSize - number of elements in XY
Grad - possibly preallocated array. If size of array is smaller
[...]
Copyright 26.01.2008 by Bochkanov Sergey.
Hessian calculation based on R-algorithm described in
"Fast Exact Multiplication by the Hessian",
B. A. Pearlmutter,
Neural Computation, 1994.
*************************************************************************/
void mlphessianbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h);
/*************************************************************************
Calculation of all types of errors.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset; one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SetSize - real size of XY, SetSize>=0;
Subset - subset of SubsetSize elements, array[SubsetSize];
SubsetSize- number of elements in Subset[] array.
OUTPUT PARAMETERS:
Rep - contains all types of errors.
NOTE: when SubsetSize<0 the errors are calculated for the full dataset.
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
void mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
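/*************************************************************************
Usage sketch (illustrative addition, not part of the original header):
reading the report produced by mlpallerrorssubset(). The field names of
modelerrors (relclserror, avgce, rmserror, avgerror, avgrelerror) are
assumed from the C++ wrapper; the data values are invented.

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate0(1, 1, net);

        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        integer_1d_array subset = "[0,1,2]"; // evaluate on all three rows

        modelerrors rep;
        mlpallerrorssubset(net, xy, 3, subset, 3, rep);
        printf("rms=%.4f avg=%.4f avgrel=%.4f\n",
               rep.rmserror, rep.avgerror, rep.avgrelerror);
        return 0;
    }
*************************************************************************/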
/*************************************************************************
Calculation of all types of errors.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset given by sparse matrix;
one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SetSize - real size of XY, SetSize>=0;
Subset - subset of SubsetSize elements, array[SubsetSize];
SubsetSize- number of elements in Subset[] array.
OUTPUT PARAMETERS:
Rep - contains all types of errors.
NOTE: when SubsetSize<0 the errors are calculated for the full dataset.
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
void mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
/*************************************************************************
Error of the neural network on dataset.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
SetSize - real size of XY, SetSize>=0;
Subset - subset of SubsetSize elements, array[SubsetSize];
SubsetSize- number of elements in Subset[] array.
RESULT:
sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes the following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
double mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
/*************************************************************************
Error of the neural network on dataset.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
SetSize - real size of XY, SetSize>=0;
it is used when SubsetSize<0;
Subset - subset of SubsetSize elements, array[SubsetSize];
SubsetSize- number of elements in Subset[] array.
RESULT:
sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes the following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
double mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
/*************************************************************************
This subroutine trains logit model.
INPUT PARAMETERS:
XY - training set, array[0..NPoints-1,0..NVars]
First NVars columns store values of independent
variables, next column stores number of class (from 0
to NClasses-1) which dataset element belongs to. Fractional
values are rounded to nearest integer.
NPoints - training set size, NPoints>=1
NVars - number of independent variables, NVars>=1
[...]
success, negative ones are failures.
More information about fields of this structure can be
found in the comments on MCPDReport datatype.
-- ALGLIB --
Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
void mcpdresults(const mcpdstate &s, real_2d_array &p, mcpdreport &rep);
/*************************************************************************
This function serializes data structure to string.
Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
and Windows-style (CR+LF) newlines
* although serializer uses spaces and CR+LF as separators, you can
replace any separator character by arbitrary combination of spaces,
tabs, Windows or Unix newlines. It allows flexible reformatting of
the string in case you want to include it into text or XML file.
But you should not insert separators into the middle of the "words"
nor you should change case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
and big endian machines, and so on. You can serialize structure on
32-bit machine and unserialize it on 64-bit one (or vice versa), or
serialize it on SPARC and unserialize on x86. You can also
serialize it in C++ version of ALGLIB and unserialize in C# one,
and vice versa.
*************************************************************************/
void mlpeserialize(mlpensemble &obj, std::string &s_out);
/*************************************************************************
This function unserializes data structure from string.
*************************************************************************/
void mlpeunserialize(std::string &s_in, mlpensemble &obj);
/*************************************************************************
Like MLPCreate0, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreate0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreate1, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
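/*************************************************************************
Usage sketch (illustrative addition, not part of the original header): a
serialize/unserialize round trip for an ensemble, relying on the
portability properties documented above. Assumes dataanalysis.h.

    #include <string>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mlpensemble e1, e2;
        mlpecreate0(2, 1, 5, e1);   // 5 networks, 2 inputs, 1 output

        std::string s;
        mlpeserialize(e1, s);       // portable text representation
        mlpeunserialize(s, e2);     // e2 is now equivalent to e1
        return 0;
    }
*************************************************************************/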
/*************************************************************************
Like MLPCreate2, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateB0, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateB1, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateB2, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateR0, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateR1, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateR2, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateC0, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreatec0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateC1, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Like MLPCreateC2, but for ensembles.
-- ALGLIB --
Copyright 18.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Creates ensemble from network. Only network geometry is copied.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpecreatefromnetwork(const multilayerperceptron &network, const ae_int_t ensemblesize, mlpensemble &ensemble);
/*************************************************************************
Randomization of MLP ensemble
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlperandomize(const mlpensemble &ensemble);
/*************************************************************************
Return ensemble properties (number of inputs and outputs).
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpeproperties(const mlpensemble &ensemble, ae_int_t &nin, ae_int_t &nout);
/*************************************************************************
Return normalization type (whether ensemble is SOFTMAX-normalized or not).
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
bool mlpeissoftmax(const mlpensemble &ensemble);
/*************************************************************************
Processing
INPUT PARAMETERS:
Ensemble- neural networks ensemble
X - input vector, array[0..NIn-1].
Y - (possibly) preallocated buffer; if size of Y is less than
NOut, it will be reallocated. If it is large enough, it
is NOT reallocated, so we can save some time on reallocation.
OUTPUT PARAMETERS:
Y - result. Regression estimate when solving regression task,
vector of posterior probabilities for classification task.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpeprocess(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y);
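/*************************************************************************
Usage sketch (illustrative addition, not part of the original header):
running an ensemble on one input vector with mlpeprocess(). Assumes
dataanalysis.h; the ensemble is untrained, so the output is arbitrary.

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mlpensemble ens;
        mlpecreate0(2, 1, 5, ens);  // 5 networks, 2 inputs, 1 output

        real_1d_array x = "[0.1,0.9]";
        real_1d_array y;            // reallocated by mlpeprocess if needed
        mlpeprocess(ens, x, y);
        printf("y0=%.4f\n", y[0]);
        return 0;
    }
*************************************************************************/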
/*************************************************************************
'interactive' variant of MLPEProcess for languages like Python which
support constructs like "Y = MLPEProcess(LM,X)" and interactive mode of the
interpreter
This function allocates new array on each call, so it is significantly
slower than its 'non-interactive' counterpart, but it is more convenient
when you call it from command line.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
void mlpeprocessi(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y);
/*************************************************************************
Relative classification error on the test set
INPUT PARAMETERS:
Ensemble- ensemble
XY - test set
NPoints - test set size
RESULT:
percent of incorrectly classified cases.
Works both for classifier networks and for regression networks which
are used as classifiers.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
double mlperelclserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Average cross-entropy (in bits per element) on the test set
INPUT PARAMETERS:
Ensemble- ensemble
XY - test set
NPoints - test set size
RESULT:
CrossEntropy/(NPoints*LN(2)).
Zero if ensemble solves regression task.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
double mlpeavgce(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
RMS error on the test set
INPUT PARAMETERS:
Ensemble- ensemble
XY - test set
NPoints - test set size
RESULT:
root mean square error.
Its meaning for regression task is obvious. As for classification task
RMS error means error when estimating posterior probabilities.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
double mlpermserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Average error on the test set
INPUT PARAMETERS:
Ensemble- ensemble
XY - test set
NPoints - test set size
RESULT:
Its meaning for regression task is obvious. As for classification task
it means average error when estimating posterior probabilities.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
double mlpeavgerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Average relative error on the test set
INPUT PARAMETERS:
Ensemble- ensemble
XY - test set
NPoints - test set size
RESULT:
Its meaning for regression task is obvious. As for classification task
it means average relative error when estimating posterior probabilities.
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
double mlpeavgrelerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
Neural network training using modified Levenberg-Marquardt with exact
Hessian calculation and regularization. Subroutine trains neural network
with restarts from random positions. Algorithm is well suited for small
and medium scale problems (hundreds of weights).
INPUT PARAMETERS:
Network - neural network with initialized geometry
XY - training set
NPoints - training set size
Decay - weight decay constant, >=0.001
Decay term 'Decay*||Weights||^2' is added to error
function.
If you don't know what Decay to choose, use 0.001.
Restarts - number of restarts from random position, >0.
If you don't know what Restarts to choose, use 2.
OUTPUT PARAMETERS:
Network - trained neural network.
Info - return code:
* -9, if internal matrix inverse subroutine failed
* -2, if there is a point with class number
outside of [0..NOut-1].
* -1, if wrong parameters specified
(NPoints<0, Restarts<1).
* 2, if task has been solved.
Rep - training report
-- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
void mlptrainlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep);
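/*************************************************************************
Usage sketch (illustrative addition, not part of the original header):
training a small regression network with mlptrainlm(), using the
recommended Decay=0.001 and Restarts=2. Assumes dataanalysis.h; the
samples of y=x^2 are invented for illustration.

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(1, 3, 1, net);

        real_2d_array xy = "[[0.0,0.0],[0.25,0.0625],[0.5,0.25],[0.75,0.5625],[1.0,1.0]]";

        ae_int_t info;
        mlpreport rep;
        mlptrainlm(net, xy, 5, 0.001, 2, info, rep);
        printf("info=%d\n", int(info)); // 2 means task has been solved
        return 0;
    }
*************************************************************************/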
/*************************************************************************
Neural network training using L-BFGS algorithm with regularization.
Subroutine trains neural network with restarts from random positions.
Algorithm is well suited for problems of any dimensionality (memory
requirements and step complexity are linear by weights number).
INPUT PARAMETERS:
Network - neural network with initialized geometry
XY - training set
NPoints - training set size
Decay - weight decay constant, >=0.001
Decay term 'Decay*||Weights||^2' is added to error
function.
[...]
Info - return code, same as in MLPTrainLBFGS
Rep - report, same as in MLPTrainLM/MLPTrainLBFGS
CVRep - generalization error estimates
-- ALGLIB --
Copyright 09.12.2007 by Bochkanov Sergey
*************************************************************************/
void mlpkfoldcvlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep);
/*************************************************************************
This function estimates generalization error using cross-validation on the
current dataset with current training settings.
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object. Network is not changed during cross-
validation and is not trained - it is used only as
representative of its architecture. I.e., we estimate
generalization properties of ARCHITECTURE, not some
specific network.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that for each cross-validation
round specified number of random restarts is
performed, with best network being chosen after
training.
* NRestarts=0 is same as NRestarts=1
FoldsCount - number of folds in k-fold cross-validation:
* 2<=FoldsCount<=size of dataset
* recommended value: 10.
* values larger than dataset size will be silently
truncated down to dataset size
OUTPUT PARAMETERS:
Rep - structure which contains cross-validation estimates:
* Rep.RelCLSError - fraction of misclassified cases.
* Rep.AvgCE - average cross-entropy
* Rep.RMSError - root-mean-square error
* Rep.AvgError - average error
* Rep.AvgRelError - average relative error
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
or subset with only one point was given, zeros are returned as
estimates.
NOTE: this method performs FoldsCount cross-validation rounds, each one
with NRestarts random starts. Thus, FoldsCount*NRestarts networks
are trained in total.
NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.
NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
contain errors in prediction of posterior probabilities.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep);
void smp_mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep);
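/*************************************************************************
Usage sketch (illustrative addition, not part of the original header):
5-fold cross-validation of a classifier architecture with mlpkfoldcv().
Assumes dataanalysis.h; the tiny two-class dataset is invented.

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mlptrainer trn;
        mlpcreatetrainercls(2, 2, trn);     // 2 inputs, 2 classes

        // last column stores the class index (0 or 1)
        real_2d_array xy = "[[0,0,0],[0,1,0],[1,0,1],[1,1,1],[0.9,0.1,1],[0.1,0.9,0]]";
        mlpsetdataset(trn, xy, 6);

        multilayerperceptron net;
        mlpcreatec0(2, 2, net);             // architecture being evaluated

        mlpreport rep;
        mlpkfoldcv(trn, net, 3, 5, rep);    // 3 restarts, 5 folds
        printf("misclassified fraction: %.3f\n", rep.relclserror);
        return 0;
    }
*************************************************************************/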
/*************************************************************************
Creation of the network trainer object for regression networks
INPUT PARAMETERS:
NIn - number of inputs, NIn>=1
NOut - number of outputs, NOut>=1
OUTPUT PARAMETERS:
S - neural network trainer object.
This structure can be used to train any regression
network with NIn inputs and NOut outputs.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpcreatetrainer(const ae_int_t nin, const ae_int_t nout, mlptrainer &s);
/*************************************************************************
Creation of the network trainer object for classification networks
INPUT PARAMETERS:
NIn - number of inputs, NIn>=1
NClasses - number of classes, NClasses>=2
OUTPUT PARAMETERS:
S - neural network trainer object.
This structure can be used to train any classification
network with NIn inputs and NClasses outputs.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpcreatetrainercls(const ae_int_t nin, const ae_int_t nclasses, mlptrainer &s);
/*************************************************************************
This function sets "current dataset" of the trainer object to one passed
by user.
INPUT PARAMETERS:
S - trainer object
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed.
NPoints - points count, >=0.
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs the following
dataset format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes the
following dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpsetdataset(const mlptrainer &s, const real_2d_array &xy, const ae_int_t npoints);
/*************************************************************************
This function sets "current dataset" of the trainer object to one passed
by user (sparse matrix is used to store dataset).
INPUT PARAMETERS:
S - trainer object
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Any sparse storage format can be used:
Hash-table, CRS...
NPoints - points count, >=0
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs the following
dataset format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes the
following dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpsetsparsedataset(const mlptrainer &s, const sparsematrix &xy, const ae_int_t npoints);
/*************************************************************************
This function sets weight decay coefficient which is used for training.
INPUT PARAMETERS:
S - trainer object
Decay - weight decay coefficient, >=0. Weight decay term
'Decay*||Weights||^2' is added to error function. If
you don't know what Decay to choose, use 1.0E-3.
Weight decay can be set to zero, in this case network
is trained without weight decay.
NOTE: by default network uses some small nonzero value for weight decay.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpsetdecay(const mlptrainer &s, const double decay);
/*************************************************************************
This function sets stopping criteria for the optimizer.
INPUT PARAMETERS:
S - trainer object
WStep - stopping criterion. Algorithm stops if step size is
less than WStep. Recommended value - 0.01. Zero step
size means stopping after MaxIts iterations.
WStep>=0.
MaxIts - stopping criterion. Algorithm stops after MaxIts
iterations (NOT gradient calculations). Zero MaxIts
means stopping when step is sufficiently small.
MaxIts>=0.
NOTE: by default, WStep=0.005 and MaxIts=0 are used. These values are also
used when MLPSetCond() is called with WStep=0 and MaxIts=0.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpsetcond(const mlptrainer &s, const double wstep, const ae_int_t maxits);
/*************************************************************************
This function trains neural network passed to this function, using current
dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset())
and current training settings. Training from NRestarts random starting
positions is performed, best network is chosen.
Training is performed using current training algorithm.
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed, best network is chosen after
training
* NRestarts=0 means that current state of the network
is used for training.
OUTPUT PARAMETERS:
Network - trained network
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
network is filled by zero values. Same behavior for functions
MLPStartTraining and MLPContinueTraining.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep);
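/*************************************************************************
Usage sketch (illustrative addition, not part of the original header):
the typical trainer pipeline - create a trainer, attach a dataset, set
stopping criteria, then train with restarts. Assumes dataanalysis.h; the
dataset values are invented.

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mlptrainer trn;
        mlpcreatetrainer(1, 1, trn);        // regression: 1 input, 1 output

        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        mlpsetdataset(trn, xy, 3);
        mlpsetdecay(trn, 0.001);
        mlpsetcond(trn, 0.01, 0);           // stop when step < 0.01

        multilayerperceptron net;
        mlpcreate1(1, 3, 1, net);

        mlpreport rep;
        mlptrainnetwork(trn, net, 5, rep);  // 5 random restarts
        return 0;
    }
*************************************************************************/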
/*************************************************************************
IMPORTANT: this is an "expert" version of the MLPTrain() function. We do
not recommend you to use it unless you are pretty sure that you
need ability to monitor training progress.
This function performs step-by-step training of the neural network. Here
"step-by-step" means that training starts with MLPStartTraining() call,
and then user subsequently calls MLPContinueTraining() to perform one more
iteration of the training.
After call to this function trainer object remembers network and is ready
to train it. However, no training is performed until first call to
MLPContinueTraining() function. Subsequent calls to MLPContinueTraining()
will advance training progress one iteration further.
EXAMPLE:
>
> ...initialize network and trainer object....
>
> MLPStartTraining(Trainer, Network, True)
> while MLPContinueTraining(Trainer, Network) do
> ...visualize training progress...
>
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object.
RandomStart - randomize network before training or not:
* True means that network is randomized and its
initial state (one which was passed to the trainer
object) is lost.
* False means that training is started from the
current state of the network
OUTPUT PARAMETERS:
Network - neural network which is ready for training (weights are
initialized, preprocessor is initialized using current
training set)
NOTE: this method uses sum-of-squares error function for training.
NOTE: it is expected that trainer object settings are NOT changed during
step-by-step training, i.e. no one changes stopping criteria or
training set during training. It is possible and there is no defense
against such actions, but algorithm behavior in such cases is
undefined and can be unpredictable.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpstarttraining(const mlptrainer &s, const multilayerperceptron &network, const bool randomstart);
/*************************************************************************
IMPORTANT: this is an "expert" version of the MLPTrain() function. We do
not recommend you to use it unless you are pretty sure that you
need ability to monitor training progress.
This function performs step-by-step training of the neural network. Here
"step-by-step" means that training starts with MLPStartTraining() call,
and then user subsequently calls MLPContinueTraining() to perform one more
iteration of the training.
This function performs one more iteration of the training and returns
either True (training continues) or False (training stopped). In case True
was returned, Network weights are updated according to the current state
of the optimization progress. In case False was returned, no additional
updates are performed (previous update of the network weights moved us to
the final point, and no additional updates are needed).
EXAMPLE:
>
> [initialize network and trainer object]
>
> MLPStartTraining(Trainer, Network, True)
> while MLPContinueTraining(Trainer, Network) do
> [visualize training progress]
>
INPUT PARAMETERS:
S - trainer object
Network - neural network structure, which is used to store
current state of the training process.
OUTPUT PARAMETERS:
Network - weights of the neural network are rewritten by the
current approximation.
NOTE: this method uses sum-of-squares error function for training.
NOTE: it is expected that trainer object settings are NOT changed during
step-by-step training, i.e. no one changes stopping criteria or
training set during training. It is possible and there is no defense
against such actions, but algorithm behavior in such cases is
undefined and can be unpredictable.
NOTE: It is expected that Network is the same one which was passed to
MLPStartTraining() function. However, THIS function checks only
following:
* that number of network inputs is consistent with trainer object
settings
* that number of network outputs/classes is consistent with trainer
object settings
* that number of network weights is the same as number of weights in
the network passed to MLPStartTraining() function
Exception is thrown when these conditions are violated.
It is also expected that you do not change state of the network on
your own - the only party who has right to change network during its
training is a trainer object. Any attempt to interfere with trainer
may lead to unpredictable results.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
bool mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network);
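/*************************************************************************
Usage sketch (illustrative addition, not part of the original header): a
concrete C++ version of the EXAMPLE pseudo-code above, monitoring
step-by-step training. Assumes dataanalysis.h; the dataset is invented.

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mlptrainer trn;
        mlpcreatetrainer(1, 1, trn);
        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        mlpsetdataset(trn, xy, 3);

        multilayerperceptron net;
        mlpcreate1(1, 3, 1, net);

        int it = 0;
        mlpstarttraining(trn, net, true);       // randomize and bind network
        while( mlpcontinuetraining(trn, net) )  // one iteration per call
            printf("iteration %d done\n", ++it);
        return 0;
    }
*************************************************************************/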
/*************************************************************************
Training neural networks ensemble using bootstrap aggregating (bagging).
Modified Levenberg-Marquardt algorithm is used as base training method.
INPUT PARAMETERS:
Ensemble - model with initialized geometry
XY - training set
NPoints - training set size
Decay - weight decay coefficient, >=0.001
[...]
* 6, if task has been solved.
Rep - training report.
OOBErrors - out-of-bag generalization error estimate
-- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
void mlpetraines(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep);
/************************************************************************* /*************************************************************************
This function trains neural network ensemble passed to this function using
current dataset and early stopping training algorithm. Each early stopping
round performs NRestarts random restarts (thus, EnsembleSize*NRestarts
training rounds are performed in total).
INPUT PARAMETERS:
S - trainer object;
Ensemble - neural network ensemble. It must have same number of
inputs and outputs/classes as was specified during
creation of the trainer object.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed during each ES round;
* NRestarts=0 is silently replaced by 1.
OUTPUT PARAMETERS:
Ensemble - trained ensemble;
Rep - contains all types of errors.
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
or single-point dataset was passed, ensemble is filled by zero
values.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 22.08.2012 by Bochkanov Sergey
*************************************************************************/
void mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep);
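A sketch of the trainer-based path, assuming the same C++ wrappers as above
(names taken from this header; data illustrative):

    using namespace alglib;

    mlptrainer trn;
    mlpensemble ens;
    mlpreport rep;
    real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    mlpcreatetrainer(2, 1, trn);          // 2 inputs, 1 output
    mlpsetdataset(trn, xy, 4);
    mlpecreate1(2, 5, 1, 10, ens);        // geometry must match trainer settings
    mlptrainensemblees(trn, ens, 3, rep); // 3 random restarts per ES round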
/*************************************************************************
Principal components analysis

Subroutine builds orthogonal basis where first axis corresponds to
direction with maximum variance, second axis maximizes variance in subspace
orthogonal to first axis and so on.

It should be noted that, unlike LDA, PCA does not use class labels.

INPUT PARAMETERS:
    X           -   dataset, array[0..NPoints-1,0..NVars-1].

[...]
void dsoptimalsplitk(/* Real */ ae_vector* a, void dsoptimalsplitk(/* Real */ ae_vector* a,
    /* Integer */ ae_vector* c,
    ae_int_t n,
    ae_int_t nc,
    ae_int_t kmax,
    ae_int_t* info,
    /* Real */ ae_vector* thresholds,
    ae_int_t* ni,
    double* cve,
    ae_state *_state);
ae_bool _cvreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _cvreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _cvreport_clear(void* _p);
void _cvreport_destroy(void* _p);
void clusterizercreate(clusterizerstate* s, ae_state *_state);
void clusterizersetpoints(clusterizerstate* s,
/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures,
ae_int_t disttype,
ae_state *_state);
void clusterizersetdistances(clusterizerstate* s,
/* Real */ ae_matrix* d,
ae_int_t npoints,
ae_bool isupper,
ae_state *_state);
void clusterizersetahcalgo(clusterizerstate* s,
ae_int_t algo,
ae_state *_state);
void clusterizersetkmeanslimits(clusterizerstate* s,
ae_int_t restarts,
ae_int_t maxits,
ae_state *_state);
void clusterizerrunahc(clusterizerstate* s,
ahcreport* rep,
ae_state *_state);
void clusterizerrunkmeans(clusterizerstate* s,
ae_int_t k,
kmeansreport* rep,
ae_state *_state);
void clusterizergetdistances(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures,
ae_int_t disttype,
/* Real */ ae_matrix* d,
ae_state *_state);
void clusterizergetkclusters(ahcreport* rep,
ae_int_t k,
/* Integer */ ae_vector* cidx,
/* Integer */ ae_vector* cz,
ae_state *_state);
void clusterizerseparatedbydist(ahcreport* rep,
double r,
ae_int_t* k,
/* Integer */ ae_vector* cidx,
/* Integer */ ae_vector* cz,
ae_state *_state);
void clusterizerseparatedbycorr(ahcreport* rep,
double r,
ae_int_t* k,
/* Integer */ ae_vector* cidx,
/* Integer */ ae_vector* cz,
ae_state *_state);
void kmeansgenerateinternal(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nvars,
ae_int_t k,
ae_int_t maxits,
ae_int_t restarts,
ae_int_t* info,
/* Real */ ae_matrix* ccol,
ae_bool needccol,
/* Real */ ae_matrix* crow,
ae_bool needcrow,
/* Integer */ ae_vector* xyc,
ae_state *_state);
ae_bool _clusterizerstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _clusterizerstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _clusterizerstate_clear(void* _p);
void _clusterizerstate_destroy(void* _p);
ae_bool _ahcreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _ahcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _ahcreport_clear(void* _p);
void _ahcreport_destroy(void* _p);
ae_bool _kmeansreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _kmeansreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _kmeansreport_clear(void* _p);
void _kmeansreport_destroy(void* _p);
void kmeansgenerate(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nvars,
ae_int_t k,
ae_int_t restarts,
ae_int_t* info,
/* Real */ ae_matrix* c,
/* Integer */ ae_vector* xyc,
ae_state *_state);
void dfbuildrandomdecisionforest(/* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_int_t nvars,
    ae_int_t nclasses,
    ae_int_t ntrees,
    double r,
    ae_int_t* info,
    decisionforest* df,
    dfreport* rep,
    ae_state *_state);

[...]

    ae_int_t npoints,
    ae_state *_state);
void dfcopy(decisionforest* df1, decisionforest* df2, ae_state *_state);
void dfalloc(ae_serializer* s, decisionforest* forest, ae_state *_state);
void dfserialize(ae_serializer* s,
    decisionforest* forest,
    ae_state *_state);
void dfunserialize(ae_serializer* s,
    decisionforest* forest,
    ae_state *_state);
ae_bool _decisionforest_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _decisionforest_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _decisionforest_clear(void* _p);
void _decisionforest_destroy(void* _p);
ae_bool _dfreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _dfreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _dfreport_clear(void* _p);
void _dfreport_destroy(void* _p);
ae_bool _dfinternalbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _dfinternalbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _dfinternalbuffers_clear(void* _p);
void _dfinternalbuffers_destroy(void* _p);
void lrbuild(/* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_int_t nvars,
    ae_int_t* info,
    linearmodel* lm,
    lrreport* ar,
    ae_state *_state);
void lrbuilds(/* Real */ ae_matrix* xy,
    /* Real */ ae_vector* s,
    ae_int_t npoints,

[...]

    double* covab,
    double* corrab,
    double* p,
    ae_state *_state);
void lrline(/* Real */ ae_matrix* xy,
    ae_int_t n,
    ae_int_t* info,
    double* a,
    double* b,
    ae_state *_state);
ae_bool _linearmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _linearmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _linearmodel_clear(void* _p);
void _linearmodel_destroy(void* _p);
ae_bool _lrreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _lrreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _lrreport_clear(void* _p);
void _lrreport_destroy(void* _p);
void filtersma(/* Real */ ae_vector* x,
    ae_int_t n,
    ae_int_t k,
    ae_state *_state);
void filterema(/* Real */ ae_vector* x,
    ae_int_t n,
    double alpha,
    ae_state *_state);
void filterlrma(/* Real */ ae_vector* x,
    ae_int_t n,
    ae_int_t k,
    ae_state *_state);
void fisherlda(/* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_int_t nvars,
    ae_int_t nclasses,
    ae_int_t* info,
    /* Real */ ae_vector* w,
    ae_state *_state);
void fisherldan(/* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_int_t nvars,

[...]

    ae_state *_state);
void mlpunserializeold(/* Real */ ae_vector* ra,
    multilayerperceptron* network,
    ae_state *_state);
void mlprandomize(multilayerperceptron* network, ae_state *_state);
void mlprandomizefull(multilayerperceptron* network, ae_state *_state);
void mlpinitpreprocessor(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    ae_state *_state);
void mlpinitpreprocessorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t ssize,
ae_state *_state);
void mlpinitpreprocessorsubset(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* idx,
ae_int_t subsetsize,
ae_state *_state);
void mlpinitpreprocessorsparsesubset(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* idx,
ae_int_t subsetsize,
ae_state *_state);
void mlpproperties(multilayerperceptron* network,
    ae_int_t* nin,
    ae_int_t* nout,
    ae_int_t* wcount,
    ae_state *_state);
ae_int_t mlpgetinputscount(multilayerperceptron* network,
    ae_state *_state);
ae_int_t mlpgetoutputscount(multilayerperceptron* network,
    ae_state *_state);
ae_int_t mlpgetweightscount(multilayerperceptron* network,

[...]

    /* Real */ ae_vector* y,
    ae_state *_state);
void mlpprocessi(multilayerperceptron* network,
    /* Real */ ae_vector* x,
    /* Real */ ae_vector* y,
    ae_state *_state);
double mlperror(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    ae_state *_state);
double mlperrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints,
ae_state *_state);
double mlperrorn(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    ae_state *_state);
ae_int_t mlpclserror(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    ae_state *_state);
double mlprelclserror(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlprelclserrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints,
ae_state *_state);
double mlpavgce(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlpavgcesparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints,
ae_state *_state);
double mlprmserror(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlprmserrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints,
ae_state *_state);
double mlpavgerror(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlpavgerrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints,
ae_state *_state);
double mlpavgrelerror(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlpavgrelerrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints,
ae_state *_state);
void mlpgrad(multilayerperceptron* network,
    /* Real */ ae_vector* x,
    /* Real */ ae_vector* desiredy,
    double* e,
    /* Real */ ae_vector* grad,
    ae_state *_state);
void mlpgradn(multilayerperceptron* network,
    /* Real */ ae_vector* x,
    /* Real */ ae_vector* desiredy,
    double* e,
    /* Real */ ae_vector* grad,
    ae_state *_state);
void mlpgradbatch(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    double* e,
    /* Real */ ae_vector* grad,
    ae_state *_state);
void mlpgradbatchsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t ssize,
double* e,
/* Real */ ae_vector* grad,
ae_state *_state);
void mlpgradbatchsubset(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* idx,
ae_int_t subsetsize,
double* e,
/* Real */ ae_vector* grad,
ae_state *_state);
void mlpgradbatchsparsesubset(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* idx,
ae_int_t subsetsize,
double* e,
/* Real */ ae_vector* grad,
ae_state *_state);
void mlpgradnbatch(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    double* e,
    /* Real */ ae_vector* grad,
    ae_state *_state);
void mlphessiannbatch(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    double* e,

[...]

    ae_state *_state);
void mlpalloc(ae_serializer* s,
    multilayerperceptron* network,
    ae_state *_state);
void mlpserialize(ae_serializer* s,
    multilayerperceptron* network,
    ae_state *_state);
void mlpunserialize(ae_serializer* s,
    multilayerperceptron* network,
    ae_state *_state);
void mlpallerrorssubset(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t setsize,
    /* Integer */ ae_vector* subset,
    ae_int_t subsetsize,
    modelerrors* rep,
    ae_state *_state);
void mlpallerrorssparsesubset(multilayerperceptron* network,
    sparsematrix* xy,
    ae_int_t setsize,
    /* Integer */ ae_vector* subset,
    ae_int_t subsetsize,
    modelerrors* rep,
    ae_state *_state);
double mlperrorsubset(multilayerperceptron* network,
    /* Real */ ae_matrix* xy,
    ae_int_t setsize,
    /* Integer */ ae_vector* subset,
    ae_int_t subsetsize,
    ae_state *_state);
double mlperrorsparsesubset(multilayerperceptron* network,
    sparsematrix* xy,
    ae_int_t setsize,
    /* Integer */ ae_vector* subset,
    ae_int_t subsetsize,
    ae_state *_state);
ae_bool _multilayerperceptron_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _multilayerperceptron_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _multilayerperceptron_clear(void* _p);
void _multilayerperceptron_destroy(void* _p);
ae_bool _modelerrors_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _modelerrors_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _modelerrors_clear(void* _p);
void _modelerrors_destroy(void* _p);
void mnltrainh(/* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_int_t nvars,
    ae_int_t nclasses,
    ae_int_t* info,
    logitmodel* lm,
    mnlreport* rep,
    ae_state *_state);
void mnlprocess(logitmodel* lm,
    /* Real */ ae_vector* x,

[...]

    ae_int_t npoints,
    ae_state *_state);
double mnlavgrelerror(logitmodel* lm,
    /* Real */ ae_matrix* xy,
    ae_int_t ssize,
    ae_state *_state);
ae_int_t mnlclserror(logitmodel* lm,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
ae_bool _logitmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _logitmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _logitmodel_clear(void* _p);
void _logitmodel_destroy(void* _p);
ae_bool _logitmcstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _logitmcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _logitmcstate_clear(void* _p);
void _logitmcstate_destroy(void* _p);
ae_bool _mnlreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mnlreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mnlreport_clear(void* _p);
void _mnlreport_destroy(void* _p);
void mcpdcreate(ae_int_t n, mcpdstate* s, ae_state *_state);
void mcpdcreateentry(ae_int_t n,
    ae_int_t entrystate,
    mcpdstate* s,
    ae_state *_state);
void mcpdcreateexit(ae_int_t n,
    ae_int_t exitstate,
    mcpdstate* s,
    ae_state *_state);
void mcpdcreateentryexit(ae_int_t n,

[...]

    /* Real */ ae_matrix* pp,
    ae_state *_state);
void mcpdsetpredictionweights(mcpdstate* s,
    /* Real */ ae_vector* pw,
    ae_state *_state);
void mcpdsolve(mcpdstate* s, ae_state *_state);
void mcpdresults(mcpdstate* s,
    /* Real */ ae_matrix* p,
    mcpdreport* rep,
    ae_state *_state);
ae_bool _mcpdstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mcpdstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mcpdstate_clear(void* _p);
void _mcpdstate_destroy(void* _p);
ae_bool _mcpdreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mcpdreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mcpdreport_clear(void* _p);
void _mcpdreport_destroy(void* _p);
void mlpecreate0(ae_int_t nin,
    ae_int_t nout,
    ae_int_t ensemblesize,
    mlpensemble* ensemble,
    ae_state *_state);
void mlpecreate1(ae_int_t nin,
    ae_int_t nhid,
    ae_int_t nout,
    ae_int_t ensemblesize,
    mlpensemble* ensemble,

[...]

    ae_state *_state);
ae_bool mlpeissoftmax(mlpensemble* ensemble, ae_state *_state);
void mlpeprocess(mlpensemble* ensemble,
    /* Real */ ae_vector* x,
    /* Real */ ae_vector* y,
    ae_state *_state);
void mlpeprocessi(mlpensemble* ensemble,
    /* Real */ ae_vector* x,
    /* Real */ ae_vector* y,
    ae_state *_state);
void mlpeallerrors(mlpensemble* ensemble,
/* Real */ ae_matrix* xy,
ae_int_t npoints,
double* relcls,
double* avgce,
double* rms,
double* avg,
double* avgrel,
ae_state *_state);
void mlpeallerrorssparse(mlpensemble* ensemble,
sparsematrix* xy,
ae_int_t npoints,
double* relcls,
double* avgce,
double* rms,
double* avg,
double* avgrel,
ae_state *_state);
double mlperelclserror(mlpensemble* ensemble,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlpeavgce(mlpensemble* ensemble,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlpermserror(mlpensemble* ensemble,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlpeavgerror(mlpensemble* ensemble,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
double mlpeavgrelerror(mlpensemble* ensemble,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    ae_state *_state);
void mlpealloc(ae_serializer* s, mlpensemble* ensemble, ae_state *_state);
void mlpeserialize(ae_serializer* s,
mlpensemble* ensemble,
ae_state *_state);
void mlpeunserialize(ae_serializer* s,
mlpensemble* ensemble,
ae_state *_state);
ae_bool _mlpensemble_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpensemble_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpensemble_clear(void* _p);
void _mlpensemble_destroy(void* _p);
void mlptrainlm(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints,
double decay,
ae_int_t restarts,
ae_int_t* info,
mlpreport* rep,
ae_state *_state);
void mlptrainlbfgs(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints,
double decay,
ae_int_t restarts,
double wstep,
ae_int_t maxits,
ae_int_t* info,
mlpreport* rep,
ae_state *_state);
void mlptraines(multilayerperceptron* network,
/* Real */ ae_matrix* trnxy,
ae_int_t trnsize,
/* Real */ ae_matrix* valxy,
ae_int_t valsize,
double decay,
ae_int_t restarts,
ae_int_t* info,
mlpreport* rep,
ae_state *_state);
void mlpkfoldcvlbfgs(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints,
double decay,
ae_int_t restarts,
double wstep,
ae_int_t maxits,
ae_int_t foldscount,
ae_int_t* info,
mlpreport* rep,
mlpcvreport* cvrep,
ae_state *_state);
void mlpkfoldcvlm(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints,
double decay,
ae_int_t restarts,
ae_int_t foldscount,
ae_int_t* info,
mlpreport* rep,
mlpcvreport* cvrep,
ae_state *_state);
void mlpkfoldcv(mlptrainer* s,
multilayerperceptron* network,
ae_int_t nrestarts,
ae_int_t foldscount,
mlpreport* rep,
ae_state *_state);
void _pexec_mlpkfoldcv(mlptrainer* s,
multilayerperceptron* network,
ae_int_t nrestarts,
ae_int_t foldscount,
mlpreport* rep, ae_state *_state);
void mlpcreatetrainer(ae_int_t nin,
ae_int_t nout,
mlptrainer* s,
ae_state *_state);
void mlpcreatetrainercls(ae_int_t nin,
ae_int_t nclasses,
mlptrainer* s,
ae_state *_state);
void mlpsetdataset(mlptrainer* s,
/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_state *_state);
void mlpsetsparsedataset(mlptrainer* s,
sparsematrix* xy,
ae_int_t npoints,
ae_state *_state);
void mlpsetdecay(mlptrainer* s, double decay, ae_state *_state);
void mlpsetcond(mlptrainer* s,
double wstep,
ae_int_t maxits,
ae_state *_state);
void mlptrainnetwork(mlptrainer* s,
multilayerperceptron* network,
ae_int_t nrestarts,
mlpreport* rep,
ae_state *_state);
void mlpstarttraining(mlptrainer* s,
multilayerperceptron* network,
ae_bool randomstart,
ae_state *_state);
ae_bool mlpcontinuetraining(mlptrainer* s,
multilayerperceptron* network,
ae_state *_state);
void mlpebagginglm(mlpensemble* ensemble,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    double decay,
    ae_int_t restarts,
    ae_int_t* info,
    mlpreport* rep,
    mlpcvreport* ooberrors,
    ae_state *_state);
void mlpebagginglbfgs(mlpensemble* ensemble,

[...]

    mlpcvreport* ooberrors,
    ae_state *_state);
void mlpetraines(mlpensemble* ensemble,
    /* Real */ ae_matrix* xy,
    ae_int_t npoints,
    double decay,
    ae_int_t restarts,
    ae_int_t* info,
    mlpreport* rep,
    ae_state *_state);
void mlptrainensemblees(mlptrainer* s,
    mlpensemble* ensemble,
    ae_int_t nrestarts,
    mlpreport* rep,
    ae_state *_state);
ae_bool _mlpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpreport_clear(void* _p);
void _mlpreport_destroy(void* _p);
ae_bool _mlpcvreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpcvreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpcvreport_clear(void* _p);
void _mlpcvreport_destroy(void* _p);
ae_bool _mlptrainer_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlptrainer_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlptrainer_clear(void* _p);
void _mlptrainer_destroy(void* _p);
ae_bool _mlpparallelizationcv_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpparallelizationcv_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpparallelizationcv_clear(void* _p);
void _mlpparallelizationcv_destroy(void* _p);
void pcabuildbasis(/* Real */ ae_matrix* x,
    ae_int_t npoints,
    ae_int_t nvars,
    ae_int_t* info,
    /* Real */ ae_vector* s2,
    /* Real */ ae_matrix* v,
    ae_state *_state);
}
#endif
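As a usage note for the PCA entry point above: the pcabuildbasis declared here
is the internal C routine; a minimal sketch through its C++-layer counterpart,
with purely illustrative data, might look like this:

    #include "dataanalysis.h"
    using namespace alglib;

    // four 2-D points lying roughly along the line y=2x
    real_2d_array x = "[[1,2.1],[2,3.9],[3,6.2],[4,7.8]]";
    real_1d_array s2;   // variances along principal axes, descending
    real_2d_array v;    // basis vectors stored as columns of v
    ae_int_t info;
    pcabuildbasis(x, 4, 2, info, s2, v);  // npoints=4, nvars=2; info=1 on success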


diffequations.h

[...]
    double h,
    odesolverstate* state,
    ae_state *_state);
ae_bool odesolveriteration(odesolverstate* state, ae_state *_state);
void odesolverresults(odesolverstate* state,
    ae_int_t* m,
    /* Real */ ae_vector* xtbl,
    /* Real */ ae_matrix* ytbl,
    odesolverreport* rep,
    ae_state *_state);
ae_bool _odesolverstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _odesolverstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _odesolverstate_clear(void* _p);
void _odesolverstate_destroy(void* _p);
ae_bool _odesolverreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _odesolverreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _odesolverreport_clear(void* _p);
void _odesolverreport_destroy(void* _p);
}
#endif
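A minimal usage sketch of the ODE solver declared above, through the usual C++
wrappers (odesolverrkck/odesolversolve are assumed here); it integrates y'=-y
at the requested grid points:

    #include "diffequations.h"
    using namespace alglib;

    // right-hand side of y' = -y; signature follows the odesolversolve callback
    void ode_rhs(const real_1d_array &y, double x, real_1d_array &dy, void *ptr)
    {
        dy[0] = -y[0];
    }

    int main()
    {
        real_1d_array y    = "[1]";            // initial value y(0)=1
        real_1d_array xtbl = "[0, 0.5, 1.0]";  // points where solution is wanted
        odesolverstate s;
        ae_int_t m;
        real_1d_array xout;
        real_2d_array ytbl;
        odesolverreport rep;
        odesolverrkck(y, xtbl, 0.00001, 0.0, s); // eps=1e-5, automatic step (h=0)
        odesolversolve(s, ode_rhs);
        odesolverresults(s, m, xout, ytbl, rep); // ytbl[i][0] ~ exp(-xout[i])
        return 0;
    }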


integration.h

[...]
    double b,
    double alpha,
    double beta,
    autogkstate* state,
    ae_state *_state);
ae_bool autogkiteration(autogkstate* state, ae_state *_state);
void autogkresults(autogkstate* state,
    double* v,
    autogkreport* rep,
    ae_state *_state);
ae_bool _autogkreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _autogkreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _autogkreport_clear(void* _p);
void _autogkreport_destroy(void* _p);
ae_bool _autogkinternalstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _autogkinternalstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _autogkinternalstate_clear(void* _p);
void _autogkinternalstate_destroy(void* _p);
ae_bool _autogkstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _autogkstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _autogkstate_clear(void* _p);
void _autogkstate_destroy(void* _p);
}
#endif
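A matching usage sketch for the adaptive integrator above, through the usual
C++ wrappers (autogksmooth/autogkintegrate are assumed here); it integrates
cos(x) over [0, pi/2]:

    #include "integration.h"
    #include <math.h>
    using namespace alglib;

    // integrand callback: xminusa = x-a and bminusx = b-x are supplied for
    // integrands with endpoint singularities; a smooth integrand ignores them
    void f_cos(double x, double xminusa, double bminusx, double &y, void *ptr)
    {
        y = cos(x);
    }

    int main()
    {
        autogkstate s;
        double v;
        autogkreport rep;
        autogksmooth(0.0, 1.5707963267948966, s); // [0, pi/2]
        autogkintegrate(s, f_cos);
        autogkresults(s, v, rep);                 // v should be close to 1
        return 0;
    }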


interpolation.h

[...]
typedef struct
{
    double taskrcond;
    ae_int_t iterationscount;
    ae_int_t varidx;
    double rmserror;
    double avgerror;
    double avgrelerror;
    double maxerror;
    double wrmserror;
ae_matrix covpar;
ae_vector errpar;
ae_vector errcurve;
ae_vector noise;
double r2;
} lsfitreport;
typedef struct
{
    ae_int_t optalgo;
    ae_int_t m;
    ae_int_t k;
    double epsf;
    double epsx;
    ae_int_t maxits;
    double stpmax;
    ae_bool xrep;
    ae_vector s;
    ae_vector bndl;
    ae_vector bndu;
    ae_matrix taskx;
    ae_vector tasky;
    ae_int_t npoints;
    ae_vector taskw;
    ae_int_t nweights;
    ae_int_t wkind;
    ae_int_t wits;
double diffstep;
    double teststep;
    ae_bool xupdated;
    ae_bool needf;
    ae_bool needfg;
    ae_bool needfgh;
    ae_int_t pointindex;
    ae_vector x;
    ae_vector c;
    double f;
    ae_vector g;
    ae_matrix h;
    ae_vector wcur;
    ae_vector tmp;
ae_vector tmpf;
ae_matrix tmpjac;
ae_matrix tmpjacw;
double tmpnoise;
matinvreport invrep;
    ae_int_t repiterationscount;
    ae_int_t repterminationtype;
    ae_int_t repvaridx;
    double reprmserror;
    double repavgerror;
    double repavgrelerror;
    double repmaxerror;
    double repwrmserror;
lsfitreport rep;
    minlmstate optstate;
    minlmreport optrep;
    ae_int_t prevnpt;
    ae_int_t prevalgo;
    rcommstate rstate;
} lsfitstate;
typedef struct
{
    ae_int_t n;
    ae_bool periodic;
[...]
    virtual ~spline1dfitreport();
    double &taskrcond;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
    double &maxerror;
};
/*************************************************************************
Least squares fitting report. This structure contains informational fields
which are set by fitting functions provided by this unit.

Different functions initialize different sets of fields, so you should
read the documentation on the specific function you used in order to know
which fields are initialized.
    TaskRCond       reciprocal of task's condition number
    IterationsCount number of internal iterations
VarIdx if user-supplied gradient contains errors which were
detected by nonlinear fitter, this field is set to
index of the first component of gradient which is
suspected to be spoiled by bugs.
    RMSError        RMS error
    AvgError        average error
    AvgRelError     average relative error (for non-zero Y[I])
    MaxError        maximum error
    WRMSError       weighted RMS error
    CovPar          covariance matrix for parameters, filled by some solvers
    ErrPar          vector of errors in parameters, filled by some solvers
    ErrCurve        vector of fit errors - variability of the best-fit
                    curve, filled by some solvers.
    Noise           vector of per-point noise estimates, filled by
                    some solvers.
    R2              coefficient of determination (non-weighted,
                    non-adjusted), filled by some solvers.
*************************************************************************/
class _lsfitreport_owner
{
public:
    _lsfitreport_owner();
    _lsfitreport_owner(const _lsfitreport_owner &rhs);
    _lsfitreport_owner& operator=(const _lsfitreport_owner &rhs);
    virtual ~_lsfitreport_owner();
    alglib_impl::lsfitreport* c_ptr();
    alglib_impl::lsfitreport* c_ptr() const;
[...]
    lsfitreport& operator=(const lsfitreport &rhs);
    virtual ~lsfitreport();
    double &taskrcond;
    ae_int_t &iterationscount;
    ae_int_t &varidx;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
    double &maxerror;
    double &wrmserror;
real_2d_array covpar;
real_1d_array errpar;
real_1d_array errcurve;
real_1d_array noise;
double &r2;
};

/*************************************************************************
Nonlinear fitter.

You should use ALGLIB functions to work with fitter.
Never try to access its fields directly!
*************************************************************************/
class _lsfitstate_owner
[...]
OUTPUT PARAMETERS:
    Info    -   error code:
                * -4    internal SVD decomposition subroutine failed (very
                        rare and for degenerate systems only)
                * -1    incorrect N/M were specified
                *  1    task is solved
    C       -   decomposition coefficients, array[0..M-1]
    Rep     -   fitting report. Following fields are set:
                * Rep.TaskRCond     reciprocal of condition number
                * R2                non-adjusted coefficient of determination
                                    (non-weighted)
                * RMSError          rms error on the (X,Y).
                * AvgError          average error on the (X,Y).
                * AvgRelError       average relative error on the non-zero Y
                * MaxError          maximum error
                                    NON-WEIGHTED ERRORS ARE CALCULATED
ERRORS IN PARAMETERS
This solver also calculates different kinds of errors in parameters and
fills corresponding fields of report:
* Rep.CovPar covariance matrix for parameters, array[K,K].
* Rep.ErrPar errors in parameters, array[K],
errpar = sqrt(diag(CovPar))
* Rep.ErrCurve vector of fit errors - standard deviations of empirical
best-fit curve from "ideal" best-fit curve built with
infinite number of samples, array[N].
errcurve = sqrt(diag(F*CovPar*F')),
where F is functions matrix.
* Rep.Noise vector of per-point estimates of noise, array[N]
NOTE: noise in the data is estimated as follows:
* for fitting without user-supplied weights all points are
assumed to have same level of noise, which is estimated from
the data
* for fitting with user-supplied weights we assume that noise
level in I-th point is inversely proportional to Ith weight.
Coefficient of proportionality is estimated from the data.
NOTE: we apply small amount of regularization when we invert squared
Jacobian and calculate covariance matrix. It guarantees that
algorithm won't divide by zero during inversion, but skews
error estimates a bit (fractional error is about 10^-9).
However, we believe that this difference is insignificant for
all practical purposes except for the situation when you want
to compare ALGLIB results with "reference" implementation up
to the last significant digit.
  -- ALGLIB --
     Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitlinearw(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, const ae_int_t n, const ae_int_t m, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinearw(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
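A small sketch of the weighted linear fit described above, fitting
y ~ c0 + c1*x with unit weights and reading the new report fields
(data illustrative):

    using namespace alglib;

    real_1d_array y = "[0.9, 2.1, 2.9, 4.2]";
    real_1d_array w = "[1, 1, 1, 1]";
    // FMatrix[i][j] = value of j-th basis function at i-th point: basis {1, x}
    real_2d_array fmatrix = "[[1,0],[1,1],[1,2],[1,3]]";
    ae_int_t info;
    real_1d_array c;
    lsfitreport rep;
    lsfitlinearw(y, w, fmatrix, info, c, rep);
    // c ~ [c0,c1]; rep.r2, rep.errpar and rep.covpar are filled as documented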
/*************************************************************************
Weighted constrained linear least squares fitting.

This is a variation of LSFitLinearW() which searches for min|A*x-b| given
[...]
    Info    -   error code:
                * -4    internal SVD decomposition subroutine failed (very
                        rare and for degenerate systems only)
                * -3    either too many constraints (M or more),
                        degenerate constraints (some constraints are
                        repeated twice) or inconsistent constraints were
                        specified.
                *  1    task is solved
    C       -   decomposition coefficients, array[0..M-1]
    Rep     -   fitting report. Following fields are set:
                * R2                non-adjusted coefficient of determination
                                    (non-weighted)
                * RMSError          rms error on the (X,Y).
                * AvgError          average error on the (X,Y).
                * AvgRelError       average relative error on the non-zero Y
                * MaxError          maximum error
                                    NON-WEIGHTED ERRORS ARE CALCULATED

IMPORTANT:
    this subroutine doesn't calculate task's condition number for K<>0.
ERRORS IN PARAMETERS
This solver also calculates different kinds of errors in parameters and
fills corresponding fields of report:
* Rep.CovPar covariance matrix for parameters, array[K,K].
* Rep.ErrPar errors in parameters, array[K],
errpar = sqrt(diag(CovPar))
* Rep.ErrCurve vector of fit errors - standard deviations of empirical
best-fit curve from "ideal" best-fit curve built with
infinite number of samples, array[N].
errcurve = sqrt(diag(F*CovPar*F')),
where F is functions matrix.
* Rep.Noise vector of per-point estimates of noise, array[N]
IMPORTANT: errors in parameters are calculated without taking into
account boundary/linear constraints! Presence of constraints
changes distribution of errors, but there is no easy way to
account for constraints when you calculate covariance matrix.
NOTE: noise in the data is estimated as follows:
* for fitting without user-supplied weights all points are
assumed to have same level of noise, which is estimated from
the data
* for fitting with user-supplied weights we assume that noise
level in I-th point is inversely proportional to Ith weight.
Coefficient of proportionality is estimated from the data.
NOTE: we apply small amount of regularization when we invert squared
Jacobian and calculate covariance matrix. It guarantees that
algorithm won't divide by zero during inversion, but skews
error estimates a bit (fractional error is about 10^-9).
However, we believe that this difference is insignificant for
all practical purposes except for the situation when you want
to compare ALGLIB results with "reference" implementation up
to the last significant digit.
  -- ALGLIB --
     Copyright 07.09.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitlinearwc(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, const real_2d_array &cmatrix, const ae_int_t n, const ae_int_t m, const ae_int_t k, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinearwc(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, const real_2d_array &cmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
/*************************************************************************
Linear least squares fitting.

QR decomposition is used to reduce task to MxM, then triangular solver or

[...]
    M       -   number of basis functions, M>=1.

OUTPUT PARAMETERS:
    Info    -   error code:
                * -4    internal SVD decomposition subroutine failed (very
                        rare and for degenerate systems only)
                *  1    task is solved
    C       -   decomposition coefficients, array[0..M-1]
    Rep     -   fitting report. Following fields are set:
                * Rep.TaskRCond     reciprocal of condition number
                * R2                non-adjusted coefficient of determination
                                    (non-weighted)
                * RMSError          rms error on the (X,Y).
                * AvgError          average error on the (X,Y).
                * AvgRelError       average relative error on the non-zero Y
                * MaxError          maximum error
                                    NON-WEIGHTED ERRORS ARE CALCULATED
This solver also calculates different kinds of errors in parameters and
fills corresponding fields of report:
* Rep.CovPar covariance matrix for parameters, array[K,K].
* Rep.ErrPar errors in parameters, array[K],
errpar = sqrt(diag(CovPar))
* Rep.ErrCurve vector of fit errors - standard deviations of empirical
best-fit curve from "ideal" best-fit curve built with
infinite number of samples, array[N].
errcurve = sqrt(diag(F*CovPar*F')),
where F is functions matrix.
* Rep.Noise vector of per-point estimates of noise, array[N]
NOTE: noise in the data is estimated as follows:
* for fitting without user-supplied weights all points are
assumed to have same level of noise, which is estimated from
the data
* for fitting with user-supplied weights we assume that noise
level in I-th point is inversely proportional to Ith weight.
Coefficient of proportionality is estimated from the data.
NOTE: we apply small amount of regularization when we invert squared
Jacobian and calculate covariance matrix. It guarantees that
algorithm won't divide by zero during inversion, but skews
error estimates a bit (fractional error is about 10^-9).
However, we believe that this difference is insignificant for
all practical purposes except for the situation when you want
to compare ALGLIB results with "reference" implementation up
to the last significant digit.
  -- ALGLIB --
     Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitlinear(const real_1d_array &y, const real_2d_array &fmatrix, const ae_int_t n, const ae_int_t m, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinear(const real_1d_array &y, const real_2d_array &fmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
/*************************************************************************
Constrained linear least squares fitting.

This is a variation of LSFitLinear() which searches for min|A*x-b| given

[...]
Info - error code: Info - error code:
* -4 internal SVD decomposition subroutine failed (very * -4 internal SVD decomposition subroutine failed (very
rare and for degenerate systems only) rare and for degenerate systems only)
* -3 either too many constraints (M or more), * -3 either too many constraints (M or more),
degenerate constraints (some constraints are degenerate constraints (some constraints are
repetead twice) or inconsistent constraints were repetead twice) or inconsistent constraints were
specified. specified.
* 1 task is solved * 1 task is solved
C - decomposition coefficients, array[0..M-1] C - decomposition coefficients, array[0..M-1]
Rep - fitting report. Following fields are set: Rep - fitting report. Following fields are set:
* R2 non-adjusted coefficient of determinati
on
(non-weighted)
* RMSError rms error on the (X,Y). * RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y). * AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y * AvgRelError average relative error on the non-zero Y
* MaxError maximum error * MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED NON-WEIGHTED ERRORS ARE CALCULATED
IMPORTANT: IMPORTANT:
this subroutine doesn't calculate the task's condition number for K<>0. this subroutine doesn't calculate the task's condition number for K<>0.
ERRORS IN PARAMETERS
This solver also calculates different kinds of errors in parameters and
fills corresponding fields of report:
* Rep.CovPar covariance matrix for parameters, array[K,K].
* Rep.ErrPar errors in parameters, array[K],
errpar = sqrt(diag(CovPar))
* Rep.ErrCurve vector of fit errors - standard deviations of empirical
best-fit curve from "ideal" best-fit curve built with
infinite number of samples, array[N].
errcurve = sqrt(diag(F*CovPar*F')),
where F is functions matrix.
* Rep.Noise vector of per-point estimates of noise, array[N]
IMPORTANT: errors in parameters are calculated without taking into
account boundary/linear constraints! Presence of constraints
changes distribution of errors, but there is no easy way to
account for constraints when you calculate covariance matrix.
NOTE: noise in the data is estimated as follows:
* for fitting without user-supplied weights all points are
assumed to have the same noise level, which is estimated from
the data
* for fitting with user-supplied weights we assume that the noise
level in the I-th point is inversely proportional to the I-th
weight. The coefficient of proportionality is estimated from the
data.
NOTE: we apply a small amount of regularization when we invert the
squared Jacobian and calculate the covariance matrix. It
guarantees that the algorithm won't divide by zero during
inversion, but it skews error estimates a bit (fractional error
is about 10^-9). However, we believe that this difference is
insignificant for all practical purposes except when you want
to compare ALGLIB results with a "reference" implementation up
to the last significant digit.
-- ALGLIB -- -- ALGLIB --
Copyright 07.09.2009 by Bochkanov Sergey Copyright 07.09.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void lsfitlinearc(const real_1d_array &y, const real_2d_array &fmatrix, const real_2d_array &cmatrix, const ae_int_t n, const ae_int_t m, const ae_int_t k, ae_int_t &info, real_1d_array &c, lsfitreport &rep); void lsfitlinearc(const real_1d_array &y, const real_2d_array &fmatrix, const real_2d_array &cmatrix, const ae_int_t n, const ae_int_t m, const ae_int_t k, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinearc(const real_1d_array &y, const real_2d_array &fmatrix, const real_2d_array &cmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep); void lsfitlinearc(const real_1d_array &y, const real_2d_array &fmatrix, const real_2d_array &cmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
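A sketch of the constrained variant, assuming the CMatrix layout described in the elided part of this comment (array[K,M+1], with the constraint right-hand side in the last column). Here the unconstrained optimum c = [1,2] already satisfies c0 + c1 = 3, so the constrained answer matches it; main() and the data are illustrative:

    #include <cstdio>
    #include "interpolation.h"
    using namespace alglib;

    int main()
    {
        real_2d_array fmatrix = "[[1,0],[1,1],[1,2],[1,3]]";
        real_1d_array y = "[1,3,5,7]";
        real_2d_array cmatrix = "[[1,1,3]]";   // one constraint: c0 + c1 = 3
        ae_int_t info;
        real_1d_array c;
        lsfitreport rep;
        lsfitlinearc(y, fmatrix, cmatrix, info, c, rep);
        printf("%s\n", c.tostring(2).c_str()); // c0 + c1 == 3 exactly
        return 0;
    }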
/************************************************************************* /*************************************************************************
Weighted nonlinear least squares fitting using function values only. Weighted nonlinear least squares fitting using function values only.
Combination of numerical differentiation and secant updates is used to Combination of numerical differentiation and secant updates is used to
skipping to change at line 3152 skipping to change at line 3336
/************************************************************************* /*************************************************************************
Nonlinear least squares fitting results. Nonlinear least squares fitting results.
Called after return from LSFitFit(). Called after return from LSFitFit().
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state State - algorithm state
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
Info - completetion code: Info - completion code:
* -7 gradient verification failed. * -7 gradient verification failed.
See LSFitSetGradientCheck() for more information. See LSFitSetGradientCheck() for more information.
* 1 relative function improvement is no more than * 1 relative function improvement is no more than
EpsF. EpsF.
* 2 relative step is no more than EpsX. * 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG * 4 gradient norm is no more than EpsG
* 5 MaxIts steps were taken * 5 MaxIts steps were taken
* 7 stopping conditions are too stringent, * 7 stopping conditions are too stringent,
further improvement is impossible further improvement is impossible
C - array[0..K-1], solution C - array[0..K-1], solution
Rep - optimization report. Following fields are set: Rep - optimization report. On success following fields are set:
* Rep.TerminationType completetion code: * R2 non-adjusted coefficient of determination
(non-weighted)
* RMSError rms error on the (X,Y). * RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y). * AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y * AvgRelError average relative error on the non-zero Y
* MaxError maximum error * MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED NON-WEIGHTED ERRORS ARE CALCULATED
* WRMSError weighted rms error on the (X,Y). * WRMSError weighted rms error on the (X,Y).
ERRORS IN PARAMETERS
This solver also calculates different kinds of errors in parameters and
fills corresponding fields of report:
* Rep.CovPar covariance matrix for parameters, array[K,K].
* Rep.ErrPar errors in parameters, array[K],
errpar = sqrt(diag(CovPar))
* Rep.ErrCurve vector of fit errors - standard deviations of empirical
best-fit curve from "ideal" best-fit curve built with
infinite number of samples, array[N].
errcurve = sqrt(diag(J*CovPar*J')),
where J is Jacobian matrix.
* Rep.Noise vector of per-point estimates of noise, array[N]
IMPORTANT: errors in parameters are calculated without taking into
account boundary/linear constraints! Presence of constraints
changes distribution of errors, but there is no easy way to
account for constraints when you calculate covariance matrix.
NOTE: noise in the data is estimated as follows:
* for fitting without user-supplied weights all points are
assumed to have the same noise level, which is estimated from
the data
* for fitting with user-supplied weights we assume that the noise
level in the I-th point is inversely proportional to the I-th
weight. The coefficient of proportionality is estimated from the
data.
NOTE: we apply a small amount of regularization when we invert the
squared Jacobian and calculate the covariance matrix. It
guarantees that the algorithm won't divide by zero during
inversion, but it skews error estimates a bit (fractional error
is about 10^-9). However, we believe that this difference is
insignificant for all practical purposes except when you want
to compare ALGLIB results with a "reference" implementation up
to the last significant digit.
-- ALGLIB -- -- ALGLIB --
Copyright 17.08.2009 by Bochkanov Sergey Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void lsfitresults(const lsfitstate &state, ae_int_t &info, real_1d_array &c, lsfitreport &rep); void lsfitresults(const lsfitstate &state, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
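A sketch of the surrounding workflow, loosely following the ALGLIB nonlinear fitting examples: LSFitCreateF() and LSFitFit() are declared in elided parts of this header (their exact signatures are assumed here), and the model f(c,x) = exp(-c0*x^2) plus all data are illustrative:

    #include <cstdio>
    #include <cmath>
    #include "interpolation.h"
    using namespace alglib;

    // callback evaluating f(c,x) = exp(-c0*x0^2) at given c and x
    static void func_cx(const real_1d_array &c, const real_1d_array &x,
                        double &func, void *ptr)
    {
        func = exp(-c[0]*x[0]*x[0]);
    }

    int main()
    {
        real_2d_array x = "[[-1],[-0.5],[0],[0.5],[1]]";
        real_1d_array y = "[0.3679,0.7788,1.0,0.7788,0.3679]"; // samples of exp(-x^2)
        real_1d_array c = "[0.3]";            // initial guess for c0
        lsfitstate state;
        lsfitreport rep;
        ae_int_t info;
        lsfitcreatef(x, y, c, 0.0001, state); // fitting with numerical differentiation
        lsfitfit(state, func_cx);
        lsfitresults(state, info, c, rep);    // expects c0 ~ 1.0
        printf("%d %s\n", int(info), c.tostring(3).c_str());
        return 0;
    }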
/************************************************************************* /*************************************************************************
This subroutine turns on verification of the user-supplied analytic This subroutine turns on verification of the user-supplied analytic
gradient: gradient:
* user calls this subroutine before fitting begins * user calls this subroutine before fitting begins
* LSFitFit() is called * LSFitFit() is called
skipping to change at line 4614 skipping to change at line 4836
idwinterpolant* z, idwinterpolant* z,
ae_state *_state); ae_state *_state);
void idwbuildnoisy(/* Real */ ae_matrix* xy, void idwbuildnoisy(/* Real */ ae_matrix* xy,
ae_int_t n, ae_int_t n,
ae_int_t nx, ae_int_t nx,
ae_int_t d, ae_int_t d,
ae_int_t nq, ae_int_t nq,
ae_int_t nw, ae_int_t nw,
idwinterpolant* z, idwinterpolant* z,
ae_state *_state); ae_state *_state);
ae_bool _idwinterpolant_init(idwinterpolant* p, ae_state *_state, ae_bool make_automatic); ae_bool _idwinterpolant_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _idwinterpolant_init_copy(idwinterpolant* dst, idwinterpolant* src, ae_state *_state, ae_bool make_automatic); ae_bool _idwinterpolant_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _idwinterpolant_clear(idwinterpolant* p); void _idwinterpolant_clear(void* _p);
void _idwinterpolant_destroy(void* _p);
double barycentriccalc(barycentricinterpolant* b, double barycentriccalc(barycentricinterpolant* b,
double t, double t,
ae_state *_state); ae_state *_state);
void barycentricdiff1(barycentricinterpolant* b, void barycentricdiff1(barycentricinterpolant* b,
double t, double t,
double* f, double* f,
double* df, double* df,
ae_state *_state); ae_state *_state);
void barycentricdiff2(barycentricinterpolant* b, void barycentricdiff2(barycentricinterpolant* b,
double t, double t,
skipping to change at line 4660 skipping to change at line 4883
ae_state *_state); ae_state *_state);
void barycentricbuildfloaterhormann(/* Real */ ae_vector* x, void barycentricbuildfloaterhormann(/* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_int_t n, ae_int_t n,
ae_int_t d, ae_int_t d,
barycentricinterpolant* b, barycentricinterpolant* b,
ae_state *_state); ae_state *_state);
void barycentriccopy(barycentricinterpolant* b, void barycentriccopy(barycentricinterpolant* b,
barycentricinterpolant* b2, barycentricinterpolant* b2,
ae_state *_state); ae_state *_state);
ae_bool _barycentricinterpolant_init(barycentricinterpolant* p, ae_state *_state, ae_bool make_automatic); ae_bool _barycentricinterpolant_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _barycentricinterpolant_init_copy(barycentricinterpolant* dst, barycentricinterpolant* src, ae_state *_state, ae_bool make_automatic); ae_bool _barycentricinterpolant_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _barycentricinterpolant_clear(barycentricinterpolant* p); void _barycentricinterpolant_clear(void* _p);
void _barycentricinterpolant_destroy(void* _p);
void polynomialbar2cheb(barycentricinterpolant* p, void polynomialbar2cheb(barycentricinterpolant* p,
double a, double a,
double b, double b,
/* Real */ ae_vector* t, /* Real */ ae_vector* t,
ae_state *_state); ae_state *_state);
void polynomialcheb2bar(/* Real */ ae_vector* t, void polynomialcheb2bar(/* Real */ ae_vector* t,
ae_int_t n, ae_int_t n,
double a, double a,
double b, double b,
barycentricinterpolant* p, barycentricinterpolant* p,
skipping to change at line 4901 skipping to change at line 5125
double mb, double mb,
double a, double a,
double b, double b,
double* x, double* x,
ae_state *_state); ae_state *_state);
void spline1dbuildmonotone(/* Real */ ae_vector* x, void spline1dbuildmonotone(/* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_int_t n, ae_int_t n,
spline1dinterpolant* c, spline1dinterpolant* c,
ae_state *_state); ae_state *_state);
ae_bool _spline1dinterpolant_init(spline1dinterpolant* p, ae_state *_state, ae_bool make_automatic); ae_bool _spline1dinterpolant_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _spline1dinterpolant_init_copy(spline1dinterpolant* dst, spline1dinterpolant* src, ae_state *_state, ae_bool make_automatic); ae_bool _spline1dinterpolant_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _spline1dinterpolant_clear(spline1dinterpolant* p); void _spline1dinterpolant_clear(void* _p);
void _spline1dinterpolant_destroy(void* _p);
void polynomialfit(/* Real */ ae_vector* x, void polynomialfit(/* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_int_t n, ae_int_t n,
ae_int_t m, ae_int_t m,
ae_int_t* info, ae_int_t* info,
barycentricinterpolant* p, barycentricinterpolant* p,
polynomialfitreport* rep, polynomialfitreport* rep,
ae_state *_state); ae_state *_state);
void polynomialfitwc(/* Real */ ae_vector* x, void polynomialfitwc(/* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
skipping to change at line 5138 skipping to change at line 5363
/* Real */ ae_vector* yc, /* Real */ ae_vector* yc,
/* Integer */ ae_vector* dc, /* Integer */ ae_vector* dc,
ae_int_t k, ae_int_t k,
double* xa, double* xa,
double* xb, double* xb,
double* sa, double* sa,
double* sb, double* sb,
/* Real */ ae_vector* xoriginal, /* Real */ ae_vector* xoriginal,
/* Real */ ae_vector* yoriginal, /* Real */ ae_vector* yoriginal,
ae_state *_state); ae_state *_state);
ae_bool _polynomialfitreport_init(polynomialfitreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _polynomialfitreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _polynomialfitreport_init_copy(polynomialfitreport* dst, polynomialfitreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _polynomialfitreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _polynomialfitreport_clear(polynomialfitreport* p); void _polynomialfitreport_clear(void* _p);
void _polynomialfitreport_destroy(void* _p);
ae_bool _barycentricfitreport_init(barycentricfitreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _barycentricfitreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _barycentricfitreport_init_copy(barycentricfitreport* dst, barycentricfitreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _barycentricfitreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _barycentricfitreport_clear(barycentricfitreport* p); void _barycentricfitreport_clear(void* _p);
void _barycentricfitreport_destroy(void* _p);
ae_bool _spline1dfitreport_init(spline1dfitreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _spline1dfitreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _spline1dfitreport_init_copy(spline1dfitreport* dst, spline1dfitreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _spline1dfitreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _spline1dfitreport_clear(spline1dfitreport* p); void _spline1dfitreport_clear(void* _p);
void _spline1dfitreport_destroy(void* _p);
ae_bool _lsfitreport_init(lsfitreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _lsfitreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _lsfitreport_init_copy(lsfitreport* dst, lsfitreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _lsfitreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _lsfitreport_clear(lsfitreport* p); void _lsfitreport_clear(void* _p);
void _lsfitreport_destroy(void* _p);
ae_bool _lsfitstate_init(lsfitstate* p, ae_state *_state, ae_bool make_automatic); ae_bool _lsfitstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _lsfitstate_init_copy(lsfitstate* dst, lsfitstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _lsfitstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _lsfitstate_clear(lsfitstate* p); void _lsfitstate_clear(void* _p);
void _lsfitstate_destroy(void* _p);
void pspline2build(/* Real */ ae_matrix* xy, void pspline2build(/* Real */ ae_matrix* xy,
ae_int_t n, ae_int_t n,
ae_int_t st, ae_int_t st,
ae_int_t pt, ae_int_t pt,
pspline2interpolant* p, pspline2interpolant* p,
ae_state *_state); ae_state *_state);
void pspline3build(/* Real */ ae_matrix* xy, void pspline3build(/* Real */ ae_matrix* xy,
ae_int_t n, ae_int_t n,
ae_int_t st, ae_int_t st,
ae_int_t pt, ae_int_t pt,
skipping to change at line 5252 skipping to change at line 5482
double* d2z, double* d2z,
ae_state *_state); ae_state *_state);
double pspline2arclength(pspline2interpolant* p, double pspline2arclength(pspline2interpolant* p,
double a, double a,
double b, double b,
ae_state *_state); ae_state *_state);
double pspline3arclength(pspline3interpolant* p, double pspline3arclength(pspline3interpolant* p,
double a, double a,
double b, double b,
ae_state *_state); ae_state *_state);
ae_bool _pspline2interpolant_init(pspline2interpolant* p, ae_state *_state, ae_bool make_automatic); ae_bool _pspline2interpolant_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _pspline2interpolant_init_copy(pspline2interpolant* dst, pspline2interpolant* src, ae_state *_state, ae_bool make_automatic); ae_bool _pspline2interpolant_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _pspline2interpolant_clear(pspline2interpolant* p); void _pspline2interpolant_clear(void* _p);
void _pspline2interpolant_destroy(void* _p);
ae_bool _pspline3interpolant_init(pspline3interpolant* p, ae_state *_state, ae_bool make_automatic); ae_bool _pspline3interpolant_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _pspline3interpolant_init_copy(pspline3interpolant* dst, pspline3interpolant* src, ae_state *_state, ae_bool make_automatic); ae_bool _pspline3interpolant_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _pspline3interpolant_clear(pspline3interpolant* p); void _pspline3interpolant_clear(void* _p);
void _pspline3interpolant_destroy(void* _p);
void rbfcreate(ae_int_t nx, ae_int_t ny, rbfmodel* s, ae_state *_state); void rbfcreate(ae_int_t nx, ae_int_t ny, rbfmodel* s, ae_state *_state);
void rbfsetpoints(rbfmodel* s, void rbfsetpoints(rbfmodel* s,
/* Real */ ae_matrix* xy, /* Real */ ae_matrix* xy,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void rbfsetalgoqnn(rbfmodel* s, double q, double z, ae_state *_state); void rbfsetalgoqnn(rbfmodel* s, double q, double z, ae_state *_state);
void rbfsetalgomultilayer(rbfmodel* s, void rbfsetalgomultilayer(rbfmodel* s,
double rbase, double rbase,
ae_int_t nlayers, ae_int_t nlayers,
double lambdav, double lambdav,
skipping to change at line 5309 skipping to change at line 5541
void rbfunpack(rbfmodel* s, void rbfunpack(rbfmodel* s,
ae_int_t* nx, ae_int_t* nx,
ae_int_t* ny, ae_int_t* ny,
/* Real */ ae_matrix* xwr, /* Real */ ae_matrix* xwr,
ae_int_t* nc, ae_int_t* nc,
/* Real */ ae_matrix* v, /* Real */ ae_matrix* v,
ae_state *_state); ae_state *_state);
void rbfalloc(ae_serializer* s, rbfmodel* model, ae_state *_state); void rbfalloc(ae_serializer* s, rbfmodel* model, ae_state *_state);
void rbfserialize(ae_serializer* s, rbfmodel* model, ae_state *_state); void rbfserialize(ae_serializer* s, rbfmodel* model, ae_state *_state);
void rbfunserialize(ae_serializer* s, rbfmodel* model, ae_state *_state); void rbfunserialize(ae_serializer* s, rbfmodel* model, ae_state *_state);
ae_bool _rbfmodel_init(rbfmodel* p, ae_state *_state, ae_bool make_automatic); ae_bool _rbfmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _rbfmodel_init_copy(rbfmodel* dst, rbfmodel* src, ae_state *_state, ae_bool make_automatic); ae_bool _rbfmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _rbfmodel_clear(rbfmodel* p); void _rbfmodel_clear(void* _p);
void _rbfmodel_destroy(void* _p);
ae_bool _rbfreport_init(rbfreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _rbfreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _rbfreport_init_copy(rbfreport* dst, rbfreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _rbfreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _rbfreport_clear(rbfreport* p); void _rbfreport_clear(void* _p);
void _rbfreport_destroy(void* _p);
double spline2dcalc(spline2dinterpolant* c, double spline2dcalc(spline2dinterpolant* c,
double x, double x,
double y, double y,
ae_state *_state); ae_state *_state);
void spline2ddiff(spline2dinterpolant* c, void spline2ddiff(spline2dinterpolant* c,
double x, double x,
double y, double y,
double* f, double* f,
double* fx, double* fx,
double* fy, double* fy,
skipping to change at line 5405 skipping to change at line 5639
/* Real */ ae_matrix* f, /* Real */ ae_matrix* f,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
spline2dinterpolant* c, spline2dinterpolant* c,
ae_state *_state); ae_state *_state);
void spline2dunpack(spline2dinterpolant* c, void spline2dunpack(spline2dinterpolant* c,
ae_int_t* m, ae_int_t* m,
ae_int_t* n, ae_int_t* n,
/* Real */ ae_matrix* tbl, /* Real */ ae_matrix* tbl,
ae_state *_state); ae_state *_state);
ae_bool _spline2dinterpolant_init(spline2dinterpolant* p, ae_state *_state, ae_bool make_automatic); ae_bool _spline2dinterpolant_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _spline2dinterpolant_init_copy(spline2dinterpolant* dst, spline2dinterpolant* src, ae_state *_state, ae_bool make_automatic); ae_bool _spline2dinterpolant_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _spline2dinterpolant_clear(spline2dinterpolant* p); void _spline2dinterpolant_clear(void* _p);
void _spline2dinterpolant_destroy(void* _p);
double spline3dcalc(spline3dinterpolant* c, double spline3dcalc(spline3dinterpolant* c,
double x, double x,
double y, double y,
double z, double z,
ae_state *_state); ae_state *_state);
void spline3dlintransxyz(spline3dinterpolant* c, void spline3dlintransxyz(spline3dinterpolant* c,
double ax, double ax,
double bx, double bx,
double ay, double ay,
double by, double by,
skipping to change at line 5467 skipping to change at line 5702
/* Real */ ae_vector* f, /* Real */ ae_vector* f,
ae_state *_state); ae_state *_state);
void spline3dunpackv(spline3dinterpolant* c, void spline3dunpackv(spline3dinterpolant* c,
ae_int_t* n, ae_int_t* n,
ae_int_t* m, ae_int_t* m,
ae_int_t* l, ae_int_t* l,
ae_int_t* d, ae_int_t* d,
ae_int_t* stype, ae_int_t* stype,
/* Real */ ae_matrix* tbl, /* Real */ ae_matrix* tbl,
ae_state *_state); ae_state *_state);
ae_bool _spline3dinterpolant_init(spline3dinterpolant* p, ae_state *_state, ae_bool make_automatic); ae_bool _spline3dinterpolant_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _spline3dinterpolant_init_copy(spline3dinterpolant* dst, spline3dinterpolant* src, ae_state *_state, ae_bool make_automatic); ae_bool _spline3dinterpolant_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _spline3dinterpolant_clear(spline3dinterpolant* p); void _spline3dinterpolant_clear(void* _p);
void _spline3dinterpolant_destroy(void* _p);
} }
#endif #endif
 End of changes. 29 change blocks. 
75 lines changed or deleted 317 lines changed or added


 linalg.h   linalg.h 
skipping to change at line 39 skipping to change at line 39
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
typedef struct typedef struct
{ {
double r1; double r1;
double rinf; double rinf;
} matinvreport; } matinvreport;
typedef struct typedef struct
{ {
ae_vector vals;
ae_vector idx;
ae_vector ridx;
ae_vector didx;
ae_vector uidx;
ae_int_t matrixtype;
ae_int_t m;
ae_int_t n;
ae_int_t nfree;
ae_int_t ninitialized;
} sparsematrix;
typedef struct
{
double e1; double e1;
double e2; double e2;
ae_vector x; ae_vector x;
ae_vector ax; ae_vector ax;
double xax; double xax;
ae_int_t n; ae_int_t n;
ae_vector rk; ae_vector rk;
ae_vector rk1; ae_vector rk1;
ae_vector xk; ae_vector xk;
ae_vector xk1; ae_vector xk1;
ae_vector pk; ae_vector pk;
ae_vector pk1; ae_vector pk1;
ae_vector b; ae_vector b;
rcommstate rstate; rcommstate rstate;
ae_vector tmp2; ae_vector tmp2;
} fblslincgstate; } fblslincgstate;
typedef struct typedef struct
{ {
ae_vector vals;
ae_vector idx;
ae_vector ridx;
ae_vector didx;
ae_vector uidx;
ae_int_t matrixtype;
ae_int_t m;
ae_int_t n;
ae_int_t nfree;
ae_int_t ninitialized;
} sparsematrix;
typedef struct
{
ae_int_t n; ae_int_t n;
ae_int_t m; ae_int_t m;
ae_int_t nstart; ae_int_t nstart;
ae_int_t nits; ae_int_t nits;
ae_int_t seedval; ae_int_t seedval;
ae_vector x0; ae_vector x0;
ae_vector x1; ae_vector x1;
ae_vector t; ae_vector t;
ae_vector xbest; ae_vector xbest;
hqrndstate r; hqrndstate r;
skipping to change at line 575 skipping to change at line 575
IC - submatrix offset IC - submatrix offset
JC - submatrix offset JC - submatrix offset
-- ALGLIB routine -- -- ALGLIB routine --
16.12.2009 16.12.2009
Bochkanov Sergey Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, complex_2d_array &c, const ae_int_t ic, const ae_int_t jc); void cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, complex_2d_array &c, const ae_int_t ic, const ae_int_t jc);
/************************************************************************* /*************************************************************************
This subroutine calculates C = alpha*op1(A)*op2(B) + beta*C, where:
* C is MxN general matrix
* op1(A) is MxK matrix
* op2(B) is KxN matrix
* "op" may be identity transformation, transposition
Additional info:
* cache-oblivious algorithm is used.
* multiplication result replaces C. If Beta=0, C elements are not used in
calculations (not multiplied by zero - just not referenced)
* if Alpha=0, A is not used (not multiplied by zero - just not referenced)
* if both Beta and Alpha are zero, C is filled by zeros.
INPUT PARAMETERS
M - matrix size, M>0
N - matrix size, N>0
K - matrix size, K>0
Alpha - coefficient
A - matrix
IA - submatrix offset
JA - submatrix offset
OpTypeA - transformation type:
* 0 - no transformation
* 1 - transposition
B - matrix
IB - submatrix offset
JB - submatrix offset
OpTypeB - transformation type:
* 0 - no transformation
* 1 - transposition
Beta - coefficient
C - matrix
IC - submatrix offset
JC - submatrix offset
-- ALGLIB routine --
16.12.2009
Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, real_2d_array &c, const ae_int_t ic, const ae_int_t jc); void rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc);
void smp_rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc);
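A small sketch of a plain GEMM call matching the description above: C := Alpha*A*B + Beta*C on whole 2x2 matrices, with zero submatrix offsets and no transposition (main() and the data are illustrative):

    #include <cstdio>
    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        real_2d_array a = "[[1,2],[3,4]]";
        real_2d_array b = "[[5,6],[7,8]]";
        real_2d_array c = "[[0,0],[0,0]]";
        rmatrixgemm(2, 2, 2,       // M, N, K
                    1.0,           // Alpha
                    a, 0, 0, 0,    // A, IA, JA, OpTypeA=0 (no transposition)
                    b, 0, 0, 0,    // B, IB, JB, OpTypeB=0
                    0.0,           // Beta=0: old contents of C not referenced
                    c, 0, 0);      // C, IC, JC
        printf("%s\n", c.tostring(0).c_str()); // [[19,22],[43,50]]
        return 0;
    }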
/************************************************************************* /*************************************************************************
QR decomposition of a rectangular matrix of size MxN QR decomposition of a rectangular matrix of size MxN
Input parameters: Input parameters:
A - matrix A whose indexes range within [0..M-1, 0..N-1]. A - matrix A whose indexes range within [0..M-1, 0..N-1].
M - number of rows in matrix A. M - number of rows in matrix A.
N - number of columns in matrix A. N - number of columns in matrix A.
Output parameters: Output parameters:
skipping to change at line 3093 skipping to change at line 3057
3. matrix is passed to some linear algebra algorithm 3. matrix is passed to some linear algebra algorithm
This function is a memory-efficient alternative to SparseCreate(), but it This function is a memory-efficient alternative to SparseCreate(), but it
is more complex because it requires you to know in advance how large your is more complex because it requires you to know in advance how large your
matrix is. Some information about different matrix formats can be found matrix is. Some information about different matrix formats can be found
below, in the "NOTES" section. below, in the "NOTES" section.
INPUT PARAMETERS INPUT PARAMETERS
M - number of rows in a matrix, M>=1 M - number of rows in a matrix, M>=1
N - number of columns in a matrix, N>=1 N - number of columns in a matrix, N>=1
NER - number of elements at each row, array[M], NER[i]>=0 NER - number of elements at each row, array[M], NER[I]>=0
OUTPUT PARAMETERS OUTPUT PARAMETERS
S - sparse M*N matrix in CRS representation. S - sparse M*N matrix in CRS representation.
You have to fill ALL non-zero elements by calling You have to fill ALL non-zero elements by calling
SparseSet() BEFORE you try to use this matrix. SparseSet() BEFORE you try to use this matrix.
NOTE 1. NOTE 1.
Sparse matrices can be stored using either Hash-Table representation or Sparse matrices can be stored using either Hash-Table representation or
Compressed Row Storage representation. Hash-table is better suited for Compressed Row Storage representation. Hash-table is better suited for
skipping to change at line 3185 skipping to change at line 3149
NOTE 1: when S[i,j] is exactly zero after modification, it is deleted NOTE 1: when S[i,j] is exactly zero after modification, it is deleted
from the table. from the table.
-- ALGLIB PROJECT -- -- ALGLIB PROJECT --
Copyright 14.10.2011 by Bochkanov Sergey Copyright 14.10.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void sparseadd(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v); void sparseadd(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v);
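A sketch of the accumulation semantics described above, in Hash-Table mode (main() and the data are illustrative):

    #include <cstdio>
    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        sparsematrix s;
        sparsecreate(2, 2, s);     // Hash-Table storage by default
        sparseadd(s, 0, 0, 2.0);
        sparseadd(s, 0, 0, 3.0);   // accumulates: S[0,0] == 5.0
        sparseadd(s, 1, 1, 1.0);
        sparseadd(s, 1, 1, -1.0);  // exactly zero -> deleted from the table
        printf("%.1f %.1f\n", sparseget(s, 0, 0), sparseget(s, 1, 1));
        return 0;                  // prints 5.0 0.0
    }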
/************************************************************************* /*************************************************************************
This function modifies S[i,j] - element of the sparse matrix. Matrix must This function modifies S[i,j] - element of the sparse matrix.
be in a Hash-Table mode.
In case new value of S[i,j] is zero, this element is deleted from the For Hash-based storage format:
table. * new value can be zero or non-zero. In case new value of S[i,j] is zero,
this element is deleted from the table.
* this function has no effect when called with zero V for non-existent
element.
For CRS-based storage format:
* new value MUST be non-zero. Exception will be thrown for zero V.
* elements must be initialized in correct order - from top row to bottom,
within row - from left to right.
INPUT PARAMETERS INPUT PARAMETERS
S - sparse M*N matrix in Hash-Table representation. S - sparse M*N matrix in Hash-Table or CRS representation.
Exception will be thrown for CRS matrix.
I - row index of the element to modify, 0<=I<M I - row index of the element to modify, 0<=I<M
J - column index of the element to modify, 0<=J<N J - column index of the element to modify, 0<=J<N
V - value to set, must be finite number, can be zero V - value to set, must be finite number, can be zero
OUTPUT PARAMETERS OUTPUT PARAMETERS
S - modified matrix S - modified matrix
NOTE: this function has no effect when called with zero V for non-
existent element.
-- ALGLIB PROJECT -- -- ALGLIB PROJECT --
Copyright 14.10.2011 by Bochkanov Sergey Copyright 14.10.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void sparseset(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v); void sparseset(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v);
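A sketch contrasting the two regimes above: free-order updates in Hash-Table mode versus ordered, non-zero initialization of a matrix created with SparseCreateCRS() (whose description appears earlier in this header; data illustrative):

    #include <cstdio>
    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        sparsematrix h;                  // Hash-Table: any order, zero V deletes
        sparsecreate(2, 2, h);
        sparseset(h, 1, 1, 4.0);
        sparseset(h, 0, 0, 1.0);
        sparseset(h, 1, 1, 0.0);         // S[1,1] removed from the table

        sparsematrix c;                  // CRS: row sizes fixed up front
        integer_1d_array ner = "[2,1]";  // non-zeros per row
        sparsecreatecrs(2, 2, ner, c);
        sparseset(c, 0, 0, 1.0);         // top-to-bottom, left-to-right,
        sparseset(c, 0, 1, 2.0);         // values must be non-zero
        sparseset(c, 1, 1, 3.0);
        printf("%.1f\n", sparseget(c, 0, 1)); // 2.0
        return 0;
    }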
/************************************************************************* /*************************************************************************
This function returns S[i,j] - element of the sparse matrix. Matrix can This function returns S[i,j] - element of the sparse matrix. Matrix can
be in any mode (Hash-Table or CRS), but this function is less efficient be in any mode (Hash-Table or CRS), but this function is less efficient
for CRS matrices. Hash-Table matrices can find element in O(1) time, for CRS matrices. Hash-Table matrices can find element in O(1) time,
while CRS matrices need O(RS) time, where RS is the number of non-zero while CRS matrices need O(log(RS)) time, where RS is the number of non-zero
elements in a row. elements in a row.
INPUT PARAMETERS INPUT PARAMETERS
S - sparse M*N matrix in any representation S - sparse M*N matrix in any representation
(Hash-Table or CRS). (Hash-Table or CRS).
I - row index of the element to modify, 0<=I<M I - row index of the element to modify, 0<=I<M
J - column index of the element to modify, 0<=J<N J - column index of the element to modify, 0<=J<N
RESULT RESULT
value of S[I,J] or zero (in case no element with such index is found) value of S[I,J] or zero (in case no element with such index is found)
-- ALGLIB PROJECT -- -- ALGLIB PROJECT --
Copyright 14.10.2011 by Bochkanov Sergey Copyright 14.10.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double sparseget(const sparsematrix &s, const ae_int_t i, const ae_int_t j); double sparseget(const sparsematrix &s, const ae_int_t i, const ae_int_t j);
/************************************************************************* /*************************************************************************
This function returns I-th diagonal element of the sparse matrix.
Matrix can be in any mode (Hash-Table or CRS storage), but this function
is most efficient for CRS matrices - it requires less than 50 CPU cycles
to extract diagonal element. For Hash-Table matrices we still have O(1)
query time, but function is many times slower.
INPUT PARAMETERS
S - sparse M*N matrix in any representation
(Hash-Table or CRS).
I - index of the element to read, 0<=I<min(M,N)
RESULT
value of S[I,I] or zero (in case no element with such index is found)
-- ALGLIB PROJECT --
Copyright 14.10.2011 by Bochkanov Sergey
*************************************************************************/
double sparsegetdiagonal(const sparsematrix &s, const ae_int_t i);
/*************************************************************************
This function converts matrix to CRS format. This function converts matrix to CRS format.
Some algorithms (linear algebra ones, for example) require matrices in Some algorithms (linear algebra ones, for example) require matrices in
CRS format. CRS format.
INPUT PARAMETERS INPUT PARAMETERS
S - sparse M*N matrix. S - sparse M*N matrix in any format
OUTPUT PARAMETERS OUTPUT PARAMETERS
S - matrix in CRS format S - matrix in CRS format
NOTE: this function has no effect when called with matrix which is NOTE: this function has no effect when called with matrix which is
already in CRS mode. already in CRS mode.
-- ALGLIB PROJECT -- -- ALGLIB PROJECT --
Copyright 14.10.2011 by Bochkanov Sergey Copyright 14.10.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
skipping to change at line 3342 skipping to change at line 3330
void sparsemv2(const sparsematrix &s, const real_1d_array &x, real_1d_array &y0, real_1d_array &y1); void sparsemv2(const sparsematrix &s, const real_1d_array &x, real_1d_array &y0, real_1d_array &y1);
/************************************************************************* /*************************************************************************
This function calculates matrix-vector product S*x, when S is symmetric This function calculates matrix-vector product S*x, when S is symmetric
matrix. Matrix S must be stored in CRS format (exception will be matrix. Matrix S must be stored in CRS format (exception will be
thrown otherwise). thrown otherwise).
INPUT PARAMETERS INPUT PARAMETERS
S - sparse M*M matrix in CRS format (you MUST convert it S - sparse M*M matrix in CRS format (you MUST convert it
to CRS before calling this function). to CRS before calling this function).
IsUpper - whether upper or lower triangle of S is given:
* if upper triangle is given, only S[i,j] for j>=i
are used, and lower triangle is ignored (it can be
empty - these elements are not referenced at all).
* if lower triangle is given, only S[i,j] for j<=i
are used, and upper triangle is ignored.
X - array[N], input vector. For performance reasons we X - array[N], input vector. For performance reasons we
make only quick checks - we check that array size is make only quick checks - we check that array size is
at least N, but we do not check for NAN's or INF's. at least N, but we do not check for NAN's or INF's.
Y - output buffer, possibly preallocated. In case buffer Y - output buffer, possibly preallocated. In case buffer
size is too small to store result, this buffer is size is too small to store result, this buffer is
automatically resized. automatically resized.
OUTPUT PARAMETERS OUTPUT PARAMETERS
Y - array[M], S*x Y - array[M], S*x
skipping to change at line 3456 skipping to change at line 3450
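A sketch of the symmetric matrix-vector product described above, assuming the routine is the one ALGLIB exposes as sparsesmv() (its declaration falls in the elided region); only the upper triangle of S = [[2,1],[1,2]] is stored:

    #include <cstdio>
    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        sparsematrix s;
        sparsecreate(2, 2, s);
        sparseset(s, 0, 0, 2.0);   // upper triangle only:
        sparseset(s, 0, 1, 1.0);   // (0,0), (0,1), (1,1)
        sparseset(s, 1, 1, 2.0);
        sparseconverttocrs(s);     // symmetric kernels require CRS
        real_1d_array x = "[1,1]";
        real_1d_array y;
        sparsesmv(s, true, x, y);  // IsUpper=true; y = S*x = [3,3]
        printf("%s\n", y.tostring(1).c_str());
        return 0;
    }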
void sparsemm2(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b0, real_2d_array &b1); void sparsemm2(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b0, real_2d_array &b1);
/************************************************************************* /*************************************************************************
This function calculates matrix-matrix product S*A, when S is symmetric This function calculates matrix-matrix product S*A, when S is symmetric
matrix. Matrix S must be stored in CRS format (exception will be matrix. Matrix S must be stored in CRS format (exception will be
thrown otherwise). thrown otherwise).
INPUT PARAMETERS INPUT PARAMETERS
S - sparse M*M matrix in CRS format (you MUST convert it S - sparse M*M matrix in CRS format (you MUST convert it
to CRS before calling this function). to CRS before calling this function).
IsUpper - whether upper or lower triangle of S is given:
* if upper triangle is given, only S[i,j] for j>=i
are used, and lower triangle is ignored (it can be
empty - these elements are not referenced at all).
* if lower triangle is given, only S[i,j] for j<=i
are used, and upper triangle is ignored.
A - array[N][K], input dense matrix. For performance reasons A - array[N][K], input dense matrix. For performance reasons
we make only quick checks - we check that array size is we make only quick checks - we check that array size is
at least N, but we do not check for NAN's or INF's. at least N, but we do not check for NAN's or INF's.
K - number of columns of matrix (A). K - number of columns of matrix (A).
B - output buffer, possibly preallocated. In case buffer B - output buffer, possibly preallocated. In case buffer
size is too small to store result, this buffer is size is too small to store result, this buffer is
automatically resized. automatically resized.
OUTPUT PARAMETERS OUTPUT PARAMETERS
B - array[M][K], S*A B - array[M][K], S*A
skipping to change at line 3542 skipping to change at line 3542
long as different threads modify different elements. long as different threads modify different elements.
INPUT PARAMETERS INPUT PARAMETERS
S - sparse M*N matrix in Hash-Table or CRS representation. S - sparse M*N matrix in Hash-Table or CRS representation.
I - row index of non-zero element to modify, 0<=I<M I - row index of non-zero element to modify, 0<=I<M
J - column index of non-zero element to modify, 0<=J<N J - column index of non-zero element to modify, 0<=J<N
V - value to rewrite, must be finite number V - value to rewrite, must be finite number
OUTPUT PARAMETERS OUTPUT PARAMETERS
S - modified matrix S - modified matrix
RESULT
True if the element exists
False if the element doesn't exist or is zero
-- ALGLIB PROJECT -- -- ALGLIB PROJECT --
Copyright 14.03.2012 by Bochkanov Sergey Copyright 14.03.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
bool sparserewriteexisting(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v); bool sparserewriteexisting(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v);
/************************************************************************* /*************************************************************************
This function returns I-th row of the sparse matrix stored in CRS format.
NOTE: when incorrect I (outside of [0,M-1]) or matrix (non-CRS) are
passed, this function throws exception.
INPUT PARAMETERS:
S - sparse M*N matrix in CRS format
I - row index, 0<=I<M
IRow - output buffer, can be preallocated. In case buffer
size is too small to store I-th row, it is
automatically reallocated.
OUTPUT PARAMETERS:
IRow - array[N], I-th row.
-- ALGLIB PROJECT --
Copyright 20.07.2012 by Bochkanov Sergey
*************************************************************************/
void sparsegetrow(const sparsematrix &s, const ae_int_t i, real_1d_array &irow);
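A sketch of extracting a dense copy of one row via the function above (data and main() are illustrative):

    #include <cstdio>
    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        sparsematrix s;
        sparsecreate(2, 3, s);
        sparseset(s, 1, 0, 5.0);
        sparseset(s, 1, 2, 7.0);
        sparseconverttocrs(s);  // SparseGetRow() requires CRS storage
        real_1d_array r;
        sparsegetrow(s, 1, r);  // dense copy of row 1: [5,0,7]
        printf("%s\n", r.tostring(0).c_str());
        return 0;
    }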
/*************************************************************************
This function performs in-place conversion from CRS format to Hash table
storage.
INPUT PARAMETERS
S - sparse matrix in CRS format.
OUTPUT PARAMETERS
S - sparse matrix in Hash table format.
NOTE: this function has no effect when called with matrix which is
already in Hash table mode.
-- ALGLIB PROJECT --
Copyright 20.07.2012 by Bochkanov Sergey
*************************************************************************/
void sparseconverttohash(const sparsematrix &s);
/*************************************************************************
This function performs out-of-place conversion to Hash table storage
format. S0 is copied to S1 and converted on-the-fly.
INPUT PARAMETERS
S0 - sparse matrix in any format.
OUTPUT PARAMETERS
S1 - sparse matrix in Hash table format.
NOTE: if S0 is stored as Hash-table, it is just copied without conversion.
-- ALGLIB PROJECT --
Copyright 20.07.2012 by Bochkanov Sergey
*************************************************************************/
void sparsecopytohash(const sparsematrix &s0, sparsematrix &s1);
/*************************************************************************
This function performs out-of-place conversion to CRS format. S0 is
copied to S1 and converted on-the-fly.
INPUT PARAMETERS
S0 - sparse matrix in any format.
OUTPUT PARAMETERS
S1 - sparse matrix in CRS format.
NOTE: if S0 is stored as CRS, it is just copied without conversion.
-- ALGLIB PROJECT --
Copyright 20.07.2012 by Bochkanov Sergey
*************************************************************************/
void sparsecopytocrs(const sparsematrix &s0, sparsematrix &s1);
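A sketch chaining the conversions declared above: build in Hash-Table mode, convert in place to CRS for linear algebra, then copy out to a fresh Hash-Table matrix when free-order editing is needed again (illustrative):

    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        sparsematrix s;
        sparsecreate(3, 3, s);   // starts in Hash-Table mode
        sparseset(s, 0, 0, 1.0);
        sparseset(s, 2, 1, 4.0);
        sparseconverttocrs(s);   // in-place: s is now CRS

        sparsematrix h;
        sparsecopytohash(s, h);  // out-of-place copy back to Hash-Table
        sparseset(h, 1, 1, 9.0); // editable in any order again

        sparsematrix c;
        sparsecopytocrs(h, c);   // out-of-place CRS copy of the result
        return 0;
    }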
/*************************************************************************
This function returns type of the matrix storage format.
INPUT PARAMETERS:
S - sparse matrix.
RESULT:
sparse storage format used by matrix:
0 - Hash-table
1 - CRS-format
NOTE: future versions of ALGLIB may include additional sparse storage
formats.
-- ALGLIB PROJECT --
Copyright 20.07.2012 by Bochkanov Sergey
*************************************************************************/
ae_int_t sparsegetmatrixtype(const sparsematrix &s);
/*************************************************************************
This function checks matrix storage format and returns True when matrix is
stored using Hash table representation.
INPUT PARAMETERS:
S - sparse matrix.
RESULT:
True if matrix type is Hash table
False if matrix type is not Hash table
-- ALGLIB PROJECT --
Copyright 20.07.2012 by Bochkanov Sergey
*************************************************************************/
bool sparseishash(const sparsematrix &s);
/*************************************************************************
This function checks matrix storage format and returns True when matrix is
stored using CRS representation.
INPUT PARAMETERS:
S - sparse matrix.
RESULT:
True if matrix type is CRS
False if matrix type is not CRS
-- ALGLIB PROJECT --
Copyright 20.07.2012 by Bochkanov Sergey
*************************************************************************/
bool sparseiscrs(const sparsematrix &s);
/*************************************************************************
The function frees all memory occupied by sparse matrix. Sparse matrix
structure becomes unusable after this call.
OUTPUT PARAMETERS
S - sparse matrix to delete
-- ALGLIB PROJECT --
Copyright 24.07.2012 by Bochkanov Sergey
*************************************************************************/
void sparsefree(sparsematrix &s);
/*************************************************************************
The function returns number of rows of a sparse matrix.
RESULT: number of rows of a sparse matrix.
-- ALGLIB PROJECT --
Copyright 23.08.2012 by Bochkanov Sergey
*************************************************************************/
ae_int_t sparsegetnrows(const sparsematrix &s);
/*************************************************************************
The function returns number of columns of a sparse matrix.
RESULT: number of columns of a sparse matrix.
-- ALGLIB PROJECT --
Copyright 23.08.2012 by Bochkanov Sergey
*************************************************************************/
ae_int_t sparsegetncols(const sparsematrix &s);
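A sketch tying the query functions above together; a small helper such as this (describe() is illustrative, not part of ALGLIB) can branch on storage format before calling CRS-only kernels:

    #include <cstdio>
    #include "linalg.h"
    using namespace alglib;

    // print storage format and dimensions of a sparse matrix
    static void describe(const sparsematrix &s)
    {
        printf("%s, %dx%d\n",
               sparseishash(s) ? "hash" : (sparseiscrs(s) ? "crs" : "other"),
               int(sparsegetnrows(s)), int(sparsegetncols(s)));
    }

    int main()
    {
        sparsematrix s;
        sparsecreate(4, 5, s);
        describe(s);            // hash, 4x5  (sparsegetmatrixtype(s)==0)
        sparseconverttocrs(s);
        describe(s);            // crs, 4x5   (sparsegetmatrixtype(s)==1)
        return 0;
    }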
/*************************************************************************
This procedure initializes matrix norm estimator. This procedure initializes matrix norm estimator.
USAGE: USAGE:
1. User initializes algorithm state with NormEstimatorCreate() call 1. User initializes algorithm state with NormEstimatorCreate() call
2. User calls NormEstimatorEstimateSparse() (or NormEstimatorIteration()) 2. User calls NormEstimatorEstimateSparse() (or NormEstimatorIteration())
3. User calls NormEstimatorResults() to get solution. 3. User calls NormEstimatorResults() to get solution.
INPUT PARAMETERS: INPUT PARAMETERS:
M - number of rows in the matrix being estimated, M>0 M - number of rows in the matrix being estimated, M>0
N - number of columns in the matrix being estimated, N>0 N - number of columns in the matrix being estimated, N>0
skipping to change at line 4215 skipping to change at line 4374
ae_int_t optypea, ae_int_t optypea,
/* Real */ ae_matrix* b, /* Real */ ae_matrix* b,
ae_int_t ib, ae_int_t ib,
ae_int_t jb, ae_int_t jb,
ae_int_t optypeb, ae_int_t optypeb,
double beta, double beta,
/* Real */ ae_matrix* c, /* Real */ ae_matrix* c,
ae_int_t ic, ae_int_t ic,
ae_int_t jc, ae_int_t jc,
ae_state *_state); ae_state *_state);
void _pexec_rmatrixgemm(ae_int_t m,
ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
ae_int_t optypea,
/* Real */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
ae_int_t optypeb,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc, ae_state *_state);
void rmatrixqr(/* Real */ ae_matrix* a, void rmatrixqr(/* Real */ ae_matrix* a,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
/* Real */ ae_vector* tau, /* Real */ ae_vector* tau,
ae_state *_state); ae_state *_state);
void rmatrixlq(/* Real */ ae_matrix* a, void rmatrixlq(/* Real */ ae_matrix* a,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
/* Real */ ae_vector* tau, /* Real */ ae_vector* tau,
ae_state *_state); ae_state *_state);
skipping to change at line 4717 skipping to change at line 4892
ae_int_t* info, ae_int_t* info,
matinvreport* rep, matinvreport* rep,
ae_state *_state); ae_state *_state);
void cmatrixtrinverse(/* Complex */ ae_matrix* a, void cmatrixtrinverse(/* Complex */ ae_matrix* a,
ae_int_t n, ae_int_t n,
ae_bool isupper, ae_bool isupper,
ae_bool isunit, ae_bool isunit,
ae_int_t* info, ae_int_t* info,
matinvreport* rep, matinvreport* rep,
ae_state *_state); ae_state *_state);
ae_bool _matinvreport_init(matinvreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _matinvreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _matinvreport_init_copy(matinvreport* dst, matinvreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _matinvreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _matinvreport_clear(matinvreport* p); void _matinvreport_clear(void* _p);
void _matinvreport_destroy(void* _p);
void fblscholeskysolve(/* Real */ ae_matrix* cha,
double sqrtscalea,
ae_int_t n,
ae_bool isupper,
/* Real */ ae_vector* xb,
/* Real */ ae_vector* tmp,
ae_state *_state);
void fblssolvecgx(/* Real */ ae_matrix* a,
ae_int_t m,
ae_int_t n,
double alpha,
/* Real */ ae_vector* b,
/* Real */ ae_vector* x,
/* Real */ ae_vector* buf,
ae_state *_state);
void fblscgcreate(/* Real */ ae_vector* x,
/* Real */ ae_vector* b,
ae_int_t n,
fblslincgstate* state,
ae_state *_state);
ae_bool fblscgiteration(fblslincgstate* state, ae_state *_state);
void fblssolvels(/* Real */ ae_matrix* a,
/* Real */ ae_vector* b,
ae_int_t m,
ae_int_t n,
/* Real */ ae_vector* tmp0,
/* Real */ ae_vector* tmp1,
/* Real */ ae_vector* tmp2,
ae_state *_state);
ae_bool _fblslincgstate_init(fblslincgstate* p, ae_state *_state, ae_bool make_automatic);
ae_bool _fblslincgstate_init_copy(fblslincgstate* dst, fblslincgstate* src, ae_state *_state, ae_bool make_automatic);
void _fblslincgstate_clear(fblslincgstate* p);
void sparsecreate(ae_int_t m, void sparsecreate(ae_int_t m,
ae_int_t n, ae_int_t n,
ae_int_t k, ae_int_t k,
sparsematrix* s, sparsematrix* s,
ae_state *_state); ae_state *_state);
void sparsecreatecrs(ae_int_t m, void sparsecreatecrs(ae_int_t m,
ae_int_t n, ae_int_t n,
/* Integer */ ae_vector* ner, /* Integer */ ae_vector* ner,
sparsematrix* s, sparsematrix* s,
ae_state *_state); ae_state *_state);
skipping to change at line 4777 skipping to change at line 4921
ae_state *_state); ae_state *_state);
void sparseset(sparsematrix* s, void sparseset(sparsematrix* s,
ae_int_t i, ae_int_t i,
ae_int_t j, ae_int_t j,
double v, double v,
ae_state *_state); ae_state *_state);
double sparseget(sparsematrix* s, double sparseget(sparsematrix* s,
ae_int_t i, ae_int_t i,
ae_int_t j, ae_int_t j,
ae_state *_state); ae_state *_state);
double sparsegetdiagonal(sparsematrix* s, ae_int_t i, ae_state *_state);
void sparseconverttocrs(sparsematrix* s, ae_state *_state); void sparseconverttocrs(sparsematrix* s, ae_state *_state);
void sparsemv(sparsematrix* s, void sparsemv(sparsematrix* s,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_state *_state); ae_state *_state);
void sparsemtv(sparsematrix* s, void sparsemtv(sparsematrix* s,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_state *_state); ae_state *_state);
void sparsemv2(sparsematrix* s, void sparsemv2(sparsematrix* s,
skipping to change at line 4832 skipping to change at line 4977
ae_int_t* t1, ae_int_t* t1,
ae_int_t* i, ae_int_t* i,
ae_int_t* j, ae_int_t* j,
double* v, double* v,
ae_state *_state); ae_state *_state);
ae_bool sparserewriteexisting(sparsematrix* s, ae_bool sparserewriteexisting(sparsematrix* s,
ae_int_t i, ae_int_t i,
ae_int_t j, ae_int_t j,
double v, double v,
ae_state *_state); ae_state *_state);
ae_bool _sparsematrix_init(sparsematrix* p, ae_state *_state, ae_bool make_automatic);
ae_bool _sparsematrix_init_copy(sparsematrix* dst, sparsematrix* src, ae_state *_state, ae_bool make_automatic);
void _sparsematrix_clear(sparsematrix* p);
void sparsegetrow(sparsematrix* s,
ae_int_t i,
/* Real */ ae_vector* irow,
ae_state *_state);
void sparseconverttohash(sparsematrix* s, ae_state *_state);
void sparsecopytohash(sparsematrix* s0,
sparsematrix* s1,
ae_state *_state);
void sparsecopytocrs(sparsematrix* s0, sparsematrix* s1, ae_state *_state);
ae_int_t sparsegetmatrixtype(sparsematrix* s, ae_state *_state);
ae_bool sparseishash(sparsematrix* s, ae_state *_state);
ae_bool sparseiscrs(sparsematrix* s, ae_state *_state);
void sparsefree(sparsematrix* s, ae_state *_state);
ae_int_t sparsegetnrows(sparsematrix* s, ae_state *_state);
ae_int_t sparsegetncols(sparsematrix* s, ae_state *_state);
ae_bool _sparsematrix_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _sparsematrix_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _sparsematrix_clear(void* _p);
void _sparsematrix_destroy(void* _p);
void fblscholeskysolve(/* Real */ ae_matrix* cha,
double sqrtscalea,
ae_int_t n,
ae_bool isupper,
/* Real */ ae_vector* xb,
/* Real */ ae_vector* tmp,
ae_state *_state);
void fblssolvecgx(/* Real */ ae_matrix* a,
ae_int_t m,
ae_int_t n,
double alpha,
/* Real */ ae_vector* b,
/* Real */ ae_vector* x,
/* Real */ ae_vector* buf,
ae_state *_state);
void fblscgcreate(/* Real */ ae_vector* x,
/* Real */ ae_vector* b,
ae_int_t n,
fblslincgstate* state,
ae_state *_state);
ae_bool fblscgiteration(fblslincgstate* state, ae_state *_state);
void fblssolvels(/* Real */ ae_matrix* a,
/* Real */ ae_vector* b,
ae_int_t m,
ae_int_t n,
/* Real */ ae_vector* tmp0,
/* Real */ ae_vector* tmp1,
/* Real */ ae_vector* tmp2,
ae_state *_state);
ae_bool _fblslincgstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _fblslincgstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _fblslincgstate_clear(void* _p);
void _fblslincgstate_destroy(void* _p);
void normestimatorcreate(ae_int_t m, void normestimatorcreate(ae_int_t m,
ae_int_t n, ae_int_t n,
ae_int_t nstart, ae_int_t nstart,
ae_int_t nits, ae_int_t nits,
normestimatorstate* state, normestimatorstate* state,
ae_state *_state); ae_state *_state);
void normestimatorsetseed(normestimatorstate* state, void normestimatorsetseed(normestimatorstate* state,
ae_int_t seedval, ae_int_t seedval,
ae_state *_state); ae_state *_state);
ae_bool normestimatoriteration(normestimatorstate* state, ae_bool normestimatoriteration(normestimatorstate* state,
ae_state *_state); ae_state *_state);
void normestimatorestimatesparse(normestimatorstate* state, void normestimatorestimatesparse(normestimatorstate* state,
sparsematrix* a, sparsematrix* a,
ae_state *_state); ae_state *_state);
void normestimatorresults(normestimatorstate* state, void normestimatorresults(normestimatorstate* state,
double* nrm, double* nrm,
ae_state *_state); ae_state *_state);
void normestimatorrestart(normestimatorstate* state, ae_state *_state); void normestimatorrestart(normestimatorstate* state, ae_state *_state);
ae_bool _normestimatorstate_init(normestimatorstate* p, ae_state *_state, ae_bool make_automatic); ae_bool _normestimatorstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _normestimatorstate_init_copy(normestimatorstate* dst, normestimatorstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _normestimatorstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _normestimatorstate_clear(normestimatorstate* p); void _normestimatorstate_clear(void* _p);
void _normestimatorstate_destroy(void* _p);
double rmatrixludet(/* Real */ ae_matrix* a, double rmatrixludet(/* Real */ ae_matrix* a,
/* Integer */ ae_vector* pivots, /* Integer */ ae_vector* pivots,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
double rmatrixdet(/* Real */ ae_matrix* a, double rmatrixdet(/* Real */ ae_matrix* a,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
ae_complex cmatrixludet(/* Complex */ ae_matrix* a, ae_complex cmatrixludet(/* Complex */ ae_matrix* a,
/* Integer */ ae_vector* pivots, /* Integer */ ae_vector* pivots,
ae_int_t n, ae_int_t n,
 End of changes. 22 change blocks. 
117 lines changed or deleted 317 lines changed or added


 optimization.h   optimization.h 
skipping to change at line 37 skipping to change at line 37
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES) // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
typedef struct typedef struct
{ {
ae_int_t n; ae_int_t n;
ae_int_t k;
double alpha;
double tau;
double theta;
ae_matrix a;
ae_matrix q;
ae_vector b;
ae_vector r;
ae_vector xc;
ae_vector d;
ae_vector activeset;
ae_matrix tq2dense;
ae_matrix tk2;
ae_vector tq2diag;
ae_vector tq1;
ae_vector tk1;
double tq0;
double tk0;
ae_vector txc;
ae_vector tb;
ae_int_t nfree;
ae_int_t ecakind;
ae_matrix ecadense;
ae_matrix eq;
ae_matrix eccm;
ae_vector ecadiag;
ae_vector eb;
double ec;
ae_vector tmp0;
ae_vector tmp1;
ae_vector tmpg;
ae_matrix tmp2;
ae_bool ismaintermchanged;
ae_bool issecondarytermchanged;
ae_bool islineartermchanged;
ae_bool isactivesetchanged;
} convexquadraticmodel;
typedef struct
{
ae_int_t ns;
ae_int_t nd;
ae_int_t nr;
ae_matrix densea;
ae_vector b;
ae_vector nnc;
ae_int_t refinementits;
double debugflops;
ae_int_t debugmaxnewton;
ae_vector xn;
ae_matrix tmpz;
ae_matrix tmpca;
ae_vector g;
ae_vector d;
ae_vector dx;
ae_vector diagaa;
ae_vector cb;
ae_vector cx;
ae_vector cborg;
ae_vector columnmap;
ae_vector rowmap;
ae_vector tmpcholesky;
ae_vector r;
} snnlssolver;
typedef struct
{
ae_int_t n;
ae_int_t algostate;
ae_vector xc;
ae_bool hasxc;
ae_vector s;
ae_vector h;
ae_vector activeset;
ae_bool basisisready;
ae_matrix sbasis;
ae_matrix pbasis;
ae_matrix ibasis;
ae_int_t basissize;
ae_bool constraintschanged;
ae_vector hasbndl;
ae_vector hasbndu;
ae_vector bndl;
ae_vector bndu;
ae_matrix cleic;
ae_int_t nec;
ae_int_t nic;
ae_vector mtx;
ae_vector mtas;
ae_vector cdtmp;
ae_vector corrtmp;
ae_vector unitdiagonal;
snnlssolver solver;
ae_vector scntmp;
ae_vector tmp0;
ae_vector tmpfeas;
ae_matrix tmpm0;
ae_vector rctmps;
ae_vector rctmpg;
ae_vector rctmprightpart;
ae_matrix rctmpdense0;
ae_matrix rctmpdense1;
ae_vector rctmpisequality;
ae_vector rctmpconstraintidx;
ae_vector rctmplambdas;
ae_matrix tmpbasis;
} sactiveset;
typedef struct
{
ae_int_t n;
double epsg; double epsg;
double epsf; double epsf;
double epsx; double epsx;
ae_int_t maxits; ae_int_t maxits;
double stpmax; double stpmax;
double suggestedstep; double suggestedstep;
ae_bool xrep; ae_bool xrep;
ae_bool drep; ae_bool drep;
ae_int_t cgtype; ae_int_t cgtype;
ae_int_t prectype; ae_int_t prectype;
skipping to change at line 110 skipping to change at line 218
{ {
ae_int_t iterationscount; ae_int_t iterationscount;
ae_int_t nfev; ae_int_t nfev;
ae_int_t varidx; ae_int_t varidx;
ae_int_t terminationtype; ae_int_t terminationtype;
} mincgreport; } mincgreport;
typedef struct typedef struct
{ {
ae_int_t nmain; ae_int_t nmain;
ae_int_t nslack; ae_int_t nslack;
double innerepsg; double epsg;
double innerepsf; double epsf;
double innerepsx; double epsx;
double outerepsx;
double outerepsi;
ae_int_t maxits; ae_int_t maxits;
ae_bool xrep; ae_bool xrep;
double stpmax; double stpmax;
double diffstep; double diffstep;
sactiveset sas;
ae_vector s;
ae_int_t prectype; ae_int_t prectype;
ae_vector diaghoriginal;
ae_vector diagh; ae_vector diagh;
ae_vector x; ae_vector x;
double f; double f;
ae_vector g; ae_vector g;
ae_bool needf; ae_bool needf;
ae_bool needfg; ae_bool needfg;
ae_bool xupdated; ae_bool xupdated;
double teststep; double teststep;
rcommstate rstate; rcommstate rstate;
ae_vector gc;
ae_vector xn;
ae_vector gn;
ae_vector xp;
ae_vector gp;
double fc;
double fn;
double fp;
ae_vector d;
ae_matrix cleic;
ae_int_t nec;
ae_int_t nic;
double lastgoodstep;
double lastscaledgoodstep;
ae_vector hasbndl;
ae_vector hasbndu;
ae_vector bndl;
ae_vector bndu;
ae_int_t repinneriterationscount; ae_int_t repinneriterationscount;
ae_int_t repouteriterationscount; ae_int_t repouteriterationscount;
ae_int_t repnfev; ae_int_t repnfev;
ae_int_t repvaridx; ae_int_t repvaridx;
ae_int_t repterminationtype; ae_int_t repterminationtype;
double repdebugeqerr; double repdebugeqerr;
double repdebugfs; double repdebugfs;
double repdebugff; double repdebugff;
double repdebugdx; double repdebugdx;
ae_int_t repdebugfeasqpits; ae_int_t repdebugfeasqpits;
ae_int_t repdebugfeasgpaits; ae_int_t repdebugfeasgpaits;
ae_vector xcur;
ae_vector xprev;
ae_vector xstart; ae_vector xstart;
ae_int_t itsleft; snnlssolver solver;
ae_vector xend;
ae_vector lastg;
double trimthreshold;
ae_matrix ceoriginal;
ae_matrix ceeffective;
ae_matrix cecurrent;
ae_vector ct;
ae_int_t cecnt;
ae_int_t cedim;
ae_vector xe;
ae_vector hasbndl;
ae_vector hasbndu;
ae_vector bndloriginal;
ae_vector bnduoriginal;
ae_vector bndleffective;
ae_vector bndueffective;
ae_vector activeconstraints;
ae_vector constrainedvalues;
ae_vector transforms;
ae_vector seffective;
ae_vector soriginal;
ae_vector w;
ae_vector tmp0;
ae_vector tmp1;
ae_vector tmp2;
ae_vector r;
ae_matrix lmmatrix;
double errfeas;
double gnorm;
double mpgnorm;
double mba;
ae_int_t variabletofreeze;
double valuetofreeze;
double fbase; double fbase;
double fm2; double fm2;
double fm1; double fm1;
double fp1; double fp1;
double fp2; double fp2;
double xm1; double xm1;
double xp1; double xp1;
mincgstate cgstate; double gm1;
mincgreport cgrep; double gp1;
ae_int_t optdim; ae_int_t cidx;
double cval;
ae_vector tmpprec;
ae_int_t nfev;
ae_int_t mcstage;
double stp;
double curstpmax;
double activationstep;
ae_vector work;
linminstate lstate;
double trimthreshold;
ae_int_t nonmonotoniccnt;
ae_int_t k;
ae_int_t q;
ae_int_t p;
ae_vector rho;
ae_matrix yk;
ae_matrix sk;
ae_vector theta;
} minbleicstate; } minbleicstate;
typedef struct typedef struct
{ {
ae_int_t inneriterationscount; ae_int_t iterationscount;
ae_int_t outeriterationscount;
ae_int_t nfev; ae_int_t nfev;
ae_int_t varidx; ae_int_t varidx;
ae_int_t terminationtype; ae_int_t terminationtype;
double debugeqerr; double debugeqerr;
double debugfs; double debugfs;
double debugff; double debugff;
double debugdx; double debugdx;
ae_int_t debugfeasqpits; ae_int_t debugfeasqpits;
ae_int_t debugfeasgpaits; ae_int_t debugfeasgpaits;
ae_int_t inneriterationscount;
ae_int_t outeriterationscount;
} minbleicreport; } minbleicreport;
typedef struct typedef struct
{ {
ae_int_t n; ae_int_t n;
ae_int_t m; ae_int_t m;
double epsg; double epsg;
double epsf; double epsf;
double epsx; double epsx;
ae_int_t maxits; ae_int_t maxits;
ae_bool xrep; ae_bool xrep;
skipping to change at line 263 skipping to change at line 372
typedef struct typedef struct
{ {
ae_int_t iterationscount; ae_int_t iterationscount;
ae_int_t nfev; ae_int_t nfev;
ae_int_t varidx; ae_int_t varidx;
ae_int_t terminationtype; ae_int_t terminationtype;
} minlbfgsreport; } minlbfgsreport;
typedef struct typedef struct
{ {
ae_int_t n; ae_int_t n;
ae_int_t k;
double alpha;
double tau;
double theta;
ae_matrix a;
ae_matrix q;
ae_vector b;
ae_vector r;
ae_vector xc;
ae_vector d;
ae_vector activeset;
ae_matrix tq2dense;
ae_matrix tk2;
ae_vector tq2diag;
ae_vector tq1;
ae_vector tk1;
double tq0;
double tk0;
ae_vector txc;
ae_vector tb;
ae_int_t nfree;
ae_int_t ecakind;
ae_matrix ecadense;
ae_matrix eq;
ae_matrix eccm;
ae_vector ecadiag;
ae_vector eb;
double ec;
ae_vector tmp0;
ae_vector tmp1;
ae_vector tmpg;
ae_matrix tmp2;
ae_bool ismaintermchanged;
ae_bool issecondarytermchanged;
ae_bool islineartermchanged;
ae_bool isactivesetchanged;
} convexquadraticmodel;
typedef struct
{
ae_int_t n;
ae_int_t algokind; ae_int_t algokind;
convexquadraticmodel a; convexquadraticmodel a;
double anorm; double anorm;
ae_vector b; ae_vector b;
ae_vector bndl; ae_vector bndl;
ae_vector bndu; ae_vector bndu;
ae_vector s;
ae_vector havebndl; ae_vector havebndl;
ae_vector havebndu; ae_vector havebndu;
ae_vector xorigin; ae_vector xorigin;
ae_vector startx; ae_vector startx;
ae_bool havex; ae_bool havex;
ae_matrix cmatrix;
ae_vector cr;
ae_vector ct;
ae_int_t ccount;
ae_matrix cleic; ae_matrix cleic;
ae_int_t nec; ae_int_t nec;
ae_int_t nic; ae_int_t nic;
ae_vector xc; sactiveset sas;
ae_vector gc; ae_vector gc;
ae_vector xn; ae_vector xn;
ae_vector pg; ae_vector pg;
ae_vector activelin;
ae_vector activeb;
ae_matrix activecm;
ae_vector activecr;
ae_int_t activelincnt;
ae_vector workbndl; ae_vector workbndl;
ae_vector workbndu; ae_vector workbndu;
ae_matrix workcleic; ae_matrix workcleic;
ae_vector xs;
ae_int_t repinneriterationscount; ae_int_t repinneriterationscount;
ae_int_t repouteriterationscount; ae_int_t repouteriterationscount;
ae_int_t repncholesky; ae_int_t repncholesky;
ae_int_t repnmv; ae_int_t repnmv;
ae_int_t repterminationtype; ae_int_t repterminationtype;
double debugphase1flops;
double debugphase2flops;
double debugphase3flops;
ae_vector tmp0; ae_vector tmp0;
ae_vector tmp1; ae_vector tmp1;
ae_vector tmpfeas; ae_vector tmpb;
ae_matrix tmpm0; ae_vector rctmpg;
ae_matrix lstransform;
ae_matrix lagrangesystem;
ae_matrix lagrangesystemtmp;
ae_vector prevactiveb;
ae_vector prevactivelin;
ae_vector lsrightpart;
ae_vector lsrightparttmp;
ae_vector lagrangecoeffs;
ae_vector lstmp0;
ae_vector lstmp1;
normestimatorstate estimator; normestimatorstate estimator;
} minqpstate; } minqpstate;
typedef struct typedef struct
{ {
ae_int_t inneriterationscount; ae_int_t inneriterationscount;
ae_int_t outeriterationscount; ae_int_t outeriterationscount;
ae_int_t nmv; ae_int_t nmv;
ae_int_t ncholesky; ae_int_t ncholesky;
ae_int_t terminationtype; ae_int_t terminationtype;
} minqpreport; } minqpreport;
skipping to change at line 503 skipping to change at line 558
double betahs; double betahs;
double betady; double betady;
} minasastate; } minasastate;
typedef struct typedef struct
{ {
ae_int_t iterationscount; ae_int_t iterationscount;
ae_int_t nfev; ae_int_t nfev;
ae_int_t terminationtype; ae_int_t terminationtype;
ae_int_t activeconstraints; ae_int_t activeconstraints;
} minasareport; } minasareport;
typedef struct
{
double debugflops;
} linfeassolver;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS C++ INTERFACE // THIS SECTION CONTAINS C++ INTERFACE
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib namespace alglib
{ {
skipping to change at line 611 skipping to change at line 670
ae_bool &needfg; ae_bool &needfg;
ae_bool &xupdated; ae_bool &xupdated;
double &f; double &f;
real_1d_array g; real_1d_array g;
real_1d_array x; real_1d_array x;
}; };
/************************************************************************* /*************************************************************************
This structure stores optimization report: This structure stores optimization report:
* InnerIterationsCount number of inner iterations * IterationsCount number of iterations
* OuterIterationsCount number of outer iterations
* NFEV number of gradient evaluations * NFEV number of gradient evaluations
* TerminationType termination type (see below) * TerminationType termination type (see below)
TERMINATION CODES TERMINATION CODES
TerminationType field contains completion code, which can be: TerminationType field contains completion code, which can be:
-10 unsupported combination of algorithm settings:
1) StpMax is set to non-zero value,
AND 2) non-default preconditioner is used.
You can't use both features at the same moment,
so you have to choose one of them (and to turn
off another one).
-7 gradient verification failed. -7 gradient verification failed.
See MinBLEICSetGradientCheck() for more information. See MinBLEICSetGradientCheck() for more information.
-3 inconsistent constraints. Feasible point is -3 inconsistent constraints. Feasible point is
either nonexistent or too hard to find. Try to either nonexistent or too hard to find. Try to
restart optimizer with better initial restart optimizer with better initial
approximation approximation
4 conditions on constraints are fulfilled 1 relative function improvement is no more than EpsF.
with error less than or equal to EpsC 2 relative step is no more than EpsX.
4 gradient norm is no more than EpsG
5 MaxIts steps were taken 5 MaxIts steps were taken
7 stopping conditions are too stringent, 7 stopping conditions are too stringent,
further improvement is impossible, further improvement is impossible,
X contains best point found so far. X contains best point found so far.
ADDITIONAL FIELDS ADDITIONAL FIELDS
There are additional fields which can be used for debugging: There are additional fields which can be used for debugging:
* DebugEqErr error in the equality constraints (2-norm) * DebugEqErr error in the equality constraints (2-norm)
* DebugFS f, calculated at projection of initial point * DebugFS f, calculated at projection of initial point
skipping to change at line 666 skipping to change at line 718
protected: protected:
alglib_impl::minbleicreport *p_struct; alglib_impl::minbleicreport *p_struct;
}; };
class minbleicreport : public _minbleicreport_owner class minbleicreport : public _minbleicreport_owner
{ {
public: public:
minbleicreport(); minbleicreport();
minbleicreport(const minbleicreport &rhs); minbleicreport(const minbleicreport &rhs);
minbleicreport& operator=(const minbleicreport &rhs); minbleicreport& operator=(const minbleicreport &rhs);
virtual ~minbleicreport(); virtual ~minbleicreport();
ae_int_t &inneriterationscount; ae_int_t &iterationscount;
ae_int_t &outeriterationscount;
ae_int_t &nfev; ae_int_t &nfev;
ae_int_t &varidx; ae_int_t &varidx;
ae_int_t &terminationtype; ae_int_t &terminationtype;
double &debugeqerr; double &debugeqerr;
double &debugfs; double &debugfs;
double &debugff; double &debugff;
double &debugdx; double &debugdx;
ae_int_t &debugfeasqpits; ae_int_t &debugfeasqpits;
ae_int_t &debugfeasgpaits; ae_int_t &debugfeasgpaits;
ae_int_t &inneriterationscount;
ae_int_t &outeriterationscount;
}; };
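After optimization, a typical way to inspect this report is sketched below (variable names follow the usage sketch given after MinBLEICSetLC() further down; <cstdio> is assumed for printf):

    real_1d_array xsol;
    minbleicresults(state, xsol, rep);
    if( rep.terminationtype>0 )
        printf("converged: %d iterations, %d gradient evaluations\n",
               int(rep.iterationscount), int(rep.nfev));
    else
        printf("failed, completion code %d\n", int(rep.terminationtype));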
/************************************************************************* /*************************************************************************
*************************************************************************/ *************************************************************************/
class _minlbfgsstate_owner class _minlbfgsstate_owner
{ {
public: public:
_minlbfgsstate_owner(); _minlbfgsstate_owner();
skipping to change at line 1472 skipping to change at line 1525
Constrained optimization is far more complex than the unconstrained one. Constrained optimization is far more complex than the unconstrained one.
Here we give a very brief outline of the BLEIC optimizer. We strongly recommend Here we give a very brief outline of the BLEIC optimizer. We strongly recommend
you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide
on optimization, which is available at http://www.alglib.net/optimization/ on optimization, which is available at http://www.alglib.net/optimization/
1. User initializes algorithm state with MinBLEICCreate() call 1. User initializes algorithm state with MinBLEICCreate() call
2. User adds boundary and/or linear constraints by calling 2. User adds boundary and/or linear constraints by calling
MinBLEICSetBC() and MinBLEICSetLC() functions. MinBLEICSetBC() and MinBLEICSetLC() functions.
3. User sets stopping conditions for underlying unconstrained solver 3. User sets stopping conditions with MinBLEICSetCond().
with MinBLEICSetInnerCond() call.
This function controls accuracy of underlying optimization algorithm.
4. User sets stopping conditions for outer iteration by calling
MinBLEICSetOuterCond() function.
This function controls handling of boundary and inequality constraints.
5. Additionally, user may set limit on number of internal iterations
by MinBLEICSetMaxIts() call.
This function allows to prevent algorithm from looping forever.
6. User calls MinBLEICOptimize() function which takes algorithm state and 4. User calls MinBLEICOptimize() function which takes algorithm state and
pointer (delegate, etc.) to callback function which calculates F/G. pointer (delegate, etc.) to callback function which calculates F/G.
7. User calls MinBLEICResults() to get solution 5. User calls MinBLEICResults() to get solution
8. Optionally user may call MinBLEICRestartFrom() to solve another problem 6. Optionally user may call MinBLEICRestartFrom() to solve another problem
with same N but another starting point. with same N but another starting point.
MinBLEICRestartFrom() allows to reuse already initialized structure. MinBLEICRestartFrom() allows to reuse already initialized structure.
INPUT PARAMETERS: INPUT PARAMETERS:
N - problem dimension, N>0: N - problem dimension, N>0:
* if given, only leading N elements of X are used * if given, only leading N elements of X are used
* if not given, automatically determined from size of X * if not given, automatically determined from size of X
X - starting point, array[N]: X - starting point, array[N]:
* it is better to set X to a feasible point * it is better to set X to a feasible point
* but X can be infeasible, in which case algorithm will try * but X can be infeasible, in which case algorithm will try
skipping to change at line 1626 skipping to change at line 1669
(this kind of constraints is always satisfied exactly, both in the final (this kind of constraints is always satisfied exactly, both in the final
solution and in all intermediate points). solution and in all intermediate points).
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k); void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct); void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct);
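Putting the workflow together, here is a minimal self-contained sketch using the C++ interface declared above (the quadratic objective and all numeric values are purely illustrative, not part of the library documentation):

    #include "optimization.h"
    using namespace alglib;

    // gradient-based callback: computes f(x)=(x0-1)^2+(x1-2)^2 and its gradient
    static void f_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    {
        func    = (x[0]-1.0)*(x[0]-1.0) + (x[1]-2.0)*(x[1]-2.0);
        grad[0] = 2.0*(x[0]-1.0);
        grad[1] = 2.0*(x[1]-2.0);
    }

    int main()
    {
        real_1d_array x     = "[0,0]";      // starting point
        real_1d_array bndl  = "[0,0]";      // boundary constraints: x >= 0
        real_1d_array bndu  = "[1,1]";      //                       x <= 1
        real_2d_array c     = "[[1,1,1]]";  // linear constraint x0+x1 <= 1
        integer_1d_array ct = "[-1]";       // CT[0]<0 => "<=" constraint
        minbleicstate state;
        minbleicreport rep;

        minbleiccreate(x, state);
        minbleicsetbc(state, bndl, bndu);
        minbleicsetlc(state, c, ct);
        minbleicsetcond(state, 1.0e-6, 0, 0, 0);  // stop on scaled gradient norm
        minbleicoptimize(state, f_grad);
        minbleicresults(state, x, rep);           // rep.terminationtype>0 on success
        return 0;
    }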
/************************************************************************* /*************************************************************************
This function sets stopping conditions for the underlying nonlinear CG This function sets stopping conditions for the optimizer.
optimizer. It controls overall accuracy of solution. These conditions
should be strict enough in order for algorithm to converge.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
EpsG - >=0 EpsG - >=0
The subroutine finishes its work if the condition The subroutine finishes its work if the condition
|v|<EpsG is satisfied, where: |v|<EpsG is satisfied, where:
* |.| means Euclidean norm * |.| means Euclidean norm
* v - scaled gradient vector, v[i]=g[i]*s[i] * v - scaled gradient vector, v[i]=g[i]*s[i]
* g - gradient * g - gradient
* s - scaling coefficients set by MinBLEICSetScale() * s - scaling coefficients set by MinBLEICSetScale()
skipping to change at line 1650 skipping to change at line 1691
The subroutine finishes its work if on k+1-th iteration The subroutine finishes its work if on k+1-th iteration
the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
is satisfied. is satisfied.
EpsX - >=0 EpsX - >=0
The subroutine finishes its work if on k+1-th iteration The subroutine finishes its work if on k+1-th iteration
the condition |v|<=EpsX is fulfilled, where: the condition |v|<=EpsX is fulfilled, where:
* |.| means Euclidean norm * |.| means Euclidean norm
* v - scaled step vector, v[i]=dx[i]/s[i] * v - scaled step vector, v[i]=dx[i]/s[i]
* dx - step vector, dx=X(k+1)-X(k) * dx - step vector, dx=X(k+1)-X(k)
* s - scaling coefficients set by MinBLEICSetScale() * s - scaling coefficients set by MinBLEICSetScale()
MaxIts - maximum number of iterations. If MaxIts=0, the number of
iterations is unlimited.
Passing EpsG=0, EpsF=0 and EpsX=0 (simultaneously) will lead to Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
automatic stopping criterion selection. to automatic stopping criterion selection.
These conditions are used to terminate inner iterations. However, you
need to tune termination conditions for outer iterations too.
-- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicsetinnercond(const minbleicstate &state, const double epsg, const double epsf, const double epsx);
/*************************************************************************
This function sets stopping conditions for outer iteration of BLEIC algo.
These conditions control accuracy of constraint handling and amount of
infeasibility allowed in the solution.
INPUT PARAMETERS:
State - structure which stores algorithm state
EpsX - >0, stopping condition on outer iteration step length
EpsI - >0, stopping condition on infeasibility
Both EpsX and EpsI must be non-zero.
MEANING OF EpsX
EpsX is a stopping condition for outer iterations. Algorithm will stop
when solution of the current modified subproblem will be within EpsX
(using 2-norm) of the previous solution.
MEANING OF EpsI
EpsI controls feasibility properties - algorithm won't stop until all
inequality constraints will be satisfied with error (distance from current
point to the feasible area) at most EpsI.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetoutercond(const minbleicstate &state, const double epsx, const double epsi); void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
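For instance, a small sketch of combining these tolerances with per-variable scales (values are illustrative; MinBLEICSetScale() is declared below):

    real_1d_array s = "[1.0e+4, 1.0e-2]";   // typical magnitudes of the variables
    minbleicsetscale(state, s);
    // stop when ||v||<=1e-6 for v[i]=g[i]*s[i], or after at most 1000 iterations;
    // the EpsF and EpsX conditions are disabled:
    minbleicsetcond(state, 1.0e-6, 0, 0, 1000);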
/************************************************************************* /*************************************************************************
This function sets scaling coefficients for BLEIC optimizer. This function sets scaling coefficients for BLEIC optimizer.
ALGLIB optimizers use scaling matrices to test stopping conditions (step ALGLIB optimizers use scaling matrices to test stopping conditions (step
size and gradient are scaled before comparison with tolerances). Scale of size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of: the I-th variable is a translation invariant measure of:
a) "how large" the variable is a) "how large" the variable is
b) how large the step should be to make significant changes in the function b) how large the step should be to make significant changes in the function
skipping to change at line 1779 skipping to change at line 1789
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
-- ALGLIB -- -- ALGLIB --
Copyright 13.10.2010 by Bochkanov Sergey Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicsetprecscale(const minbleicstate &state); void minbleicsetprecscale(const minbleicstate &state);
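A two-line sketch, assuming the usual pairing of this preconditioner with explicitly set scales (the scale vector s is illustrative):

    minbleicsetscale(state, s);    // s - per-variable scales, as in the sketches above
    minbleicsetprecscale(state);   // diagonal preconditioner built from those scales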
/************************************************************************* /*************************************************************************
This function allows to stop algorithm after specified number of inner
iterations.
INPUT PARAMETERS:
State - structure which stores algorithm state
MaxIts - maximum number of inner iterations.
If MaxIts=0, the number of iterations is unlimited.
-- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicsetmaxits(const minbleicstate &state, const ae_int_t maxits);
/*************************************************************************
This function turns on/off reporting. This function turns on/off reporting.
INPUT PARAMETERS: INPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
NeedXRep- whether iteration reports are needed or not NeedXRep- whether iteration reports are needed or not
If NeedXRep is True, algorithm will call rep() callback function if it is If NeedXRep is True, algorithm will call rep() callback function if it is
provided to MinBLEICOptimize(). provided to MinBLEICOptimize().
-- ALGLIB -- -- ALGLIB --
skipping to change at line 1909 skipping to change at line 1905
/************************************************************************* /*************************************************************************
BLEIC results BLEIC results
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state State - algorithm state
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
X - array[0..N-1], solution X - array[0..N-1], solution
Rep - optimization report. You should check Rep.TerminationType Rep - optimization report. You should check Rep.TerminationType
in order to distinguish successful termination from in order to distinguish successful termination from
unsuccessful one. unsuccessful one:
* -7 gradient verification failed.
See MinBLEICSetGradientCheck() for more information.
* -3 inconsistent constraints. Feasible point is
either nonexistent or too hard to find. Try to
restart optimizer with better initial approximation
* 1 relative function improvement is no more than EpsF.
* 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG
* 5 MaxIts steps were taken
More information about fields of this structure can be More information about fields of this structure can be
found in the comments on MinBLEICReport datatype. found in the comments on MinBLEICReport datatype.
-- ALGLIB -- -- ALGLIB --
Copyright 28.11.2010 by Bochkanov Sergey Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep); void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep);
/************************************************************************* /*************************************************************************
BLEIC results BLEIC results
skipping to change at line 2623 skipping to change at line 2628
All elements of C (including right part) must be finite. All elements of C (including right part) must be finite.
CT - type of constraints, array[K]: CT - type of constraints, array[K]:
* if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
* if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
* if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
K - number of equality/inequality constraints, K>=0: K - number of equality/inequality constraints, K>=0:
* if given, only leading K elements of C/CT are used * if given, only leading K elements of C/CT are used
* if not given, automatically determined from sizes of C/CT * if not given, automatically determined from sizes of C/CT
NOTE 1: linear (non-bound) constraints are satisfied only approximately - NOTE 1: linear (non-bound) constraints are satisfied only approximately -
there always exists some minor violation (about 10^10...10^13) due there always exists some minor violation (about 10^-10...10^-13)
to rounding errors. due to numerical errors.
-- ALGLIB -- -- ALGLIB --
Copyright 19.06.2012 by Bochkanov Sergey Copyright 19.06.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k); void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct); void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct);
/************************************************************************* /*************************************************************************
This function solves quadratic programming problem. This function solves quadratic programming problem.
You should call it after setting solver options with MinQPSet...() calls. You should call it after setting solver options with MinQPSet...() calls.
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state State - algorithm state
You should use MinQPResults() function to access results after calls You should use MinQPResults() function to access results after calls
to this function. to this function.
-- ALGLIB -- -- ALGLIB --
Copyright 11.01.2011 by Bochkanov Sergey Copyright 11.01.2011 by Bochkanov Sergey.
Special thanks to Elvira Illarionova for important suggestions on
the linearly constrained QP algorithm.
*************************************************************************/ *************************************************************************/
void minqpoptimize(const minqpstate &state); void minqpoptimize(const minqpstate &state);
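A minimal end-to-end sketch of this call sequence (the 2x2 problem minimizing 0.5*x'*A*x + b'*x subject to x0+x1=1 is illustrative, not part of the library documentation):

    #include "optimization.h"
    using namespace alglib;

    int main()
    {
        real_2d_array a     = "[[2,0],[0,2]]"; // quadratic term A (full symmetric form)
        real_1d_array b     = "[-6,-4]";       // linear term
        real_2d_array c     = "[[1,1,1]]";     // x0 + x1 = 1; last column is the right part
        integer_1d_array ct = "[0]";           // CT[0]=0 => equality constraint
        real_1d_array x;
        minqpstate state;
        minqpreport rep;

        minqpcreate(2, state);
        minqpsetquadraticterm(state, a);
        minqpsetlinearterm(state, b);
        minqpsetlc(state, c, ct);
        minqpoptimize(state);
        minqpresults(state, x, rep);           // rep.terminationtype>0 on success
        return 0;
    }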
/************************************************************************* /*************************************************************************
QP solver results QP solver results
INPUT PARAMETERS: INPUT PARAMETERS:
State - algorithm state State - algorithm state
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
skipping to change at line 3479 skipping to change at line 3486
ae_int_t* gpaits, ae_int_t* gpaits,
ae_state *_state); ae_state *_state);
ae_bool derivativecheck(double f0, ae_bool derivativecheck(double f0,
double df0, double df0,
double f1, double f1,
double df1, double df1,
double f, double f,
double df, double df,
double width, double width,
ae_state *_state); ae_state *_state);
void cqminit(ae_int_t n, convexquadraticmodel* s, ae_state *_state);
void cqmseta(convexquadraticmodel* s,
/* Real */ ae_matrix* a,
ae_bool isupper,
double alpha,
ae_state *_state);
void cqmrewritedensediagonal(convexquadraticmodel* s,
/* Real */ ae_vector* z,
ae_state *_state);
void cqmsetd(convexquadraticmodel* s,
/* Real */ ae_vector* d,
double tau,
ae_state *_state);
void cqmdropa(convexquadraticmodel* s, ae_state *_state);
void cqmsetb(convexquadraticmodel* s,
/* Real */ ae_vector* b,
ae_state *_state);
void cqmsetq(convexquadraticmodel* s,
/* Real */ ae_matrix* q,
/* Real */ ae_vector* r,
ae_int_t k,
double theta,
ae_state *_state);
void cqmsetactiveset(convexquadraticmodel* s,
/* Real */ ae_vector* x,
/* Boolean */ ae_vector* activeset,
ae_state *_state);
double cqmeval(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
void cqmevalx(convexquadraticmodel* s,
/* Real */ ae_vector* x,
double* r,
double* noise,
ae_state *_state);
void cqmgradunconstrained(convexquadraticmodel* s,
/* Real */ ae_vector* x,
/* Real */ ae_vector* g,
ae_state *_state);
double cqmxtadx2(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
void cqmadx(convexquadraticmodel* s,
/* Real */ ae_vector* x,
/* Real */ ae_vector* y,
ae_state *_state);
ae_bool cqmconstrainedoptimum(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
void cqmscalevector(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
double cqmdebugconstrainedevalt(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
double cqmdebugconstrainedevale(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
ae_bool _convexquadraticmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _convexquadraticmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _convexquadraticmodel_clear(void* _p);
void _convexquadraticmodel_destroy(void* _p);
void snnlsinit(ae_int_t nsmax,
ae_int_t ndmax,
ae_int_t nrmax,
snnlssolver* s,
ae_state *_state);
void snnlssetproblem(snnlssolver* s,
/* Real */ ae_matrix* a,
/* Real */ ae_vector* b,
ae_int_t ns,
ae_int_t nd,
ae_int_t nr,
ae_state *_state);
void snnlsdropnnc(snnlssolver* s, ae_int_t idx, ae_state *_state);
void snnlssolve(snnlssolver* s,
/* Real */ ae_vector* x,
ae_state *_state);
ae_bool _snnlssolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _snnlssolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _snnlssolver_clear(void* _p);
void _snnlssolver_destroy(void* _p);
void sasinit(ae_int_t n, sactiveset* s, ae_state *_state);
void sassetscale(sactiveset* state,
/* Real */ ae_vector* s,
ae_state *_state);
void sassetprecdiag(sactiveset* state,
/* Real */ ae_vector* d,
ae_state *_state);
void sassetbc(sactiveset* state,
/* Real */ ae_vector* bndl,
/* Real */ ae_vector* bndu,
ae_state *_state);
void sassetlc(sactiveset* state,
/* Real */ ae_matrix* c,
/* Integer */ ae_vector* ct,
ae_int_t k,
ae_state *_state);
void sassetlcx(sactiveset* state,
/* Real */ ae_matrix* cleic,
ae_int_t nec,
ae_int_t nic,
ae_state *_state);
ae_bool sasstartoptimization(sactiveset* state,
/* Real */ ae_vector* x,
ae_state *_state);
void sasexploredirection(sactiveset* state,
/* Real */ ae_vector* d,
double* stpmax,
ae_int_t* cidx,
double* vval,
ae_state *_state);
ae_int_t sasmoveto(sactiveset* state,
/* Real */ ae_vector* xn,
ae_bool needact,
ae_int_t cidx,
double cval,
ae_state *_state);
void sasimmediateactivation(sactiveset* state,
ae_int_t cidx,
double cval,
ae_state *_state);
void sasconstraineddescent(sactiveset* state,
/* Real */ ae_vector* g,
/* Real */ ae_vector* d,
ae_state *_state);
void sasconstraineddescentprec(sactiveset* state,
/* Real */ ae_vector* g,
/* Real */ ae_vector* d,
ae_state *_state);
void sasconstraineddirection(sactiveset* state,
/* Real */ ae_vector* d,
ae_state *_state);
void sasconstraineddirectionprec(sactiveset* state,
/* Real */ ae_vector* d,
ae_state *_state);
void sascorrection(sactiveset* state,
/* Real */ ae_vector* x,
double* penalty,
ae_state *_state);
double sasscaledconstrainednorm(sactiveset* state,
/* Real */ ae_vector* d,
ae_state *_state);
void sasstopoptimization(sactiveset* state, ae_state *_state);
void sasreactivateconstraints(sactiveset* state,
/* Real */ ae_vector* gc,
ae_state *_state);
void sasreactivateconstraintsprec(sactiveset* state,
/* Real */ ae_vector* gc,
ae_state *_state);
void sasrebuildbasis(sactiveset* state, ae_state *_state);
ae_bool _sactiveset_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _sactiveset_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _sactiveset_clear(void* _p);
void _sactiveset_destroy(void* _p);
void mincgcreate(ae_int_t n, void mincgcreate(ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
mincgstate* state, mincgstate* state,
ae_state *_state); ae_state *_state);
void mincgcreatef(ae_int_t n, void mincgcreatef(ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
double diffstep, double diffstep,
mincgstate* state, mincgstate* state,
ae_state *_state); ae_state *_state);
void mincgsetcond(mincgstate* state, void mincgsetcond(mincgstate* state,
skipping to change at line 3534 skipping to change at line 3696
/* Real */ ae_vector* c, /* Real */ ae_vector* c,
/* Real */ ae_matrix* v, /* Real */ ae_matrix* v,
ae_int_t vcnt, ae_int_t vcnt,
ae_state *_state); ae_state *_state);
void mincgsetprecvarpart(mincgstate* state, void mincgsetprecvarpart(mincgstate* state,
/* Real */ ae_vector* d2, /* Real */ ae_vector* d2,
ae_state *_state); ae_state *_state);
void mincgsetgradientcheck(mincgstate* state, void mincgsetgradientcheck(mincgstate* state,
double teststep, double teststep,
ae_state *_state); ae_state *_state);
ae_bool _mincgstate_init(mincgstate* p, ae_state *_state, ae_bool make_automatic); ae_bool _mincgstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mincgstate_init_copy(mincgstate* dst, mincgstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _mincgstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mincgstate_clear(mincgstate* p); void _mincgstate_clear(void* _p);
void _mincgstate_destroy(void* _p);
ae_bool _mincgreport_init(mincgreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _mincgreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mincgreport_init_copy(mincgreport* dst, mincgreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _mincgreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mincgreport_clear(mincgreport* p); void _mincgreport_clear(void* _p);
void _mincgreport_destroy(void* _p);
void minbleiccreate(ae_int_t n, void minbleiccreate(ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minbleicstate* state, minbleicstate* state,
ae_state *_state); ae_state *_state);
void minbleiccreatef(ae_int_t n, void minbleiccreatef(ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
double diffstep, double diffstep,
minbleicstate* state, minbleicstate* state,
ae_state *_state); ae_state *_state);
void minbleicsetbc(minbleicstate* state, void minbleicsetbc(minbleicstate* state,
/* Real */ ae_vector* bndl, /* Real */ ae_vector* bndl,
/* Real */ ae_vector* bndu, /* Real */ ae_vector* bndu,
ae_state *_state); ae_state *_state);
void minbleicsetlc(minbleicstate* state, void minbleicsetlc(minbleicstate* state,
/* Real */ ae_matrix* c, /* Real */ ae_matrix* c,
/* Integer */ ae_vector* ct, /* Integer */ ae_vector* ct,
ae_int_t k, ae_int_t k,
ae_state *_state); ae_state *_state);
void minbleicsetinnercond(minbleicstate* state, void minbleicsetcond(minbleicstate* state,
double epsg, double epsg,
double epsf, double epsf,
double epsx, double epsx,
ae_state *_state); ae_int_t maxits,
void minbleicsetoutercond(minbleicstate* state,
double epsx,
double epsi,
ae_state *_state); ae_state *_state);
void minbleicsetscale(minbleicstate* state, void minbleicsetscale(minbleicstate* state,
/* Real */ ae_vector* s, /* Real */ ae_vector* s,
ae_state *_state); ae_state *_state);
void minbleicsetprecdefault(minbleicstate* state, ae_state *_state); void minbleicsetprecdefault(minbleicstate* state, ae_state *_state);
void minbleicsetprecdiag(minbleicstate* state, void minbleicsetprecdiag(minbleicstate* state,
/* Real */ ae_vector* d, /* Real */ ae_vector* d,
ae_state *_state); ae_state *_state);
void minbleicsetprecscale(minbleicstate* state, ae_state *_state); void minbleicsetprecscale(minbleicstate* state, ae_state *_state);
void minbleicsetmaxits(minbleicstate* state,
ae_int_t maxits,
ae_state *_state);
void minbleicsetxrep(minbleicstate* state, void minbleicsetxrep(minbleicstate* state,
ae_bool needxrep, ae_bool needxrep,
ae_state *_state); ae_state *_state);
void minbleicsetstpmax(minbleicstate* state, void minbleicsetstpmax(minbleicstate* state,
double stpmax, double stpmax,
ae_state *_state); ae_state *_state);
ae_bool minbleiciteration(minbleicstate* state, ae_state *_state); ae_bool minbleiciteration(minbleicstate* state, ae_state *_state);
void minbleicresults(minbleicstate* state, void minbleicresults(minbleicstate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minbleicreport* rep, minbleicreport* rep,
skipping to change at line 3599 skipping to change at line 3757
void minbleicresultsbuf(minbleicstate* state, void minbleicresultsbuf(minbleicstate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minbleicreport* rep, minbleicreport* rep,
ae_state *_state); ae_state *_state);
void minbleicrestartfrom(minbleicstate* state, void minbleicrestartfrom(minbleicstate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
ae_state *_state); ae_state *_state);
void minbleicsetgradientcheck(minbleicstate* state, void minbleicsetgradientcheck(minbleicstate* state,
double teststep, double teststep,
ae_state *_state); ae_state *_state);
ae_bool _minbleicstate_init(minbleicstate* p, ae_state *_state, ae_bool make_automatic); ae_bool _minbleicstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minbleicstate_init_copy(minbleicstate* dst, minbleicstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minbleicstate_clear(minbleicstate* p); void _minbleicstate_clear(void* _p);
void _minbleicstate_destroy(void* _p);
ae_bool _minbleicreport_init(minbleicreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _minbleicreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minbleicreport_init_copy(minbleicreport* dst, minbleicreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _minbleicreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minbleicreport_clear(minbleicreport* p); void _minbleicreport_clear(void* _p);
void _minbleicreport_destroy(void* _p);
void minlbfgscreate(ae_int_t n, void minlbfgscreate(ae_int_t n,
ae_int_t m, ae_int_t m,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minlbfgsstate* state, minlbfgsstate* state,
ae_state *_state); ae_state *_state);
void minlbfgscreatef(ae_int_t n, void minlbfgscreatef(ae_int_t n,
ae_int_t m, ae_int_t m,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
double diffstep, double diffstep,
minlbfgsstate* state, minlbfgsstate* state,
skipping to change at line 3662 skipping to change at line 3822
void minlbfgsresultsbuf(minlbfgsstate* state, void minlbfgsresultsbuf(minlbfgsstate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minlbfgsreport* rep, minlbfgsreport* rep,
ae_state *_state); ae_state *_state);
void minlbfgsrestartfrom(minlbfgsstate* state, void minlbfgsrestartfrom(minlbfgsstate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
ae_state *_state); ae_state *_state);
void minlbfgssetgradientcheck(minlbfgsstate* state, void minlbfgssetgradientcheck(minlbfgsstate* state,
double teststep, double teststep,
ae_state *_state); ae_state *_state);
ae_bool _minlbfgsstate_init(minlbfgsstate* p, ae_state *_state, ae_bool make_automatic); ae_bool _minlbfgsstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minlbfgsstate_init_copy(minlbfgsstate* dst, minlbfgsstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _minlbfgsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlbfgsstate_clear(minlbfgsstate* p); void _minlbfgsstate_clear(void* _p);
void _minlbfgsstate_destroy(void* _p);
ae_bool _minlbfgsreport_init(minlbfgsreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _minlbfgsreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minlbfgsreport_init_copy(minlbfgsreport* dst, minlbfgsreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _minlbfgsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlbfgsreport_clear(minlbfgsreport* p); void _minlbfgsreport_clear(void* _p);
void _minlbfgsreport_destroy(void* _p);
void cqminit(ae_int_t n, convexquadraticmodel* s, ae_state *_state);
void cqmseta(convexquadraticmodel* s,
/* Real */ ae_matrix* a,
ae_bool isupper,
double alpha,
ae_state *_state);
void cqmrewritedensediagonal(convexquadraticmodel* s,
/* Real */ ae_vector* z,
ae_state *_state);
void cqmsetd(convexquadraticmodel* s,
/* Real */ ae_vector* d,
double tau,
ae_state *_state);
void cqmdropa(convexquadraticmodel* s, ae_state *_state);
void cqmsetb(convexquadraticmodel* s,
/* Real */ ae_vector* b,
ae_state *_state);
void cqmsetq(convexquadraticmodel* s,
/* Real */ ae_matrix* q,
/* Real */ ae_vector* r,
ae_int_t k,
double theta,
ae_state *_state);
void cqmsetactiveset(convexquadraticmodel* s,
/* Real */ ae_vector* x,
/* Boolean */ ae_vector* activeset,
ae_state *_state);
double cqmeval(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
void cqmevalx(convexquadraticmodel* s,
/* Real */ ae_vector* x,
double* r,
double* noise,
ae_state *_state);
void cqmgradunconstrained(convexquadraticmodel* s,
/* Real */ ae_vector* x,
/* Real */ ae_vector* g,
ae_state *_state);
double cqmxtadx2(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
void cqmadx(convexquadraticmodel* s,
/* Real */ ae_vector* x,
/* Real */ ae_vector* y,
ae_state *_state);
ae_bool cqmconstrainedoptimum(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
void cqmscalevector(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
double cqmdebugconstrainedevalt(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
double cqmdebugconstrainedevale(convexquadraticmodel* s,
/* Real */ ae_vector* x,
ae_state *_state);
ae_bool _convexquadraticmodel_init(convexquadraticmodel* p, ae_state *_state, ae_bool make_automatic);
ae_bool _convexquadraticmodel_init_copy(convexquadraticmodel* dst, convexquadraticmodel* src, ae_state *_state, ae_bool make_automatic);
void _convexquadraticmodel_clear(convexquadraticmodel* p);
void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state); void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state);
void minqpsetlinearterm(minqpstate* state, void minqpsetlinearterm(minqpstate* state,
/* Real */ ae_vector* b, /* Real */ ae_vector* b,
ae_state *_state); ae_state *_state);
void minqpsetquadraticterm(minqpstate* state, void minqpsetquadraticterm(minqpstate* state,
/* Real */ ae_matrix* a, /* Real */ ae_matrix* a,
ae_bool isupper, ae_bool isupper,
ae_state *_state); ae_state *_state);
void minqpsetstartingpoint(minqpstate* state, void minqpsetstartingpoint(minqpstate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
skipping to change at line 3779 skipping to change at line 3880
ae_state *_state); ae_state *_state);
void minqprewritediagonal(minqpstate* state, void minqprewritediagonal(minqpstate* state,
/* Real */ ae_vector* s, /* Real */ ae_vector* s,
ae_state *_state); ae_state *_state);
void minqpsetstartingpointfast(minqpstate* state, void minqpsetstartingpointfast(minqpstate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
ae_state *_state); ae_state *_state);
void minqpsetoriginfast(minqpstate* state, void minqpsetoriginfast(minqpstate* state,
/* Real */ ae_vector* xorigin, /* Real */ ae_vector* xorigin,
ae_state *_state); ae_state *_state);
ae_bool _minqpstate_init(minqpstate* p, ae_state *_state, ae_bool make_automatic); ae_bool _minqpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minqpstate_init_copy(minqpstate* dst, minqpstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _minqpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minqpstate_clear(minqpstate* p); void _minqpstate_clear(void* _p);
void _minqpstate_destroy(void* _p);
ae_bool _minqpreport_init(minqpreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _minqpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minqpreport_init_copy(minqpreport* dst, minqpreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _minqpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minqpreport_clear(minqpreport* p); void _minqpreport_clear(void* _p);
void _minqpreport_destroy(void* _p);
void minlmcreatevj(ae_int_t n, void minlmcreatevj(ae_int_t n,
ae_int_t m, ae_int_t m,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minlmstate* state, minlmstate* state,
ae_state *_state); ae_state *_state);
void minlmcreatev(ae_int_t n, void minlmcreatev(ae_int_t n,
ae_int_t m, ae_int_t m,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
double diffstep, double diffstep,
minlmstate* state, minlmstate* state,
skipping to change at line 3848 skipping to change at line 3951
minlmstate* state, minlmstate* state,
ae_state *_state); ae_state *_state);
void minlmcreatefj(ae_int_t n, void minlmcreatefj(ae_int_t n,
ae_int_t m, ae_int_t m,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minlmstate* state, minlmstate* state,
ae_state *_state); ae_state *_state);
void minlmsetgradientcheck(minlmstate* state, void minlmsetgradientcheck(minlmstate* state,
double teststep, double teststep,
ae_state *_state); ae_state *_state);
ae_bool _minlmstate_init(minlmstate* p, ae_state *_state, ae_bool make_automatic); ae_bool _minlmstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minlmstate_init_copy(minlmstate* dst, minlmstate* src, ae_state *_state, ae_bool make_automatic); ae_bool _minlmstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlmstate_clear(minlmstate* p); void _minlmstate_clear(void* _p);
void _minlmstate_destroy(void* _p);
ae_bool _minlmreport_init(minlmreport* p, ae_state *_state, ae_bool make_automatic); ae_bool _minlmreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minlmreport_init_copy(minlmreport* dst, minlmreport* src, ae_state *_state, ae_bool make_automatic); ae_bool _minlmreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlmreport_clear(minlmreport* p); void _minlmreport_clear(void* _p);
void _minlmreport_destroy(void* _p);
void minlbfgssetdefaultpreconditioner(minlbfgsstate* state, void minlbfgssetdefaultpreconditioner(minlbfgsstate* state,
ae_state *_state); ae_state *_state);
void minlbfgssetcholeskypreconditioner(minlbfgsstate* state, void minlbfgssetcholeskypreconditioner(minlbfgsstate* state,
/* Real */ ae_matrix* p, /* Real */ ae_matrix* p,
ae_bool isupper, ae_bool isupper,
ae_state *_state); ae_state *_state);
void minbleicsetbarrierwidth(minbleicstate* state, void minbleicsetbarrierwidth(minbleicstate* state,
double mu, double mu,
ae_state *_state); ae_state *_state);
void minbleicsetbarrierdecay(minbleicstate* state, void minbleicsetbarrierdecay(minbleicstate* state,
skipping to change at line 3897 skipping to change at line 4002
ae_state *_state); ae_state *_state);
void minasaresultsbuf(minasastate* state, void minasaresultsbuf(minasastate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
minasareport* rep, minasareport* rep,
ae_state *_state); ae_state *_state);
void minasarestartfrom(minasastate* state, void minasarestartfrom(minasastate* state,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* bndl, /* Real */ ae_vector* bndl,
/* Real */ ae_vector* bndu, /* Real */ ae_vector* bndu,
ae_state *_state); ae_state *_state);
ae_bool _minasastate_init(minasastate* p, ae_state *_state, ae_bool make_automatic); ae_bool _minasastate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minasastate_init_copy(minasastate* dst, minasastate* src, ae_state *_state, ae_bool make_automatic); ae_bool _minasastate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minasastate_clear(minasastate* p); void _minasastate_clear(void* _p);
void _minasastate_destroy(void* _p);
ae_bool _minasareport_init(minasareport* p, ae_state *_state, ae_bool make_automatic); ae_bool _minasareport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minasareport_init_copy(minasareport* dst, minasareport* src, ae_state *_state, ae_bool make_automatic); ae_bool _minasareport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minasareport_clear(minasareport* p); void _minasareport_clear(void* _p);
void _minasareport_destroy(void* _p);
ae_bool _linfeassolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _linfeassolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _linfeassolver_clear(void* _p);
void _linfeassolver_destroy(void* _p);
} }
#endif #endif
 End of changes. 46 change blocks. 326 lines changed or deleted, 442 lines changed or added.


 solvers.h   solvers.h 
skipping to change at line 52 skipping to change at line 52
ae_int_t n; ae_int_t n;
ae_int_t k; ae_int_t k;
} densesolverlsreport; } densesolverlsreport;
typedef struct typedef struct
{ {
normestimatorstate nes; normestimatorstate nes;
ae_vector rx; ae_vector rx;
ae_vector b; ae_vector b;
ae_int_t n; ae_int_t n;
ae_int_t m; ae_int_t m;
ae_int_t prectype;
ae_vector ui; ae_vector ui;
ae_vector uip1; ae_vector uip1;
ae_vector vi; ae_vector vi;
ae_vector vip1; ae_vector vip1;
ae_vector omegai; ae_vector omegai;
ae_vector omegaip1; ae_vector omegaip1;
double alphai; double alphai;
double alphaip1; double alphaip1;
double betai; double betai;
double betaip1; double betaip1;
skipping to change at line 95 skipping to change at line 96
ae_bool xupdated; ae_bool xupdated;
ae_bool needmv; ae_bool needmv;
ae_bool needmtv; ae_bool needmtv;
ae_bool needmv2; ae_bool needmv2;
ae_bool needvmv; ae_bool needvmv;
ae_bool needprec; ae_bool needprec;
ae_int_t repiterationscount; ae_int_t repiterationscount;
ae_int_t repnmv; ae_int_t repnmv;
ae_int_t repterminationtype; ae_int_t repterminationtype;
ae_bool running; ae_bool running;
ae_vector tmpd;
ae_vector tmpx;
rcommstate rstate; rcommstate rstate;
} linlsqrstate; } linlsqrstate;
typedef struct typedef struct
{ {
ae_int_t iterationscount; ae_int_t iterationscount;
ae_int_t nmv; ae_int_t nmv;
ae_int_t terminationtype; ae_int_t terminationtype;
} linlsqrreport; } linlsqrreport;
typedef struct typedef struct
{ {
ae_vector rx; ae_vector rx;
ae_vector b; ae_vector b;
ae_int_t n; ae_int_t n;
ae_int_t prectype;
ae_vector cx; ae_vector cx;
ae_vector cr; ae_vector cr;
ae_vector cz; ae_vector cz;
ae_vector p; ae_vector p;
ae_vector r; ae_vector r;
ae_vector z; ae_vector z;
double alpha; double alpha;
double beta; double beta;
double r2; double r2;
double meritfunction; double meritfunction;
skipping to change at line 138 skipping to change at line 142
ae_bool xupdated; ae_bool xupdated;
ae_bool needmv; ae_bool needmv;
ae_bool needmtv; ae_bool needmtv;
ae_bool needmv2; ae_bool needmv2;
ae_bool needvmv; ae_bool needvmv;
ae_bool needprec; ae_bool needprec;
ae_int_t repiterationscount; ae_int_t repiterationscount;
ae_int_t repnmv; ae_int_t repnmv;
ae_int_t repterminationtype; ae_int_t repterminationtype;
ae_bool running; ae_bool running;
ae_vector tmpd;
rcommstate rstate; rcommstate rstate;
} lincgstate; } lincgstate;
typedef struct typedef struct
{ {
ae_int_t iterationscount; ae_int_t iterationscount;
ae_int_t nmv; ae_int_t nmv;
ae_int_t terminationtype; ae_int_t terminationtype;
double r2; double r2;
} lincgreport; } lincgreport;
typedef struct typedef struct
skipping to change at line 1119 skipping to change at line 1124
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
State - structure which stores algorithm state State - structure which stores algorithm state
-- ALGLIB -- -- ALGLIB --
Copyright 30.11.2011 by Bochkanov Sergey Copyright 30.11.2011 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void linlsqrcreate(const ae_int_t m, const ae_int_t n, linlsqrstate &state); void linlsqrcreate(const ae_int_t m, const ae_int_t n, linlsqrstate &state);
/************************************************************************* /*************************************************************************
This function changes preconditioning settings of the LinLSQRSolveSparse()
function. By default, SolveSparse() uses a diagonal preconditioner, but if
you want to use the solver without preconditioning, you can call this
function, which forces the solver to use the unit matrix for preconditioning.
INPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB --
Copyright 19.11.2012 by Bochkanov Sergey
*************************************************************************/
void linlsqrsetprecunit(const linlsqrstate &state);
/*************************************************************************
This function changes preconditioning settings of the LinLSQRSolveSparse()
function. LinLSQRSolveSparse() will use the diagonal of the system matrix
as the preconditioner. This preconditioning mode is active by default.
INPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB --
Copyright 19.11.2012 by Bochkanov Sergey
*************************************************************************/
void linlsqrsetprecdiag(const linlsqrstate &state);
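/*************************************************************************
A minimal usage sketch for the two preconditioning modes above, assuming
the standard ALGLIB C++ interface (alglib namespace); the 5x3 problem
size is an arbitrary example value:

    alglib::linlsqrstate s;
    alglib::linlsqrcreate(5, 3, s);    // 5x3 least squares problem
    alglib::linlsqrsetprecunit(s);     // preconditioning off (unit matrix)
    alglib::linlsqrsetprecdiag(s);     // restore the default diagonal mode
*************************************************************************/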
/*************************************************************************
This function sets the optional Tikhonov regularization coefficient.
It is zero by default.
INPUT PARAMETERS:
LambdaI - regularization factor, LambdaI>=0
OUTPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB --
skipping to change at line 1179
State - algorithm state
A - sparse M*N matrix in the CRS format (you MUST convert it
to CRS format by calling SparseConvertToCRS() function
BEFORE you pass it to this function).
B - right part, array[M]
RESULT:
This function returns no result.
You can get the solution by calling LinLSQRResults()
NOTE: this function uses lightweight preconditioning - multiplication by
the inverse of diag(A). If you want, you can turn preconditioning off by
calling LinLSQRSetPrecUnit(). However, the cost of preconditioning is low,
and the preconditioner is very important for the solution of badly scaled
problems.
-- ALGLIB --
Copyright 30.11.2011 by Bochkanov Sergey
*************************************************************************/
void linlsqrsolvesparse(const linlsqrstate &state, const sparsematrix &a, const real_1d_array &b);
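/*************************************************************************
A minimal end-to-end sketch for the declaration above, assuming the
standard ALGLIB C++ interface (sparsecreate/sparseset/sparseconverttocrs
come from the linear algebra header of the same distribution). The 3x2
matrix and right part are made-up example data; the essential step is the
CRS conversion before the solve call:

    using namespace alglib;

    sparsematrix a;
    sparsecreate(3, 2, a);                 // M=3 rows, N=2 columns
    sparseset(a, 0, 0, 1.0); sparseset(a, 0, 1, 1.0);
    sparseset(a, 1, 0, 1.0); sparseset(a, 1, 1, 2.0);
    sparseset(a, 2, 0, 1.0); sparseset(a, 2, 1, 3.0);
    sparseconverttocrs(a);                 // REQUIRED before SolveSparse

    real_1d_array b = "[1, 2, 3]";         // right part, array[M]
    linlsqrstate  state;
    linlsqrreport rep;
    real_1d_array x;

    linlsqrcreate(3, 2, state);
    linlsqrsolvesparse(state, a, b);
    linlsqrresults(state, x, rep);         // x ~ [0, 1] for this data

    // rep.terminationtype>0 indicates success; rep.iterationscount and
    // rep.nmv report iteration and matrix-vector product counts.
*************************************************************************/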
/*************************************************************************
This function sets stopping criteria.
INPUT PARAMETERS:
EpsA - algorithm will be stopped if ||A^T*Rk||/(||A||*||Rk||)<=EpsA.
skipping to change at line 1295
OUTPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB --
Copyright 14.11.2011 by Bochkanov Sergey
*************************************************************************/
void lincgsetstartingpoint(const lincgstate &state, const real_1d_array &x);
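/*************************************************************************
A short warm-start sketch, assuming the standard ALGLIB C++ interface;
the starting guess is an arbitrary example value (e.g. the solution of a
nearby system):

    alglib::lincgstate s;
    alglib::lincgcreate(2, s);
    alglib::real_1d_array x0 = "[0.9, 1.1]";
    alglib::lincgsetstartingpoint(s, x0);  // CG iterations start from x0
*************************************************************************/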
/*************************************************************************
This function changes preconditioning settings of LinCGSolveSparse()
function. By default, SolveSparse() uses a diagonal preconditioner, but if
you want to use the solver without preconditioning, you can call this
function, which forces the solver to use the unit matrix for preconditioning.
INPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB --
Copyright 19.11.2012 by Bochkanov Sergey
*************************************************************************/
void lincgsetprecunit(const lincgstate &state);
/*************************************************************************
This function changes preconditioning settings of LinCGSolveSparse()
function. LinCGSolveSparse() will use the diagonal of the system matrix as
the preconditioner. This preconditioning mode is active by default.
INPUT PARAMETERS:
State - structure which stores algorithm state
-- ALGLIB --
Copyright 19.11.2012 by Bochkanov Sergey
*************************************************************************/
void lincgsetprecdiag(const lincgstate &state);
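/*************************************************************************
Selecting between the two CG preconditioning modes above (a sketch
assuming the standard ALGLIB C++ interface; N=4 is an example value):

    alglib::lincgstate s;
    alglib::lincgcreate(4, s);
    alglib::lincgsetprecunit(s);    // turn preconditioning off
    alglib::lincgsetprecdiag(s);    // restore the default diagonal mode
*************************************************************************/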
/*************************************************************************
This function sets stopping criteria.
INPUT PARAMETERS:
EpsF - algorithm will be stopped if norm of residual is less than
EpsF*||b||.
MaxIts - algorithm will be stopped if number of iterations is more
than MaxIts.
OUTPUT PARAMETERS:
State - structure which stores algorithm state
skipping to change at line 1360
* IsUpper=True => only upper triangle is used and lower
triangle is not referenced at all
* IsUpper=False => only lower triangle is used and upper
triangle is not referenced at all
B - right part, array[N]
RESULT:
This function returns no result.
You can get the solution by calling LinCGResults()
NOTE: this function uses lightweight preconditioning - multiplication by
the inverse of diag(A). If you want, you can turn preconditioning off by
calling LinCGSetPrecUnit(). However, the cost of preconditioning is low,
and the preconditioner is very important for the solution of badly scaled
problems.
-- ALGLIB --
Copyright 14.11.2011 by Bochkanov Sergey
*************************************************************************/
void lincgsolvesparse(const lincgstate &state, const sparsematrix &a, const bool isupper, const real_1d_array &b);
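/*************************************************************************
A minimal end-to-end sketch for the declaration above, assuming the
standard ALGLIB C++ interface. A is a 2x2 SPD matrix given by its upper
triangle only (IsUpper=true), so the lower triangle is never touched;
all values are made-up example data:

    using namespace alglib;

    sparsematrix a;
    sparsecreate(2, 2, a);
    sparseset(a, 0, 0, 4.0);               // diagonal element
    sparseset(a, 0, 1, 1.0);               // strictly upper element
    sparseset(a, 1, 1, 3.0);               // diagonal element
    sparseconverttocrs(a);                 // REQUIRED before SolveSparse

    real_1d_array b = "[1, 2]";
    real_1d_array x;
    lincgstate    state;
    lincgreport   rep;

    lincgcreate(2, state);
    lincgsetcond(state, 1.0E-8, 0);        // EpsF=1e-8, MaxIts=0 (no limit)
    lincgsolvesparse(state, a, true, b);
    lincgresults(state, x, rep);           // x ~ [1/11, 7/11] for this data
*************************************************************************/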
/*************************************************************************
CG-solver: results.
This function must be called after LinCGSolve
skipping to change at line 1844
ae_state *_state);
void rmatrixsolvels(/* Real */ ae_matrix* a,
ae_int_t nrows,
ae_int_t ncols,
/* Real */ ae_vector* b,
double threshold,
ae_int_t* info,
densesolverlsreport* rep,
/* Real */ ae_vector* x,
ae_state *_state);
ae_bool _densesolverreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _densesolverreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _densesolverreport_clear(void* _p);
void _densesolverreport_destroy(void* _p);
ae_bool _densesolverlsreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _densesolverlsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _densesolverlsreport_clear(void* _p);
void _densesolverlsreport_destroy(void* _p);
void linlsqrcreate(ae_int_t m,
ae_int_t n,
linlsqrstate* state,
ae_state *_state);
void linlsqrsetb(linlsqrstate* state,
/* Real */ ae_vector* b,
ae_state *_state);
void linlsqrsetprecunit(linlsqrstate* state, ae_state *_state);
void linlsqrsetprecdiag(linlsqrstate* state, ae_state *_state);
void linlsqrsetlambdai(linlsqrstate* state,
double lambdai,
ae_state *_state);
ae_bool linlsqriteration(linlsqrstate* state, ae_state *_state);
void linlsqrsolvesparse(linlsqrstate* state,
sparsematrix* a,
/* Real */ ae_vector* b,
ae_state *_state);
void linlsqrsetcond(linlsqrstate* state,
double epsa,
skipping to change at line 1882
ae_int_t maxits,
ae_state *_state);
void linlsqrresults(linlsqrstate* state,
/* Real */ ae_vector* x,
linlsqrreport* rep,
ae_state *_state);
void linlsqrsetxrep(linlsqrstate* state,
ae_bool needxrep,
ae_state *_state);
void linlsqrrestart(linlsqrstate* state, ae_state *_state);
ae_bool _linlsqrstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _linlsqrstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _linlsqrstate_clear(void* _p);
void _linlsqrstate_destroy(void* _p);
ae_bool _linlsqrreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _linlsqrreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _linlsqrreport_clear(void* _p);
void _linlsqrreport_destroy(void* _p);
void lincgcreate(ae_int_t n, lincgstate* state, ae_state *_state);
void lincgsetstartingpoint(lincgstate* state,
/* Real */ ae_vector* x,
ae_state *_state);
void lincgsetb(lincgstate* state,
/* Real */ ae_vector* b,
ae_state *_state);
void lincgsetprecunit(lincgstate* state, ae_state *_state);
void lincgsetprecdiag(lincgstate* state, ae_state *_state);
void lincgsetcond(lincgstate* state,
double epsf,
ae_int_t maxits,
ae_state *_state);
ae_bool lincgiteration(lincgstate* state, ae_state *_state);
void lincgsolvesparse(lincgstate* state,
sparsematrix* a,
ae_bool isupper,
/* Real */ ae_vector* b,
ae_state *_state);
skipping to change at line 1921
lincgreport* rep,
ae_state *_state);
void lincgsetrestartfreq(lincgstate* state,
ae_int_t srf,
ae_state *_state);
void lincgsetrupdatefreq(lincgstate* state,
ae_int_t freq,
ae_state *_state);
void lincgsetxrep(lincgstate* state, ae_bool needxrep, ae_state *_state);
void lincgrestart(lincgstate* state, ae_state *_state);
ae_bool _lincgstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _lincgstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _lincgstate_clear(void* _p);
void _lincgstate_destroy(void* _p);
ae_bool _lincgreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _lincgreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _lincgreport_clear(void* _p);
void _lincgreport_destroy(void* _p);
void nleqcreatelm(ae_int_t n,
ae_int_t m,
/* Real */ ae_vector* x,
nleqstate* state,
ae_state *_state);
void nleqsetcond(nleqstate* state,
double epsf,
ae_int_t maxits,
ae_state *_state);
void nleqsetxrep(nleqstate* state, ae_bool needxrep, ae_state *_state);
skipping to change at line 1952
/* Real */ ae_vector* x,
nleqreport* rep,
ae_state *_state);
void nleqresultsbuf(nleqstate* state,
/* Real */ ae_vector* x,
nleqreport* rep,
ae_state *_state);
void nleqrestartfrom(nleqstate* state,
/* Real */ ae_vector* x,
ae_state *_state);
ae_bool _nleqstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _nleqstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _nleqstate_clear(void* _p);
void _nleqstate_destroy(void* _p);
ae_bool _nleqreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _nleqreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _nleqreport_clear(void* _p);
void _nleqreport_destroy(void* _p);
}
#endif