alglibinternal.h   alglibinternal.h 
skipping to change at line 75 skipping to change at line 75
typedef struct typedef struct
{ {
ae_complex val; ae_complex val;
} scomplex; } scomplex;
typedef struct typedef struct
{ {
ae_vector val; ae_vector val;
} scomplexarray; } scomplexarray;
typedef struct typedef struct
{ {
ae_int_t chunksize;
ae_int_t ntotal;
ae_int_t nin;
ae_int_t nout;
ae_int_t wcount;
ae_vector batch4buf;
ae_vector hpcbuf;
ae_matrix xy;
ae_matrix xy2;
ae_vector xyrow;
ae_vector x;
ae_vector y;
ae_vector desiredy;
double e;
ae_vector g;
ae_vector tmp0;
} mlpbuffers;
typedef struct
{
ae_bool brackt; ae_bool brackt;
ae_bool stage1; ae_bool stage1;
ae_int_t infoc; ae_int_t infoc;
double dg; double dg;
double dgm; double dgm;
double dginit; double dginit;
double dgtest; double dgtest;
double dgx; double dgx;
double dgxm; double dgxm;
double dgy; double dgy;
skipping to change at line 119 skipping to change at line 138
double stplen; double stplen;
double fcur; double fcur;
double stpmax; double stpmax;
ae_int_t fmax; ae_int_t fmax;
ae_int_t nfev; ae_int_t nfev;
ae_int_t info; ae_int_t info;
rcommstate rstate; rcommstate rstate;
} armijostate; } armijostate;
typedef struct typedef struct
{ {
ae_vector plan; ae_matrix entries;
ae_vector precomputed; ae_vector buffer;
ae_vector tmpbuf; ae_vector precr;
ae_vector stackbuf; ae_vector preci;
} ftplan; ae_shared_pool bluesteinpool;
} fasttransformplan;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS C++ INTERFACE // THIS SECTION CONTAINS C++ INTERFACE
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib namespace alglib
{ {
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS) // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
ae_int_t getrdfserializationcode(ae_state *_state); ae_bool seterrorflag(ae_bool* flag, ae_bool cond, ae_state *_state);
ae_int_t getkdtreeserializationcode(ae_state *_state); ae_bool seterrorflagdiff(ae_bool* flag,
ae_int_t getmlpserializationcode(ae_state *_state); double val,
ae_int_t getmlpeserializationcode(ae_state *_state); double refval,
ae_int_t getrbfserializationcode(ae_state *_state); double tol,
double s,
ae_state *_state);
void touchint(ae_int_t* a, ae_state *_state);
void touchreal(double* a, ae_state *_state);
double inttoreal(ae_int_t a, ae_state *_state);
double log2(double x, ae_state *_state);
ae_bool approxequalrel(double a, double b, double tol, ae_state *_state); ae_bool approxequalrel(double a, double b, double tol, ae_state *_state);
void taskgenint1d(double a, void taskgenint1d(double a,
double b, double b,
ae_int_t n, ae_int_t n,
/* Real */ ae_vector* x, /* Real */ ae_vector* x,
/* Real */ ae_vector* y, /* Real */ ae_vector* y,
ae_state *_state); ae_state *_state);
void taskgenint1dequidist(double a, void taskgenint1dequidist(double a,
double b, double b,
ae_int_t n, ae_int_t n,
skipping to change at line 195 skipping to change at line 221
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void rmatrixsetlengthatleast(/* Real */ ae_matrix* x, void rmatrixsetlengthatleast(/* Real */ ae_matrix* x,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void rmatrixresize(/* Real */ ae_matrix* x, void rmatrixresize(/* Real */ ae_matrix* x,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void imatrixresize(/* Integer */ ae_matrix* x,
ae_int_t m,
ae_int_t n,
ae_state *_state);
ae_bool isfinitevector(/* Real */ ae_vector* x, ae_bool isfinitevector(/* Real */ ae_vector* x,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
ae_bool isfinitecvector(/* Complex */ ae_vector* z, ae_bool isfinitecvector(/* Complex */ ae_vector* z,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
ae_bool apservisfinitematrix(/* Real */ ae_matrix* x, ae_bool apservisfinitematrix(/* Real */ ae_matrix* x,
ae_int_t m, ae_int_t m,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
skipping to change at line 231 skipping to change at line 261
double safepythag2(double x, double y, ae_state *_state); double safepythag2(double x, double y, ae_state *_state);
double safepythag3(double x, double y, double z, ae_state *_state); double safepythag3(double x, double y, double z, ae_state *_state);
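safepythag2 and safepythag3 compute sqrt(x^2+y^2) and sqrt(x^2+y^2+z^2) without
intermediate overflow or underflow. A minimal sketch of the standard scaling
trick the names imply (an illustration only, not ALGLIB's actual implementation):

#include <cmath>
#include <algorithm>

/* overflow-safe hypot: divide by the largest magnitude so both squared
   terms stay in [0,1] before the square root is taken */
double safe_pythag2_sketch(double x, double y)
{
    double ax = std::fabs(x), ay = std::fabs(y);
    double w = std::max(ax, ay);
    if( w==0.0 )
        return 0.0;                      /* avoid 0/0 */
    double xs = ax/w, ys = ay/w;
    return w*std::sqrt(xs*xs + ys*ys);   /* squares cannot overflow */
}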
ae_int_t saferdiv(double x, double y, double* r, ae_state *_state); ae_int_t saferdiv(double x, double y, double* r, ae_state *_state);
double safeminposrv(double x, double y, double v, ae_state *_state); double safeminposrv(double x, double y, double v, ae_state *_state);
void apperiodicmap(double* x, void apperiodicmap(double* x,
double a, double a,
double b, double b,
double* k, double* k,
ae_state *_state); ae_state *_state);
double randomnormal(ae_state *_state); double randomnormal(ae_state *_state);
void randomunit(ae_int_t n, /* Real */ ae_vector* x, ae_state *_state);
void inc(ae_int_t* v, ae_state *_state);
void dec(ae_int_t* v, ae_state *_state);
void countdown(ae_int_t* v, ae_state *_state);
double boundval(double x, double b1, double b2, ae_state *_state); double boundval(double x, double b1, double b2, ae_state *_state);
void alloccomplex(ae_serializer* s, ae_complex v, ae_state *_state); void alloccomplex(ae_serializer* s, ae_complex v, ae_state *_state);
void serializecomplex(ae_serializer* s, ae_complex v, ae_state *_state); void serializecomplex(ae_serializer* s, ae_complex v, ae_state *_state);
ae_complex unserializecomplex(ae_serializer* s, ae_state *_state); ae_complex unserializecomplex(ae_serializer* s, ae_state *_state);
void allocrealarray(ae_serializer* s, void allocrealarray(ae_serializer* s,
/* Real */ ae_vector* v, /* Real */ ae_vector* v,
ae_int_t n, ae_int_t n,
ae_state *_state); ae_state *_state);
void serializerealarray(ae_serializer* s, void serializerealarray(ae_serializer* s,
/* Real */ ae_vector* v, /* Real */ ae_vector* v,
skipping to change at line 286 skipping to change at line 320
void copyrealmatrix(/* Real */ ae_matrix* src, void copyrealmatrix(/* Real */ ae_matrix* src,
/* Real */ ae_matrix* dst, /* Real */ ae_matrix* dst,
ae_state *_state); ae_state *_state);
ae_int_t recsearch(/* Integer */ ae_vector* a, ae_int_t recsearch(/* Integer */ ae_vector* a,
ae_int_t nrec, ae_int_t nrec,
ae_int_t nheader, ae_int_t nheader,
ae_int_t i0, ae_int_t i0,
ae_int_t i1, ae_int_t i1,
/* Integer */ ae_vector* b, /* Integer */ ae_vector* b,
ae_state *_state); ae_state *_state);
void splitlengtheven(ae_int_t tasksize,
ae_int_t* task0,
ae_int_t* task1,
ae_state *_state);
void splitlength(ae_int_t tasksize,
ae_int_t chunksize,
ae_int_t* task0,
ae_int_t* task1,
ae_state *_state);
ae_bool _apbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic) ; ae_bool _apbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic) ;
ae_bool _apbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_b ool make_automatic); ae_bool _apbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_b ool make_automatic);
void _apbuffers_clear(void* _p); void _apbuffers_clear(void* _p);
void _apbuffers_destroy(void* _p); void _apbuffers_destroy(void* _p);
ae_bool _sboolean_init(void* _p, ae_state *_state, ae_bool make_automatic); ae_bool _sboolean_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _sboolean_init_copy(void* _dst, void* _src, ae_state *_state, ae_bo ol make_automatic); ae_bool _sboolean_init_copy(void* _dst, void* _src, ae_state *_state, ae_bo ol make_automatic);
void _sboolean_clear(void* _p); void _sboolean_clear(void* _p);
void _sboolean_destroy(void* _p); void _sboolean_destroy(void* _p);
ae_bool _sbooleanarray_init(void* _p, ae_state *_state, ae_bool make_automa tic); ae_bool _sbooleanarray_init(void* _p, ae_state *_state, ae_bool make_automa tic);
ae_bool _sbooleanarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); ae_bool _sbooleanarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
skipping to change at line 322 skipping to change at line 365
void _srealarray_clear(void* _p); void _srealarray_clear(void* _p);
void _srealarray_destroy(void* _p); void _srealarray_destroy(void* _p);
ae_bool _scomplex_init(void* _p, ae_state *_state, ae_bool make_automatic); ae_bool _scomplex_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _scomplex_init_copy(void* _dst, void* _src, ae_state *_state, ae_bo ol make_automatic); ae_bool _scomplex_init_copy(void* _dst, void* _src, ae_state *_state, ae_bo ol make_automatic);
void _scomplex_clear(void* _p); void _scomplex_clear(void* _p);
void _scomplex_destroy(void* _p); void _scomplex_destroy(void* _p);
ae_bool _scomplexarray_init(void* _p, ae_state *_state, ae_bool make_automa tic); ae_bool _scomplexarray_init(void* _p, ae_state *_state, ae_bool make_automa tic);
ae_bool _scomplexarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); ae_bool _scomplexarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _scomplexarray_clear(void* _p); void _scomplexarray_clear(void* _p);
void _scomplexarray_destroy(void* _p); void _scomplexarray_destroy(void* _p);
ae_int_t getrdfserializationcode(ae_state *_state);
ae_int_t getkdtreeserializationcode(ae_state *_state);
ae_int_t getmlpserializationcode(ae_state *_state);
ae_int_t getmlpeserializationcode(ae_state *_state);
ae_int_t getrbfserializationcode(ae_state *_state);
void tagsort(/* Real */ ae_vector* a, void tagsort(/* Real */ ae_vector* a,
ae_int_t n, ae_int_t n,
/* Integer */ ae_vector* p1, /* Integer */ ae_vector* p1,
/* Integer */ ae_vector* p2, /* Integer */ ae_vector* p2,
ae_state *_state); ae_state *_state);
void tagsortbuf(/* Real */ ae_vector* a, void tagsortbuf(/* Real */ ae_vector* a,
ae_int_t n, ae_int_t n,
/* Integer */ ae_vector* p1, /* Integer */ ae_vector* p1,
/* Integer */ ae_vector* p2, /* Integer */ ae_vector* p2,
apbuffers* buf, apbuffers* buf,
skipping to change at line 380 skipping to change at line 428
ae_int_t lowerbound(/* Real */ ae_vector* a, ae_int_t lowerbound(/* Real */ ae_vector* a,
ae_int_t n, ae_int_t n,
double t, double t,
ae_state *_state); ae_state *_state);
ae_int_t upperbound(/* Real */ ae_vector* a, ae_int_t upperbound(/* Real */ ae_vector* a,
ae_int_t n, ae_int_t n,
double t, double t,
ae_state *_state); ae_state *_state);
void rankx(/* Real */ ae_vector* x, void rankx(/* Real */ ae_vector* x,
ae_int_t n, ae_int_t n,
ae_bool iscentered,
apbuffers* buf, apbuffers* buf,
ae_state *_state); ae_state *_state);
ae_bool cmatrixrank1f(ae_int_t m, ae_bool cmatrixrank1f(ae_int_t m,
ae_int_t n, ae_int_t n,
/* Complex */ ae_matrix* a, /* Complex */ ae_matrix* a,
ae_int_t ia, ae_int_t ia,
ae_int_t ja, ae_int_t ja,
/* Complex */ ae_vector* u, /* Complex */ ae_vector* u,
ae_int_t iu, ae_int_t iu,
/* Complex */ ae_vector* v, /* Complex */ ae_vector* v,
skipping to change at line 532 skipping to change at line 581
ae_int_t optypea, ae_int_t optypea,
/* Complex */ ae_matrix* b, /* Complex */ ae_matrix* b,
ae_int_t ib, ae_int_t ib,
ae_int_t jb, ae_int_t jb,
ae_int_t optypeb, ae_int_t optypeb,
ae_complex beta, ae_complex beta,
/* Complex */ ae_matrix* c, /* Complex */ ae_matrix* c,
ae_int_t ic, ae_int_t ic,
ae_int_t jc, ae_int_t jc,
ae_state *_state); ae_state *_state);
void cmatrixgemmk(ae_int_t m,
ae_int_t n,
ae_int_t k,
ae_complex alpha,
/* Complex */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
ae_int_t optypea,
/* Complex */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
ae_int_t optypeb,
ae_complex beta,
/* Complex */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_state *_state);
void rmatrixgemmk(ae_int_t m,
ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
ae_int_t optypea,
/* Real */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
ae_int_t optypeb,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_state *_state);
void rmatrixgemmk44v00(ae_int_t m,
ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
/* Real */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_state *_state);
void rmatrixgemmk44v01(ae_int_t m,
ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
/* Real */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_state *_state);
void rmatrixgemmk44v10(ae_int_t m,
ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
/* Real */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_state *_state);
void rmatrixgemmk44v11(ae_int_t m,
ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
/* Real */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_state *_state);
ae_bool rmatrixsyrkmkl(ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
ae_int_t optypea,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_bool isupper,
ae_state *_state);
ae_bool rmatrixgemmmkl(ae_int_t m,
ae_int_t n,
ae_int_t k,
double alpha,
/* Real */ ae_matrix* a,
ae_int_t ia,
ae_int_t ja,
ae_int_t optypea,
/* Real */ ae_matrix* b,
ae_int_t ib,
ae_int_t jb,
ae_int_t optypeb,
double beta,
/* Real */ ae_matrix* c,
ae_int_t ic,
ae_int_t jc,
ae_state *_state);
double vectornorm2(/* Real */ ae_vector* x, double vectornorm2(/* Real */ ae_vector* x,
ae_int_t i1, ae_int_t i1,
ae_int_t i2, ae_int_t i2,
ae_state *_state); ae_state *_state);
ae_int_t vectoridxabsmax(/* Real */ ae_vector* x, ae_int_t vectoridxabsmax(/* Real */ ae_vector* x,
ae_int_t i1, ae_int_t i1,
ae_int_t i2, ae_int_t i2,
ae_state *_state); ae_state *_state);
ae_int_t columnidxabsmax(/* Real */ ae_matrix* x, ae_int_t columnidxabsmax(/* Real */ ae_matrix* x,
ae_int_t i1, ae_int_t i1,
skipping to change at line 776 skipping to change at line 949
ae_state *_state); ae_state *_state);
ae_bool cmatrixscaledtrsafesolve(/* Complex */ ae_matrix* a, ae_bool cmatrixscaledtrsafesolve(/* Complex */ ae_matrix* a,
double sa, double sa,
ae_int_t n, ae_int_t n,
/* Complex */ ae_vector* x, /* Complex */ ae_vector* x,
ae_bool isupper, ae_bool isupper,
ae_int_t trans, ae_int_t trans,
ae_bool isunit, ae_bool isunit,
double maxgrowth, double maxgrowth,
ae_state *_state); ae_state *_state);
void hpcpreparechunkedgradient(/* Real */ ae_vector* weights,
ae_int_t wcount,
ae_int_t ntotal,
ae_int_t nin,
ae_int_t nout,
mlpbuffers* buf,
ae_state *_state);
void hpcfinalizechunkedgradient(mlpbuffers* buf,
/* Real */ ae_vector* grad,
ae_state *_state);
ae_bool hpcchunkedgradient(/* Real */ ae_vector* weights,
/* Integer */ ae_vector* structinfo,
/* Real */ ae_vector* columnmeans,
/* Real */ ae_vector* columnsigmas,
/* Real */ ae_matrix* xy,
ae_int_t cstart,
ae_int_t csize,
/* Real */ ae_vector* batch4buf,
/* Real */ ae_vector* hpcbuf,
double* e,
ae_bool naturalerrorfunc,
ae_state *_state);
ae_bool hpcchunkedprocess(/* Real */ ae_vector* weights,
/* Integer */ ae_vector* structinfo,
/* Real */ ae_vector* columnmeans,
/* Real */ ae_vector* columnsigmas,
/* Real */ ae_matrix* xy,
ae_int_t cstart,
ae_int_t csize,
/* Real */ ae_vector* batch4buf,
/* Real */ ae_vector* hpcbuf,
ae_state *_state);
ae_bool _mlpbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpbuffers_clear(void* _p);
void _mlpbuffers_destroy(void* _p);
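The hpc* declarations above outline a prepare / process-chunks / finalize
workflow for batched MLP gradient evaluation, with mlpbuffers carrying the
per-chunk scratch storage. A hedged sketch of how a caller might drive it,
based only on the signatures shown (the exact calling convention, and the
meaning of hpcchunkedgradient's return value, are assumptions):

/* stream the dataset through the chunked gradient kernel, then fold the
   accumulated per-chunk buffers into grad */
static void chunked_gradient_sketch(ae_vector *weights, ae_int_t wcount,
    ae_int_t ntotal, ae_int_t nin, ae_int_t nout,
    ae_vector *structinfo, ae_vector *columnmeans, ae_vector *columnsigmas,
    ae_matrix *xy, ae_int_t npoints, ae_int_t chunksize,
    ae_vector *grad, double *e, ae_state *_state)
{
    mlpbuffers buf;
    _mlpbuffers_init(&buf, _state, ae_false);
    hpcpreparechunkedgradient(weights, wcount, ntotal, nin, nout, &buf, _state);
    *e = 0.0;
    for(ae_int_t cstart=0; cstart<npoints; cstart+=chunksize)
    {
        ae_int_t csize = npoints-cstart<chunksize ? npoints-cstart : chunksize;
        if( !hpcchunkedgradient(weights, structinfo, columnmeans, columnsigmas,
                xy, cstart, csize, &buf.batch4buf, &buf.hpcbuf, e,
                ae_false, _state) )
            break;   /* presumably: no accelerated path, use generic code */
    }
    hpcfinalizechunkedgradient(&buf, grad, _state);
    _mlpbuffers_clear(&buf);
}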
void xdot(/* Real */ ae_vector* a, void xdot(/* Real */ ae_vector* a,
/* Real */ ae_vector* b, /* Real */ ae_vector* b,
ae_int_t n, ae_int_t n,
/* Real */ ae_vector* temp, /* Real */ ae_vector* temp,
double* r, double* r,
double* rerr, double* rerr,
ae_state *_state); ae_state *_state);
void xcdot(/* Complex */ ae_vector* a, void xcdot(/* Complex */ ae_vector* a,
/* Complex */ ae_vector* b, /* Complex */ ae_vector* b,
ae_int_t n, ae_int_t n,
skipping to change at line 831 skipping to change at line 1040
double* f, double* f,
ae_state *_state); ae_state *_state);
ae_bool _linminstate_init(void* _p, ae_state *_state, ae_bool make_automati c); ae_bool _linminstate_init(void* _p, ae_state *_state, ae_bool make_automati c);
ae_bool _linminstate_init_copy(void* _dst, void* _src, ae_state *_state, ae _bool make_automatic); ae_bool _linminstate_init_copy(void* _dst, void* _src, ae_state *_state, ae _bool make_automatic);
void _linminstate_clear(void* _p); void _linminstate_clear(void* _p);
void _linminstate_destroy(void* _p); void _linminstate_destroy(void* _p);
ae_bool _armijostate_init(void* _p, ae_state *_state, ae_bool make_automati c); ae_bool _armijostate_init(void* _p, ae_state *_state, ae_bool make_automati c);
ae_bool _armijostate_init_copy(void* _dst, void* _src, ae_state *_state, ae _bool make_automatic); ae_bool _armijostate_init_copy(void* _dst, void* _src, ae_state *_state, ae _bool make_automatic);
void _armijostate_clear(void* _p); void _armijostate_clear(void* _p);
void _armijostate_destroy(void* _p); void _armijostate_destroy(void* _p);
void ftbasegeneratecomplexfftplan(ae_int_t n, void findprimitiverootandinverse(ae_int_t n,
ftplan* plan, ae_int_t* proot,
ae_int_t* invproot,
ae_state *_state);
void ftcomplexfftplan(ae_int_t n,
ae_int_t k,
fasttransformplan* plan,
ae_state *_state); ae_state *_state);
void ftbasegeneraterealfftplan(ae_int_t n, ftplan* plan, ae_state *_state); void ftapplyplan(fasttransformplan* plan,
void ftbasegeneraterealfhtplan(ae_int_t n, ftplan* plan, ae_state *_state); /* Real */ ae_vector* a,
void ftbaseexecuteplan(/* Real */ ae_vector* a, ae_int_t offsa,
ae_int_t aoffset, ae_int_t repcnt,
ae_int_t n,
ftplan* plan,
ae_state *_state);
void ftbaseexecuteplanrec(/* Real */ ae_vector* a,
ae_int_t aoffset,
ftplan* plan,
ae_int_t entryoffset,
ae_int_t stackptr,
ae_state *_state); ae_state *_state);
void ftbasefactorize(ae_int_t n, void ftbasefactorize(ae_int_t n,
ae_int_t tasktype, ae_int_t tasktype,
ae_int_t* n1, ae_int_t* n1,
ae_int_t* n2, ae_int_t* n2,
ae_state *_state); ae_state *_state);
ae_bool ftbaseissmooth(ae_int_t n, ae_state *_state); ae_bool ftbaseissmooth(ae_int_t n, ae_state *_state);
ae_int_t ftbasefindsmooth(ae_int_t n, ae_state *_state); ae_int_t ftbasefindsmooth(ae_int_t n, ae_state *_state);
ae_int_t ftbasefindsmootheven(ae_int_t n, ae_state *_state); ae_int_t ftbasefindsmootheven(ae_int_t n, ae_state *_state);
double ftbasegetflopestimate(ae_int_t n, ae_state *_state); double ftbasegetflopestimate(ae_int_t n, ae_state *_state);
ae_bool _ftplan_init(void* _p, ae_state *_state, ae_bool make_automatic); ae_bool _fasttransformplan_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _ftplan_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); ae_bool _fasttransformplan_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _ftplan_clear(void* _p); void _fasttransformplan_clear(void* _p);
void _ftplan_destroy(void* _p); void _fasttransformplan_destroy(void* _p);
double nulog1p(double x, ae_state *_state); double nulog1p(double x, ae_state *_state);
double nuexpm1(double x, ae_state *_state); double nuexpm1(double x, ae_state *_state);
double nucosm1(double x, ae_state *_state); double nucosm1(double x, ae_state *_state);
} }
#endif #endif
 End of changes. 13 change blocks. 
29 lines changed or deleted 238 lines changed or added


 alglibmisc.h   alglibmisc.h 
skipping to change at line 35 skipping to change at line 35
// //
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES) // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
typedef struct typedef struct
{ {
ae_int_t s1; ae_int_t s1;
ae_int_t s2; ae_int_t s2;
double v;
ae_int_t magicv; ae_int_t magicv;
} hqrndstate; } hqrndstate;
typedef struct typedef struct
{ {
ae_int_t n; ae_int_t n;
ae_int_t nx; ae_int_t nx;
ae_int_t ny; ae_int_t ny;
ae_int_t normtype; ae_int_t normtype;
ae_matrix xy; ae_matrix xy;
ae_vector tags; ae_vector tags;
skipping to change at line 163 skipping to change at line 162
State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). State structure must be initialized with HQRNDRandomize() or HQRNDSeed().
-- ALGLIB -- -- ALGLIB --
Copyright 02.12.2009 by Bochkanov Sergey Copyright 02.12.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double hqrnduniformr(const hqrndstate &state); double hqrnduniformr(const hqrndstate &state);
/************************************************************************* /*************************************************************************
This function generates a random integer in [0, N) This function generates a random integer in [0, N)
1. N must be less than HQRNDMax-1. 1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed()
2. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() 2. N can be any positive number except for very large numbers:
* close to 2^31 on 32-bit systems
* close to 2^62 on 64-bit systems
An exception will be generated if N is too large.
-- ALGLIB -- -- ALGLIB --
Copyright 02.12.2009 by Bochkanov Sergey Copyright 02.12.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
ae_int_t hqrnduniformi(const hqrndstate &state, const ae_int_t n); ae_int_t hqrnduniformi(const hqrndstate &state, const ae_int_t n);
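A short usage sketch of this RNG interface; hqrndrandomize() and hqrndseed()
are the initializers named in the comments above (their exact signatures are
assumed here):

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    alglib::hqrndstate state;
    alglib::hqrndrandomize(state);            /* seed the generator */
    /* N=6 is far below the 2^31 / 2^62 limits described above */
    alglib::ae_int_t die = alglib::hqrnduniformi(state, 6) + 1;
    double u = alglib::hqrnduniformr(state);  /* uniform real in (0,1) */
    std::printf("%d %f\n", (int)die, u);
    return 0;
}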
/************************************************************************* /*************************************************************************
Random number generator: normal numbers Random number generator: normal numbers
This function generates one random number from normal distribution. This function generates one random number from normal distribution.
 End of changes. 2 change blocks. 
3 lines changed or deleted 5 lines changed or added


 ap.h   ap.h 
skipping to change at line 38 skipping to change at line 38
#ifdef __BORLANDC__ #ifdef __BORLANDC__
#include <list.h> #include <list.h>
#include <vector.h> #include <vector.h>
#else #else
#include <list> #include <list>
#include <vector> #include <vector>
#endif #endif
#define AE_USE_CPP #define AE_USE_CPP
/* Definitions */
/////////////////////////////////////////////////////////////////////////
//
// THIS SECTION CONTAINS DECLARATIONS FOR BASIC FUNCTIONALITY
// LIKE MEMORY MANAGEMENT FOR VECTORS/MATRICES WHICH IS SHARED
// BETWEEN C++ AND PURE C LIBRARIES
//
/////////////////////////////////////////////////////////////////////////
namespace alglib_impl
{
#include <stdlib.h>
#include <string.h>
#include <setjmp.h>
#include <math.h>
#include <stddef.h>
/*
* definitions
*/
#define AE_UNKNOWN 0 #define AE_UNKNOWN 0
#define AE_MSVC 1 #define AE_MSVC 1
#define AE_GNUC 2 #define AE_GNUC 2
#define AE_SUNC 3 #define AE_SUNC 3
#define AE_INTEL 1 #define AE_INTEL 1
#define AE_SPARC 2 #define AE_SPARC 2
#define AE_WINDOWS 1 #define AE_WINDOWS 1
#define AE_POSIX 2 #define AE_POSIX 2
#define AE_LOCK_ALIGNMENT 16 #define AE_LOCK_ALIGNMENT 16
/* /* in case no OS is defined, use AE_UNKNOWN */
* in case no OS is defined, use AE_UNKNOWN
*/
#ifndef AE_OS #ifndef AE_OS
#define AE_OS AE_UNKNOWN #define AE_OS AE_UNKNOWN
#endif #endif
/* /* automatically determine compiler */
* automatically determine compiler
*/
#define AE_COMPILER AE_UNKNOWN #define AE_COMPILER AE_UNKNOWN
#ifdef __GNUC__ #ifdef __GNUC__
#undef AE_COMPILER #undef AE_COMPILER
#define AE_COMPILER AE_GNUC #define AE_COMPILER AE_GNUC
#endif #endif
#if defined(__SUNPRO_C)||defined(__SUNPRO_CC) #if defined(__SUNPRO_C)||defined(__SUNPRO_CC)
#undef AE_COMPILER #undef AE_COMPILER
#define AE_COMPILER AE_SUNC #define AE_COMPILER AE_SUNC
#endif #endif
#ifdef _MSC_VER #ifdef _MSC_VER
#undef AE_COMPILER #undef AE_COMPILER
#define AE_COMPILER AE_MSVC #define AE_COMPILER AE_MSVC
#endif #endif
/* /* now we are ready to include headers */
* if we work under C++ environment, define several conditions #include <stdlib.h>
*/ #include <stdio.h>
#ifdef AE_USE_CPP #include <string.h>
#define AE_USE_CPP_BOOL #include <setjmp.h>
#define AE_USE_CPP_ERROR_HANDLING #include <math.h>
#define AE_USE_CPP_SERIALIZATION #include <stddef.h>
#endif
/*
* Include SMP headers
*/
#if AE_OS==AE_WINDOWS #if AE_OS==AE_WINDOWS
#include <windows.h> #include <windows.h>
#include <process.h> #include <process.h>
#elif AE_OS==AE_POSIX #elif AE_OS==AE_POSIX
#include <time.h> #include <time.h>
#include <unistd.h> #include <unistd.h>
#include <pthread.h> #include <pthread.h>
#include <sched.h>
#endif #endif
/*
 * define ae_int32_t, ae_int64_t, ae_int_t, ae_bool, ae_complex, ae_error_type and ae_datatype
*/
#if defined(AE_HAVE_STDINT) #if defined(AE_HAVE_STDINT)
#include <stdint.h> #include <stdint.h>
#endif #endif
/*
* SSE2 intrinsics
*
* Preprocessor directives below:
* - include headers for SSE2 intrinsics
* - define AE_HAS_SSE2_INTRINSICS definition
*
* These actions are performed when we have:
* - x86 architecture definition (AE_CPU==AE_INTEL)
* - compiler which supports intrinsics
*
* Presence of AE_HAS_SSE2_INTRINSICS does NOT mean that our CPU
* actually supports SSE2 - such things should be determined at runtime
* with ae_cpuid() call. It means that we are working under Intel and
 * our compiler can issue SSE2-capable code.
*
*/
#if defined(AE_CPU)
#if AE_CPU==AE_INTEL
#if AE_COMPILER==AE_MSVC
#include <emmintrin.h>
#define AE_HAS_SSE2_INTRINSICS
#endif
#if AE_COMPILER==AE_GNUC
#include <xmmintrin.h>
#define AE_HAS_SSE2_INTRINSICS
#endif
#if AE_COMPILER==AE_SUNC
#include <xmmintrin.h>
#include <emmintrin.h>
#define AE_HAS_SSE2_INTRINSICS
#endif
#endif
#endif
/* Debugging helpers for Windows */
#ifdef AE_DEBUG4WINDOWS
#include <windows.h>
#include <stdio.h>
#endif
/////////////////////////////////////////////////////////////////////////
//
// THIS SECTION CONTAINS DECLARATIONS FOR BASIC FUNCTIONALITY
// LIKE MEMORY MANAGEMENT FOR VECTORS/MATRICES WHICH IS SHARED
// BETWEEN C++ AND PURE C LIBRARIES
//
/////////////////////////////////////////////////////////////////////////
namespace alglib_impl
{
/* if we work under C++ environment, define several conditions */
#ifdef AE_USE_CPP
#define AE_USE_CPP_BOOL
#define AE_USE_CPP_ERROR_HANDLING
#define AE_USE_CPP_SERIALIZATION
#endif
/*
 * define ae_int32_t, ae_int64_t, ae_int_t, ae_bool, ae_complex, ae_error_type and ae_datatype
*/
#if defined(AE_INT32_T) #if defined(AE_INT32_T)
typedef AE_INT32_T ae_int32_t; typedef AE_INT32_T ae_int32_t;
#endif #endif
#if defined(AE_HAVE_STDINT) && !defined(AE_INT32_T) #if defined(AE_HAVE_STDINT) && !defined(AE_INT32_T)
typedef int32_t ae_int32_t; typedef int32_t ae_int32_t;
#endif #endif
#if !defined(AE_HAVE_STDINT) && !defined(AE_INT32_T) #if !defined(AE_HAVE_STDINT) && !defined(AE_INT32_T)
#if AE_COMPILER==AE_MSVC #if AE_COMPILER==AE_MSVC
typedef _int32 ae_int32_t; typedef _int32 ae_int32_t;
#endif #endif
skipping to change at line 164 skipping to change at line 197
#if !defined(AE_USE_CPP_BOOL) #if !defined(AE_USE_CPP_BOOL)
#define ae_bool char #define ae_bool char
#define ae_true 1 #define ae_true 1
#define ae_false 0 #define ae_false 0
#else #else
#define ae_bool bool #define ae_bool bool
#define ae_true true #define ae_true true
#define ae_false false #define ae_false false
#endif #endif
/*
* SSE2 intrinsics
*
* Preprocessor directives below:
* - include headers for SSE2 intrinsics
* - define AE_HAS_SSE2_INTRINSICS definition
*
* These actions are performed when we have:
* - x86 architecture definition (AE_CPU==AE_INTEL)
* - compiler which supports intrinsics
*
* Presence of AE_HAS_SSE2_INTRINSICS does NOT mean that our CPU
* actually supports SSE2 - such things should be determined at runtime
* with ae_cpuid() call. It means that we are working under Intel and
 * our compiler can issue SSE2-capable code.
*
*/
#if defined(AE_CPU)
#if AE_CPU==AE_INTEL
#ifdef AE_USE_CPP
} // end of namespace declaration, subsequent includes must be out of namespace
#endif
#if AE_COMPILER==AE_MSVC
#include <emmintrin.h>
#define AE_HAS_SSE2_INTRINSICS
#endif
#if AE_COMPILER==AE_GNUC
#include <xmmintrin.h>
#define AE_HAS_SSE2_INTRINSICS
#endif
#if AE_COMPILER==AE_SUNC
#include <xmmintrin.h>
#include <emmintrin.h>
#define AE_HAS_SSE2_INTRINSICS
#endif
#ifdef AE_USE_CPP
namespace alglib_impl { // namespace declaration continued
#endif
#endif
#endif
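Since AE_HAS_SSE2_INTRINSICS only certifies the compiler, an SSE2 code path
still needs the runtime ae_cpuid() check described in the comment above. A
sketch of that guard, assuming CPU_SSE2 is the ap.h flag tested against
ae_cpuid()'s result:

#if defined(AE_HAS_SSE2_INTRINSICS)
/* runtime-dispatched kernel: the SSE2 body runs only when the CPU reports
   SSE2 support, otherwise the scalar fallback is used */
static void scale_vector_sketch(double *p, alglib_impl::ae_int_t n, double v)
{
    if( alglib_impl::ae_cpuid() & alglib_impl::CPU_SSE2 )
    {
        __m128d vv = _mm_set1_pd(v);
        alglib_impl::ae_int_t i = 0;
        for(; i+1<n; i+=2)                     /* two doubles per step */
            _mm_storeu_pd(p+i, _mm_mul_pd(_mm_loadu_pd(p+i), vv));
        for(; i<n; i++)
            p[i] *= v;
        return;
    }
    for(alglib_impl::ae_int_t i=0; i<n; i++)   /* scalar fallback */
        p[i] *= v;
}
#endif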
typedef struct { double x, y; } ae_complex; typedef struct { double x, y; } ae_complex;
typedef enum typedef enum
{ {
ERR_OK = 0, ERR_OK = 0,
ERR_OUT_OF_MEMORY = 1, ERR_OUT_OF_MEMORY = 1,
ERR_XARRAY_TOO_LARGE = 2, ERR_XARRAY_TOO_LARGE = 2,
ERR_ASSERTION_FAILED = 3 ERR_ASSERTION_FAILED = 3
} ae_error_type; } ae_error_type;
skipping to change at line 481 skipping to change at line 467
{ {
/* pointer to subscriber; all changes in ptr are translated to subscrib er */ /* pointer to subscriber; all changes in ptr are translated to subscrib er */
void **subscriber; void **subscriber;
/* pointer to object */ /* pointer to object */
void *ptr; void *ptr;
/* whether smart pointer owns ptr */ /* whether smart pointer owns ptr */
ae_bool is_owner; ae_bool is_owner;
    /* whether object pointed by ptr is dynamic - clearing such object requires
       BOTH calling destructor function AND calling ae_free for memory occupied
       by object. */
ae_bool is_dynamic;
/* destructor function for pointer; clears all dynamically allocated me mory */ /* destructor function for pointer; clears all dynamically allocated me mory */
void (*destroy)(void*); void (*destroy)(void*);
/* frame entry; used to ensure automatic deallocation of smart pointer in case of exception/exit */ /* frame entry; used to ensure automatic deallocation of smart pointer in case of exception/exit */
ae_dyn_block frame_entry; ae_dyn_block frame_entry;
} ae_smart_ptr; } ae_smart_ptr;
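The new is_dynamic field (mirrored by the extra parameter of
ae_smart_ptr_assign() below) separates objects that need only their destroy()
callback from heap-allocated ones that additionally need ae_free. A minimal
sketch, assuming an initialized ae_state and borrowing mlpbuffers from
alglibinternal.h purely for illustration:

void smart_ptr_sketch(alglib_impl::ae_state *_state)
{
    void *subscriber = NULL;
    alglib_impl::ae_smart_ptr ptr;
    alglib_impl::ae_smart_ptr_init(&ptr, &subscriber, _state, ae_true);

    /* dynamically allocated object: is_owner=true AND is_dynamic=true,
       so clearing the pointer destroys the object and frees its memory */
    alglib_impl::mlpbuffers *buf = (alglib_impl::mlpbuffers*)
        alglib_impl::ae_malloc(sizeof(alglib_impl::mlpbuffers), _state);
    alglib_impl::_mlpbuffers_init(buf, _state, ae_false);
    alglib_impl::ae_smart_ptr_assign(&ptr, buf, ae_true, ae_true,
                                     alglib_impl::_mlpbuffers_destroy);
    /* ... work with the object through subscriber ... */
    alglib_impl::ae_smart_ptr_clear(&ptr);   /* destroy() + ae_free() */
}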
/************************************************************************* /*************************************************************************
Lock. Lock.
skipping to change at line 612 skipping to change at line 602
ae_bool ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state , ae_bool make_automatic); ae_bool ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state , ae_bool make_automatic);
void ae_matrix_init_from_x(ae_matrix *dst, x_matrix *src, ae_state *state, ae_bool make_automatic); void ae_matrix_init_from_x(ae_matrix *dst, x_matrix *src, ae_state *state, ae_bool make_automatic);
ae_bool ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state); ae_bool ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state);
void ae_matrix_clear(ae_matrix *dst); void ae_matrix_clear(ae_matrix *dst);
void ae_matrix_destroy(ae_matrix *dst); void ae_matrix_destroy(ae_matrix *dst);
void ae_swap_matrices(ae_matrix *mat1, ae_matrix *mat2); void ae_swap_matrices(ae_matrix *mat1, ae_matrix *mat2);
ae_bool ae_smart_ptr_init(ae_smart_ptr *dst, void **subscriber, ae_state *s tate, ae_bool make_automatic); ae_bool ae_smart_ptr_init(ae_smart_ptr *dst, void **subscriber, ae_state *s tate, ae_bool make_automatic);
void ae_smart_ptr_clear(void *_dst); /* accepts ae_smart_ptr* */ void ae_smart_ptr_clear(void *_dst); /* accepts ae_smart_ptr* */
void ae_smart_ptr_destroy(void *_dst); void ae_smart_ptr_destroy(void *_dst);
void ae_smart_ptr_assign(ae_smart_ptr *dst, void *new_ptr, ae_bool is_owner , void (*destroy)(void*)); void ae_smart_ptr_assign(ae_smart_ptr *dst, void *new_ptr, ae_bool is_owner , ae_bool is_dynamic, void (*destroy)(void*));
void ae_smart_ptr_release(ae_smart_ptr *dst); void ae_smart_ptr_release(ae_smart_ptr *dst);
void ae_yield();
void ae_init_lock(ae_lock *lock); void ae_init_lock(ae_lock *lock);
void ae_acquire_lock(ae_lock *lock); void ae_acquire_lock(ae_lock *lock);
void ae_release_lock(ae_lock *lock); void ae_release_lock(ae_lock *lock);
void ae_free_lock(ae_lock *lock); void ae_free_lock(ae_lock *lock);
ae_bool ae_shared_pool_init(void *_dst, ae_state *state, ae_bool make_autom atic); ae_bool ae_shared_pool_init(void *_dst, ae_state *state, ae_bool make_autom atic);
ae_bool ae_shared_pool_init_copy(void *_dst, void *_src, ae_state *state, a e_bool make_automatic); ae_bool ae_shared_pool_init_copy(void *_dst, void *_src, ae_state *state, a e_bool make_automatic);
void ae_shared_pool_clear(void *dst); void ae_shared_pool_clear(void *dst);
void ae_shared_pool_destroy(void *dst); void ae_shared_pool_destroy(void *dst);
ae_bool ae_shared_pool_is_initialized(void *_dst);
void ae_shared_pool_set_seed( void ae_shared_pool_set_seed(
ae_shared_pool *dst, ae_shared_pool *dst,
void *seed_object, void *seed_object,
ae_int_t size_of_object, ae_int_t size_of_object,
ae_bool (*init)(void* dst, ae_state* state, ae_bool make_automa tic), ae_bool (*init)(void* dst, ae_state* state, ae_bool make_automa tic),
ae_bool (*init_copy)(void* dst, void* src, ae_state* state, ae_ bool make_automatic), ae_bool (*init_copy)(void* dst, void* src, ae_state* state, ae_ bool make_automatic),
void (*destroy)(void* ptr), void (*destroy)(void* ptr),
ae_state *state); ae_state *state);
void ae_shared_pool_retrieve( void ae_shared_pool_retrieve(
ae_shared_pool *pool, ae_shared_pool *pool,
skipping to change at line 861 skipping to change at line 853
/************************************************************************ /************************************************************************
debug functions (must be turned on by preprocessor definitions): debug functions (must be turned on by preprocessor definitions):
* tickcount(), which is a wrapper around GetTickCount() * tickcount(), which is a wrapper around GetTickCount()
* flushconsole(), flushes console * flushconsole(), flushes console
* ae_debugrng(), returns random number generated with high-quality random numbers generator * ae_debugrng(), returns random number generated with high-quality random numbers generator
* ae_set_seed(), sets seed of the debug RNG (NON-THREAD-SAFE!!!) * ae_set_seed(), sets seed of the debug RNG (NON-THREAD-SAFE!!!)
* ae_get_seed(), returns two seed values of the debug RNG (NON-THREAD-SAFE!!!) * ae_get_seed(), returns two seed values of the debug RNG (NON-THREAD-SAFE!!!)
************************************************************************/ ************************************************************************/
#ifdef AE_DEBUG4WINDOWS #ifdef AE_DEBUG4WINDOWS
#include <windows.h>
#include <stdio.h>
#define tickcount(s) GetTickCount()
#define flushconsole(s) fflush(stdout) #define flushconsole(s) fflush(stdout)
#define tickcount(s) _tickcount()
int _tickcount();
#endif #endif
#ifdef AE_DEBUG4POSIX #ifdef AE_DEBUG4POSIX
#define tickcount(s) PosixGetTickCount()
#define flushconsole(s) fflush(stdout) #define flushconsole(s) fflush(stdout)
int PosixGetTickCount(); #define tickcount(s) _tickcount()
int _tickcount();
#endif #endif
#ifdef AE_DEBUGRNG #ifdef AE_DEBUGRNG
ae_int_t ae_debugrng(); ae_int_t ae_debugrng();
void ae_set_seed(ae_int_t s0, ae_int_t s1); void ae_set_seed(ae_int_t s0, ae_int_t s1);
void ae_get_seed(ae_int_t *s0, ae_int_t *s1); void ae_get_seed(ae_int_t *s0, ae_int_t *s1);
#endif #endif
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
skipping to change at line 963 skipping to change at line 954
const alglib::complex operator-(const double& lhs, const alglib::complex& r hs); const alglib::complex operator-(const double& lhs, const alglib::complex& r hs);
const alglib::complex operator*(const alglib::complex& lhs, const alglib::c omplex& rhs); const alglib::complex operator*(const alglib::complex& lhs, const alglib::c omplex& rhs);
const alglib::complex operator*(const alglib::complex& lhs, const double& r hs); const alglib::complex operator*(const alglib::complex& lhs, const double& r hs);
const alglib::complex operator*(const double& lhs, const alglib::complex& r hs); const alglib::complex operator*(const double& lhs, const alglib::complex& r hs);
const alglib::complex operator/(const alglib::complex& lhs, const alglib::c omplex& rhs); const alglib::complex operator/(const alglib::complex& lhs, const alglib::c omplex& rhs);
const alglib::complex operator/(const double& lhs, const alglib::complex& r hs); const alglib::complex operator/(const double& lhs, const alglib::complex& r hs);
const alglib::complex operator/(const alglib::complex& lhs, const double& r hs); const alglib::complex operator/(const alglib::complex& lhs, const double& r hs);
double abscomplex(const alglib::complex &z); double abscomplex(const alglib::complex &z);
alglib::complex conj(const alglib::complex &z); alglib::complex conj(const alglib::complex &z);
alglib::complex csqr(const alglib::complex &z); alglib::complex csqr(const alglib::complex &z);
void setnworkers(alglib::ae_int_t nworkers);
/******************************************************************** /********************************************************************
Level 1 BLAS functions Level 1 BLAS functions
NOTES: NOTES:
* destination and source should NOT overlap * destination and source should NOT overlap
* stride is assumed to be positive, but it is not * stride is assumed to be positive, but it is not
assert'ed within function assert'ed within function
* conj_src parameter specifies whether complex source is conjugated * conj_src parameter specifies whether complex source is conjugated
before processing or not. Pass string which starts with 'N' or 'n' before processing or not. Pass string which starts with 'N' or 'n'
skipping to change at line 1055 skipping to change at line 1047
********************************************************************/ ********************************************************************/
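The NOTES above fix three conventions for these Level 1 kernels (whose
declarations are elided in this diff): operands must not overlap, strides are
taken as positive without being checked, and conjugation of a complex source
is requested through the first character of conj_src. A toy kernel
illustrating the conj_src convention, not one of the library's own routines:

/* strided complex copy: conj_src[0]=='N'/'n' means "do not conjugate" */
static void toy_cmove(alglib_impl::ae_complex *dst, alglib_impl::ae_int_t stride_dst,
                      const alglib_impl::ae_complex *src, alglib_impl::ae_int_t stride_src,
                      const char *conj_src, alglib_impl::ae_int_t n)
{
    bool conj = !(conj_src[0]=='N' || conj_src[0]=='n');
    for(alglib_impl::ae_int_t i=0; i<n; i++)
    {
        dst[i*stride_dst].x = src[i*stride_src].x;
        dst[i*stride_dst].y = conj ? -src[i*stride_src].y : src[i*stride_src].y;
    }
}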
/******************************************************************** /********************************************************************
1- and 2-dimensional arrays 1- and 2-dimensional arrays
********************************************************************/ ********************************************************************/
class ae_vector_wrapper class ae_vector_wrapper
{ {
public: public:
ae_vector_wrapper(); ae_vector_wrapper();
virtual ~ae_vector_wrapper(); virtual ~ae_vector_wrapper();
ae_vector_wrapper(const ae_vector_wrapper &rhs);
const ae_vector_wrapper& operator=(const ae_vector_wrapper &rhs);
void setlength(ae_int_t iLen); void setlength(ae_int_t iLen);
ae_int_t length() const; ae_int_t length() const;
void attach_to(alglib_impl::ae_vector *ptr); void attach_to(alglib_impl::ae_vector *ptr);
void allocate_own(ae_int_t size, alglib_impl::ae_datatype datatype); void allocate_own(ae_int_t size, alglib_impl::ae_datatype datatype);
const alglib_impl::ae_vector* c_ptr() const; const alglib_impl::ae_vector* c_ptr() const;
alglib_impl::ae_vector* c_ptr(); alglib_impl::ae_vector* c_ptr();
private:
ae_vector_wrapper(const ae_vector_wrapper &rhs);
const ae_vector_wrapper& operator=(const ae_vector_wrapper &rhs);
protected: protected:
//
// Copies source vector RHS into current object.
//
// Current object is considered empty (this function should be
// called from copy constructor).
//
void create(const ae_vector_wrapper &rhs);
//
// Copies array given by string into current object. Additional
// parameter DATATYPE contains information about type of the data
// in S and type of the array to create.
//
// Current object is considered empty (this function should be
// called from copy constructor).
//
void create(const char *s, alglib_impl::ae_datatype datatype);
//
// Assigns RHS to current object.
//
// It has several branches depending on target object status:
// * in case it is proxy object, data are copied into memory pointed by
// proxy. Function checks that source has exactly same size as target
// (exception is thrown on failure).
    // * in case it is non-proxy object, data allocated by object are cleared
    //   and a copy of RHS is created in target.
    //
    // NOTE: this function correctly handles assignments of the object to itself.
    //
void assign(const ae_vector_wrapper &rhs);
alglib_impl::ae_vector *p_vec; alglib_impl::ae_vector *p_vec;
alglib_impl::ae_vector vec; alglib_impl::ae_vector vec;
}; };
class boolean_1d_array : public ae_vector_wrapper class boolean_1d_array : public ae_vector_wrapper
{ {
public: public:
boolean_1d_array(); boolean_1d_array();
boolean_1d_array(const char *s); boolean_1d_array(const char *s);
boolean_1d_array(const boolean_1d_array &rhs);
boolean_1d_array(alglib_impl::ae_vector *p); boolean_1d_array(alglib_impl::ae_vector *p);
const boolean_1d_array& operator=(const boolean_1d_array &rhs);
virtual ~boolean_1d_array() ; virtual ~boolean_1d_array() ;
const ae_bool& operator()(ae_int_t i) const; const ae_bool& operator()(ae_int_t i) const;
ae_bool& operator()(ae_int_t i); ae_bool& operator()(ae_int_t i);
const ae_bool& operator[](ae_int_t i) const; const ae_bool& operator[](ae_int_t i) const;
ae_bool& operator[](ae_int_t i); ae_bool& operator[](ae_int_t i);
void setcontent(ae_int_t iLen, const bool *pContent ); void setcontent(ae_int_t iLen, const bool *pContent );
ae_bool* getcontent(); ae_bool* getcontent();
const ae_bool* getcontent() const; const ae_bool* getcontent() const;
std::string tostring() const; std::string tostring() const;
}; };
class integer_1d_array : public ae_vector_wrapper class integer_1d_array : public ae_vector_wrapper
{ {
public: public:
integer_1d_array(); integer_1d_array();
integer_1d_array(alglib_impl::ae_vector *p);
integer_1d_array(const char *s); integer_1d_array(const char *s);
integer_1d_array(const integer_1d_array &rhs);
integer_1d_array(alglib_impl::ae_vector *p);
const integer_1d_array& operator=(const integer_1d_array &rhs);
virtual ~integer_1d_array(); virtual ~integer_1d_array();
const ae_int_t& operator()(ae_int_t i) const; const ae_int_t& operator()(ae_int_t i) const;
ae_int_t& operator()(ae_int_t i); ae_int_t& operator()(ae_int_t i);
const ae_int_t& operator[](ae_int_t i) const; const ae_int_t& operator[](ae_int_t i) const;
ae_int_t& operator[](ae_int_t i); ae_int_t& operator[](ae_int_t i);
void setcontent(ae_int_t iLen, const ae_int_t *pContent ); void setcontent(ae_int_t iLen, const ae_int_t *pContent );
ae_int_t* getcontent(); ae_int_t* getcontent();
const ae_int_t* getcontent() const; const ae_int_t* getcontent() const;
std::string tostring() const; std::string tostring() const;
}; };
class real_1d_array : public ae_vector_wrapper class real_1d_array : public ae_vector_wrapper
{ {
public: public:
real_1d_array(); real_1d_array();
real_1d_array(alglib_impl::ae_vector *p);
real_1d_array(const char *s); real_1d_array(const char *s);
real_1d_array(const real_1d_array &rhs);
real_1d_array(alglib_impl::ae_vector *p);
const real_1d_array& operator=(const real_1d_array &rhs);
virtual ~real_1d_array(); virtual ~real_1d_array();
const double& operator()(ae_int_t i) const; const double& operator()(ae_int_t i) const;
double& operator()(ae_int_t i); double& operator()(ae_int_t i);
const double& operator[](ae_int_t i) const; const double& operator[](ae_int_t i) const;
double& operator[](ae_int_t i); double& operator[](ae_int_t i);
void setcontent(ae_int_t iLen, const double *pContent ); void setcontent(ae_int_t iLen, const double *pContent );
double* getcontent(); double* getcontent();
const double* getcontent() const; const double* getcontent() const;
std::string tostring(int dps) const; std::string tostring(int dps) const;
}; };
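A brief usage sketch of these wrappers; the bracketed string constructor,
setcontent(), and tostring() follow the declarations above:

#include <cstdio>
#include "ap.h"

int main()
{
    alglib::real_1d_array a("[1.0, 2.5, 4.0]");  /* parsed from string form */
    a(1) = 3.0;                    /* operator() and operator[] both index */
    double buf[2] = { 7.0, 8.0 };
    alglib::real_1d_array b;
    b.setcontent(2, buf);          /* copy a raw C array into the wrapper */
    std::printf("%s %s\n", a.tostring(2).c_str(), b.tostring(2).c_str());
    return 0;
}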
class complex_1d_array : public ae_vector_wrapper class complex_1d_array : public ae_vector_wrapper
{ {
public: public:
complex_1d_array(); complex_1d_array();
complex_1d_array(alglib_impl::ae_vector *p);
complex_1d_array(const char *s); complex_1d_array(const char *s);
complex_1d_array(const complex_1d_array &rhs);
complex_1d_array(alglib_impl::ae_vector *p);
const complex_1d_array& operator=(const complex_1d_array &rhs);
virtual ~complex_1d_array(); virtual ~complex_1d_array();
const alglib::complex& operator()(ae_int_t i) const; const alglib::complex& operator()(ae_int_t i) const;
alglib::complex& operator()(ae_int_t i); alglib::complex& operator()(ae_int_t i);
const alglib::complex& operator[](ae_int_t i) const; const alglib::complex& operator[](ae_int_t i) const;
alglib::complex& operator[](ae_int_t i); alglib::complex& operator[](ae_int_t i);
void setcontent(ae_int_t iLen, const alglib::complex *pContent ); void setcontent(ae_int_t iLen, const alglib::complex *pContent );
alglib::complex* getcontent(); alglib::complex* getcontent();
const alglib::complex* getcontent() const; const alglib::complex* getcontent() const;
std::string tostring(int dps) const; std::string tostring(int dps) const;
}; };
class ae_matrix_wrapper class ae_matrix_wrapper
{ {
public: public:
ae_matrix_wrapper(); ae_matrix_wrapper();
virtual ~ae_matrix_wrapper(); virtual ~ae_matrix_wrapper();
ae_matrix_wrapper(const ae_matrix_wrapper &rhs);
const ae_matrix_wrapper& operator=(const ae_matrix_wrapper &rhs); const ae_matrix_wrapper& operator=(const ae_matrix_wrapper &rhs);
void setlength(ae_int_t rows, ae_int_t cols); void setlength(ae_int_t rows, ae_int_t cols);
ae_int_t rows() const; ae_int_t rows() const;
ae_int_t cols() const; ae_int_t cols() const;
bool isempty() const; bool isempty() const;
ae_int_t getstride() const; ae_int_t getstride() const;
void attach_to(alglib_impl::ae_matrix *ptr); void attach_to(alglib_impl::ae_matrix *ptr);
void allocate_own(ae_int_t rows, ae_int_t cols, alglib_impl::ae_datatyp e datatype); void allocate_own(ae_int_t rows, ae_int_t cols, alglib_impl::ae_datatyp e datatype);
const alglib_impl::ae_matrix* c_ptr() const; const alglib_impl::ae_matrix* c_ptr() const;
alglib_impl::ae_matrix* c_ptr(); alglib_impl::ae_matrix* c_ptr();
private:
ae_matrix_wrapper(const ae_matrix_wrapper &rhs);
protected: protected:
//
// Copies source matrix RHS into current object.
//
// Current object is considered empty (this function should be
// called from copy constructor).
//
void create(const ae_matrix_wrapper &rhs);
//
// Copies array given by string into current object. Additional
// parameter DATATYPE contains information about type of the data
// in S and type of the array to create.
//
// Current object is considered empty (this function should be
// called from copy constructor).
//
void create(const char *s, alglib_impl::ae_datatype datatype);
//
// Assigns RHS to current object.
//
// It has several branches depending on target object status:
// * in case it is proxy object, data are copied into memory pointed by
// proxy. Function checks that source has exactly same size as target
// (exception is thrown on failure).
    // * in case it is non-proxy object, data allocated by object are cleared
    //   and a copy of RHS is created in target.
    //
    // NOTE: this function correctly handles assignments of the object to itself.
    //
void assign(const ae_matrix_wrapper &rhs);
alglib_impl::ae_matrix *p_mat; alglib_impl::ae_matrix *p_mat;
alglib_impl::ae_matrix mat; alglib_impl::ae_matrix mat;
}; };
class boolean_2d_array : public ae_matrix_wrapper class boolean_2d_array : public ae_matrix_wrapper
{ {
public: public:
boolean_2d_array(); boolean_2d_array();
boolean_2d_array(const boolean_2d_array &rhs);
boolean_2d_array(alglib_impl::ae_matrix *p); boolean_2d_array(alglib_impl::ae_matrix *p);
boolean_2d_array(const char *s); boolean_2d_array(const char *s);
virtual ~boolean_2d_array(); virtual ~boolean_2d_array();
const ae_bool& operator()(ae_int_t i, ae_int_t j) const; const ae_bool& operator()(ae_int_t i, ae_int_t j) const;
ae_bool& operator()(ae_int_t i, ae_int_t j); ae_bool& operator()(ae_int_t i, ae_int_t j);
const ae_bool* operator[](ae_int_t i) const; const ae_bool* operator[](ae_int_t i) const;
ae_bool* operator[](ae_int_t i); ae_bool* operator[](ae_int_t i);
void setcontent(ae_int_t irows, ae_int_t icols, const bool *pContent ); void setcontent(ae_int_t irows, ae_int_t icols, const bool *pContent );
std::string tostring() const ; std::string tostring() const ;
}; };
class integer_2d_array : public ae_matrix_wrapper class integer_2d_array : public ae_matrix_wrapper
{ {
public: public:
integer_2d_array(); integer_2d_array();
integer_2d_array(const integer_2d_array &rhs);
integer_2d_array(alglib_impl::ae_matrix *p); integer_2d_array(alglib_impl::ae_matrix *p);
integer_2d_array(const char *s); integer_2d_array(const char *s);
virtual ~integer_2d_array(); virtual ~integer_2d_array();
const ae_int_t& operator()(ae_int_t i, ae_int_t j) const; const ae_int_t& operator()(ae_int_t i, ae_int_t j) const;
ae_int_t& operator()(ae_int_t i, ae_int_t j); ae_int_t& operator()(ae_int_t i, ae_int_t j);
const ae_int_t* operator[](ae_int_t i) const; const ae_int_t* operator[](ae_int_t i) const;
ae_int_t* operator[](ae_int_t i); ae_int_t* operator[](ae_int_t i);
void setcontent(ae_int_t irows, ae_int_t icols, const ae_int_t *pConten t ); void setcontent(ae_int_t irows, ae_int_t icols, const ae_int_t *pConten t );
std::string tostring() const; std::string tostring() const;
}; };
class real_2d_array : public ae_matrix_wrapper class real_2d_array : public ae_matrix_wrapper
{ {
public: public:
real_2d_array(); real_2d_array();
real_2d_array(const real_2d_array &rhs);
real_2d_array(alglib_impl::ae_matrix *p); real_2d_array(alglib_impl::ae_matrix *p);
real_2d_array(const char *s); real_2d_array(const char *s);
virtual ~real_2d_array(); virtual ~real_2d_array();
const double& operator()(ae_int_t i, ae_int_t j) const; const double& operator()(ae_int_t i, ae_int_t j) const;
double& operator()(ae_int_t i, ae_int_t j); double& operator()(ae_int_t i, ae_int_t j);
const double* operator[](ae_int_t i) const; const double* operator[](ae_int_t i) const;
double* operator[](ae_int_t i); double* operator[](ae_int_t i);
void setcontent(ae_int_t irows, ae_int_t icols, const double *pContent ); void setcontent(ae_int_t irows, ae_int_t icols, const double *pContent );
std::string tostring(int dps) const; std::string tostring(int dps) const;
}; };
class complex_2d_array : public ae_matrix_wrapper class complex_2d_array : public ae_matrix_wrapper
{ {
public: public:
complex_2d_array(); complex_2d_array();
complex_2d_array(const complex_2d_array &rhs);
complex_2d_array(alglib_impl::ae_matrix *p); complex_2d_array(alglib_impl::ae_matrix *p);
complex_2d_array(const char *s); complex_2d_array(const char *s);
virtual ~complex_2d_array(); virtual ~complex_2d_array();
const alglib::complex& operator()(ae_int_t i, ae_int_t j) const; const alglib::complex& operator()(ae_int_t i, ae_int_t j) const;
alglib::complex& operator()(ae_int_t i, ae_int_t j); alglib::complex& operator()(ae_int_t i, ae_int_t j);
const alglib::complex* operator[](ae_int_t i) const; const alglib::complex* operator[](ae_int_t i) const;
alglib::complex* operator[](ae_int_t i); alglib::complex* operator[](ae_int_t i);
skipping to change at line 1338 skipping to change at line 1408
bool fp_isfinite(double x); bool fp_isfinite(double x);
}//namespace alglib }//namespace alglib
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTIONS CONTAINS DECLARATIONS FOR OPTIMIZED LINEAR ALGEBRA CODES // THIS SECTIONS CONTAINS DECLARATIONS FOR OPTIMIZED LINEAR ALGEBRA CODES
// IT IS SHARED BETWEEN C++ AND PURE C LIBRARIES // IT IS SHARED BETWEEN C++ AND PURE C LIBRARIES
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
#define ALGLIB_INTERCEPTS_ABLAS #define ALGLIB_INTERCEPTS_ABLAS
void _ialglib_vzero(ae_int_t n, double *p, ae_int_t stride); void _ialglib_vzero(ae_int_t n, double *p, ae_int_t stride);
void _ialglib_vzero_complex(ae_int_t n, ae_complex *p, ae_int_t stride); void _ialglib_vzero_complex(ae_int_t n, ae_complex *p, ae_int_t stride);
void _ialglib_vcopy(ae_int_t n, const double *a, ae_int_t stridea, double * b, ae_int_t strideb); void _ialglib_vcopy(ae_int_t n, const double *a, ae_int_t stridea, double * b, ae_int_t strideb);
void _ialglib_vcopy_complex(ae_int_t n, const ae_complex *a, ae_int_t strid ea, double *b, ae_int_t strideb, const char *conj); void _ialglib_vcopy_complex(ae_int_t n, const ae_complex *a, ae_int_t strid ea, double *b, ae_int_t strideb, const char *conj);
void _ialglib_vcopy_dcomplex(ae_int_t n, const double *a, ae_int_t stridea, double *b, ae_int_t strideb, const char *conj); void _ialglib_vcopy_dcomplex(ae_int_t n, const double *a, ae_int_t stridea, double *b, ae_int_t strideb, const char *conj);
void _ialglib_mcopyblock(ae_int_t m, ae_int_t n, const double *a, ae_int_t op, ae_int_t stride, double *b); void _ialglib_mcopyblock(ae_int_t m, ae_int_t n, const double *a, ae_int_t op, ae_int_t stride, double *b);
void _ialglib_mcopyunblock(ae_int_t m, ae_int_t n, const double *a, ae_int_ t op, double *b, ae_int_t stride); void _ialglib_mcopyunblock(ae_int_t m, ae_int_t n, const double *a, ae_int_ t op, double *b, ae_int_t stride);
void _ialglib_mcopyblock_complex(ae_int_t m, ae_int_t n, const ae_complex * a, ae_int_t op, ae_int_t stride, double *b); void _ialglib_mcopyblock_complex(ae_int_t m, ae_int_t n, const ae_complex * a, ae_int_t op, ae_int_t stride, double *b);
void _ialglib_mcopyunblock_complex(ae_int_t m, ae_int_t n, const double *a, ae_int_t op, ae_complex* b, ae_int_t stride); void _ialglib_mcopyunblock_complex(ae_int_t m, ae_int_t n, const double *a, ae_int_t op, ae_complex* b, ae_int_t stride);
skipping to change at line 1473 skipping to change at line 1543
ae_matrix *a, ae_matrix *a,
ae_int_t ia, ae_int_t ia,
ae_int_t ja, ae_int_t ja,
ae_vector *u, ae_vector *u,
ae_int_t uoffs, ae_int_t uoffs,
ae_vector *v, ae_vector *v,
ae_int_t voffs); ae_int_t voffs);
} }
/////////////////////////////////////////////////////////////////////////
//
// THIS SECTION CONTAINS PARALLEL SUBROUTINES
//
/////////////////////////////////////////////////////////////////////////
namespace alglib_impl
{
}
#endif #endif
 End of changes. 40 change blocks. 
102 lines changed or deleted 188 lines changed or added


 dataanalysis.h   dataanalysis.h 
skipping to change at line 25 skipping to change at line 25
A copy of the GNU General Public License is available at A copy of the GNU General Public License is available at
http://www.fsf.org/licensing/licenses http://www.fsf.org/licensing/licenses
>>> END OF LICENSE >>> >>> END OF LICENSE >>>
*************************************************************************/ *************************************************************************/
#ifndef _dataanalysis_pkg_h #ifndef _dataanalysis_pkg_h
#define _dataanalysis_pkg_h #define _dataanalysis_pkg_h
#include "ap.h" #include "ap.h"
#include "alglibinternal.h" #include "alglibinternal.h"
#include "linalg.h" #include "linalg.h"
#include "statistics.h" #include "statistics.h"
#include "specialfunctions.h"
#include "alglibmisc.h" #include "alglibmisc.h"
#include "specialfunctions.h"
#include "solvers.h" #include "solvers.h"
#include "optimization.h" #include "optimization.h"
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES) // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib_impl namespace alglib_impl
{ {
skipping to change at line 128 skipping to change at line 128
double avgerror; double avgerror;
double avgrelerror; double avgrelerror;
double cvrmserror; double cvrmserror;
double cvavgerror; double cvavgerror;
double cvavgrelerror; double cvavgrelerror;
ae_int_t ncvdefects; ae_int_t ncvdefects;
ae_vector cvdefects; ae_vector cvdefects;
} lrreport; } lrreport;
typedef struct typedef struct
{ {
double relclserror;
double avgce;
double rmserror;
double avgerror;
double avgrelerror;
} modelerrors;
typedef struct
{
double f;
ae_vector g;
} smlpgrad;
typedef struct
{
ae_int_t hlnetworktype; ae_int_t hlnetworktype;
ae_int_t hlnormtype; ae_int_t hlnormtype;
ae_vector hllayersizes; ae_vector hllayersizes;
ae_vector hlconnections; ae_vector hlconnections;
ae_vector hlneurons; ae_vector hlneurons;
ae_vector structinfo; ae_vector structinfo;
ae_vector weights; ae_vector weights;
ae_vector columnmeans; ae_vector columnmeans;
ae_vector columnsigmas; ae_vector columnsigmas;
ae_vector neurons; ae_vector neurons;
ae_vector dfdnet; ae_vector dfdnet;
ae_vector derror; ae_vector derror;
ae_vector x; ae_vector x;
ae_vector y; ae_vector y;
ae_matrix xy; ae_matrix xy;
ae_vector xyrow; ae_vector xyrow;
ae_matrix chunks;
ae_vector nwbuf; ae_vector nwbuf;
ae_vector integerbuf; ae_vector integerbuf;
modelerrors err;
ae_vector rndbuf;
ae_shared_pool buf;
ae_shared_pool gradbuf;
ae_matrix dummydxy;
sparsematrix dummysxy;
ae_vector dummyidx;
ae_shared_pool dummypool;
} multilayerperceptron; } multilayerperceptron;
typedef struct typedef struct
{ {
double relclserror;
double avgce;
double rmserror;
double avgerror;
double avgrelerror;
} modelerrors;
typedef struct
{
ae_vector w; ae_vector w;
} logitmodel; } logitmodel;
typedef struct typedef struct
{ {
ae_bool brackt; ae_bool brackt;
ae_bool stage1; ae_bool stage1;
ae_int_t infoc; ae_int_t infoc;
double dg; double dg;
double dgm; double dgm;
double dginit; double dginit;
skipping to change at line 260 skipping to change at line 272
typedef struct typedef struct
{ {
double relclserror; double relclserror;
double avgce; double avgce;
double rmserror; double rmserror;
double avgerror; double avgerror;
double avgrelerror; double avgrelerror;
} mlpcvreport; } mlpcvreport;
typedef struct typedef struct
{ {
ae_vector bestparameters;
double bestrmserror;
ae_bool randomizenetwork;
multilayerperceptron network;
minlbfgsstate optimizer;
minlbfgsreport optimizerrep;
ae_vector wbuf0;
ae_vector wbuf1;
ae_vector allminibatches;
ae_vector currentminibatch;
rcommstate rstate;
ae_int_t algoused;
ae_int_t minibatchsize;
hqrndstate generator;
} smlptrnsession;
typedef struct
{
ae_vector trnsubset;
ae_vector valsubset;
ae_shared_pool mlpsessions;
mlpreport mlprep;
multilayerperceptron network;
} mlpetrnsession;
typedef struct
{
ae_int_t nin; ae_int_t nin;
ae_int_t nout; ae_int_t nout;
ae_bool rcpar; ae_bool rcpar;
ae_int_t lbfgsfactor; ae_int_t lbfgsfactor;
double decay; double decay;
double wstep; double wstep;
ae_int_t maxits; ae_int_t maxits;
ae_int_t datatype; ae_int_t datatype;
ae_int_t npoints; ae_int_t npoints;
ae_matrix densexy; ae_matrix densexy;
sparsematrix sparsexy; sparsematrix sparsexy;
multilayerperceptron tnetwork; smlptrnsession session;
minlbfgsstate tstate;
ae_vector wbest;
ae_vector wfinal;
ae_int_t ngradbatch; ae_int_t ngradbatch;
ae_vector subset; ae_vector subset;
ae_int_t subsetsize; ae_int_t subsetsize;
ae_vector valsubset; ae_vector valsubset;
ae_int_t valsubsetsize; ae_int_t valsubsetsize;
ae_int_t algokind;
ae_int_t minibatchsize;
} mlptrainer; } mlptrainer;
typedef struct typedef struct
{ {
multilayerperceptron network; multilayerperceptron network;
multilayerperceptron tnetwork;
minlbfgsstate state;
mlpreport rep; mlpreport rep;
ae_vector subset; ae_vector subset;
ae_int_t subsetsize; ae_int_t subsetsize;
ae_vector xyrow; ae_vector xyrow;
ae_vector y; ae_vector y;
ae_int_t ngrad; ae_int_t ngrad;
ae_vector bufwbest; ae_shared_pool trnpool;
ae_vector bufwfinal;
} mlpparallelizationcv; } mlpparallelizationcv;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
// //
// THIS SECTION CONTAINS C++ INTERFACE // THIS SECTION CONTAINS C++ INTERFACE
// //
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
namespace alglib namespace alglib
skipping to change at line 645 skipping to change at line 678
double &avgrelerror; double &avgrelerror;
double &cvrmserror; double &cvrmserror;
double &cvavgerror; double &cvavgerror;
double &cvavgrelerror; double &cvavgrelerror;
ae_int_t &ncvdefects; ae_int_t &ncvdefects;
integer_1d_array cvdefects; integer_1d_array cvdefects;
}; };
/************************************************************************* /*************************************************************************
*************************************************************************/
class _multilayerperceptron_owner
{
public:
_multilayerperceptron_owner();
_multilayerperceptron_owner(const _multilayerperceptron_owner &rhs);
_multilayerperceptron_owner& operator=(const _multilayerperceptron_owner &rhs);
virtual ~_multilayerperceptron_owner();
alglib_impl::multilayerperceptron* c_ptr();
alglib_impl::multilayerperceptron* c_ptr() const;
protected:
alglib_impl::multilayerperceptron *p_struct;
};
class multilayerperceptron : public _multilayerperceptron_owner
{
public:
multilayerperceptron();
multilayerperceptron(const multilayerperceptron &rhs);
multilayerperceptron& operator=(const multilayerperceptron &rhs);
virtual ~multilayerperceptron();
};
/*************************************************************************
Model's errors: Model's errors:
* RelCLSError - fraction of misclassified cases. * RelCLSError - fraction of misclassified cases.
* AvgCE - average cross-entropy * AvgCE - average cross-entropy
* RMSError - root-mean-square error * RMSError - root-mean-square error
* AvgError - average error * AvgError - average error
* AvgRelError - average relative error * AvgRelError - average relative error
NOTE 1: RelCLSError/AvgCE are zero on regression problems. NOTE 1: RelCLSError/AvgCE are zero on regression problems.
NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain
skipping to change at line 712 skipping to change at line 720
double &avgce; double &avgce;
double &rmserror; double &rmserror;
double &avgerror; double &avgerror;
double &avgrelerror; double &avgrelerror;
}; };
/************************************************************************* /*************************************************************************
*************************************************************************/ *************************************************************************/
class _multilayerperceptron_owner
{
public:
_multilayerperceptron_owner();
_multilayerperceptron_owner(const _multilayerperceptron_owner &rhs);
_multilayerperceptron_owner& operator=(const _multilayerperceptron_owner &rhs);
virtual ~_multilayerperceptron_owner();
alglib_impl::multilayerperceptron* c_ptr();
alglib_impl::multilayerperceptron* c_ptr() const;
protected:
alglib_impl::multilayerperceptron *p_struct;
};
class multilayerperceptron : public _multilayerperceptron_owner
{
public:
multilayerperceptron();
multilayerperceptron(const multilayerperceptron &rhs);
multilayerperceptron& operator=(const multilayerperceptron &rhs);
virtual ~multilayerperceptron();
};
/*************************************************************************
*************************************************************************/
class _logitmodel_owner class _logitmodel_owner
{ {
public: public:
_logitmodel_owner(); _logitmodel_owner();
_logitmodel_owner(const _logitmodel_owner &rhs); _logitmodel_owner(const _logitmodel_owner &rhs);
_logitmodel_owner& operator=(const _logitmodel_owner &rhs); _logitmodel_owner& operator=(const _logitmodel_owner &rhs);
virtual ~_logitmodel_owner(); virtual ~_logitmodel_owner();
alglib_impl::logitmodel* c_ptr(); alglib_impl::logitmodel* c_ptr();
alglib_impl::logitmodel* c_ptr() const; alglib_impl::logitmodel* c_ptr() const;
protected: protected:
skipping to change at line 1152 skipping to change at line 1185
number of iterations. number of iterations.
-- ALGLIB -- -- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void clusterizersetkmeanslimits(const clusterizerstate &s, const ae_int_t restarts, const ae_int_t maxits); void clusterizersetkmeanslimits(const clusterizerstate &s, const ae_int_t restarts, const ae_int_t maxits);
/************************************************************************* /*************************************************************************
This function performs agglomerative hierarchical clustering This function performs agglomerative hierarchical clustering
FOR USERS OF SMP EDITION:
! This function can utilize multicore capabilities of your system. In
! order to do this you have to call version with "smp_" prefix, which
! indicates that multicore code will be used.
!
! This note is given for users of the SMP edition; if you use the GPL
! edition, or a commercial edition of ALGLIB without SMP support, you
! will still be able to call the smp-version of this function, but all
! computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
!
! You should remember that starting/stopping worker threads always has
! a non-zero cost. The multicore version is quite efficient on large
! problems which need more than 1,000,000 operations to be solved,
! gives a moderate speed-up in the mid-range (from 100,000 to 1,000,000
! operations), and gives no speed-up for small problems (fewer than
! 100,000 operations).
INPUT PARAMETERS: INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate() S - clusterizer state, initialized by ClusterizerCreate()
OUTPUT PARAMETERS: OUTPUT PARAMETERS:
Rep - clustering results; see description of AHCReport Rep - clustering results; see description of AHCReport
structure for more information. structure for more information.
NOTE 1: hierarchical clustering algorithms require large amounts of memory. NOTE 1: hierarchical clustering algorithms require large amounts of memory.
In particular, this implementation needs sizeof(double)*NPoints^2 In particular, this implementation needs sizeof(double)*NPoints^2
bytes, which are used to store the distance matrix. When we work bytes, which are used to store the distance matrix. When we work
with a user-supplied matrix, this amount is multiplied by 2 (we have with a user-supplied matrix, this amount is multiplied by 2 (we have
to store the original matrix and work with its copy). to store the original matrix and work with its copy).
For example, a problem with 10000 points would require 800M of RAM, For example, a problem with 10000 points would require 800M of RAM,
even when working in a 1-dimensional space. even when working in a 1-dimensional space.
-- ALGLIB -- -- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void clusterizerrunahc(const clusterizerstate &s, ahcreport &rep); void clusterizerrunahc(const clusterizerstate &s, ahcreport &rep);
void smp_clusterizerrunahc(const clusterizerstate &s, ahcreport &rep);
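A minimal usage sketch may help at this point (hedged: clusterizercreate() and clusterizersetpoints() are declared elsewhere in this header, error handling is omitted, and disttype 2 selects Euclidean distance, see clusterizergetdistances() below):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1,1],[1,2],[4,1],[4,2]]"; // 4 points in 2D
        clusterizerstate s;
        ahcreport rep;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);    // 2 = Euclidean distance
        clusterizerrunahc(s, rep);         // serial version
        // smp_clusterizerrunahc(s, rep);  // parallel version on SMP builds
        return 0;
    }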
/************************************************************************* /*************************************************************************
This function performs clustering by k-means++ algorithm. This function performs clustering by k-means++ algorithm.
You may change algorithm properties like number of restarts or iterations You may change algorithm properties like number of restarts or iterations
limit by calling ClusterizerSetKMeansLimits() functions. limit by calling ClusterizerSetKMeansLimits() functions.
INPUT PARAMETERS: INPUT PARAMETERS:
S - clusterizer state, initialized by ClusterizerCreate() S - clusterizer state, initialized by ClusterizerCreate()
K - number of clusters, K>=0. K - number of clusters, K>=0.
skipping to change at line 1208 skipping to change at line 1263
specified by distance matrix instead of explicitly given points). specified by distance matrix instead of explicitly given points).
-- ALGLIB -- -- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep); void clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep);
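A hedged sketch of the k-means path (same assumptions as the AHC sketch above; treating MaxIts=0 as "no iteration limit" is taken from the ClusterizerSetKMeansLimits() description and should be verified against your version):

    real_2d_array xy = "[[1,1],[1,2],[4,1],[4,2]]";
    clusterizerstate s;
    kmeansreport rep;
    clusterizercreate(s);
    clusterizersetpoints(s, xy, 2);       // k-means needs Euclidean distance
    clusterizersetkmeanslimits(s, 5, 0);  // 5 restarts, no iteration limit
    clusterizerrunkmeans(s, 2, rep);      // K = 2 clusters
    // rep stores the centers and per-point cluster indexes; see the
    // kmeansreport declaration for the exact field names.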
/************************************************************************* /*************************************************************************
This function returns distance matrix for dataset This function returns distance matrix for dataset
FOR USERS OF SMP EDITION:
! This function can utilize multicore capabilities of your system. In
! order to do this you have to call version with "smp_" prefix, which
! indicates that multicore code will be used.
!
! This note is given for users of the SMP edition; if you use the GPL
! edition, or a commercial edition of ALGLIB without SMP support, you
! will still be able to call the smp-version of this function, but all
! computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
!
! You should remember that starting/stopping worker threads always has
! a non-zero cost. The multicore version is quite efficient on large
! problems which need more than 1,000,000 operations to be solved,
! gives a moderate speed-up in the mid-range (from 100,000 to 1,000,000
! operations), and gives no speed-up for small problems (fewer than
! 100,000 operations).
INPUT PARAMETERS: INPUT PARAMETERS:
XY - array[NPoints,NFeatures], dataset XY - array[NPoints,NFeatures], dataset
NPoints - number of points, >=0 NPoints - number of points, >=0
NFeatures- number of features, >=1 NFeatures- number of features, >=1
DistType- distance function: DistType- distance function:
* 0 Chebyshev distance (L-inf norm) * 0 Chebyshev distance (L-inf norm)
* 1 city block distance (L1 norm) * 1 city block distance (L1 norm)
* 2 Euclidean distance (L2 norm) * 2 Euclidean distance (L2 norm)
* 10 Pearson correlation: * 10 Pearson correlation:
dist(a,b) = 1-corr(a,b) dist(a,b) = 1-corr(a,b)
skipping to change at line 1247 skipping to change at line 1323
The reason behind the difference in performance is that correlation-based The reason behind the difference in performance is that correlation-based
distance functions are computed using optimized linear algebra kernels, distance functions are computed using optimized linear algebra kernels,
while Chebyshev and city block distance functions are computed using while Chebyshev and city block distance functions are computed using
simple nested loops with two branches at each iteration. simple nested loops with two branches at each iteration.
-- ALGLIB -- -- ALGLIB --
Copyright 10.07.2012 by Bochkanov Sergey Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d); void clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d);
void smp_clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d);
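With the city-block metric the returned entries are easy to check by hand; a hedged fragment (assumes dataanalysis.h is included and using namespace alglib):

    real_2d_array xy = "[[0,0],[1,0],[1,2]]";  // 3 points, 2 features
    real_2d_array d;
    clusterizergetdistances(xy, 3, 2, 1, d);   // disttype 1 = city block
    // D is a symmetric NPoints x NPoints matrix with a zero diagonal;
    // here d[0][2] = |1-0| + |2-0| = 3.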
/************************************************************************* /*************************************************************************
This function takes as input clusterization report Rep, desired clusters This function takes as input clusterization report Rep, desired clusters
count K, and builds top K clusters from hierarchical clusterization tree. count K, and builds top K clusters from hierarchical clusterization tree.
It returns assignment of points to clusters (array of cluster indexes). It returns assignment of points to clusters (array of cluster indexes).
INPUT PARAMETERS: INPUT PARAMETERS:
Rep - report from ClusterizerRunAHC() performed on XY Rep - report from ClusterizerRunAHC() performed on XY
K - desired number of clusters, 1<=K<=NPoints. K - desired number of clusters, 1<=K<=NPoints.
K can be zero only when NPoints=0. K can be zero only when NPoints=0.
skipping to change at line 2453 skipping to change at line 2530
when you call it from command line. when you call it from command line.
-- ALGLIB -- -- ALGLIB --
Copyright 21.09.2010 by Bochkanov Sergey Copyright 21.09.2010 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
void mlpprocessi(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y); void mlpprocessi(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y);
/************************************************************************* /*************************************************************************
Error of the neural network on dataset. Error of the neural network on dataset.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x, depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format; training set format;
SSize - points count. NPoints - points count.
RESULT: RESULT:
sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
DATASET FORMAT: DATASET FORMAT:
This function uses two different dataset formats - one for regression This function uses two different dataset formats - one for regression
networks, another one for classification networks. networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset For regression networks with NIn inputs and NOut outputs following dataset
skipping to change at line 2483 skipping to change at line 2586
For classification networks with NIn inputs and NClasses classes following For classification networks with NIn inputs and NClasses classes following
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize); double mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
double smp_mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
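A hedged sketch of the dense call (mlpcreate1() is declared elsewhere in this header; the rows follow the regression format described above):

    multilayerperceptron net;
    mlpcreate1(2, 3, 1, net);               // 2 inputs, 3 hidden, 1 output
    real_2d_array xy = "[[0,0,0],[1,1,2]]"; // each row: [in0, in1, out0]
    double e = mlperror(net, xy, 2);        // SUM(sqr(y[i]-desired_y[i])/2)
    // smp_mlperror(net, xy, 2) returns the same value; it merely takes
    // the parallel code path on commercial/SMP builds.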
/************************************************************************* /*************************************************************************
Error of the neural network on dataset given by sparse matrix. Error of the neural network on dataset given by sparse matrix.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x, depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network Network - neural network
XY - training set, see below for information on the XY - training set, see below for information on the
training set format. This function checks correctness training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for is passed. Sparse matrix must use CRS format for
storage. storage.
NPoints - points count, >=0 NPoints - points count, >=0
skipping to change at line 2523 skipping to change at line 2653
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints); double mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
double smp_mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
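The same dataset can be fed in as a CRS sparse matrix; a hedged fragment (sparsecreate(), sparseset() and sparseconverttocrs() come from linalg.h, which this header includes):

    multilayerperceptron net;
    mlpcreate1(2, 3, 1, net);
    sparsematrix sxy;
    sparsecreate(2, 3, sxy);         // 2 samples, NIn+NOut = 3 columns
    sparseset(sxy, 1, 0, 1.0);       // entries never set are exact zeros
    sparseset(sxy, 1, 1, 1.0);
    sparseset(sxy, 1, 2, 2.0);
    sparseconverttocrs(sxy);         // CRS storage is mandatory here
    double e = mlperrorsparse(net, sxy, 2);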
/************************************************************************* /*************************************************************************
Natural error function for neural network, internal subroutine. Natural error function for neural network, internal subroutine.
NOTE: this function is single-threaded. Unlike the other error functions, it
receives no speed-up from being executed in SMP mode.
-- ALGLIB -- -- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize); double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize);
/************************************************************************* /*************************************************************************
Classification error Classification error of the neural network on dataset.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
NPoints - points count.
RESULT:
classification error (number of misclassified cases)
DATASET FORMAT:
This function uses two different dataset formats - one for regression
networks, another one for classification networks.
For regression networks with NIn inputs and NOut outputs following dataset
format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs
For classification networks with NIn inputs and NClasses classes following
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
ae_int_t mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize); ae_int_t mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
ae_int_t smp_mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
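A hedged sketch for the classification format (mlpcreatec1() builds a network with a softmax output layer; note the single class-index column in place of NOut output columns):

    multilayerperceptron net;
    mlpcreatec1(2, 3, 2, net);                 // 2 inputs, 2 classes
    real_2d_array xy = "[[0,0,0],[1,1,1]]";    // each row: [in0, in1, class]
    ae_int_t nmiss = mlpclserror(net, xy, 2);  // misclassified cases, 0..2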
/************************************************************************* /*************************************************************************
Relative classification error on the test set. Relative classification error on the test set.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format; training set format;
NPoints - points count. NPoints - points count.
RESULT: RESULT:
Percent of incorrectly classified cases. Works both for classifier Percent of incorrectly classified cases. Works both for classifier
networks and general purpose networks used as classifiers. networks and general purpose networks used as classifiers.
skipping to change at line 2575 skipping to change at line 2789
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 25.12.2008 by Bochkanov Sergey Copyright 25.12.2008 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlprelclserror(const multilayerperceptron &network, const real_2d_ar ray &xy, const ae_int_t npoints); double mlprelclserror(const multilayerperceptron &network, const real_2d_ar ray &xy, const ae_int_t npoints);
double smp_mlprelclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
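The two classification metrics are easy to relate in code. A hedged check: despite the word "percent" above, the return value appears to be the RelCLSError fraction described earlier, so the expectation below is an assumption to verify on your build:

    multilayerperceptron net;
    mlpcreatec1(2, 3, 2, net);
    real_2d_array xy = "[[0,0,0],[1,1,1]]";
    double frac = mlprelclserror(net, xy, 2);
    // expected: frac == (double)mlpclserror(net, xy, 2) / 2, in [0,1]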
/************************************************************************* /*************************************************************************
Relative classification error on the test set given by sparse matrix. Relative classification error on the test set given by sparse matrix.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format. This function checks correctness training set format. Sparse matrix must use CRS format
of the dataset (no NANs/INFs, class numbers are for storage.
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
NPoints - points count, >=0. NPoints - points count, >=0.
RESULT: RESULT:
Percent of incorrectly classified cases. Works both for classifier Percent of incorrectly classified cases. Works both for classifier
networks and general purpose networks used as classifiers. networks and general purpose networks used as classifiers.
DATASET FORMAT: DATASET FORMAT:
This function uses two different dataset formats - one for regression This function uses two different dataset formats - one for regression
networks, another one for classification networks. networks, another one for classification networks.
skipping to change at line 2615 skipping to change at line 2853
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints); double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
double smp_mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/************************************************************************* /*************************************************************************
Average cross-entropy (in bits per element) on the test set. Average cross-entropy (in bits per element) on the test set.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format; training set format;
NPoints - points count. NPoints - points count.
RESULT: RESULT:
CrossEntropy/(NPoints*LN(2)). CrossEntropy/(NPoints*LN(2)).
Zero if network solves regression task. Zero if network solves regression task.
skipping to change at line 2651 skipping to change at line 2916
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 08.01.2009 by Bochkanov Sergey Copyright 08.01.2009 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints); double mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
double smp_mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
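A hedged fragment unpacking the LN(2) scaling (mlpprocess() fills Y with posterior probabilities for classifier networks; <cmath> supplies std::log):

    multilayerperceptron net;
    mlpcreatec1(2, 3, 2, net);
    real_2d_array xy = "[[0,0,0],[1,1,1]]";
    double avgce = mlpavgce(net, xy, 2);
    // manual term for row 0, whose true class is 0:
    real_1d_array x = "[0,0]", y;
    mlpprocess(net, x, y);                         // y[c] = P(class c | x)
    double ce0 = -std::log(y[0]) / std::log(2.0);  // bits for this sample
    // avgce is the mean of such per-sample terms over NPoints.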
/************************************************************************* /*************************************************************************
Average cross-entropy (in bits per element) on the test set given by Average cross-entropy (in bits per element) on the test set given by
sparse matrix. sparse matrix.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format. This function checks correctness training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for is passed. Sparse matrix must use CRS format for
storage. storage.
NPoints - points count, >=0. NPoints - points count, >=0.
skipping to change at line 2692 skipping to change at line 2984
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 9.08.2012 by Bochkanov Sergey Copyright 9.08.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints); double mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
double smp_mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/************************************************************************* /*************************************************************************
RMS error on the test set given. RMS error on the test set given.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format; training set format;
NPoints - points count. NPoints - points count.
RESULT: RESULT:
Root mean square error. Its meaning for regression task is obvious. As for Root mean square error. Its meaning for regression task is obvious. As for
classification task, RMS error means error when estimating posterior classification task, RMS error means error when estimating posterior
probabilities. probabilities.
skipping to change at line 2729 skipping to change at line 3048
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints); double mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
double smp_mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
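One hedged call pattern covers both network kinds (for regression nets the RMS of prediction errors, for classifier nets the RMS error of the posterior-probability estimates):

    multilayerperceptron rnet, cnet;
    mlpcreate1(2, 3, 1, rnet);                // regression network
    mlpcreatec1(2, 3, 2, cnet);               // classifier network
    real_2d_array rxy = "[[0,0,0],[1,1,2]]";  // [inputs..., output]
    real_2d_array cxy = "[[0,0,0],[1,1,1]]";  // [inputs..., class index]
    double rms_r = mlprmserror(rnet, rxy, 2);
    double rms_c = mlprmserror(cnet, cxy, 2);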
/************************************************************************* /*************************************************************************
RMS error on the test set given by sparse matrix. RMS error on the test set given by sparse matrix.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format. This function checks correctness training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for is passed. Sparse matrix must use CRS format for
storage. storage.
NPoints - points count, >=0. NPoints - points count, >=0.
skipping to change at line 2770 skipping to change at line 3116
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints); double mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
double smp_mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/************************************************************************* /*************************************************************************
Average error on the test set. Average absolute error on the test set.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format; training set format;
NPoints - points count. NPoints - points count.
RESULT: RESULT:
Its meaning for regression task is obvious. As for classification task, it Its meaning for regression task is obvious. As for classification task, it
means average error when estimating posterior probabilities. means average error when estimating posterior probabilities.
skipping to change at line 2806 skipping to change at line 3179
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 11.03.2008 by Bochkanov Sergey Copyright 11.03.2008 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints); double mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
double smp_mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
/************************************************************************* /*************************************************************************
Average error on the test set given by sparse matrix. Average absolute error on the test set given by sparse matrix.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format. This function checks correctness training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for is passed. Sparse matrix must use CRS format for
storage. storage.
NPoints - points count, >=0. NPoints - points count, >=0.
skipping to change at line 2846 skipping to change at line 3246
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints); double mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
double smp_mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/************************************************************************* /*************************************************************************
Average relative error on the test set. Average relative error on the test set.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format; training set format;
NPoints - points count. NPoints - points count.
RESULT: RESULT:
Its meaning for regression task is obvious. As for classification task, it Its meaning for regression task is obvious. As for classification task, it
means average relative error when estimating posterior probability of means average relative error when estimating posterior probability of
belonging to the correct class. belonging to the correct class.
skipping to change at line 2883 skipping to change at line 3310
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 11.03.2008 by Bochkanov Sergey Copyright 11.03.2008 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints); double mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
double smp_mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
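The remaining metrics share one calling convention, so a single hedged fragment covers the family (each function also has smp_ and ...sparse variants, declared nearby, with identical semantics):

    multilayerperceptron net;
    mlpcreate1(2, 3, 1, net);
    real_2d_array xy = "[[0,0,0],[1,1,2]]";
    double avg    = mlpavgerror(net, xy, 2);     // mean absolute error
    double avgrel = mlpavgrelerror(net, xy, 2);  // relative variant; the
    // exact normalization (e.g. treatment of zero targets) is not spelled
    // out above, so consult the reference manual rather than this sketch.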
/************************************************************************* /*************************************************************************
Average relative error on the test set given by sparse matrix. Average relative error on the test set given by sparse matrix.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of the commercial edition; if you use the
! GPL edition, you will still be able to call the smp-version of this
! function, but all computations will be done serially.
!
! We recommend that you carefully read the ALGLIB Reference Manual,
! section 'SMP support', before using the parallel version of this
! function.
INPUT PARAMETERS: INPUT PARAMETERS:
Network - neural network; Network - neural network;
XY - training set, see below for information on the XY - training set, see below for information on the
training set format. This function checks correctness training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for is passed. Sparse matrix must use CRS format for
storage. storage.
NPoints - points count, >=0. NPoints - points count, >=0.
skipping to change at line 2924 skipping to change at line 3378
dataset format is used: dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix * dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example * each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to * first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1). NClasses-1).
-- ALGLIB -- -- ALGLIB --
Copyright 09.08.2012 by Bochkanov Sergey Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/ *************************************************************************/
double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints); double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
double smp_mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
/*************************************************************************
Gradient calculation
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
X - input vector, length of array must be at least NIn
DesiredY- desired outputs, length of array must be at least NOut
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
skipping to change at line 3426
Grad - gradient of E with respect to weights of network, array[WCount]
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpgradn(const multilayerperceptron &network, const real_1d_array &x, const real_1d_array &desiredy, double &e, real_1d_array &grad);
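/*************************************************************************
Usage sketch (illustrative only; arbitrary sizes and values): computing
the natural-error gradient for a single input/target pair.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 3, 1, net);            // 2 inputs, 3 hidden, 1 output
    alglib::real_1d_array x = "[0.1,0.2]";       // length >= NIn
    alglib::real_1d_array desiredy = "[0.5]";    // length >= NOut
    alglib::real_1d_array grad;                  // reallocated to WCount if needed
    double e;
    alglib::mlpgradn(net, x, desiredy, e, grad);
*************************************************************************/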
/*************************************************************************
Batch gradient calculation for a set of inputs/outputs
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset in dense format; one sample = one row:
* first NIn columns contain inputs,
* for regression problem, next NOut columns store
desired outputs.
* for classification problem, next column (just one!)
stores class number.
SSize - number of elements in XY
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network, array[WCount]
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
void smp_mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
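/*************************************************************************
Usage sketch (illustrative only): batch gradient over a small dense
regression dataset in the format described above.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 3, 1, net);
    // 4 rows, each with NIn=2 inputs followed by NOut=1 desired output
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::real_1d_array grad;
    double e;
    alglib::mlpgradbatch(net, xy, 4, e, grad);   // grad has WCount elements
*************************************************************************/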
/*************************************************************************
Batch gradient calculation for a set of inputs/outputs given by sparse
matrices
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset in sparse format; one sample = one row:
* MATRIX MUST BE STORED IN CRS FORMAT
* first NIn columns contain inputs.
* for regression problem, next NOut columns store
desired outputs.
* for classification problem, next column (just one!)
stores class number.
SSize - number of elements in XY
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network, array[WCount]
-- ALGLIB --
Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
void smp_mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
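/*************************************************************************
Usage sketch (illustrative only). It assumes the sparsematrix helpers
(sparsecreate/sparseset/sparseconverttocrs) declared in the linear
algebra unit; only nonzero entries are stored, and conversion to CRS is
mandatory before the call.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 3, 1, net);
    alglib::sparsematrix xy;
    alglib::sparsecreate(4, 3, xy);              // 4 samples, NIn+NOut=3 columns
    alglib::sparseset(xy, 0, 2, 1.0);            // row 0: inputs (0,0), target 1
    alglib::sparseset(xy, 1, 0, 1.0);            // row 1: inputs (1,0), target 0
    alglib::sparseset(xy, 2, 1, 1.0);            // row 2: inputs (0,1), target 0
    alglib::sparseset(xy, 3, 0, 1.0);
    alglib::sparseset(xy, 3, 1, 1.0);
    alglib::sparseset(xy, 3, 2, 1.0);            // row 3: inputs (1,1), target 1
    alglib::sparseconverttocrs(xy);
    alglib::real_1d_array grad;
    double e;
    alglib::mlpgradbatchsparse(net, xy, 4, e, grad);
*************************************************************************/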
/*************************************************************************
Batch gradient calculation for a subset of dataset
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset in dense format; one sample = one row:
* first NIn columns contain inputs,
* for regression problem, next NOut columns store
desired outputs.
* for classification problem, next column (just one!)
stores class number.
SetSize - real size of XY, SetSize>=0;
Idx - subset of SubsetSize elements, array[SubsetSize]:
* Idx[I] stores row index in the original dataset which is
given by XY. Gradient is calculated with respect to rows
whose indexes are stored in Idx[].
* Idx[] must store correct indexes; this function throws
an exception in case incorrect index (less than 0 or
larger than rows(XY)) is given
* Idx[] may store indexes in any order and even with
repetitions.
SubsetSize- number of elements in Idx[] array:
* positive value means that subset given by Idx[] is processed
* zero value results in zero gradient
* negative value means that full dataset is processed
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network,
array[WCount]
NOTE: when SubsetSize<0, the full dataset is processed, as if MLPGradBatch had been called.
-- ALGLIB --
Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
void smp_mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
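/*************************************************************************
Usage sketch (illustrative only): gradient over a subset of a dense
dataset, selected by the Idx[] array described above.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 3, 1, net);
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::integer_1d_array idx = "[0,2]";      // process rows 0 and 2 only
    alglib::real_1d_array grad;
    double e;
    alglib::mlpgradbatchsubset(net, xy, 4, idx, 2, e, grad);
    // passing SubsetSize=-1 instead would process the full dataset
*************************************************************************/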
/*************************************************************************
Batch gradient calculation for a set of inputs/outputs for a subset of
dataset given by set of indexes.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset in sparse format; one sample = one row:
* MATRIX MUST BE STORED IN CRS FORMAT
* first NIn columns contain inputs,
* for regression problem, next NOut columns store
desired outputs.
* for classification problem, next column (just one!)
stores class number.
SetSize - real size of XY, SetSize>=0;
Idx - subset of SubsetSize elements, array[SubsetSize]:
* Idx[I] stores row index in the original dataset which is
given by XY. Gradient is calculated with respect to rows
whose indexes are stored in Idx[].
* Idx[] must store correct indexes; this function throws
an exception in case incorrect index (less than 0 or
larger than rows(XY)) is given
* Idx[] may store indexes in any order and even with
repetitions.
SubsetSize- number of elements in Idx[] array:
* positive value means that subset given by Idx[] is processed
* zero value results in zero gradient
* negative value means that full dataset is processed
Grad - possibly preallocated array. If size of array is smaller
than WCount, it will be reallocated. It is recommended to
reuse previously allocated array to reduce allocation
overhead.
OUTPUT PARAMETERS:
E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
Grad - gradient of E with respect to weights of network,
array[WCount]
NOTE: when SubsetSize<0, the full dataset is processed, as if
MLPGradBatchSparse had been called.
-- ALGLIB --
Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
void smp_mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
/*************************************************************************
Batch gradient calculation for a set of inputs/outputs
(natural error function is used)
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - set of inputs/outputs; one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
skipping to change at line 3726
Hessian calculation based on R-algorithm described in
"Fast Exact Multiplication by the Hessian",
B. A. Pearlmutter,
Neural Computation, 1994.
*************************************************************************/
void mlphessianbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h);
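/*************************************************************************
Usage sketch (illustrative only): batch Hessian via the R-algorithm
referenced above. H is a WCount x WCount matrix, so this is practical
only for small networks.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 3, 1, net);
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::real_1d_array grad;
    alglib::real_2d_array h;
    double e;
    alglib::mlphessianbatch(net, xy, 4, e, grad, h);
*************************************************************************/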
/*************************************************************************
Calculation of all types of errors.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset; one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SetSize - real size of XY, SetSize>=0;
Subset - subset of SubsetSize elements, array[SubsetSize];
SubsetSize- number of elements in Subset[] array.
OUTPUT PARAMETERS:
Rep - it contains all types of errors.
NOTE: when SubsetSize<0, the full dataset is processed.
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
void mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
void smp_mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
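/*************************************************************************
Usage sketch (illustrative only): computing the full error report over
the whole dataset by listing every row in Subset[]. It assumes the
modelerrors report of this release exposes the fields named in the
trailing comment.

    alglib::multilayerperceptron net;
    alglib::mlpcreatec1(2, 5, 2, net);           // classifier, class in last column
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::integer_1d_array subset = "[0,1,2,3]";
    alglib::modelerrors rep;
    alglib::mlpallerrorssubset(net, xy, 4, subset, 4, rep);
    // rep.relclserror, rep.avgce, rep.rmserror, rep.avgerror, rep.avgrelerror
*************************************************************************/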
/*************************************************************************
Calculation of all types of errors on sparse dataset.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - network initialized with one of the network creation funcs
XY - original dataset given by sparse matrix;
one sample = one row;
first NIn columns contain inputs,
next NOut columns - desired outputs.
SetSize - real size of XY, SetSize>=0;
Subset - subset of SubsetSize elements, array[SubsetSize];
SubsetSize- number of elements in Subset[] array.
OUTPUT PARAMETERS:
Rep - it contains all types of errors.
NOTE: when SubsetSize<0, the full dataset is processed.
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
void mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
void smp_mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
/*************************************************************************
Error of the neural network on dataset.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format;
SetSize - real size of XY, SetSize>=0;
Subset - subset of SubsetSize elements, array[SubsetSize];
SubsetSize- number of elements in Subset[] array.
RESULT:
sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
skipping to change at line 3884
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
double mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
double smp_mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
/*************************************************************************
Error of the neural network on sparse dataset.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support
!
! First improvement gives close-to-linear speedup on multicore systems.
! Second improvement gives constant speedup (2-3x depending on your CPU)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
Network - neural network;
XY - training set, see below for information on the
training set format. This function checks correctness
of the dataset (no NANs/INFs, class numbers are
correct) and throws exception when incorrect dataset
is passed. Sparse matrix must use CRS format for
storage.
SetSize - real size of XY, SetSize>=0;
skipping to change at line 3953
dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
NClasses-1).
-- ALGLIB --
Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
double mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
double smp_mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
/*************************************************************************
This subroutine trains logit model.
INPUT PARAMETERS:
XY - training set, array[0..NPoints-1,0..NVars]
First NVars columns store values of independent
variables, next column stores number of class (from 0
to NClasses-1) which dataset element belongs to. Fractional
values are rounded to nearest integer.
skipping to change at line 5214
-- ALGLIB --
Copyright 09.12.2007 by Bochkanov Sergey
*************************************************************************/
void mlpkfoldcvlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep);
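/*************************************************************************
Usage sketch (illustrative only; parameter values are arbitrary):
leave-one-out cross-validation of a 1-5-1 architecture with
Levenberg-Marquardt training, Decay=0.001 and 3 restarts per fold.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(1, 5, 1, net);
    alglib::real_2d_array xy = "[[0,0],[0.25,0.5],[0.5,1],[0.75,0.5],[1,0]]";
    alglib::ae_int_t info;
    alglib::mlpreport rep;
    alglib::mlpcvreport cvrep;
    alglib::mlpkfoldcvlm(net, xy, 5, 0.001, 3, 5, info, rep, cvrep);
    // cvrep holds cross-validation estimates of the generalization errors
*************************************************************************/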
/*************************************************************************
This function estimates generalization error using cross-validation on the
current dataset with current training settings.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support (C++ computational core)
!
! Second improvement gives constant speedup (2-3X). First improvement
! gives close-to-linear speedup on multicore systems. Following
! operations can be executed in parallel:
! * FoldsCount cross-validation rounds (always)
! * NRestarts training sessions performed within each of
! cross-validation rounds (if NRestarts>1)
! * gradient calculation over large dataset (if dataset is large enough)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object. Network is not changed during cross-
validation and is not trained - it is used only as
representative of its architecture. I.e., we estimate
generalization properties of ARCHITECTURE, not some
specific network.
NRestarts - number of restarts, >=0:
skipping to change at line 5428
/*************************************************************************
This function sets stopping criteria for the optimizer.
INPUT PARAMETERS:
S - trainer object
WStep - stopping criterion. Algorithm stops if step size is
less than WStep. Recommended value - 0.01. Zero step
size means stopping after MaxIts iterations.
WStep>=0.
MaxIts - stopping criterion. Algorithm stops after MaxIts
epochs (full passes over entire dataset). Zero MaxIts
means stopping when step is sufficiently small.
MaxIts>=0.
NOTE: by default, WStep=0.005 and MaxIts=0 are used. These values are also
used when MLPSetCond() is called with WStep=0 and MaxIts=0.
NOTE: these stopping criteria are used for all kinds of neural training -
from "conventional" networks to early stopping ensembles. When used
for "conventional" networks, they are used as the only stopping
criteria. When combined with early stopping, they are used as ADDITIONAL
stopping criteria which can terminate the early stopping algorithm.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpsetcond(const mlptrainer &s, const double wstep, const ae_int_t maxits);
/*************************************************************************
This function sets training algorithm: batch training using L-BFGS will be
used.
This algorithm:
* is the most robust for small-scale problems, but may be too slow for
large-scale ones
* performs a full pass through the dataset before performing a step
* uses conditions specified by MLPSetCond() for stopping
* is the default one used by the trainer object
INPUT PARAMETERS:
S - trainer object
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpsetalgobatch(const mlptrainer &s);
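/*************************************************************************
Usage sketch (illustrative only): configuring a trainer with explicit
stopping criteria and the batch L-BFGS algorithm (the latter call is
redundant here, since batch training is the default).

    alglib::mlptrainer trn;
    alglib::mlpcreatetrainer(2, 1, trn);         // NIn=2, NOut=1
    alglib::mlpsetcond(trn, 0.01, 50);           // stop on small step OR 50 epochs
    alglib::mlpsetalgobatch(trn);
*************************************************************************/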
/*************************************************************************
This function trains neural network passed to this function, using current
dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset())
and current training settings. Training from NRestarts random starting
positions is performed, best network is chosen.
Training is performed using current training algorithm.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support (C++ computational core)
!
! Second improvement gives constant speedup (2-3X). First improvement
! gives close-to-linear speedup on multicore systems. Following
! operations can be executed in parallel:
! * NRestarts training sessions performed within each of
! cross-validation rounds (if NRestarts>1)
! * gradient calculation over large dataset (if dataset is large enough)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed, best network is chosen after
training
* NRestarts=0 means that current state of the network
skipping to change at line 5528
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
network is filled by zero values. Same behavior for functions
MLPStartTraining and MLPContinueTraining.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep);
void smp_mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep);
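/*************************************************************************
Usage sketch (illustrative only): the typical trainer workflow - create a
trainer, attach a dataset, then train a compatible network with several
random restarts.

    alglib::mlptrainer trn;
    alglib::mlpcreatetrainer(2, 1, trn);
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::mlpsetdataset(trn, xy, 4);
    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 3, 1, net);            // same NIn/NOut as the trainer
    alglib::mlpreport rep;
    alglib::mlptrainnetwork(trn, net, 5, rep);   // 5 random restarts
*************************************************************************/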
/*************************************************************************
IMPORTANT: this is an "expert" version of the MLPTrain() function. We do
not recommend you to use it unless you are pretty sure that you
need the ability to monitor training progress.
This function performs step-by-step training of the neural network. Here
"step-by-step" means that training starts with MLPStartTraining() call,
and then user subsequently calls MLPContinueTraining() to perform one more
iteration of the training.
skipping to change at line 5589
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpstarttraining(const mlptrainer &s, const multilayerperceptron &network, const bool randomstart);
/*************************************************************************
IMPORTANT: this is an "expert" version of the MLPTrain() function. We do
not recommend you to use it unless you are pretty sure that you
need the ability to monitor training progress.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support (C++ computational core)
!
! Second improvement gives constant speedup (2-3X). First improvement
! gives close-to-linear speedup on multicore systems. Following
! operations can be executed in parallel:
! * gradient calculation over large dataset (if dataset is large enough)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
This function performs step-by-step training of the neural network. Here
"step-by-step" means that training starts with MLPStartTraining() call,
and then user subsequently calls MLPContinueTraining() to perform one more
iteration of the training.
This function performs one more iteration of the training and returns
either True (training continues) or False (training stopped). In case True
was returned, Network weights are updated according to the current state
of the optimization progress. In case False was returned, no additional
updates are performed (previous update of the network weights moved us to
skipping to change at line 5675
It is also expected that you do not change state of the network on
your own - the only party who has right to change network during its
training is a trainer object. Any attempt to interfere with trainer
may lead to unpredictable results.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
bool mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network);
bool smp_mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network);
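/*************************************************************************
Usage sketch (illustrative only): the step-by-step protocol described
above. The loop body may inspect the network (e.g. log its current
error), but must not modify its weights.

    alglib::mlptrainer trn;
    alglib::mlpcreatetrainer(2, 1, trn);
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::mlpsetdataset(trn, xy, 4);
    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 3, 1, net);
    alglib::mlpstarttraining(trn, net, true);    // randomize weights and start
    while( alglib::mlpcontinuetraining(trn, net) )
    {
        // monitor progress here; weights are updated on each True return
    }
*************************************************************************/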
/*************************************************************************
Training neural networks ensemble using bootstrap aggregating (bagging).
Modified Levenberg-Marquardt algorithm is used as base training method.
INPUT PARAMETERS:
Ensemble - model with initialized geometry
XY - training set
NPoints - training set size
Decay - weight decay coefficient, >=0.001
skipping to change at line 5766
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
void mlpetraines(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep);
/*************************************************************************
This function trains neural network ensemble passed to this function using
current dataset and early stopping training algorithm. Each early stopping
round performs NRestarts random restarts (thus, EnsembleSize*NRestarts
training rounds are performed in total).
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support (C++ computational core)
!
! Second improvement gives constant speedup (2-3X). First improvement
! gives close-to-linear speedup on multicore systems. Following
! operations can be executed in parallel:
! * EnsembleSize training sessions performed for each of ensemble
! members (always parallelized)
! * NRestarts training sessions performed within each of training
! sessions (if NRestarts>1)
! * gradient calculation over large dataset (if dataset is large enough)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
S - trainer object;
Ensemble - neural network ensemble. It must have same number of
inputs and outputs/classes as was specified during
creation of the trainer object.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed during each ES round;
* NRestarts=0 is silently replaced by 1.
OUTPUT PARAMETERS:
Ensemble - trained ensemble;
Rep - it contains all types of errors.
NOTE: this training method uses BOTH early stopping and weight decay! So,
you should select weight decay before starting training just as you
select it before training "conventional" networks.
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
or single-point dataset was passed, ensemble is filled by zero
values.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 22.08.2012 by Bochkanov Sergey
*************************************************************************/
void mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep);
void smp_mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep);
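/*************************************************************************
Usage sketch (illustrative only; a realistic run needs considerably more
data than shown): early-stopping training of an ensemble of 10 networks,
3 restarts per early stopping round.

    alglib::mlptrainer trn;
    alglib::mlpcreatetrainer(2, 1, trn);
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::mlpsetdataset(trn, xy, 4);
    alglib::mlpensemble ens;
    alglib::mlpecreate1(2, 3, 1, 10, ens);       // 2-3-1 networks, size 10
    alglib::mlpreport rep;
    alglib::mlptrainensemblees(trn, ens, 3, rep);
*************************************************************************/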
/*************************************************************************
Principal components analysis
Subroutine builds orthogonal basis where first axis corresponds to
direction with maximum variance, second axis maximizes variance in subspace
orthogonal to first axis and so on.
It should be noted that, unlike LDA, PCA does not use class labels.
skipping to change at line 5979
void clusterizersetahcalgo(clusterizerstate* s,
ae_int_t algo,
ae_state *_state);
void clusterizersetkmeanslimits(clusterizerstate* s,
ae_int_t restarts,
ae_int_t maxits,
ae_state *_state);
void clusterizerrunahc(clusterizerstate* s,
ahcreport* rep,
ae_state *_state);
void _pexec_clusterizerrunahc(clusterizerstate* s,
ahcreport* rep, ae_state *_state);
void clusterizerrunkmeans(clusterizerstate* s,
ae_int_t k,
kmeansreport* rep,
ae_state *_state);
void clusterizergetdistances(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures,
ae_int_t disttype,
/* Real */ ae_matrix* d,
ae_state *_state);
void _pexec_clusterizergetdistances(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures,
ae_int_t disttype,
/* Real */ ae_matrix* d, ae_state *_state);
void clusterizergetkclusters(ahcreport* rep,
ae_int_t k,
/* Integer */ ae_vector* cidx,
/* Integer */ ae_vector* cz,
ae_state *_state);
void clusterizerseparatedbydist(ahcreport* rep,
double r,
ae_int_t* k,
/* Integer */ ae_vector* cidx,
/* Integer */ ae_vector* cz,
skipping to change at line 6234
ae_int_t* info,
/* Real */ ae_vector* w,
ae_state *_state);
void fisherldan(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nvars,
ae_int_t nclasses,
ae_int_t* info,
/* Real */ ae_matrix* w,
ae_state *_state);
ae_int_t mlpgradsplitcost(ae_state *_state);
ae_int_t mlpgradsplitsize(ae_state *_state);
void mlpcreate0(ae_int_t nin, void mlpcreate0(ae_int_t nin,
ae_int_t nout, ae_int_t nout,
multilayerperceptron* network, multilayerperceptron* network,
ae_state *_state); ae_state *_state);
void mlpcreate1(ae_int_t nin, void mlpcreate1(ae_int_t nin,
ae_int_t nhid, ae_int_t nhid,
ae_int_t nout, ae_int_t nout,
multilayerperceptron* network, multilayerperceptron* network,
ae_state *_state); ae_state *_state);
void mlpcreate2(ae_int_t nin,
skipping to change at line 6311
     ae_state *_state);
void mlpcreatec2(ae_int_t nin,
     ae_int_t nhid1,
     ae_int_t nhid2,
     ae_int_t nout,
     multilayerperceptron* network,
     ae_state *_state);
void mlpcopy(multilayerperceptron* network1,
     multilayerperceptron* network2,
     ae_state *_state);
void mlpcopyshared(multilayerperceptron* network1,
multilayerperceptron* network2,
ae_state *_state);
ae_bool mlpsamearchitecture(multilayerperceptron* network1,
multilayerperceptron* network2,
ae_state *_state);
void mlpcopytunableparameters(multilayerperceptron* network1,
multilayerperceptron* network2,
ae_state *_state);
void mlpexporttunableparameters(multilayerperceptron* network,
/* Real */ ae_vector* p,
ae_int_t* pcount,
ae_state *_state);
void mlpimporttunableparameters(multilayerperceptron* network,
/* Real */ ae_vector* p,
ae_state *_state);
void mlpserializeold(multilayerperceptron* network,
     /* Real */ ae_vector* ra,
     ae_int_t* rlen,
     ae_state *_state);
void mlpunserializeold(/* Real */ ae_vector* ra,
     multilayerperceptron* network,
     ae_state *_state);
void mlprandomize(multilayerperceptron* network, ae_state *_state);
void mlprandomizefull(multilayerperceptron* network, ae_state *_state);
void mlpinitpreprocessor(multilayerperceptron* network,
skipping to change at line 6361
     sparsematrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* idx,
     ae_int_t subsetsize,
     ae_state *_state);
void mlpproperties(multilayerperceptron* network,
     ae_int_t* nin,
     ae_int_t* nout,
     ae_int_t* wcount,
     ae_state *_state);
ae_int_t mlpntotal(multilayerperceptron* network, ae_state *_state);
ae_int_t mlpgetinputscount(multilayerperceptron* network,
     ae_state *_state);
ae_int_t mlpgetoutputscount(multilayerperceptron* network,
     ae_state *_state);
ae_int_t mlpgetweightscount(multilayerperceptron* network,
     ae_state *_state);
ae_bool mlpissoftmax(multilayerperceptron* network, ae_state *_state);
ae_int_t mlpgetlayerscount(multilayerperceptron* network,
     ae_state *_state);
ae_int_t mlpgetlayersize(multilayerperceptron* network,
skipping to change at line 6435
void mlpprocess(multilayerperceptron* network,
     /* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_state *_state);
void mlpprocessi(multilayerperceptron* network,
     /* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_state *_state);
double mlperror(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlperror(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_state *_state);
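
Dataset error evaluation via the assumed mlperror() C++ wrapper (first
columns of XY are inputs, last column is the target for a regression
network; the return value is the sum-of-squares error divided by two):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(1, 5, 1, net);

        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        double e = mlperror(net, xy, 3);   // error over all 3 points
        return 0;
    }
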
double mlperrorsparse(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlperrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints, ae_state *_state);
double mlperrorn(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t ssize,
     ae_state *_state);
ae_int_t mlpclserror(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_state *_state);
ae_int_t _pexec_mlpclserror(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_state *_state);
double mlprelclserror(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlprelclserror(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_state *_state);
double mlprelclserrorsparse(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlprelclserrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints, ae_state *_state);
double mlpavgce(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlpavgce(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_state *_state);
double mlpavgcesparse(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlpavgcesparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints, ae_state *_state);
double mlprmserror(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlprmserror(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_state *_state);
double mlprmserrorsparse(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlprmserrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints, ae_state *_state);
double mlpavgerror(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlpavgerror(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_state *_state);
double mlpavgerrorsparse(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlpavgerrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints, ae_state *_state);
double mlpavgrelerror(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlpavgrelerror(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t npoints, ae_state *_state);
double mlpavgrelerrorsparse(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t npoints,
     ae_state *_state);
double _pexec_mlpavgrelerrorsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t npoints, ae_state *_state);
void mlpgrad(multilayerperceptron* network,
     /* Real */ ae_vector* x,
     /* Real */ ae_vector* desiredy,
     double* e,
     /* Real */ ae_vector* grad,
     ae_state *_state);
void mlpgradn(multilayerperceptron* network,
     /* Real */ ae_vector* x,
     /* Real */ ae_vector* desiredy,
     double* e,
     /* Real */ ae_vector* grad,
     ae_state *_state);
void mlpgradbatch(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t ssize,
     double* e,
     /* Real */ ae_vector* grad,
     ae_state *_state);
void _pexec_mlpgradbatch(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t ssize,
double* e,
/* Real */ ae_vector* grad, ae_state *_state);
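
Batch gradient computation through the assumed mlpgradbatch() C++ wrapper;
E and Grad are accumulated over all rows of XY at once:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(1, 5, 1, net);

        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        double e;
        real_1d_array grad;
        mlpgradbatch(net, xy, 3, e, grad);   // grad has mlpgetweightscount(net) entries
        return 0;
    }
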
void mlpgradbatchsparse(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t ssize,
     double* e,
     /* Real */ ae_vector* grad,
     ae_state *_state);
void _pexec_mlpgradbatchsparse(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t ssize,
double* e,
/* Real */ ae_vector* grad, ae_state *_state);
void mlpgradbatchsubset(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* idx,
     ae_int_t subsetsize,
     double* e,
     /* Real */ ae_vector* grad,
     ae_state *_state);
void _pexec_mlpgradbatchsubset(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* idx,
ae_int_t subsetsize,
double* e,
/* Real */ ae_vector* grad, ae_state *_state);
void mlpgradbatchsparsesubset(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* idx,
     ae_int_t subsetsize,
     double* e,
     /* Real */ ae_vector* grad,
     ae_state *_state);
void _pexec_mlpgradbatchsparsesubset(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* idx,
ae_int_t subsetsize,
double* e,
/* Real */ ae_vector* grad, ae_state *_state);
void mlpgradbatchx(multilayerperceptron* network,
/* Real */ ae_matrix* densexy,
sparsematrix* sparsexy,
ae_int_t datasetsize,
ae_int_t datasettype,
/* Integer */ ae_vector* idx,
ae_int_t subset0,
ae_int_t subset1,
ae_int_t subsettype,
ae_shared_pool* buf,
ae_shared_pool* gradbuf,
ae_state *_state);
void mlpgradnbatch(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t ssize,
     double* e,
     /* Real */ ae_vector* grad,
     ae_state *_state);
void mlphessiannbatch(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t ssize,
     double* e,
skipping to change at line 6649
void mlpunserialize(ae_serializer* s,
     multilayerperceptron* network,
     ae_state *_state);
void mlpallerrorssubset(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* subset,
     ae_int_t subsetsize,
     modelerrors* rep,
     ae_state *_state);
void _pexec_mlpallerrorssubset(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* subset,
ae_int_t subsetsize,
modelerrors* rep, ae_state *_state);
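
A sketch of the assumed mlpallerrorssubset() C++ wrapper; passing a
negative SubsetSize is assumed here to mean "use the entire dataset":

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(1, 5, 1, net);

        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        integer_1d_array subset = "[]";
        modelerrors rep;
        mlpallerrorssubset(net, xy, 3, subset, -1, rep);   // assumed signature
        double rms = rep.rmserror;   // rep also exposes avgce/avgerror/avgrelerror/relclserror
        return 0;
    }
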
void mlpallerrorssparsesubset(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* subset,
     ae_int_t subsetsize,
     modelerrors* rep,
     ae_state *_state);
void _pexec_mlpallerrorssparsesubset(multilayerperceptron* network,
sparsematrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* subset,
ae_int_t subsetsize,
modelerrors* rep, ae_state *_state);
double mlperrorsubset(multilayerperceptron* network,
     /* Real */ ae_matrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* subset,
     ae_int_t subsetsize,
     ae_state *_state);
double _pexec_mlperrorsubset(multilayerperceptron* network,
/* Real */ ae_matrix* xy,
ae_int_t setsize,
/* Integer */ ae_vector* subset,
ae_int_t subsetsize, ae_state *_state);
double mlperrorsparsesubset(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* subset,
     ae_int_t subsetsize,
     ae_state *_state);
double _pexec_mlperrorsparsesubset(multilayerperceptron* network,
     sparsematrix* xy,
     ae_int_t setsize,
     /* Integer */ ae_vector* subset,
     ae_int_t subsetsize, ae_state *_state);
void mlpallerrorsx(multilayerperceptron* network,
     /* Real */ ae_matrix* densexy,
     sparsematrix* sparsexy,
     ae_int_t datasetsize,
     ae_int_t datasettype,
     /* Integer */ ae_vector* idx,
     ae_int_t subset0,
     ae_int_t subset1,
     ae_int_t subsettype,
     ae_shared_pool* buf,
     modelerrors* rep,
     ae_state *_state);
ae_bool _modelerrors_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _modelerrors_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _modelerrors_clear(void* _p);
void _modelerrors_destroy(void* _p);
ae_bool _smlpgrad_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _smlpgrad_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _smlpgrad_clear(void* _p);
void _smlpgrad_destroy(void* _p);
ae_bool _multilayerperceptron_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _multilayerperceptron_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _multilayerperceptron_clear(void* _p);
void _multilayerperceptron_destroy(void* _p);
void mnltrainh(/* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_int_t nvars,
     ae_int_t nclasses,
     ae_int_t* info,
     logitmodel* lm,
     mnlreport* rep,
     ae_state *_state);
void mnlprocess(logitmodel* lm,
     /* Real */ ae_vector* x,
skipping to change at line 6943
     ae_state *_state);
ae_bool mlpeissoftmax(mlpensemble* ensemble, ae_state *_state);
void mlpeprocess(mlpensemble* ensemble,
     /* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_state *_state);
void mlpeprocessi(mlpensemble* ensemble,
     /* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_state *_state);
void mlpeallerrorsx(mlpensemble* ensemble,
     /* Real */ ae_matrix* densexy,
     sparsematrix* sparsexy,
     ae_int_t datasetsize,
     ae_int_t datasettype,
     /* Integer */ ae_vector* idx,
     ae_int_t subset0,
     ae_int_t subset1,
     ae_int_t subsettype,
     ae_shared_pool* buf,
     modelerrors* rep,
     ae_state *_state);
void mlpeallerrorssparse(mlpensemble* ensemble,
     sparsematrix* xy,
     ae_int_t npoints,
     double* relcls,
     double* avgce,
     double* rms,
     double* avg,
     double* avgrel,
     ae_state *_state);
skipping to change at line 7077
     ae_state *_state);
void mlpsetsparsedataset(mlptrainer* s,
     sparsematrix* xy,
     ae_int_t npoints,
     ae_state *_state);
void mlpsetdecay(mlptrainer* s, double decay, ae_state *_state);
void mlpsetcond(mlptrainer* s,
     double wstep,
     ae_int_t maxits,
     ae_state *_state);
void mlpsetalgobatch(mlptrainer* s, ae_state *_state);
void mlptrainnetwork(mlptrainer* s,
     multilayerperceptron* network,
     ae_int_t nrestarts,
     mlpreport* rep,
     ae_state *_state);
void _pexec_mlptrainnetwork(mlptrainer* s,
multilayerperceptron* network,
ae_int_t nrestarts,
mlpreport* rep, ae_state *_state);
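
End-to-end training through the trainer object, a sketch assuming the
standard C++ wrappers (mlpcreatetrainer(), mlpsetdataset(), mlpsetcond(),
mlptrainnetwork()) that sit above these core declarations:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mlptrainer trn;
        multilayerperceptron net;
        mlpreport rep;

        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        mlpcreatetrainer(1, 1, trn);       // 1 input, 1 output, regression mode
        mlpsetdataset(trn, xy, 3);
        mlpsetcond(trn, 0.0, 100);         // at most 100 iterations per restart
        mlpcreate1(1, 5, 1, net);
        mlptrainnetwork(trn, net, 3, rep); // 3 random restarts, best network kept
        return 0;
    }
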
void mlpstarttraining(mlptrainer* s,
     multilayerperceptron* network,
     ae_bool randomstart,
     ae_state *_state);
ae_bool mlpcontinuetraining(mlptrainer* s,
     multilayerperceptron* network,
     ae_state *_state);
ae_bool _pexec_mlpcontinuetraining(mlptrainer* s,
multilayerperceptron* network, ae_state *_state);
void mlpebagginglm(mlpensemble* ensemble,
     /* Real */ ae_matrix* xy,
     ae_int_t npoints,
     double decay,
     ae_int_t restarts,
     ae_int_t* info,
     mlpreport* rep,
     mlpcvreport* ooberrors,
     ae_state *_state);
void mlpebagginglbfgs(mlpensemble* ensemble,
skipping to change at line 7129
     double decay,
     ae_int_t restarts,
     ae_int_t* info,
     mlpreport* rep,
     ae_state *_state);
void mlptrainensemblees(mlptrainer* s,
     mlpensemble* ensemble,
     ae_int_t nrestarts,
     mlpreport* rep,
     ae_state *_state);
void _pexec_mlptrainensemblees(mlptrainer* s,
mlpensemble* ensemble,
ae_int_t nrestarts,
mlpreport* rep, ae_state *_state);
ae_bool _mlpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpreport_clear(void* _p);
void _mlpreport_destroy(void* _p);
ae_bool _mlpcvreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpcvreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpcvreport_clear(void* _p);
void _mlpcvreport_destroy(void* _p);
ae_bool _smlptrnsession_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _smlptrnsession_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _smlptrnsession_clear(void* _p);
void _smlptrnsession_destroy(void* _p);
ae_bool _mlpetrnsession_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpetrnsession_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpetrnsession_clear(void* _p);
void _mlpetrnsession_destroy(void* _p);
ae_bool _mlptrainer_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlptrainer_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlptrainer_clear(void* _p);
void _mlptrainer_destroy(void* _p);
ae_bool _mlpparallelizationcv_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _mlpparallelizationcv_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mlpparallelizationcv_clear(void* _p);
void _mlpparallelizationcv_destroy(void* _p);
void pcabuildbasis(/* Real */ ae_matrix* x,
     ae_int_t npoints,
End of changes. 115 change blocks. 97 lines changed or deleted, 1109 lines changed or added.

fasttransforms.h
skipping to change at line 566
     ae_int_t n,
     /* Complex */ ae_vector* f,
     ae_state *_state);
void fftr1dinv(/* Complex */ ae_vector* f,
     ae_int_t n,
     /* Real */ ae_vector* a,
     ae_state *_state);
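
Round-trip usage of the real FFT through the assumed fftr1d()/fftr1dinv()
C++ wrappers from fasttransforms.h:

    #include "fasttransforms.h"
    using namespace alglib;

    int main()
    {
        real_1d_array a = "[1,2,3,4]";
        complex_1d_array f;
        fftr1d(a, f);       // forward real FFT; f[0] is the DC term

        real_1d_array b;
        fftr1dinv(f, b);    // inverse transform recovers the original sequence
        return 0;
    }
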
void fftr1dinternaleven(/* Real */ ae_vector* a,
     ae_int_t n,
     /* Real */ ae_vector* buf,
     fasttransformplan* plan,
     ae_state *_state);
void fftr1dinvinternaleven(/* Real */ ae_vector* a,
     ae_int_t n,
     /* Real */ ae_vector* buf,
     fasttransformplan* plan,
     ae_state *_state);
void convc1d(/* Complex */ ae_vector* a,
     ae_int_t m,
     /* Complex */ ae_vector* b,
     ae_int_t n,
     /* Complex */ ae_vector* r,
     ae_state *_state);
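
Full complex convolution via the assumed convc1d() C++ wrapper; the result
has length M+N-1 (the requirement M>=N is an assumption of this sketch):

    #include "fasttransforms.h"
    using namespace alglib;

    int main()
    {
        complex_1d_array a = "[1,2,3]";   // signal, M=3
        complex_1d_array b = "[1,1]";     // response, N=2
        complex_1d_array r;
        convc1d(a, 3, b, 2, r);           // r = [1,3,5,3], length M+N-1 = 4
        return 0;
    }
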
void convc1dinv(/* Complex */ ae_vector* a,
     ae_int_t m,
     /* Complex */ ae_vector* b,
End of changes. 2 change blocks. 2 lines changed or deleted, 2 lines changed or added.

interpolation.h
skipping to change at line 1836
void spline1dbuildhermite(const real_1d_array &x, const real_1d_array &y, const real_1d_array &d, const ae_int_t n, spline1dinterpolant &c);
void spline1dbuildhermite(const real_1d_array &x, const real_1d_array &y, const real_1d_array &d, spline1dinterpolant &c);
/*************************************************************************
This subroutine builds Akima spline interpolant

INPUT PARAMETERS:
    X           -   spline nodes, array[0..N-1]
    Y           -   function values, array[0..N-1]
    N           -   points count (optional):
                    * N>=2
                    * if given, only first N points are used to build spline
                    * if not given, automatically detected from X/Y sizes
                      (len(X) must be equal to len(Y))

OUTPUT PARAMETERS:
    C           -   spline interpolant

ORDER OF POINTS

Subroutine automatically sorts points, so caller may pass unsorted array.
skipping to change at line 2578
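
A minimal Akima interpolation sketch (the spline1dbuildakima() and
spline1dcalc() wrapper signatures are assumed from this header's standard
C++ interface):

    #include "interpolation.h"
    using namespace alglib;

    int main()
    {
        real_1d_array x = "[0,1,2,3,4]";
        real_1d_array y = "[0,1,4,9,16]";   // samples of y = x^2
        spline1dinterpolant c;
        spline1dbuildakima(x, y, c);
        double v = spline1dcalc(c, 2.5);    // interpolated value, close to 6.25
        return 0;
    }
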
void spline1dfithermite(const real_1d_array &x, const real_1d_array &y, const ae_int_t n, const ae_int_t m, ae_int_t &info, spline1dinterpolant &s, spline1dfitreport &rep);
void spline1dfithermite(const real_1d_array &x, const real_1d_array &y, const ae_int_t m, ae_int_t &info, spline1dinterpolant &s, spline1dfitreport &rep);
/*************************************************************************
Weighted linear least squares fitting.

QR decomposition is used to reduce task to MxM, then triangular solver or
SVD-based solver is used depending on condition number of the system. It
allows us to maximize speed and retain decent accuracy.
IMPORTANT: if you want to perform polynomial fitting, it may be more
convenient to use PolynomialFit() function. This function gives
best results on polynomial problems and solves numerical
stability issues which arise when you fit high-degree
polynomials to your data.
INPUT PARAMETERS:
    Y       -   array[0..N-1] Function values in N points.
    W       -   array[0..N-1] Weights corresponding to function values.
                Each summand in square sum of approximation deviations
                from given values is multiplied by the square of
                corresponding weight.
    FMatrix -   a table of basis functions values, array[0..N-1, 0..M-1].
                FMatrix[I, J] - value of J-th basis function in I-th point.
    N       -   number of points used. N>=1.
    M       -   number of basis functions, M>=1.
NOTE: we apply small amount of regularization when we invert squared
      Jacobian and calculate covariance matrix. It guarantees that
      algorithm won't divide by zero during inversion, but skews
      error estimates a bit (fractional error is about 10^-9).
      However, we believe that this difference is insignificant for
      all practical purposes except for the situation when you want
      to compare ALGLIB results with "reference" implementation up
      to the last significant digit.
NOTE: covariance matrix is estimated using correction for degrees
of freedom (covariances are divided by N-M instead of dividing
by N).
  -- ALGLIB --
     Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitlinearw(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, const ae_int_t n, const ae_int_t m, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinearw(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
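
A straight-line fit y = c0 + c1*x using the overload declared above, with
unit weights and basis functions {1, x} (the string constructors are an
assumption of the standard interface):

    #include "interpolation.h"
    using namespace alglib;

    int main()
    {
        real_1d_array y = "[0.9,2.1,2.9,4.2]";
        real_1d_array w = "[1,1,1,1]";
        real_2d_array fmatrix = "[[1,0],[1,1],[1,2],[1,3]]";
        ae_int_t info;
        real_1d_array c;
        lsfitreport rep;
        lsfitlinearw(y, w, fmatrix, info, c, rep);  // c is close to [0.92, 1.07]
        return 0;
    }
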
/*************************************************************************
Weighted constrained linear least squares fitting.

This is a variation of LSFitLinearW(), which searches for min|A*x-b| given
that K additional constraints C*x=bc are satisfied. It reduces the original
task to a modified one: min|B*y-d| WITHOUT constraints, then LSFitLinearW()
is called.
IMPORTANT: if you want to perform polynomial fitting, it may be more
convenient to use PolynomialFit() function. This function gives
best results on polynomial problems and solves numerical
stability issues which arise when you fit high-degree
polynomials to your data.
INPUT PARAMETERS:
    Y       -   array[0..N-1] Function values in N points.
    W       -   array[0..N-1] Weights corresponding to function values.
                Each summand in square sum of approximation deviations
                from given values is multiplied by the square of
                corresponding weight.
    FMatrix -   a table of basis functions values, array[0..N-1, 0..M-1].
                FMatrix[I,J] - value of J-th basis function in I-th point.
    CMatrix -   a table of constraints, array[0..K-1,0..M].
                I-th row of CMatrix corresponds to I-th linear constraint:
skipping to change at line 2743
NOTE: we apply small amount of regularization when we invert squared
      Jacobian and calculate covariance matrix. It guarantees that
      algorithm won't divide by zero during inversion, but skews
      error estimates a bit (fractional error is about 10^-9).
      However, we believe that this difference is insignificant for
      all practical purposes except for the situation when you want
      to compare ALGLIB results with "reference" implementation up
      to the last significant digit.
NOTE: covariance matrix is estimated using correction for degrees
of freedom (covariances are divided by N-M instead of dividing
by N).
  -- ALGLIB --
     Copyright 07.09.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitlinearwc(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, const real_2d_array &cmatrix, const ae_int_t n, const ae_int_t m, const ae_int_t k, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinearwc(const real_1d_array &y, const real_1d_array &w, const real_2d_array &fmatrix, const real_2d_array &cmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
/*************************************************************************
Linear least squares fitting.

QR decomposition is used to reduce task to MxM, then triangular solver or
SVD-based solver is used depending on condition number of the system. It
allows us to maximize speed and retain decent accuracy.
IMPORTANT: if you want to perform polynomial fitting, it may be more
convenient to use PolynomialFit() function. This function gives
best results on polynomial problems and solves numerical
stability issues which arise when you fit high-degree
polynomials to your data.
INPUT PARAMETERS:
    Y       -   array[0..N-1] Function values in N points.
    FMatrix -   a table of basis functions values, array[0..N-1, 0..M-1].
                FMatrix[I, J] - value of J-th basis function in I-th point.
    N       -   number of points used. N>=1.
    M       -   number of basis functions, M>=1.

OUTPUT PARAMETERS:
    Info    -   error code:
                * -4    internal SVD decomposition subroutine failed (very
NOTE: we apply small amount of regularization when we invert squared
      Jacobian and calculate covariance matrix. It guarantees that
      algorithm won't divide by zero during inversion, but skews
      error estimates a bit (fractional error is about 10^-9).
      However, we believe that this difference is insignificant for
      all practical purposes except for the situation when you want
      to compare ALGLIB results with "reference" implementation up
      to the last significant digit.
NOTE: covariance matrix is estimated using correction for degrees
of freedom (covariances are divided by N-M instead of dividing
by N).
  -- ALGLIB --
     Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitlinear(const real_1d_array &y, const real_2d_array &fmatrix, const ae_int_t n, const ae_int_t m, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinear(const real_1d_array &y, const real_2d_array &fmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
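
The unweighted variant declared above, fitting y = c0 + c1*x + c2*x^2 with
each FMatrix row holding [1, x, x^2]:

    #include "interpolation.h"
    using namespace alglib;

    int main()
    {
        real_1d_array y = "[1.1,1.9,4.1,8.9,16.2]";
        real_2d_array fmatrix = "[[1,0,0],[1,1,1],[1,2,4],[1,3,9],[1,4,16]]";
        ae_int_t info;
        real_1d_array c;
        lsfitreport rep;
        lsfitlinear(y, fmatrix, info, c, rep);  // info>0 on success; c near [1,0,1]
        return 0;
    }
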
/*************************************************************************
Constrained linear least squares fitting.

This is a variation of LSFitLinear(), which searches for min|A*x-b| given
that K additional constraints C*x=bc are satisfied. It reduces the original
task to a modified one: min|B*y-d| WITHOUT constraints, then LSFitLinear()
is called.
IMPORTANT: if you want to perform polynomial fitting, it may be more
convenient to use PolynomialFit() function. This function gives
best results on polynomial problems and solves numerical
stability issues which arise when you fit high-degree
polynomials to your data.
INPUT PARAMETERS:
    Y       -   array[0..N-1] Function values in N points.
    FMatrix -   a table of basis functions values, array[0..N-1, 0..M-1].
                FMatrix[I,J] - value of J-th basis function in I-th point.
    CMatrix -   a table of constraints, array[0..K-1,0..M].
                I-th row of CMatrix corresponds to I-th linear constraint:
                CMatrix[I,0]*C[0] + ... + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M]
    N       -   number of points used. N>=1.
    M       -   number of basis functions, M>=1.
    K       -   number of constraints, 0 <= K < M
skipping to change at line 2916
NOTE: we apply small amount of regularization when we invert squared
      Jacobian and calculate covariance matrix. It guarantees that
      algorithm won't divide by zero during inversion, but skews
      error estimates a bit (fractional error is about 10^-9).
      However, we believe that this difference is insignificant for
      all practical purposes except for the situation when you want
      to compare ALGLIB results with "reference" implementation up
      to the last significant digit.
NOTE: covariance matrix is estimated using correction for degrees
of freedom (covariances are divided by N-M instead of dividing
by N).
  -- ALGLIB --
     Copyright 07.09.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitlinearc(const real_1d_array &y, const real_2d_array &fmatrix, const real_2d_array &cmatrix, const ae_int_t n, const ae_int_t m, const ae_int_t k, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
void lsfitlinearc(const real_1d_array &y, const real_2d_array &fmatrix, const real_2d_array &cmatrix, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
/*************************************************************************
Weighted nonlinear least squares fitting using function values only.

Combination of numerical differentiation and secant updates is used to
skipping to change at line 3434
NOTE: we apply small amount of regularization when we invert squared
      Jacobian and calculate covariance matrix. It guarantees that
      algorithm won't divide by zero during inversion, but skews
      error estimates a bit (fractional error is about 10^-9).
      However, we believe that this difference is insignificant for
      all practical purposes except for the situation when you want
      to compare ALGLIB results with "reference" implementation up
      to the last significant digit.
NOTE: covariance matrix is estimated using correction for degrees
of freedom (covariances are divided by N-M instead of dividing
by N).
  -- ALGLIB --
     Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
void lsfitresults(const lsfitstate &state, ae_int_t &info, real_1d_array &c, lsfitreport &rep);
/*************************************************************************
This subroutine turns on verification of the user-supplied analytic
gradient:
* user calls this subroutine before fitting begins
* LSFitFit() is called
End of changes. 10 change blocks. 1 line changed or deleted, 45 lines changed or added.

linalg.h
skipping to change at line 214
    A   -   source matrix, MxN submatrix is copied and transposed
    IA  -   submatrix offset (row index)
    JA  -   submatrix offset (column index)
    B   -   destination matrix, must be large enough to store result
    IB  -   submatrix offset (row index)
    JB  -   submatrix offset (column index)
*************************************************************************/
void rmatrixtranspose(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, real_2d_array &b, const ae_int_t ib, const ae_int_t jb);
/*************************************************************************
This code enforces symmetry of the matrix by copying the upper part to the
lower one (or vice versa).

INPUT PARAMETERS:
    A       -   matrix
    N       -   number of rows/columns
    IsUpper -   whether we want to copy upper triangle to lower one (True)
                or vice versa (False).
*************************************************************************/
void rmatrixenforcesymmetricity(const real_2d_array &a, const ae_int_t n, const bool isupper);
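
Usage of the declaration above; note that the in-place write through a
const reference follows this header's convention of const container
arguments (a sketch, with the string constructor assumed):

    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        // Only the upper triangle of a is assumed meaningful here.
        real_2d_array a = "[[1,2,3],[0,4,5],[0,0,6]]";
        rmatrixenforcesymmetricity(a, 3, true);  // copy upper triangle over lower
        // a is now [[1,2,3],[2,4,5],[3,5,6]]
        return 0;
    }
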
/*************************************************************************
Copy

Input parameters:
    M   -   number of rows
    N   -   number of columns
    A   -   source matrix, MxN submatrix is copied
    IA  -   submatrix offset (row index)
    JA  -   submatrix offset (column index)
    B   -   destination matrix, must be large enough to store result
    IB  -   submatrix offset (row index)
skipping to change at line 351
    if N=0, Y is filled by zeros.

  -- ALGLIB routine --
     28.01.2010
     Bochkanov Sergey
*************************************************************************/
void rmatrixmv(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t opa, const real_1d_array &x, const ae_int_t ix, real_1d_array &y, const ae_int_t iy);
/*************************************************************************
This subroutine calculates X*op(A^-1) where:
* X is MxN general matrix
* A is NxN upper/lower triangular/unitriangular matrix
* "op" may be identity transformation, transposition, conjugate transposition

Multiplication result replaces X.
Cache-oblivious algorithm is used.

INPUT PARAMETERS
    N       -   matrix size, N>=0
    M       -   matrix size, M>=0
    A       -   matrix, actual matrix is stored in A[I1:I1+N-1,J1:J1+N-1]
    I1      -   submatrix offset
    J1      -   submatrix offset
    IsUpper -   whether matrix is upper triangular
    IsUnit  -   whether matrix is unitriangular
    OpType  -   transformation type:
                * 0 - no transformation
                * 1 - transposition
                * 2 - conjugate transposition
    X       -   matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
    I2      -   submatrix offset
    J2      -   submatrix offset

  -- ALGLIB routine --
     15.12.2009
     Bochkanov Sergey
*************************************************************************/
void cmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
void smp_cmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
/*************************************************************************
This subroutine calculates op(A^-1)*X where:
* X is MxN general matrix
* A is MxM upper/lower triangular/unitriangular matrix
* "op" may be identity transformation, transposition, conjugate transposition

Multiplication result replaces X.
Cache-oblivious algorithm is used.

INPUT PARAMETERS
    N       -   matrix size, N>=0
    M       -   matrix size, M>=0
    A       -   matrix, actual matrix is stored in A[I1:I1+M-1,J1:J1+M-1]
    I1      -   submatrix offset
    J1      -   submatrix offset
    IsUpper -   whether matrix is upper triangular
    IsUnit  -   whether matrix is unitriangular
    OpType  -   transformation type:
                * 0 - no transformation
                * 1 - transposition
                * 2 - conjugate transposition
    X       -   matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
    I2      -   submatrix offset
    J2      -   submatrix offset

  -- ALGLIB routine --
     15.12.2009
     Bochkanov Sergey
*************************************************************************/
void cmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
void smp_cmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
/*************************************************************************
This subroutine calculates X*op(A^-1) where:
* X is MxN general matrix
* A is NxN upper/lower triangular/unitriangular matrix
* "op" may be identity transformation, transposition

Multiplication result replaces X.
Cache-oblivious algorithm is used.

INPUT PARAMETERS
    N       -   matrix size, N>=0
    M       -   matrix size, M>=0
    A       -   matrix, actual matrix is stored in A[I1:I1+N-1,J1:J1+N-1]
    I1      -   submatrix offset
    J1      -   submatrix offset
    IsUpper -   whether matrix is upper triangular
    IsUnit  -   whether matrix is unitriangular
    OpType  -   transformation type:
                * 0 - no transformation
                * 1 - transposition
    X       -   matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
    I2      -   submatrix offset
    J2      -   submatrix offset

  -- ALGLIB routine --
     15.12.2009
     Bochkanov Sergey
*************************************************************************/
void rmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
void smp_rmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
/*************************************************************************
This subroutine calculates op(A^-1)*X where:
* X is MxN general matrix
* A is MxM upper/lower triangular/unitriangular matrix
* "op" may be identity transformation, transposition

Multiplication result replaces X.
Cache-oblivious algorithm is used.

INPUT PARAMETERS
    N       -   matrix size, N>=0
    M       -   matrix size, M>=0
    A       -   matrix, actual matrix is stored in A[I1:I1+M-1,J1:J1+M-1]
    I1      -   submatrix offset
    J1      -   submatrix offset
    IsUpper -   whether matrix is upper triangular
    IsUnit  -   whether matrix is unitriangular
    OpType  -   transformation type:
                * 0 - no transformation
                * 1 - transposition
    X       -   matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
    I2      -   submatrix offset
    J2      -   submatrix offset

  -- ALGLIB routine --
     15.12.2009
     Bochkanov Sergey
*************************************************************************/
void rmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
void smp_rmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
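
A triangular solve sketch built on the rmatrixlefttrsm() declaration above:
X := op(A^-1)*X overwrites the right-hand side with the solution of A*X = B.

    #include "linalg.h"
    using namespace alglib;

    int main()
    {
        real_2d_array a = "[[2,0],[1,1]]";   // lower triangular, non-unit diagonal
        real_2d_array x = "[[2],[2]]";       // right-hand side B (2x1)
        // m=2 rows of X, n=1 column, zero offsets, isupper=false, isunit=false, optype=0
        rmatrixlefttrsm(2, 1, a, 0, 0, false, false, 0, x, 0, 0);
        // x is now [[1],[1]] since A*[1,1]^T = [2,2]^T
        return 0;
    }
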
/*************************************************************************
This subroutine calculates C=alpha*A*A^H+beta*C or C=alpha*A^H*A+beta*C
where:
* C is NxN Hermitian matrix given by its upper/lower triangle
* A is NxK matrix when A*A^H is calculated, KxN matrix otherwise

Additional info:
* cache-oblivious algorithm is used.
* multiplication result replaces C. If Beta=0, C elements are not used in
  calculations (not multiplied by zero - just not referenced)
* if Alpha=0, A is not used (not multiplied by zero - just not referenced)
* if both Beta and Alpha are zero, C is filled by zeros.

INPUT PARAMETERS
    N       -   matrix size, N>=0
    K       -   matrix size, K>=0
    Alpha   -   coefficient
    A       -   matrix
    IA      -   submatrix offset
    JA      -   submatrix offset
    OpTypeA -   multiplication type:
                * 0 - A*A^H is calculated
                * 2 - A^H*A is calculated
    Beta    -   coefficient
    C       -   matrix
    IC      -   submatrix offset
    JC      -   submatrix offset
    IsUpper -   whether C is upper triangular or lower triangular

  -- ALGLIB routine --
     16.12.2009
     Bochkanov Sergey
*************************************************************************/
void cmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
void smp_cmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
/*************************************************************************
This subroutine calculates C=alpha*A*A^T+beta*C or C=alpha*A^T*A+beta*C
where:
* C is NxN symmetric matrix given by its upper/lower triangle
* A is NxK matrix when A*A^T is calculated, KxN matrix otherwise

Additional info:
* cache-oblivious algorithm is used.
* multiplication result replaces C. If Beta=0, C elements are not used in
  calculations (not multiplied by zero - just not referenced)
* if Alpha=0, A is not used (not multiplied by zero - just not referenced)
* if both Beta and Alpha are zero, C is filled by zeros.

INPUT PARAMETERS
    N       -   matrix size, N>=0
    K       -   matrix size, K>=0
    Alpha   -   coefficient
    A       -   matrix
    IA      -   submatrix offset
    JA      -   submatrix offset
    OpTypeA -   multiplication type:
                * 0 - A*A^T is calculated
                * 2 - A^T*A is calculated
    Beta    -   coefficient
    C       -   matrix
    IC      -   submatrix offset
    JC      -   submatrix offset
    IsUpper -   whether C is upper triangular or lower triangular

  -- ALGLIB routine --
     16.12.2009
     Bochkanov Sergey
*************************************************************************/
void rmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
void smp_rmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
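A minimal calling sketch for the real-valued variant (values are hypothetical, assuming the declarations above): C = 1.0*A*A^T + 0.0*C with only the upper triangle of C written.

    // Minimal sketch: C = alpha*A*A^T + beta*C with OpTypeA=0, N=2, K=3.
    alglib::real_2d_array a = "[[1,2,3],[4,5,6]]";
    alglib::real_2d_array c = "[[0,0],[0,0]]";
    alglib::rmatrixsyrk(2, 3, 1.0, a, 0, 0, 0, 0.0, c, 0, 0, true);
    // Upper triangle of c now holds A*A^T = [[14,32],[.,77]]; with Beta=0
    // the old contents of C were never referenced, as documented above.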
/*************************************************************************
This subroutine calculates C = alpha*op1(A)*op2(B) + beta*C where:
* C is MxN general matrix
* op1(A) is MxK matrix
* op2(B) is KxN matrix
* "op" may be identity transformation, transposition, conjugate transposition

Additional info:
* cache-oblivious algorithm is used.
* multiplication result replaces C. If Beta=0, C elements are not used in
  calculations (not multiplied by zero - just not referenced)
* if Alpha=0, A is not used (not multiplied by zero - just not referenced)
* if both Beta and Alpha are zero, C is filled by zeros.

INPUT PARAMETERS
    M       -   matrix size, M>0
    N       -   matrix size, N>0
    K       -   matrix size, K>0
    Alpha   -   coefficient
    A       -   matrix
    IA      -   submatrix offset
    JA      -   submatrix offset
    OpTypeA -   transformation type:
                * 0 - no transformation
                * 1 - transposition
                * 2 - conjugate transposition
    B       -   matrix
    IB      -   submatrix offset
    JB      -   submatrix offset
    OpTypeB -   transformation type:
                * 0 - no transformation
                * 1 - transposition
                * 2 - conjugate transposition
    Beta    -   coefficient
    C       -   matrix
    IC      -   submatrix offset
    JC      -   submatrix offset

  -- ALGLIB routine --
     16.12.2009
     Bochkanov Sergey
*************************************************************************/
void cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc);
void smp_cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc);
/*************************************************************************
*************************************************************************/
void rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc);
void smp_rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc);
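The real variant follows the same convention as the complex one documented above; a minimal sketch (values are hypothetical):

    // Minimal sketch: C = 1.0*A*B + 0.0*C, no transpositions (OpType=0).
    alglib::real_2d_array a = "[[1,2],[3,4]]";
    alglib::real_2d_array b = "[[5,6],[7,8]]";
    alglib::real_2d_array c = "[[0,0],[0,0]]";
    alglib::rmatrixgemm(2, 2, 2, 1.0, a, 0, 0, 0, b, 0, 0, 0, 0.0, c, 0, 0);
    // c is now [[19,22],[43,50]]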
/*************************************************************************
QR decomposition of a rectangular matrix of size MxN
skipping to change at line 4024
    ae_state *_state);
void rmatrixtranspose(ae_int_t m,
    ae_int_t n,
    /* Real */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    /* Real */ ae_matrix* b,
    ae_int_t ib,
    ae_int_t jb,
    ae_state *_state);
void rmatrixenforcesymmetricity(/* Real */ ae_matrix* a,
    ae_int_t n,
    ae_bool isupper,
    ae_state *_state);
void cmatrixcopy(ae_int_t m,
    ae_int_t n,
    /* Complex */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    /* Complex */ ae_matrix* b,
    ae_int_t ib,
    ae_int_t jb,
    ae_state *_state);
void rmatrixcopy(ae_int_t m,
skipping to change at line 4100
    /* Complex */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Complex */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2,
    ae_state *_state);
void _pexec_cmatrixrighttrsm(ae_int_t m,
    ae_int_t n,
    /* Complex */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Complex */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2, ae_state *_state);
void cmatrixlefttrsm(ae_int_t m,
    ae_int_t n,
    /* Complex */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Complex */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2,
    ae_state *_state);
void _pexec_cmatrixlefttrsm(ae_int_t m,
    ae_int_t n,
    /* Complex */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Complex */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2, ae_state *_state);
void rmatrixrighttrsm(ae_int_t m,
    ae_int_t n,
    /* Real */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Real */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2,
    ae_state *_state);
void _pexec_rmatrixrighttrsm(ae_int_t m,
    ae_int_t n,
    /* Real */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Real */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2, ae_state *_state);
void rmatrixlefttrsm(ae_int_t m,
    ae_int_t n,
    /* Real */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Real */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2,
    ae_state *_state);
void _pexec_rmatrixlefttrsm(ae_int_t m,
    ae_int_t n,
    /* Real */ ae_matrix* a,
    ae_int_t i1,
    ae_int_t j1,
    ae_bool isupper,
    ae_bool isunit,
    ae_int_t optype,
    /* Real */ ae_matrix* x,
    ae_int_t i2,
    ae_int_t j2, ae_state *_state);
void cmatrixsyrk(ae_int_t n,
    ae_int_t k,
    double alpha,
    /* Complex */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    ae_int_t optypea,
    double beta,
    /* Complex */ ae_matrix* c,
    ae_int_t ic,
    ae_int_t jc,
    ae_bool isupper,
    ae_state *_state);
void _pexec_cmatrixsyrk(ae_int_t n,
    ae_int_t k,
    double alpha,
    /* Complex */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    ae_int_t optypea,
    double beta,
    /* Complex */ ae_matrix* c,
    ae_int_t ic,
    ae_int_t jc,
    ae_bool isupper, ae_state *_state);
void rmatrixsyrk(ae_int_t n,
    ae_int_t k,
    double alpha,
    /* Real */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    ae_int_t optypea,
    double beta,
    /* Real */ ae_matrix* c,
    ae_int_t ic,
    ae_int_t jc,
    ae_bool isupper,
    ae_state *_state);
void _pexec_rmatrixsyrk(ae_int_t n,
    ae_int_t k,
    double alpha,
    /* Real */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    ae_int_t optypea,
    double beta,
    /* Real */ ae_matrix* c,
    ae_int_t ic,
    ae_int_t jc,
    ae_bool isupper, ae_state *_state);
void cmatrixgemm(ae_int_t m,
    ae_int_t n,
    ae_int_t k,
    ae_complex alpha,
    /* Complex */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    ae_int_t optypea,
    /* Complex */ ae_matrix* b,
    ae_int_t ib,
    ae_int_t jb,
    ae_int_t optypeb,
    ae_complex beta,
    /* Complex */ ae_matrix* c,
    ae_int_t ic,
    ae_int_t jc,
    ae_state *_state);
void _pexec_cmatrixgemm(ae_int_t m,
    ae_int_t n,
    ae_int_t k,
    ae_complex alpha,
    /* Complex */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    ae_int_t optypea,
    /* Complex */ ae_matrix* b,
    ae_int_t ib,
    ae_int_t jb,
    ae_int_t optypeb,
    ae_complex beta,
    /* Complex */ ae_matrix* c,
    ae_int_t ic,
    ae_int_t jc, ae_state *_state);
void rmatrixgemm(ae_int_t m,
    ae_int_t n,
    ae_int_t k,
    double alpha,
    /* Real */ ae_matrix* a,
    ae_int_t ia,
    ae_int_t ja,
    ae_int_t optypea,
    /* Real */ ae_matrix* b,
    ae_int_t ib,

optimization.h

skipping to change at line 223
} mincgreport;
typedef struct
{
    ae_int_t nmain;
    ae_int_t nslack;
    double epsg;
    double epsf;
    double epsx;
    ae_int_t maxits;
    ae_bool xrep;
    ae_bool drep;
    double stpmax;
    double diffstep;
    sactiveset sas;
    ae_vector s;
    ae_int_t prectype;
    ae_vector diagh;
    ae_vector x;
    double f;
    ae_vector g;
    ae_bool needf;
    ae_bool needfg;
    ae_bool xupdated;
    ae_bool lsstart;
    ae_bool lbfgssearch;
    ae_bool boundedstep;
    double teststep;
    rcommstate rstate;
    ae_vector gc;
    ae_vector xn;
    ae_vector gn;
    ae_vector xp;
    ae_vector gp;
    double fc;
    double fn;
    double fp;
    ae_vector d;
    ae_matrix cleic;
    ae_int_t nec;
    ae_int_t nic;
    double lastgoodstep;
    double lastscaledgoodstep;
    double maxscaledgrad;
    ae_vector hasbndl;
    ae_vector hasbndu;
    ae_vector bndl;
    ae_vector bndu;
    ae_int_t repinneriterationscount;
    ae_int_t repouteriterationscount;
    ae_int_t repnfev;
    ae_int_t repvaridx;
    ae_int_t repterminationtype;
    double repdebugeqerr;
skipping to change at line 378
{
    ae_int_t iterationscount;
    ae_int_t nfev;
    ae_int_t varidx;
    ae_int_t terminationtype;
} minlbfgsreport;
typedef struct
{
    ae_int_t n;
    ae_int_t algokind;
    ae_int_t akind;
    convexquadraticmodel a;
    sparsematrix sparsea;
    ae_bool sparseaupper;
    double anorm;
    ae_vector b;
    ae_vector bndl;
    ae_vector bndu;
    ae_vector s;
    ae_vector havebndl;
    ae_vector havebndu;
    ae_vector xorigin;
    ae_vector startx;
    ae_bool havex;
    ae_matrix cleic;
    ae_int_t nec;
    ae_int_t nic;
    double bleicepsg;
    double bleicepsf;
    double bleicepsx;
    ae_int_t bleicmaxits;
    sactiveset sas;
    ae_vector gc;
    ae_vector xn;
    ae_vector pg;
    ae_vector workbndl;
    ae_vector workbndu;
    ae_matrix workcleic;
    ae_vector xs;
    ae_int_t repinneriterationscount;
    ae_int_t repouteriterationscount;
    ae_int_t repncholesky;
    ae_int_t repnmv;
    ae_int_t repterminationtype;
    double debugphase1flops;
    double debugphase2flops;
    double debugphase3flops;
    ae_vector tmp0;
    ae_vector tmp1;
    ae_vector tmpb;
    ae_vector rctmpg;
    ae_vector tmpi;
    normestimatorstate estimator;
    minbleicstate solver;
    minbleicreport solverrep;
} minqpstate;
typedef struct
{
    ae_int_t inneriterationscount;
    ae_int_t outeriterationscount;
    ae_int_t nmv;
    ae_int_t ncholesky;
    ae_int_t terminationtype;
} minqpreport;
typedef struct
skipping to change at line 573
    double betahs;
    double betady;
} minasastate;
typedef struct
{
    ae_int_t iterationscount;
    ae_int_t nfev;
    ae_int_t terminationtype;
    ae_int_t activeconstraints;
} minasareport;
typedef struct
{
    double debugflops;
} linfeassolver;
}
/////////////////////////////////////////////////////////////////////////
//
// THIS SECTION CONTAINS C++ INTERFACE
//
/////////////////////////////////////////////////////////////////////////
namespace alglib
{
skipping to change at line 845
    * NCholesky         number of Cholesky decompositions
    * NMV               number of matrix-vector products
                        (only products calculated as part of iterative
                        process are counted)
    * TerminationType   completion code (see below)

Completion codes:
* -5    inappropriate solver was used:
        * Cholesky solver for semidefinite or indefinite problems
        * Cholesky solver for problems with non-boundary constraints
* -4    BLEIC-QP algorithm found unconstrained direction
        of negative curvature (function is unbounded from
        below even under constraints), no meaningful
        minimum can be found.
* -3    inconsistent constraints (or, maybe, feasible point is
        too hard to find). If you are sure that constraints are feasible,
        try to restart the optimizer with a better initial approximation.
* -1    solver error
*  4    successful completion
*  5    MaxIts steps were taken
*  7    stopping conditions are too stringent,
        further improvement is impossible,
        X contains best point found so far.
*************************************************************************/
skipping to change at line 1704
                * s - scaling coefficients set by MinBLEICSetScale()
    EpsF    -   >=0
                The subroutine finishes its work if on k+1-th iteration
                the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
                is satisfied.
    EpsX    -   >=0
                The subroutine finishes its work if on k+1-th iteration
                the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
                * s - scaling coefficients set by MinBLEICSetScale()
    MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
                iterations is unlimited.

Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
to automatic stopping criterion selection.

NOTE: when SetCond() is called with non-zero MaxIts, the BLEIC solver may
perform slightly more than MaxIts iterations, i.e. MaxIts sets a
non-strict limit on the iteration count.

  -- ALGLIB --
     Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
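A one-line sketch of a typical setup (the tolerance is illustrative, and state is assumed to be a previously created minbleicstate): stop on a scaled step shorter than 1.0E-6, with all other criteria disabled.

    alglib::minbleicsetcond(state, 0.0, 0.0, 1.0E-6, 0);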
/*************************************************************************
This function sets scaling coefficients for BLEIC optimizer.

ALGLIB optimizers use scaling matrices to test stopping conditions (step
size and gradient are scaled before comparison with tolerances). Scale of
skipping to change at line 1931
    X       -   array[0..N-1], solution
    Rep     -   optimization report. You should check Rep.TerminationType
                in order to distinguish successful termination from
                unsuccessful one:
                * -7   gradient verification failed.
                       See MinBLEICSetGradientCheck() for more information.
                * -3   inconsistent constraints. Feasible point is
                       either nonexistent or too hard to find. Try to
                       restart the optimizer with a better initial
                       approximation.
                *  1   relative function improvement is no more than EpsF.
                *  2   scaled step is no more than EpsX.
                *  4   scaled gradient norm is no more than EpsG.
                *  5   MaxIts steps were taken
                More information about fields of this structure can be
                found in the comments on MinBLEICReport datatype.

  -- ALGLIB --
     Copyright 28.11.2010 by Bochkanov Sergey
*************************************************************************/
void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep);
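A minimal retrieval sketch (assuming state is a minbleicstate on which optimization has already finished):

    alglib::real_1d_array x;
    alglib::minbleicreport rep;
    alglib::minbleicresults(state, x, rep);
    bool ok = rep.terminationtype>0;   // negative codes are failures, see above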
/*************************************************************************
skipping to change at line 2518
INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    B       -   linear term, array[N].

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b);

/*************************************************************************
This function sets dense quadratic term for QP solver. By default,
quadratic term is zero.

SUPPORT BY ALGLIB QP ALGORITHMS:

Dense quadratic term can be handled by any of the QP algorithms supported
by ALGLIB QP Solver.

IMPORTANT:

This solver minimizes following function:
    f(x) = 0.5*x'*A*x + b'*x.
Note that quadratic term has 0.5 before it. So if you want to minimize
    f(x) = x^2 + x
you should rewrite your problem as follows:
    f(x) = 0.5*(2*x^2) + x
and your matrix A will be equal to [[2.0]], not to [[1.0]]

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    A       -   matrix, array[N,N]
skipping to change at line 2554
                * if not given, both lower and upper triangles must be
                  filled.

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper);
void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a);
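A worked sketch of the 0.5 convention (using the minqpcreate/minqpoptimize/minqpresults entry points declared elsewhere in this header): to minimize f(x) = x^2 + x you pass A=[[2.0]] and b=[1.0]; the minimum is at x=-0.5.

    alglib::real_2d_array a = "[[2.0]]";       // 0.5*x'*A*x = x^2
    alglib::real_1d_array b = "[1.0]";
    alglib::real_1d_array x;
    alglib::minqpstate state;
    alglib::minqpreport rep;
    alglib::minqpcreate(1, state);
    alglib::minqpsetlinearterm(state, b);
    alglib::minqpsetquadraticterm(state, a);   // 1x1, both triangles trivially filled
    alglib::minqpoptimize(state);
    alglib::minqpresults(state, x, rep);
    // on success rep.terminationtype>0 and x[0] is close to -0.5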
/*************************************************************************
This function sets sparse quadratic term for QP solver. By default,
quadratic term is zero.

SUPPORT BY ALGLIB QP ALGORITHMS:

Sparse quadratic term is supported only by BLEIC-based QP algorithm (one
which is activated by MinQPSetAlgoBLEIC function). Cholesky-based QP algo
won't be able to deal with sparse quadratic term and will terminate
abnormally.

IF YOU CALLED THIS FUNCTION, YOU MUST SWITCH TO BLEIC-BASED QP ALGORITHM
BEFORE CALLING MINQPOPTIMIZE() FUNCTION.

IMPORTANT:

This solver minimizes following function:
    f(x) = 0.5*x'*A*x + b'*x.
Note that quadratic term has 0.5 before it. So if you want to minimize
    f(x) = x^2 + x
you should rewrite your problem as follows:
    f(x) = 0.5*(2*x^2) + x
and your matrix A will be equal to [[2.0]], not to [[1.0]]

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    A       -   matrix, array[N,N]
    IsUpper -   (optional) storage type:
                * if True, symmetric matrix A is given by its upper
                  triangle, and the lower triangle isn't used
                * if False, symmetric matrix A is given by its lower
                  triangle, and the upper triangle isn't used
                * if not given, both lower and upper triangles must be
                  filled.

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper);
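A minimal sketch of the required workflow (sparsecreate/sparseset belong to ALGLIB's sparse matrix module, which is not part of this header; state is assumed to be a previously created minqpstate):

    alglib::sparsematrix a;
    alglib::sparsecreate(2, 2, a);
    alglib::sparseset(a, 0, 0, 2.0);             // fill upper triangle only
    alglib::sparseset(a, 1, 1, 2.0);
    alglib::minqpsetquadratictermsparse(state, a, true);
    alglib::real_1d_array s = "[1,1]";
    alglib::minqpsetscale(state, s);             // strongly advised for BLEIC
    alglib::minqpsetalgobleic(state, 0.0, 0.0, 1.0E-6, 0);  // mandatory switch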
/*************************************************************************
This function sets starting point for QP solver. It is useful to have
good initial approximation to the solution, because it will increase
speed of convergence and identification of active constraints.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    X       -   starting point, array[N].

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
skipping to change at line 2627
INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    XOrigin -   origin, array[N].

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin);

/*************************************************************************
This function sets scaling coefficients.

ALGLIB optimizers use scaling matrices to test stopping conditions (step
size and gradient are scaled before comparison with tolerances). Scale of
the I-th variable is a translation invariant measure of:
a) "how large" the variable is
b) how large the step should be to make significant changes in the function

BLEIC-based QP solver uses scale for two purposes:
* to evaluate stopping conditions
* for preconditioning of the underlying BLEIC solver

INPUT PARAMETERS:
    State   -   structure stores algorithm state
    S       -   array[N], non-zero scaling coefficients
                S[i] may be negative, sign doesn't matter.

  -- ALGLIB --
     Copyright 14.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetscale(const minqpstate &state, const real_1d_array &s);

/*************************************************************************
This function tells solver to use Cholesky-based algorithm. This algorithm
is active by default.

DESCRIPTION:

Cholesky-based algorithm can be used only for problems which:
* have dense quadratic term, set by MinQPSetQuadraticTerm(); sparse or
  structured problems are not supported.
* are strictly convex, i.e. quadratic term is symmetric positive definite;
  indefinite or semidefinite problems are not supported by this algorithm.

If anything of what is listed above is violated, you may use BLEIC-based
QP algorithm which can be activated by MinQPSetAlgoBLEIC().

BENEFITS AND DRAWBACKS:

This algorithm gives best precision amongst all QP solvers provided by
ALGLIB (Newton iterations have much higher precision than any other
optimization algorithm). This solver also gracefully handles problems
with a very large number of constraints.

Performance of the algorithm is good because internally it uses Level 3
Dense BLAS for its performance-critical parts.

On the other hand, the algorithm has O(N^3) complexity for unconstrained
problems and can be up to orders of magnitude slower on constrained
problems (these additional iterations are needed to identify active
constraints), so its running time depends on the number of constraints
active at the solution. Furthermore, this algorithm cannot solve problems
with sparse matrices or problems with semidefinite/indefinite matrices of
any kind (dense/sparse).

INPUT PARAMETERS:
    State   -   structure which stores algorithm state

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetalgocholesky(const minqpstate &state);
/*************************************************************************
This function tells solver to use BLEIC-based algorithm and sets stopping
criteria for the algorithm.

DESCRIPTION:

BLEIC-based QP algorithm can be used for any kind of QP problems:
* problems with both dense and sparse quadratic terms
* problems with positive definite, semidefinite, indefinite terms

BLEIC-based algorithm can solve even indefinite problems - as long as they
are bounded from below on the feasible set. Of course, global minimum is
found only for positive definite and semidefinite problems. As for
indefinite ones - only a local minimum is found.

BENEFITS AND DRAWBACKS:

This algorithm can be used to solve both convex and indefinite QP problems
and it can utilize sparsity of the quadratic term (algorithm calculates
matrix-vector products, which can be performed efficiently in case of a
sparse matrix).

Algorithm has iteration cost which (assuming fixed amount of non-boundary
linear constraints) linearly depends on problem size. Boundary constraints
do not significantly change iteration cost.

Thus, it outperforms Cholesky-based QP algorithm (CQP) on high-dimensional
sparse problems with moderate amount of constraints.

On the other hand, unlike CQP solver, this algorithm does NOT make use of
Level 3 Dense BLAS. Thus, its performance on dense problems is inferior to
that of CQP solver.

Its precision is also inferior to that of CQP. CQP performs Newton steps
which are known to achieve very good precision. In many cases Newton step
leads us exactly to the solution. BLEIC-QP performs LBFGS steps, which are
good at detecting neighborhood of the solution, but need many iterations
to find solution with 6 digits of precision.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    EpsG    -   >=0
                The subroutine finishes its work if the condition
                |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
                * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
                * g - gradient
                * s - scaling coefficients set by MinQPSetScale()
    EpsF    -   >=0
                The subroutine finishes its work if exploratory steepest
                descent step on k+1-th iteration satisfies following
                condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
    EpsX    -   >=0
                The subroutine finishes its work if exploratory steepest
                descent step on k+1-th iteration satisfies the condition
                |v|<=EpsX, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
                * s - scaling coefficients set by MinQPSetScale()
    MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
                iterations is unlimited.

Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
to automatic stopping criterion selection (presently it is small step
length, but it may change in the future versions of ALGLIB).

IT IS VERY IMPORTANT THAT YOU CALL MinQPSetScale() WHEN YOU USE THIS ALGO!

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
/*************************************************************************
This function sets boundary constraints for QP solver

Boundary constraints are inactive by default (after initial creation).
After being set, they are preserved until explicitly turned off with
another SetBC() call.

INPUT PARAMETERS:
    State   -   structure stores algorithm state
    BndL    -   lower bounds, array[N].
                If some (all) variables are unbounded, you may specify
skipping to change at line 2845
*************************************************************************/
void minqpoptimize(const minqpstate &state);

/*************************************************************************
QP solver results

INPUT PARAMETERS:
    State   -   algorithm state

OUTPUT PARAMETERS:
    X       -   array[0..N-1], solution.
                This array is allocated and initialized only when
                Rep.TerminationType parameter is positive (success).
    Rep     -   optimization report. You should check Rep.TerminationType,
                which contains completion code, and you may check other
                fields which contain additional information about algorithm
                functioning.

                Failure codes returned by algorithm are:
                * -5    inappropriate solver was used:
                        * Cholesky solver for (semi)indefinite problems
                        * Cholesky solver for problems with sparse matrix
                * -4    BLEIC-QP algorithm found unconstrained direction
                        of negative curvature (function is unbounded from
                        below even under constraints), no meaningful
                        minimum can be found.
                * -3    inconsistent constraints (or maybe feasible point
                        is too hard to find). If you are sure that
                        constraints are feasible, try to restart the
                        optimizer with a better initial approximation.

                Completion codes specific for Cholesky algorithm:
                *  4    successful completion

                Completion codes specific for BLEIC-based algorithm:
                *  1    relative function improvement is no more than EpsF.
                *  2    scaled step is no more than EpsX.
                *  4    scaled gradient norm is no more than EpsG.
                *  5    MaxIts steps were taken

  -- ALGLIB --
     Copyright 11.01.2011 by Bochkanov Sergey
*************************************************************************/
void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep);
/*************************************************************************
QP results

Buffered implementation of MinQPResults() which uses pre-allocated buffer
to store X[]. If buffer size is too small, it resizes buffer. It is
skipping to change at line 3834
void sasconstraineddirection(sactiveset* state,
    /* Real */ ae_vector* d,
    ae_state *_state);
void sasconstraineddirectionprec(sactiveset* state,
    /* Real */ ae_vector* d,
    ae_state *_state);
void sascorrection(sactiveset* state,
    /* Real */ ae_vector* x,
    double* penalty,
    ae_state *_state);
double sasactivelcpenalty1(sactiveset* state,
    /* Real */ ae_vector* x,
    ae_state *_state);
double sasscaledconstrainednorm(sactiveset* state,
    /* Real */ ae_vector* d,
    ae_state *_state);
void sasstopoptimization(sactiveset* state, ae_state *_state);
void sasreactivateconstraints(sactiveset* state,
    /* Real */ ae_vector* gc,
    ae_state *_state);
void sasreactivateconstraintsprec(sactiveset* state,
    /* Real */ ae_vector* gc,
    ae_state *_state);
skipping to change at line 3950
    /* Real */ ae_vector* s,
    ae_state *_state);
void minbleicsetprecdefault(minbleicstate* state, ae_state *_state);
void minbleicsetprecdiag(minbleicstate* state,
    /* Real */ ae_vector* d,
    ae_state *_state);
void minbleicsetprecscale(minbleicstate* state, ae_state *_state);
void minbleicsetxrep(minbleicstate* state,
    ae_bool needxrep,
    ae_state *_state);
void minbleicsetdrep(minbleicstate* state,
    ae_bool needdrep,
    ae_state *_state);
void minbleicsetstpmax(minbleicstate* state,
    double stpmax,
    ae_state *_state);
ae_bool minbleiciteration(minbleicstate* state, ae_state *_state);
void minbleicresults(minbleicstate* state,
    /* Real */ ae_vector* x,
    minbleicreport* rep,
    ae_state *_state);
void minbleicresultsbuf(minbleicstate* state,
    /* Real */ ae_vector* x,
    minbleicreport* rep,
    ae_state *_state);
void minbleicrestartfrom(minbleicstate* state,
    /* Real */ ae_vector* x,
    ae_state *_state);
void minbleicemergencytermination(minbleicstate* state, ae_state *_state);
void minbleicsetgradientcheck(minbleicstate* state,
    double teststep,
    ae_state *_state);
ae_bool _minbleicstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minbleicstate_clear(void* _p);
void _minbleicstate_destroy(void* _p);
ae_bool _minbleicreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minbleicreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minbleicreport_clear(void* _p);
skipping to change at line 4053
void _minlbfgsreport_clear(void* _p);
void _minlbfgsreport_destroy(void* _p);
void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state);
void minqpsetlinearterm(minqpstate* state,
    /* Real */ ae_vector* b,
    ae_state *_state);
void minqpsetquadraticterm(minqpstate* state,
    /* Real */ ae_matrix* a,
    ae_bool isupper,
    ae_state *_state);
void minqpsetquadratictermsparse(minqpstate* state,
    sparsematrix* a,
    ae_bool isupper,
    ae_state *_state);
void minqpsetstartingpoint(minqpstate* state,
    /* Real */ ae_vector* x,
    ae_state *_state);
void minqpsetorigin(minqpstate* state,
    /* Real */ ae_vector* xorigin,
    ae_state *_state);
void minqpsetscale(minqpstate* state,
    /* Real */ ae_vector* s,
    ae_state *_state);
void minqpsetalgocholesky(minqpstate* state, ae_state *_state);
void minqpsetalgobleic(minqpstate* state,
    double epsg,
    double epsf,
    double epsx,
    ae_int_t maxits,
    ae_state *_state);
void minqpsetbc(minqpstate* state,
    /* Real */ ae_vector* bndl,
    /* Real */ ae_vector* bndu,
    ae_state *_state);
void minqpsetlc(minqpstate* state,
    /* Real */ ae_matrix* c,
    /* Integer */ ae_vector* ct,
    ae_int_t k,
    ae_state *_state);
void minqpoptimize(minqpstate* state, ae_state *_state);
skipping to change at line 4238
    /* Real */ ae_vector* bndu,
    ae_state *_state);
ae_bool _minasastate_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minasastate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minasastate_clear(void* _p);
void _minasastate_destroy(void* _p);
ae_bool _minasareport_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _minasareport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minasareport_clear(void* _p);
void _minasareport_destroy(void* _p);
ae_bool _linfeassolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
ae_bool _linfeassolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _linfeassolver_clear(void* _p);
void _linfeassolver_destroy(void* _p);
}
#endif

solvers.h

skipping to change at line 1053
     Copyright 27.01.2010 by Bochkanov Sergey
*************************************************************************/
void hpdmatrixcholeskysolve(const complex_2d_array &cha, const ae_int_t n, const bool isupper, const complex_1d_array &b, ae_int_t &info, densesolverreport &rep, complex_1d_array &x);

/*************************************************************************
Dense solver.

This subroutine finds solution of the linear system A*X=B with non-square,
possibly degenerate A. System is solved in the least squares sense, and
general least squares solution X = X0 + CX*y which minimizes |A*X-B| is
returned. If A is non-degenerate, solution in the usual sense is returned.

Algorithm features:
* automatic detection (and correct handling!) of degenerate cases
* iterative refinement
* O(N^3) complexity

INPUT PARAMETERS
    A       -   array[0..NRows-1,0..NCols-1], system matrix
    NRows   -   vertical size of A
    NCols   -   horizontal size of A
    B       -   array[0..NCols-1], right part
    Threshold-  a number in [0,1]. Singular values beyond Threshold are
                considered zero. Set it to 0.0, if you don't understand
                what it means, so the solver will choose good value on its
                own.

OUTPUT PARAMETERS
    Info    -   return code:
                * -4    SVD subroutine failed
                * -1    if NRows<=0 or NCols<=0 or Threshold<0 was passed
                *  1    if task is solved
    Rep     -   solver report, see below for more info
    X       -   array[0..N-1,0..M-1], it contains:
                * solution of A*X=B (even for singular A)
                * zeros, if SVD subroutine failed

SOLVER REPORT

Subroutine sets following fields of the Rep structure:
* R2        reciprocal of condition number: 1/cond(A), 2-norm.
* N         = NCols
* K         dim(Null(A))
* CX        array[0..N-1,0..K-1], kernel of A.
            Columns of CX store such vectors that A*CX[i]=0.
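As a sketch of how this solver is typically called (assuming the rmatrixsolvels() entry point that this comment documents, together with the densesolverlsreport type; the values are hypothetical):

    alglib::real_2d_array a = "[[1,0],[0,1],[1,1]]";  // 3x2, overdetermined
    alglib::real_1d_array b = "[1,1,1]";
    alglib::real_1d_array x;
    alglib::ae_int_t info;
    alglib::densesolverlsreport rep;
    alglib::rmatrixsolvels(a, 3, 2, b, 0.0, info, rep, x);
    // info=1 on success; x = [2/3, 2/3] minimizes |A*x-b| in the 2-norm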

statistics.h

skipping to change at line 262
  -- ALGLIB --
     Copyright 09.04.2007 by Bochkanov Sergey
*************************************************************************/
double spearmancorr2(const real_1d_array &x, const real_1d_array &y, const ae_int_t n);
double spearmancorr2(const real_1d_array &x, const real_1d_array &y);

/*************************************************************************
Covariance matrix
SMP EDITION OF ALGLIB:

  ! This function can utilize multicore capabilities of your system. In
  ! order to do this you have to call version with "smp_" prefix, which
  ! indicates that multicore code will be used.
  !
  ! This note is given for users of SMP edition; if you use GPL edition,
  ! or commercial edition of ALGLIB without SMP support, you still will
  ! be able to call smp-version of this function, but all computations
  ! will be done serially.
  !
  ! We recommend you to carefully read ALGLIB Reference Manual, section
  ! called 'SMP support', before using parallel version of this function.
  !
  ! You should remember that starting/stopping a worker thread always has
  ! non-zero cost. Although multicore version is pretty efficient on
  ! large problems, we do not recommend you to use it on small problems -
  ! with covariance matrices smaller than 128*128.
INPUT PARAMETERS:
    X       -   array[N,M], sample matrix:
                * J-th column corresponds to J-th variable
                * I-th row corresponds to I-th observation
    N       -   N>=0, number of observations:
                * if given, only leading N rows of X are used
                * if not given, automatically determined from input size
    M       -   M>0, number of variables:
                * if given, only leading M columns of X are used
                * if not given, automatically determined from input size

OUTPUT PARAMETERS:
    C       -   array[M,M], covariance matrix (zero if N=0 or N=1)

  -- ALGLIB --
     Copyright 28.10.2010 by Bochkanov Sergey
*************************************************************************/
void covm(const real_2d_array &x, const ae_int_t n, const ae_int_t m, real_2d_array &c);
void smp_covm(const real_2d_array &x, const ae_int_t n, const ae_int_t m, real_2d_array &c);
void covm(const real_2d_array &x, real_2d_array &c);
void smp_covm(const real_2d_array &x, real_2d_array &c);
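A minimal sketch (assuming sample covariance with N-1 normalization, which is what the zero-for-N=1 note above implies):

    alglib::real_2d_array x = "[[1,2],[2,4],[3,6]]";  // N=3 observations, M=2
    alglib::real_2d_array c;
    alglib::covm(x, c);        // sizes inferred from x; c is 2x2
    // here c = [[1,2],[2,4]]: the second variable is exactly 2x the first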
/*************************************************************************
Pearson product-moment correlation matrix
SMP EDITION OF ALGLIB:

  ! This function can utilize multicore capabilities of your system. In
  ! order to do this you have to call version with "smp_" prefix, which
  ! indicates that multicore code will be used.
  !
  ! This note is given for users of SMP edition; if you use GPL edition,
  ! or commercial edition of ALGLIB without SMP support, you still will
  ! be able to call smp-version of this function, but all computations
  ! will be done serially.
  !
  ! We recommend you to carefully read ALGLIB Reference Manual, section
  ! called 'SMP support', before using parallel version of this function.
  !
  ! You should remember that starting/stopping a worker thread always has
  ! non-zero cost. Although multicore version is pretty efficient on
  ! large problems, we do not recommend you to use it on small problems -
  ! with correlation matrices smaller than 128*128.
INPUT PARAMETERS:
    X   -   array[N,M], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    N   -   N>=0, number of observations:
            * if given, only leading N rows of X are used
            * if not given, automatically determined from input size
    M   -   M>0, number of variables:
            * if given, only leading M columns of X are used
            * if not given, automatically determined from input size

OUTPUT PARAMETERS:
    C   -   array[M,M], correlation matrix (zero if N=0 or N=1)

  -- ALGLIB --
     Copyright 28.10.2010 by Bochkanov Sergey
*************************************************************************/
void pearsoncorrm(const real_2d_array &x, const ae_int_t n, const ae_int_t m, real_2d_array &c);
void smp_pearsoncorrm(const real_2d_array &x, const ae_int_t n, const ae_int_t m, real_2d_array &c);
void pearsoncorrm(const real_2d_array &x, real_2d_array &c);
void smp_pearsoncorrm(const real_2d_array &x, real_2d_array &c);
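
/*************************************************************************
EXAMPLE (editor's sketch, not part of the original header). Same calling
pattern as covm() above; the diagonal of the result is 1 by construction.
Assumes ALGLIB's statistics.h.

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header

    int main()
    {
        alglib::real_2d_array x = "[[1,2.1],[2,3.9],[3,6.2],[4,7.8]]";
        alglib::real_2d_array c;

        alglib::pearsoncorrm(x, c);   // c[i][i]=1, |c[i][j]|<=1
        printf("%s\n", c.tostring(3).c_str());
        return 0;
    }
*************************************************************************/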

/*************************************************************************
Spearman's rank correlation matrix

SMP EDITION OF ALGLIB:

! This function can utilize the multicore capabilities of your system. To
! do so, call the version with the "smp_" prefix, which indicates that
! multicore code will be used.
!
! This note is given for users of the SMP edition; if you use the GPL
! edition, or a commercial edition of ALGLIB without SMP support, you can
! still call the smp-version of this function, but all computations will
! be done serially.
!
! We recommend that you carefully read the section called 'SMP support'
! in the ALGLIB Reference Manual before using the parallel version of
! this function.
!
! Remember that starting/stopping worker threads always has a non-zero
! cost. Although the multicore version is quite efficient on large
! problems, we do not recommend using it on small problems - on
! correlation matrices smaller than 128*128.

INPUT PARAMETERS:
    X   -   array[N,M], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    N   -   N>=0, number of observations:
            * if given, only leading N rows of X are used
            * if not given, automatically determined from input size
    M   -   M>0, number of variables:
            * if given, only leading M columns of X are used
            * if not given, automatically determined from input size

OUTPUT PARAMETERS:
    C   -   array[M,M], correlation matrix (zero if N=0 or N=1)

  -- ALGLIB --
     Copyright 28.10.2010 by Bochkanov Sergey
*************************************************************************/
void spearmancorrm(const real_2d_array &x, const ae_int_t n, const ae_int_t m, real_2d_array &c);
void smp_spearmancorrm(const real_2d_array &x, const ae_int_t n, const ae_int_t m, real_2d_array &c);
void spearmancorrm(const real_2d_array &x, real_2d_array &c);
void smp_spearmancorrm(const real_2d_array &x, real_2d_array &c);
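
/*************************************************************************
EXAMPLE (editor's sketch, not part of the original header). On data where
one variable is a monotone but nonlinear function of another, Spearman's
rank correlation is exactly 1 while Pearson's is below 1. Assumes
ALGLIB's statistics.h.

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header

    int main()
    {
        // column 2 is the cube of column 1: monotone, nonlinear
        alglib::real_2d_array x = "[[1,1],[2,8],[3,27],[4,64],[5,125]]";
        alglib::real_2d_array cp, cs;

        alglib::pearsoncorrm(x, cp);   // off-diagonal entries < 1
        alglib::spearmancorrm(x, cs);  // off-diagonal entries = 1
        printf("pearson:\n%s\nspearman:\n%s\n",
               cp.tostring(3).c_str(), cs.tostring(3).c_str());
        return 0;
    }
*************************************************************************/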

/*************************************************************************
Cross-covariance matrix

SMP EDITION OF ALGLIB:

! This function can utilize the multicore capabilities of your system. To
! do so, call the version with the "smp_" prefix, which indicates that
! multicore code will be used.
!
! This note is given for users of the SMP edition; if you use the GPL
! edition, or a commercial edition of ALGLIB without SMP support, you can
! still call the smp-version of this function, but all computations will
! be done serially.
!
! We recommend that you carefully read the section called 'SMP support'
! in the ALGLIB Reference Manual before using the parallel version of
! this function.
!
! Remember that starting/stopping worker threads always has a non-zero
! cost. Although the multicore version is quite efficient on large
! problems, we do not recommend using it on small problems - on
! covariance matrices smaller than 128*128.

INPUT PARAMETERS:
    X   -   array[N,M1], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    Y   -   array[N,M2], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    N   -   N>=0, number of observations:
            * if given, only leading N rows of X/Y are used
            * if not given, automatically determined from input sizes
    M1  -   M1>0, number of variables in X:
            * if given, only leading M1 columns of X are used
            * if not given, automatically determined from input size
    M2  -   M2>0, number of variables in Y:
            * if given, only leading M2 columns of Y are used
            * if not given, automatically determined from input size

OUTPUT PARAMETERS:
    C   -   array[M1,M2], cross-covariance matrix (zero if N=0 or N=1)

  -- ALGLIB --
     Copyright 28.10.2010 by Bochkanov Sergey
*************************************************************************/
void covm2(const real_2d_array &x, const real_2d_array &y, const ae_int_t n, const ae_int_t m1, const ae_int_t m2, real_2d_array &c);
void smp_covm2(const real_2d_array &x, const real_2d_array &y, const ae_int_t n, const ae_int_t m1, const ae_int_t m2, real_2d_array &c);
void covm2(const real_2d_array &x, const real_2d_array &y, real_2d_array &c);
void smp_covm2(const real_2d_array &x, const real_2d_array &y, real_2d_array &c);
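
/*************************************************************************
EXAMPLE (editor's sketch, not part of the original header). Cross-
covariance between two samples observed over the same N observations; the
result has one row per X-variable and one column per Y-variable. Assumes
ALGLIB's statistics.h.

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header

    int main()
    {
        // X: 4 observations of 2 variables; Y: 4 observations of 3
        alglib::real_2d_array x = "[[1,5],[2,4],[3,6],[4,7]]";
        alglib::real_2d_array y = "[[0,1,2],[1,1,3],[0,2,5],[1,2,6]]";
        alglib::real_2d_array c;

        alglib::covm2(x, y, c);   // c is 2x3: c[i][j]=cov(x_i, y_j)
        printf("%s\n", c.tostring(4).c_str());
        return 0;
    }
*************************************************************************/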

/*************************************************************************
Pearson product-moment cross-correlation matrix

SMP EDITION OF ALGLIB:

! This function can utilize the multicore capabilities of your system. To
! do so, call the version with the "smp_" prefix, which indicates that
! multicore code will be used.
!
! This note is given for users of the SMP edition; if you use the GPL
! edition, or a commercial edition of ALGLIB without SMP support, you can
! still call the smp-version of this function, but all computations will
! be done serially.
!
! We recommend that you carefully read the section called 'SMP support'
! in the ALGLIB Reference Manual before using the parallel version of
! this function.
!
! Remember that starting/stopping worker threads always has a non-zero
! cost. Although the multicore version is quite efficient on large
! problems, we do not recommend using it on small problems - on
! correlation matrices smaller than 128*128.

INPUT PARAMETERS:
    X   -   array[N,M1], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    Y   -   array[N,M2], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    N   -   N>=0, number of observations:
            * if given, only leading N rows of X/Y are used
            * if not given, automatically determined from input sizes
    M1  -   M1>0, number of variables in X:
            * if given, only leading M1 columns of X are used
            * if not given, automatically determined from input size
    M2  -   M2>0, number of variables in Y:
            * if given, only leading M2 columns of Y are used
            * if not given, automatically determined from input size

OUTPUT PARAMETERS:
    C   -   array[M1,M2], cross-correlation matrix (zero if N=0 or N=1)

  -- ALGLIB --
     Copyright 28.10.2010 by Bochkanov Sergey
*************************************************************************/
void pearsoncorrm2(const real_2d_array &x, const real_2d_array &y, const ae_int_t n, const ae_int_t m1, const ae_int_t m2, real_2d_array &c);
void smp_pearsoncorrm2(const real_2d_array &x, const real_2d_array &y, const ae_int_t n, const ae_int_t m1, const ae_int_t m2, real_2d_array &c);
void pearsoncorrm2(const real_2d_array &x, const real_2d_array &y, real_2d_array &c);
void smp_pearsoncorrm2(const real_2d_array &x, const real_2d_array &y, real_2d_array &c);

/*************************************************************************
Spearman's rank cross-correlation matrix

SMP EDITION OF ALGLIB:

! This function can utilize the multicore capabilities of your system. To
! do so, call the version with the "smp_" prefix, which indicates that
! multicore code will be used.
!
! This note is given for users of the SMP edition; if you use the GPL
! edition, or a commercial edition of ALGLIB without SMP support, you can
! still call the smp-version of this function, but all computations will
! be done serially.
!
! We recommend that you carefully read the section called 'SMP support'
! in the ALGLIB Reference Manual before using the parallel version of
! this function.
!
! Remember that starting/stopping worker threads always has a non-zero
! cost. Although the multicore version is quite efficient on large
! problems, we do not recommend using it on small problems - on
! correlation matrices smaller than 128*128.

INPUT PARAMETERS:
    X   -   array[N,M1], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    Y   -   array[N,M2], sample matrix:
            * J-th column corresponds to J-th variable
            * I-th row corresponds to I-th observation
    N   -   N>=0, number of observations:
            * if given, only leading N rows of X/Y are used
            * if not given, automatically determined from input sizes
    M1  -   M1>0, number of variables in X:
            * if given, only leading M1 columns of X are used
            * if not given, automatically determined from input size
    M2  -   M2>0, number of variables in Y:
            * if given, only leading M2 columns of Y are used
            * if not given, automatically determined from input size

OUTPUT PARAMETERS:
    C   -   array[M1,M2], cross-correlation matrix (zero if N=0 or N=1)

  -- ALGLIB --
     Copyright 28.10.2010 by Bochkanov Sergey
*************************************************************************/
void spearmancorrm2(const real_2d_array &x, const real_2d_array &y, const ae_int_t n, const ae_int_t m1, const ae_int_t m2, real_2d_array &c);
void smp_spearmancorrm2(const real_2d_array &x, const real_2d_array &y, const ae_int_t n, const ae_int_t m1, const ae_int_t m2, real_2d_array &c);
void spearmancorrm2(const real_2d_array &x, const real_2d_array &y, real_2d_array &c);
void smp_spearmancorrm2(const real_2d_array &x, const real_2d_array &y, real_2d_array &c);

/*************************************************************************
This function replaces the data in each row of XY by their ranks:
* rows are processed independently of each other
* tied data are handled correctly (tied values receive averaged ranks)
* within each row, ranking starts from 0 and ends at NFeatures-1
*************************************************************************/
void rankdata(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures);
void smp_rankdata(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures);
void rankdata(real_2d_array &xy);
void smp_rankdata(real_2d_array &xy);

/*************************************************************************
Same as RankData(), but ranks are centered: within each row they are
shifted so that they sum to zero.
*************************************************************************/
void rankdatacentered(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures);
void smp_rankdatacentered(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures);
void rankdatacentered(real_2d_array &xy);
void smp_rankdatacentered(real_2d_array &xy);
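
/*************************************************************************
EXAMPLE (editor's sketch, not part of the original header). In-place
ranking of each row, assuming the 0..NFeatures-1 ranking convention
described above. Assumes ALGLIB's statistics.h.

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header

    int main()
    {
        // each row is ranked independently of the others
        alglib::real_2d_array xy = "[[30,10,20],[5,6,4]]";

        alglib::rankdata(xy);   // xy becomes [[2,0,1],[1,2,0]]
        printf("%s\n", xy.tostring(1).c_str());
        return 0;
    }
*************************************************************************/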

/*************************************************************************
Obsolete function, we recommend using PearsonCorr2().

  -- ALGLIB --
     Copyright 09.04.2007 by Bochkanov Sergey
*************************************************************************/
double pearsoncorrelation(const real_1d_array &x, const real_1d_array &y, const ae_int_t n);

/*************************************************************************
One-sample t-test

This test checks three hypotheses about the mean of the given sample. The
following tests are performed:
    * two-tailed test (null hypothesis - the mean is equal to the given
      value)
    * left-tailed test (null hypothesis - the mean is greater than or
      equal to the given value)
    * right-tailed test (null hypothesis - the mean is less than or equal
      to the given value).

The test is based on the assumption that the given sample has a normal
distribution with unknown variance. If the distribution differs sharply
from normal, the test will work incorrectly.

INPUT PARAMETERS:
    X       -   sample. Array whose index goes from 0 to N-1.
    N       -   size of sample, N>=0.
    Mean    -   assumed value of the mean.

OUTPUT PARAMETERS:
    BothTails   -   p-value for two-tailed test.
                    If BothTails is less than the given significance level
                    the null hypothesis is rejected.
    LeftTail    -   p-value for left-tailed test.
                    If LeftTail is less than the given significance level,
                    the null hypothesis is rejected.
    RightTail   -   p-value for right-tailed test.
                    If RightTail is less than the given significance level
                    the null hypothesis is rejected.

NOTE: this function correctly handles degenerate cases:
    * when N=0, all p-values are set to 1.0
    * when the variance of X[] is exactly zero, p-values are set to 1.0
      or 0.0, depending on the difference between the sample mean and the
      mean being tested.

  -- ALGLIB --
     Copyright 08.09.2006 by Bochkanov Sergey
*************************************************************************/
void studentttest1(const real_1d_array &x, const ae_int_t n, const double mean, double &bothtails, double &lefttail, double &righttail);
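
/*************************************************************************
EXAMPLE (editor's sketch, not part of the original header). Testing
whether a sample is consistent with mean 1.0; a null hypothesis is
rejected at the 5% level only if the corresponding p-value is below 0.05.
Assumes ALGLIB's statistics.h.

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header

    int main()
    {
        alglib::real_1d_array x = "[0.9,1.1,1.0,1.3,0.8,1.2]";
        double bothtails, lefttail, righttail;

        // H0 of the two-tailed test: mean(x) == 1.0
        alglib::studentttest1(x, x.length(), 1.0,
                              bothtails, lefttail, righttail);
        printf("two-tailed p=%.3f  left p=%.3f  right p=%.3f\n",
               bothtails, lefttail, righttail);
        return 0;
    }
*************************************************************************/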

/*************************************************************************
Two-sample pooled test

This test checks three hypotheses about the means of the given samples.
The following tests are performed:
    * two-tailed test (null hypothesis - the means are equal)
    * left-tailed test (null hypothesis - the mean of the first sample is
      greater than or equal to the mean of the second sample)
    * right-tailed test (null hypothesis - the mean of the first sample is
      less than or equal to the mean of the second sample).

The test is based on the following assumptions:
    * the given samples have normal distributions
    * the variances are equal
    * the samples are independent.

Input parameters:
    X       -   sample 1. Array whose index goes from 0 to N-1.
    N       -   size of sample 1.
    Y       -   sample 2. Array whose index goes from 0 to M-1.
    M       -   size of sample 2.

Output parameters:
    BothTails   -   p-value for two-tailed test.
                    If BothTails is less than the given significance level
                    the null hypothesis is rejected.
    LeftTail    -   p-value for left-tailed test.
                    If LeftTail is less than the given significance level,
                    the null hypothesis is rejected.
    RightTail   -   p-value for right-tailed test.
                    If RightTail is less than the given significance level
                    the null hypothesis is rejected.

NOTE: this function correctly handles degenerate cases:
    * when N=0 or M=0, all p-values are set to 1.0
    * when both samples have exactly zero variance, p-values are set to
      1.0 or 0.0, depending on the difference between the means.

  -- ALGLIB --
     Copyright 18.09.2006 by Bochkanov Sergey
*************************************************************************/
void studentttest2(const real_1d_array &x, const ae_int_t n, const real_1d_array &y, const ae_int_t m, double &bothtails, double &lefttail, double &righttail);
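
/*************************************************************************
EXAMPLE (editor's sketch, not part of the original header). Comparing the
means of two independent samples under the pooled (equal-variance)
assumption. Assumes ALGLIB's statistics.h.

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header

    int main()
    {
        alglib::real_1d_array x = "[4.2,4.5,4.1,4.4,4.3]";
        alglib::real_1d_array y = "[4.6,4.8,4.7,4.9,4.5,4.7]";
        double bothtails, lefttail, righttail;

        alglib::studentttest2(x, x.length(), y, y.length(),
                              bothtails, lefttail, righttail);
        printf("p(two-tailed)=%.4f\n", bothtails);
        return 0;
    }
*************************************************************************/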

/*************************************************************************
Two-sample unpooled test

This test checks three hypotheses about the means of the given samples.
The following tests are performed:
    * two-tailed test (null hypothesis - the means are equal)
    * left-tailed test (null hypothesis - the mean of the first sample is
      greater than or equal to the mean of the second sample)
    * right-tailed test (null hypothesis - the mean of the first sample is
      less than or equal to the mean of the second sample).

The test is based on the following assumptions:
    * the given samples have normal distributions
    * the samples are independent.
Equality of variances is NOT required.

Input parameters:
    X       -   sample 1. Array whose index goes from 0 to N-1.
    N       -   size of sample 1.
    Y       -   sample 2. Array whose index goes from 0 to M-1.
    M       -   size of sample 2.

Output parameters:
    BothTails   -   p-value for two-tailed test.
                    If BothTails is less than the given significance level
                    the null hypothesis is rejected.
    LeftTail    -   p-value for left-tailed test.
                    If LeftTail is less than the given significance level,
                    the null hypothesis is rejected.
    RightTail   -   p-value for right-tailed test.
                    If RightTail is less than the given significance level
                    the null hypothesis is rejected.

NOTE: this function correctly handles degenerate cases:
    * when N=0 or M=0, all p-values are set to 1.0
    * when both samples have zero variance, p-values are set to 1.0 or
      0.0, depending on the difference between the means
    * when only one sample has zero variance, the test reduces to the
      1-sample version.

  -- ALGLIB --
     Copyright 18.09.2006 by Bochkanov Sergey
*************************************************************************/
void unequalvariancettest(const real_1d_array &x, const ae_int_t n, const real_1d_array &y, const ae_int_t m, double &bothtails, double &lefttail, double &righttail);
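
/*************************************************************************
EXAMPLE (editor's sketch, not part of the original header). When the two
samples have clearly different spreads, the unpooled (Welch-type) test is
the appropriate choice. Assumes ALGLIB's statistics.h.

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header

    int main()
    {
        // the second sample is noticeably more dispersed, so the
        // equal-variance assumption of the pooled test is doubtful
        alglib::real_1d_array x = "[10.1,10.0,10.2,9.9,10.0]";
        alglib::real_1d_array y = "[9.0,11.5,10.8,8.7,12.1,9.4]";
        double bothtails, lefttail, righttail;

        alglib::unequalvariancettest(x, x.length(), y, y.length(),
                                     bothtails, lefttail, righttail);
        printf("p(two-tailed)=%.4f\n", bothtails);
        return 0;
    }
*************************************************************************/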

/*************************************************************************
Two-sample F-test

This test checks three hypotheses about the dispersions of the given
samples. The following tests are performed:
    * two-tailed test (null hypothesis - the dispersions are equal)
    * left-tailed test (null hypothesis - the dispersion of the first
      sample is greater than or equal to the dispersion of the second
      sample)
    * right-tailed test (null hypothesis - the dispersion of the first
      sample is less than or equal to the dispersion of the second
      sample).

The test is based on the following assumptions:
    * the given samples have normal distributions
    * the samples are independent.

Input parameters:
    X       -   sample 1. Array whose index goes from 0 to N-1.
    N       -   size of sample 1.
    Y       -   sample 2. Array whose index goes from 0 to M-1.
    M       -   size of sample 2.

Output parameters:
    BothTails   -   p-value for two-tailed test.
                    If BothTails is less than the given significance level
                    the null hypothesis is rejected.
    LeftTail    -   p-value for left-tailed test.
                    If LeftTail is less than the given significance level,
                    the null hypothesis is rejected.
    RightTail   -   p-value for right-tailed test.
                    If RightTail is less than the given significance level
                    the null hypothesis is rejected.

  -- ALGLIB --
     Copyright 19.09.2006 by Bochkanov Sergey
*************************************************************************/
void ftest(const real_1d_array &x, const ae_int_t n, const real_1d_array &y, const ae_int_t m, double &bothtails, double &lefttail, double &righttail);

double pearsoncorr2(/* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_int_t n,
     ae_state *_state);
double spearmancorr2(/* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_int_t n,
     ae_state *_state);
void covm(/* Real */ ae_matrix* x,
     ae_int_t n,
     ae_int_t m,
     /* Real */ ae_matrix* c,
     ae_state *_state);
void _pexec_covm(/* Real */ ae_matrix* x,
ae_int_t n,
ae_int_t m,
/* Real */ ae_matrix* c, ae_state *_state);
void pearsoncorrm(/* Real */ ae_matrix* x,
     ae_int_t n,
     ae_int_t m,
     /* Real */ ae_matrix* c,
     ae_state *_state);
void _pexec_pearsoncorrm(/* Real */ ae_matrix* x,
ae_int_t n,
ae_int_t m,
/* Real */ ae_matrix* c, ae_state *_state);
void spearmancorrm(/* Real */ ae_matrix* x,
     ae_int_t n,
     ae_int_t m,
     /* Real */ ae_matrix* c,
     ae_state *_state);
void _pexec_spearmancorrm(/* Real */ ae_matrix* x,
ae_int_t n,
ae_int_t m,
/* Real */ ae_matrix* c, ae_state *_state);
void covm2(/* Real */ ae_matrix* x,
     /* Real */ ae_matrix* y,
     ae_int_t n,
     ae_int_t m1,
     ae_int_t m2,
     /* Real */ ae_matrix* c,
     ae_state *_state);
void _pexec_covm2(/* Real */ ae_matrix* x,
/* Real */ ae_matrix* y,
ae_int_t n,
ae_int_t m1,
ae_int_t m2,
/* Real */ ae_matrix* c, ae_state *_state);
void pearsoncorrm2(/* Real */ ae_matrix* x,
     /* Real */ ae_matrix* y,
     ae_int_t n,
     ae_int_t m1,
     ae_int_t m2,
     /* Real */ ae_matrix* c,
     ae_state *_state);
void _pexec_pearsoncorrm2(/* Real */ ae_matrix* x,
/* Real */ ae_matrix* y,
ae_int_t n,
ae_int_t m1,
ae_int_t m2,
/* Real */ ae_matrix* c, ae_state *_state);
void spearmancorrm2(/* Real */ ae_matrix* x,
     /* Real */ ae_matrix* y,
     ae_int_t n,
     ae_int_t m1,
     ae_int_t m2,
     /* Real */ ae_matrix* c,
     ae_state *_state);
void _pexec_spearmancorrm2(/* Real */ ae_matrix* x,
/* Real */ ae_matrix* y,
ae_int_t n,
ae_int_t m1,
ae_int_t m2,
/* Real */ ae_matrix* c, ae_state *_state);
void rankdata(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures,
ae_state *_state);
void _pexec_rankdata(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures, ae_state *_state);
void rankdatacentered(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures,
ae_state *_state);
void _pexec_rankdatacentered(/* Real */ ae_matrix* xy,
ae_int_t npoints,
ae_int_t nfeatures, ae_state *_state);
double pearsoncorrelation(/* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_int_t n,
     ae_state *_state);
double spearmanrankcorrelation(/* Real */ ae_vector* x,
     /* Real */ ae_vector* y,
     ae_int_t n,
     ae_state *_state);
void pearsoncorrelationsignificance(double r,
     ae_int_t n,
     double* bothtails,
     double* lefttail,
     double* righttail,
     ae_state *_state);