Index: trunk/npstat/nm/AbsClassicalOrthoPoly1D.icc =================================================================== --- trunk/npstat/nm/AbsClassicalOrthoPoly1D.icc (revision 729) +++ trunk/npstat/nm/AbsClassicalOrthoPoly1D.icc (revision 730) @@ -1,624 +1,624 @@ #include #include #include #include namespace npstat { namespace Private { template class ClassOrthoPolyHlp : public Functor1 { public: inline ClassOrthoPolyHlp(const SomePoly1D& p, const Functor& f, const unsigned d1) : poly(p), fcn(f), deg(d1) {} inline long double operator()(const long double& x) const {return fcn(x)*poly.poly(deg, x)*poly.weight(x);} private: const SomePoly1D& poly; const Functor& fcn; unsigned deg; }; } template void AbsClassicalOrthoPoly1D::calculateCoeffs( const Functor& fcn, const Quadrature& quad, double *coeffs, const unsigned maxdeg) const { assert(coeffs); const double a = xmin(); const double b = xmax(); for (unsigned i=0; i<=maxdeg; ++i) { Private::ClassOrthoPolyHlp func(*this, fcn, i); coeffs[i] = quad.integrate(func, a, b); } } template double AbsClassicalOrthoPoly1D::empiricalKroneckerDelta( const Quadrature& quad, const unsigned deg1, const unsigned deg2) const { ProdFcn fcn(*this, deg1, deg2); return quad.integrate(fcn, xmin(), xmax()); } template double AbsClassicalOrthoPoly1D::jointAverage( const Quadrature& quad, const unsigned deg1, const unsigned deg2, const unsigned deg3, const unsigned deg4) const { unsigned degs[4]; unsigned nDegs = 0; if (deg1) degs[nDegs++] = deg1; if (deg2) degs[nDegs++] = deg2; if (deg3) degs[nDegs++] = deg3; if (deg4) degs[nDegs++] = deg4; return jointAverage(quad, degs, nDegs, true); } template double AbsClassicalOrthoPoly1D::jointAverage( const Quadrature& quad, const unsigned* degrees, const unsigned nDegrees, const bool checkedForZeros) const { if (nDegrees) { assert(degrees); if (nDegrees == 1U) return kdelta(degrees[0], 0U); if (nDegrees == 2U) return kdelta(degrees[0], degrees[1]); if (!checkedForZeros) { bool allNonZero = true; for (unsigned ideg=0; ideg Matrix AbsClassicalOrthoPoly1D::sampleProductAverages( const Numeric* coords, const unsigned long lenCoords, const unsigned maxdeg) const { if (!lenCoords) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleProductAverages:" " empty sample"); assert(coords); const unsigned long nsums = (maxdeg + 1)*(unsigned long)(maxdeg + 2)/2; std::vector membuf(nsums + (maxdeg + 1U), 0.0L); long double* poly = &membuf[0]; long double* sums = poly + (maxdeg + 1U); for (unsigned long i=0; i mat(maxdeg + 1U, maxdeg + 1U); unsigned long isum = 0; for (unsigned k=0; k<=maxdeg; ++k) for (unsigned j=0; j<=k; ++j, ++isum) { const double v = sums[isum]/lenCoords; mat[k][j] = v; if (j != k) mat[j][k] = v; } return mat; } template - Matrix AbsClassicalOrthoPoly1D::altWeightProductAverages( + Matrix AbsClassicalOrthoPoly1D::extWeightProductAverages( const Functor& fcn, const AbsIntervalQuadrature1D& quad, const unsigned maxdeg) const { const unsigned nInteg = quad.npoints(); const unsigned long nsums = (maxdeg + 1)*(unsigned long)(maxdeg + 2)/2; std::vector membuf(nsums + (maxdeg + 1U) + 2U*nInteg, 0.0L); long double* poly = &membuf[0]; long double* sums = poly + (maxdeg + 1U); long double* absc = sums + nsums; long double* weights = absc + nInteg; quad.getAllAbscissae(absc, nInteg); quad.getAllWeights(weights, nInteg); const long double midpoint = (static_cast(xmin()) + xmax())/2.0L; const long double unit = (static_cast(xmax()) - xmin())/2.0L; for (unsigned long i=0; i mat(maxdeg + 1U, maxdeg + 1U); unsigned 
long isum = 0; for (unsigned k=0; k<=maxdeg; ++k) for (unsigned j=0; j<=k; ++j, ++isum) { const double v = sums[isum]; mat[k][j] = v; if (j != k) mat[j][k] = v; } return mat; } template void AbsClassicalOrthoPoly1D::sampleAverages( const Numeric* coords, const unsigned long lenCoords, double *averages, const unsigned maxdeg) const { if (!lenCoords) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleAverages: empty sample"); assert(coords); assert(averages); std::vector membuf(2U*(maxdeg + 1U), 0.0L); long double* poly = &membuf[0]; long double* sums = poly + (maxdeg + 1U); for (unsigned long i=0; i void AbsClassicalOrthoPoly1D::sampleCoeffs( const Numeric* coords, const unsigned long lenCoords, const Numeric location, const Numeric scale, double *coeffs, const unsigned maxdeg) const { if (!lenCoords) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleCoeffs: empty sample"); assert(coords); assert(coeffs); const Numeric zero = Numeric(); if (scale == zero) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleCoeffs: zero scale"); std::vector membuf(2U*(maxdeg + 1U), 0.0L); long double* poly = &membuf[0]; long double* sums = poly + (maxdeg + 1U); for (unsigned long i=0; i void AbsClassicalOrthoPoly1D::sampleCoeffVars( const Numeric* coords, const unsigned long lenCoords, const Numeric location, const Numeric scale, const double *coeffs, const unsigned maxdeg, double *variances) const { if (lenCoords < 2UL) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleCoeffVars: " "insufficient sample size"); assert(coords); assert(coeffs); assert(variances); const Numeric zero = Numeric(); if (scale == zero) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleCoeffVars: zero scale"); std::vector membuf(2U*(maxdeg + 1U), 0.0L); long double* poly = &membuf[0]; long double* sums = poly + (maxdeg + 1U); for (unsigned long i=0; i npstat::Matrix AbsClassicalOrthoPoly1D::sampleCoeffCovariance( const Numeric* coords, const unsigned long lenCoords, const Numeric location, const Numeric scale, const double *coeffs, const unsigned maxdeg) const { if (lenCoords < 2UL) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleCoeffCovariance: " "insufficient sample size"); assert(coords); assert(coeffs); const Numeric zero = Numeric(); if (scale == zero) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::sampleCoeffCovariance: " "zero scale"); std::vector membuf(maxdeg + 1U, 0.0L); long double* poly = &membuf[0]; npstat::Matrix sums(maxdeg + 1U, maxdeg + 1U, 0); for (unsigned long i=0; i void AbsClassicalOrthoPoly1D::sampleCoeffs( const Numeric* coords, const unsigned long lenCoords, double *coeffs, const unsigned maxdeg) const { const Numeric zero = Numeric(); const Numeric one = static_cast(1); sampleCoeffs(coords, lenCoords, zero, one, coeffs, maxdeg); } template void AbsClassicalOrthoPoly1D::sampleCoeffVars( const Numeric* coords, const unsigned long lenCoords, const double *coeffs, const unsigned maxdeg, double *variances) const { const Numeric zero = Numeric(); const Numeric one = static_cast(1); sampleCoeffVars(coords, lenCoords, zero, one, coeffs, maxdeg, variances); } template npstat::Matrix AbsClassicalOrthoPoly1D::sampleCoeffCovariance( const Numeric* coords, unsigned long lenCoords, const double* coeffs, unsigned maxdeg) const { const Numeric zero = Numeric(); const Numeric one = static_cast(1); return sampleCoeffCovariance(coords, lenCoords, zero, one, coeffs, maxdeg); } 
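// A minimal usage sketch for the sample-based methods above (illustrative
// only; "data" stands for a caller-supplied std::vector<double> sample and
// "x" for a point inside the support; LegendreOrthoPoly1D is declared in
// "npstat/nm/ClassicalOrthoPolys1D.hh"):
//
//   const unsigned maxdeg = 5U;
//   npstat::LegendreOrthoPoly1D poly;
//   std::vector<double> coeffs(maxdeg + 1U), vars(maxdeg + 1U);
//
//   // Coefficients of the empirical density expansion, lowest degree first
//   poly.sampleCoeffs(&data[0], data.size(), &coeffs[0], maxdeg);
//
//   // Statistical uncertainties of those coefficients
//   poly.sampleCoeffVars(&data[0], data.size(), &coeffs[0], maxdeg, &vars[0]);
//
//   // Evaluate the fitted series at x
//   const double estimate = poly.series(&coeffs[0], maxdeg, x);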
template void AbsClassicalOrthoPoly1D::weightedSampleCoeffs( const Pair* points, const unsigned long numPoints, const typename Pair::first_type location, const typename Pair::first_type scale, double *coeffs, const unsigned maxdeg) const { typedef typename Pair::first_type Numeric; if (!numPoints) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::weightedSampleCoeffs: " "empty sample"); assert(points); assert(coeffs); const Numeric zero = Numeric(); if (scale == zero) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::weightedSampleCoeffs: " "zero scale"); std::vector membuf(2U*(maxdeg + 1U), 0.0L); long double* poly = &membuf[0]; long double* sums = poly + (maxdeg + 1U); long double wsum = 0.0L; for (unsigned long i=0; i void AbsClassicalOrthoPoly1D::weightedSampleCoeffVars( const Pair* points, const unsigned long numPoints, typename Pair::first_type location, typename Pair::first_type scale, const double *coeffs, const unsigned maxdeg, double *variances) const { typedef typename Pair::first_type Numeric; if (numPoints < 2UL) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::weightedSampleCoeffVars: " "insufficient sample size"); assert(points); assert(coeffs); assert(variances); const Numeric zero = Numeric(); if (scale == zero) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::weightedSampleCoeffVars: " "zero scale"); std::vector membuf(2U*(maxdeg + 1U), 0.0L); long double* poly = &membuf[0]; long double* sums = poly + (maxdeg + 1U); long double wsum = 0.0L, wsqsum = 0.0L; for (unsigned long i=0; i npstat::Matrix AbsClassicalOrthoPoly1D::weightedSampleCoeffCovariance( const Pair* points, const unsigned long numPoints, typename Pair::first_type location, typename Pair::first_type scale, const double *coeffs, const unsigned maxdeg) const { typedef typename Pair::first_type Numeric; if (numPoints < 2UL) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::weightedSampleCoeffCovariance: " "insufficient sample size"); assert(points); assert(coeffs); const Numeric zero = Numeric(); if (scale == zero) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::weightedSampleCoeffCovariance: " "zero scale"); std::vector membuf(maxdeg + 1U, 0.0L); long double* poly = &membuf[0]; npstat::Matrix sums(maxdeg + 1U, maxdeg + 1U, 0); long double wsum = 0.0L, wsqsum = 0.0L; for (unsigned long i=0; i void AbsClassicalOrthoPoly1D::weightedSampleCoeffs( const Pair* points, const unsigned long numPoints, double *coeffs, const unsigned maxdeg) const { typedef typename Pair::first_type Numeric; const Numeric zero = Numeric(); const Numeric one = static_cast(1); weightedSampleCoeffs(points, numPoints, zero, one, coeffs, maxdeg); } template void AbsClassicalOrthoPoly1D::weightedSampleCoeffVars( const Pair* points, const unsigned long nPoints, const double *coeffs, const unsigned maxdeg, double *variances) const { typedef typename Pair::first_type Numeric; const Numeric zero = Numeric(); const Numeric one = static_cast(1); weightedSampleCoeffVars(points, nPoints, zero, one, coeffs, maxdeg, variances); } template npstat::Matrix AbsClassicalOrthoPoly1D::weightedSampleCoeffCovariance( const Pair* points, unsigned long nPoints, const double* coeffs, unsigned maxdeg) const { typedef typename Pair::first_type Numeric; const Numeric zero = Numeric(); const Numeric one = static_cast(1); return weightedSampleCoeffCovariance(points, nPoints, zero, one, coeffs, maxdeg); } template double AbsClassicalOrthoPoly1D::jointSampleAverage( const 
Numeric* coords, const unsigned long lenCoords, const unsigned* degrees, const unsigned lenDegrees) const { if (!lenCoords) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::jointSampleAverage: " "empty sample"); assert(coords); if (lenDegrees) { assert(degrees); const unsigned maxdeg = *std::max_element(degrees, degrees+lenDegrees); std::vector membuf(maxdeg + 1U, 0.0L); long double* poly = &membuf[0]; long double sum = 0.0L; for (unsigned long i=0; i std::pair AbsClassicalOrthoPoly1D::twoJointSampleAverages( const Numeric* coords, const unsigned long lenCoords, const unsigned* degrees1, const unsigned lenDegrees1, const unsigned* degrees2, const unsigned lenDegrees2) const { if (!lenCoords) throw std::invalid_argument( "In npstat::AbsClassicalOrthoPoly1D::twoJointSampleAverages: empty sample"); assert(coords); unsigned maxdeg1 = 0, maxdeg2 = 0; if (lenDegrees1) { assert(degrees1); maxdeg1 = *std::max_element(degrees1, degrees1+lenDegrees1); } if (lenDegrees2) { assert(degrees2); maxdeg2 = *std::max_element(degrees2, degrees2+lenDegrees2); } const unsigned maxdeg = std::max(maxdeg1, maxdeg2); if (lenDegrees1 || lenDegrees2) { std::vector membuf(maxdeg + 1U, 0.0L); long double* poly = &membuf[0]; long double sum1 = 0.0L, sum2 = 0.0L; for (unsigned long i=0; i(sum1/lenCoords, sum2/lenCoords); } else return std::pair(1.0, 1.0); } } Index: trunk/npstat/nm/ContOrthoPoly1D.hh =================================================================== --- trunk/npstat/nm/ContOrthoPoly1D.hh (revision 729) +++ trunk/npstat/nm/ContOrthoPoly1D.hh (revision 730) @@ -1,647 +1,658 @@ #ifndef NPSTAT_CONTORTHOPOLY1D_HH_ #define NPSTAT_CONTORTHOPOLY1D_HH_ /*! // \file ContOrthoPoly1D.hh // // \brief Continuous orthogonal polynomial series in one dimension // generated for a discrete measure // // Author: I. Volobouev // // May 2017 */ #include #include #include #include #include #include "geners/CPP11_auto_ptr.hh" #include "npstat/nm/OrthoPolyMethod.hh" #include "npstat/nm/Matrix.hh" #include "npstat/nm/SimpleFunctors.hh" #include "npstat/nm/PreciseType.hh" +#include "npstat/nm/AbsIntervalQuadrature1D.hh" #ifdef USE_LAPACK_QUAD #include "npstat/nm/GaussLegendreQuadratureQ.hh" #else #include "npstat/nm/GaussLegendreQuadrature.hh" #endif namespace npstat { class StorablePolySeries1D; class ContOrthoPoly1D { public: typedef PreciseType::type precise_type; // The first element of the pair is the coordinate and the // second element is the weight (all weights must be non-negative) typedef std::pair MeasurePoint; #ifdef USE_LAPACK_QUAD typedef GaussLegendreQuadratureQ PreciseQuadrature; #else typedef GaussLegendreQuadrature PreciseQuadrature; #endif /** // Main constructor, with obvious arguments. The first element // of the measure pair is the coordinate and the second element // is the weight (all weights must be non-negative). Internally, // the measure will be sorted in the order of increasing weight // and the measure coordinates will be shifted so that their // weighted mean is at 0. 
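//
// A minimal construction sketch (illustrative only; "xs" and "ws" stand
// for caller-supplied coordinates and non-negative weights, and the number
// of points with non-zero weights must exceed maxDegree):
//
//   std::vector<std::pair<double,double> > measure;
//   for (unsigned long i=0; i<nPoints; ++i)
//       measure.push_back(std::make_pair(xs[i], ws[i]));
//   npstat::ContOrthoPoly1D poly(maxDegree, measure, npstat::OPOLY_STIELTJES);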
*/ template ContOrthoPoly1D(unsigned maxDegree, const std::vector >& measure, OrthoPolyMethod m = OPOLY_STIELTJES); /** Constructor which assumes that all weights are 1.0 */ template ContOrthoPoly1D(unsigned maxDegree, const std::vector& coords, OrthoPolyMethod m = OPOLY_STIELTJES); //@{ /** A simple inspector of object properties */ inline unsigned maxDegree() const {return maxdeg_;} inline unsigned long measureLength() const {return measure_.size();} inline double weight(const unsigned long index) const {return measure_.at(index).second;} inline precise_type sumOfWeights() const {return wsum_;} inline precise_type sumOfWeightSquares() const {return wsumsq_;} inline bool areAllWeightsEqual() const {return allWeightsEqual_;} inline double minCoordinate() const {return minX_;} inline double maxCoordinate() const {return maxX_;} inline precise_type meanCoordinate() const {return meanX_;} //@} //@{ /** // Note that this returns the internal, shifted coordinate. // Add the mean coordinate to restore the original coordinate. */ inline MeasurePoint measure(const unsigned long index) const {return measure_.at(index);} inline double coordinate(const unsigned long index) const {return measure_.at(index).first;} //@} /** Kish's effective sample size for the measure */ double effectiveSampleSize() const; /** Return the value of one of the normalized polynomials */ double poly(unsigned deg, double x) const; /** Return the values of two of the normalized polynomials */ std::pair polyPair( unsigned deg1, unsigned deg2, double x) const; /** // Return the values of all orthonormal polynomials up to some // maximum degree. Size of the "polyValues" array should be // at least maxdeg + 1. */ template inline void allPolys(const double x, const unsigned maxdeg, Numeric* polyValues) const {getAllPolys(x - meanX_, maxdeg, polyValues);} /** // Function added for interface compatibility with // AbsClassicalOrthoPoly1D */ inline void allpoly(const long double x, long double* values, const unsigned maxdeg) const {getAllPolys(x - meanX_, maxdeg, values);} /** // Average values of the polynomials for a sample of points. // Size of the "averages" array should be at least maxdeg + 1. // Note that the resulting averages are not weighted by the measure. */ template void sampleAverages(const Numeric* coords, unsigned long lenCoords, double* averages, unsigned maxdeg) const; /** // Average values of the pairwise polynomial products for // a sample of points. The returned matrix will be symmetric // and will have the dimensions (maxdeg + 1) x (maxdeg + 1). // Note that the resulting averages are not weighted by the measure. */ template Matrix sampleProductAverages(const Numeric* coords, unsigned long lenCoords, unsigned maxdeg) const; /** Return the value of the orthonormal polynomial series */ template double series(const Numeric* coeffs, unsigned deg, double x) const; /** // Build the coefficients of the orthogonal polynomial series // for the given function. The length of the array "coeffs" // should be at least "maxdeg" + 1. Note that the coefficients // are returned in the order of increasing degree (same order // is used by the "series" function). */ template void calculateCoeffs(const Functor& fcn, Numeric* coeffs, unsigned maxdeg) const; /** // Build the coefficients of the orthogonal polynomial series // for the given function in such a way that the integral of // the truncated series on the [xmin, xmax] interval is constrained // to "integralValue". 
The length of the array "coeffs" should be // at least "maxdeg" + 1. Note that the coefficients are returned // in the order of increasing degree (same order is used by the // "series" function). The construction minimizes the ISE weighted // by the empirical density. // // The function returns the chi-square from the corresponding // constrained least squares problem. This is the sum of // (coeffs[i] - c[i])^2, 0 <= i <= maxdeg, where c[i] are // determined by the "calculateCoeffs" method. */ template double constrainedCoeffs(const Functor& fcn, double *coeffs, unsigned maxdeg, double xmin, double xmax, double integralValue = 1.0) const; /** // Build the coefficients of the orthogonal polynomial series // for the discrete weight values (that is, fcn(x_i) = w_i, // using the terminology of the "calculateCoeffs" method). This // can sometimes be useful for weighted measures. Of course, // for unweighted measures this results in just delta_{deg,0}. */ void weightCoeffs(double *coeffs, unsigned maxdeg) const; /** // Integrated squared error between the given function and the // polynomial series. Parameter "integrationRulePoints" specifies // the parameter "npoints" for the GaussLegendreQuadrature class. // If "integrationRulePoints" is 0, the rule will be picked // automatically in such a way that it integrates a polynomial // of degree maxdeg*2 exactly. */ template double integratedSquaredError(const Functor& fcn, const double *coeffs, unsigned maxdeg, double xmin, double xmax, unsigned integrationRulePoints=0U) const; /** // Squared error between the given function and the polynomial // series, weighted according to the measure */ template double weightedSquaredError(const Functor& fcn, const double *coeffs, unsigned maxdeg) const; /** // This method is useful for testing the numerical precision // of the orthonormalization procedure. It returns the scalar // products between various polynomials. */ precise_type empiricalKroneckerDelta(unsigned deg1, unsigned deg2) const; /** // If the Kronecker deltas were calculated from a sample, they // would be random and would have a covariance matrix. This // is an estimate of the terms in that covariance matrix. The // covariance is between delta_{deg1,deg2} and delta_{deg3,deg4}. */ double empiricalKroneckerCovariance(unsigned deg1, unsigned deg2, unsigned deg3, unsigned deg4) const; /** // Examine the recurrence coefficients. The first element of // the returned pair is alpha, and the second element is beta. // Higher precision than double is used internally. */ std::pair recurrenceCoeffs(unsigned deg) const; /** // Generate principal minor of order n of the Jacobi matrix. // n must not exceed "maxDegree" */ Matrix jacobiMatrix(unsigned n) const; /** // Roots of the polynomial with the given degree. Naturally, // the degree argument must not exceed "maxDegree". */ void calculateRoots(precise_type* roots, unsigned degree) const; /** // Integral of an orthonormal polynomials to some power without // the weight (so this is not an inner product with the poly of // degree 0). */ precise_type integratePoly(unsigned degree, unsigned power, double xmin, double xmax) const; /** // Integral of the product of three orthonormal polynomials // without the weight (so this is not an inner product) */ precise_type integrateTripleProduct(unsigned deg1, unsigned deg2, unsigned deg3, double xmin, double xmax) const; /** // Unweighted triple product integrals arranged in a matrix. // The returned matrix will be dimensioned pairs.size() x (maxdeg+1). 
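//
// A usage sketch (illustrative only; the degree pairs and the integration
// limits xmin, xmax are assumed to be chosen by the caller):
//
//   std::vector<std::pair<unsigned,unsigned> > pairs;
//   pairs.push_back(std::make_pair(0U, 1U));
//   pairs.push_back(std::make_pair(2U, 3U));
//   // One row per requested pair, maxdeg + 1 columns
//   poly.tripleProductMatrix(pairs, maxdeg, xmin, xmax);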
*/ Matrix tripleProductMatrix( const std::vector >& pairs, unsigned maxdeg, double xmin, double xmax) const; /** // Integral of the product of two orthonormal polynomials // with some external weight function (e.g., oracle density). // "EW" in the method name stands for "externally weighted". // // "integrationRulePoints" argument will be passed to the // GaussLegendreQuadrature constructor and must be supported // by that class. */ template double integrateEWPolyProduct(const Functor& weight, unsigned deg1, unsigned deg2, double xmin, double xmax, unsigned integrationRulePoints) const; /** + // Integral of all products of two orthonormal polynomials + // with some external weight function (e.g., oracle density). + */ + template + Matrix extWeightProductAverages(const Functor& weight, + const AbsIntervalQuadrature1D& quad, + unsigned maxdeg, + double xmin, double xmax) const; + + /** // A measure-weighted average of orthonormal poly values // to some power */ double powerAverage(unsigned deg, unsigned power) const; /** // A measure-weighted average of the product of two orthonormal // poly values to some powers */ double jointPowerAverage(unsigned deg1, unsigned power1, unsigned deg2, unsigned power2) const; /** // A measure-weighted average of the product of several // orthonormal poly values */ double jointAverage(const unsigned* degrees, unsigned nDegrees, bool degreesSortedIncreasingOrder = false) const; //@{ /** // A measure-weighted average that will be remembered internally // so that another call to this function with the same arguments // will return the answer quickly */ double cachedJointAverage(unsigned deg1, unsigned deg2, unsigned deg3, unsigned deg4) const; double cachedJointAverage(unsigned deg1, unsigned deg2, unsigned deg3, unsigned deg4, unsigned deg5, unsigned deg6) const; double cachedJointAverage(unsigned deg1, unsigned deg2, unsigned deg3, unsigned deg4, unsigned deg5, unsigned deg6, unsigned deg7, unsigned deg8) const; //@} /** Covariance between v_{ab} and v_{cd} */ double cov4(unsigned a, unsigned b, unsigned c, unsigned d) const; /** Covariance between v_{ab}*v_{cd} and v_{ef} */ double cov6(unsigned a, unsigned b, unsigned c, unsigned d, unsigned e, unsigned f) const; /** Covariance between v_{ab}*v_{cd} and v_{ef}*v_{gh} */ double cov8(unsigned a, unsigned b, unsigned c, unsigned d, unsigned e, unsigned f, unsigned g, unsigned h) const; /** Covariance between two cov4 estimates (i.e., error on the error) */ double covCov4(unsigned a, unsigned b, unsigned c, unsigned d, unsigned e, unsigned f, unsigned g, unsigned h) const; /** // Alternative implementation of "cov8". It is slower than "cov8" // but easier to program and verify. Useful mainly for testing "cov8". */ double slowCov8(unsigned a, unsigned b, unsigned c, unsigned d, unsigned e, unsigned f, unsigned g, unsigned h) const; /** Estimate of eps_{mn} expectation */ double epsExpectation(unsigned m, unsigned n, bool highOrder) const; /** // Estimate of covariance between eps_{ij} and eps_{mn}. // // The result should be identical with "empiricalKroneckerCovariance" // in case "highOrder" argument is "false". However, this method // caches results of various calculations of averages and should // perform better inside cycles over indices. 
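//
// For example (illustrative only, for any valid degrees i, j, m, n):
//
//   const double c1 = poly.epsCovariance(i, j, m, n, false);
//   const double c2 = poly.empiricalKroneckerCovariance(i, j, m, n);
//   // c1 and c2 should agree up to numerical round-off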
*/ double epsCovariance(unsigned i, unsigned j, unsigned m, unsigned n, bool highOrder) const; /** // Covariance matrix created by multiple calls to "epsCovariance" */ Matrix epsCovarianceMatrix( const std::vector >& pairs, bool highOrder) const; /** Construct a StorablePolySeries1D object out of this one */ CPP11_auto_ptr makeStorablePolySeries( double xmin, double xmax, const double *coeffs=0, unsigned maxdeg=0) const; private: static const precise_type precise_zero; struct Recurrence { inline Recurrence(const precise_type a, const precise_type b) : alpha(a), beta(b) { assert(beta > 0.0); sqrbeta = std::sqrt(beta); } precise_type alpha; precise_type beta; precise_type sqrbeta; }; class PolyFcn; friend class PolyFcn; class PolyFcn : public Functor1 { public: inline PolyFcn(const ContOrthoPoly1D& p, const unsigned d1, const unsigned power) : poly(p), deg1(d1), polypow(power) {} inline precise_type operator()(const precise_type& x) const { precise_type p = 1.0; if (polypow) { p = poly.normpoly(deg1, x); switch (polypow) { case 1U: break; case 2U: p *= p; break; default: const precise_type myPow = polypow; p = std::pow(p, myPow); break; } } return p; } private: const ContOrthoPoly1D& poly; unsigned deg1; unsigned polypow; }; class TripleProdFcn; friend class TripleProdFcn; class TripleProdFcn : public Functor1 { public: inline TripleProdFcn(const ContOrthoPoly1D& p, const unsigned d1, const unsigned d2, const unsigned d3) : poly(p) { degs[0] = d1; degs[1] = d2; degs[2] = d3; degmax = std::max(d1, std::max(d2, d3)); } inline precise_type operator()(const precise_type& x) const {return poly.normpolyprod(degs, 3, degmax, x);} private: const ContOrthoPoly1D& poly; unsigned degs[3]; unsigned degmax; }; template class EWPolyProductFcn; template friend class EWPolyProductFcn; template class EWPolyProductFcn : public Functor1 { public: inline EWPolyProductFcn(const ContOrthoPoly1D& p, const Funct& w, const unsigned d1, const unsigned d2) : poly(p), wf(w) { degs[0] = d1; degs[1] = d2; degmax = std::max(d1, d2); } inline precise_type operator()(const precise_type& x) const {return poly.normpolyprod(degs,2,degmax,x-poly.meanX_)*wf(x);} private: const ContOrthoPoly1D& poly; const Funct& wf; unsigned degs[2]; unsigned degmax; }; class MemoKey { public: inline MemoKey(const unsigned d0, const unsigned d1, const unsigned d2, const unsigned d3) : nDegs_(4U) { degs_[0] = d0; degs_[1] = d1; degs_[2] = d2; degs_[3] = d3; std::sort(degs_, degs_+nDegs_); } inline MemoKey(const unsigned d0, const unsigned d1, const unsigned d2, const unsigned d3, const unsigned d4, const unsigned d5) : nDegs_(6U) { degs_[0] = d0; degs_[1] = d1; degs_[2] = d2; degs_[3] = d3; degs_[4] = d4; degs_[5] = d5; std::sort(degs_, degs_+nDegs_); } inline MemoKey(const unsigned d0, const unsigned d1, const unsigned d2, const unsigned d3, const unsigned d4, const unsigned d5, const unsigned d6, const unsigned d7) : nDegs_(8U) { degs_[0] = d0; degs_[1] = d1; degs_[2] = d2; degs_[3] = d3; degs_[4] = d4; degs_[5] = d5; degs_[6] = d6; degs_[7] = d7; std::sort(degs_, degs_+nDegs_); } inline const unsigned* degrees() const {return degs_;} inline unsigned nDegrees() const {return nDegs_;} inline bool operator<(const MemoKey& r) const { if (nDegs_ < r.nDegs_) return true; if (r.nDegs_ < nDegs_) return false; for (unsigned i=0; i monicInnerProducts( unsigned degree) const; precise_type normpoly(unsigned degree, precise_type x) const; std::pair twonormpoly( unsigned deg1, unsigned deg2, precise_type x) const; precise_type normpolyprod(const unsigned* 
degrees, unsigned nDeg, unsigned degmax, precise_type x) const; template precise_type normseries(const Numeric* coeffs, unsigned maxdeg, precise_type x) const; template void getAllPolys(double x, unsigned maxdeg, Numeric *polyValues) const; double getCachedAverage(const MemoKey& key) const; std::vector measure_; std::vector rCoeffs_; mutable std::map cachedAverages_; precise_type wsum_; precise_type wsumsq_; precise_type meanX_; double minX_; double maxX_; unsigned maxdeg_; bool allWeightsEqual_; #ifdef SWIG public: inline StorablePolySeries1D* makeStorablePolySeries_2( double xmin, double xmax, const std::vector& coeffs) const { CPP11_auto_ptr ptr; if (coeffs.empty()) ptr = this->makeStorablePolySeries(xmin, xmax); else ptr = this->makeStorablePolySeries(xmin, xmax, &coeffs[0], coeffs.size() - 1); return ptr.release(); } inline double sumOfWeights_2() const {return sumOfWeights();} inline double sumOfWeightSquares_2() const {return sumOfWeightSquares();} inline double meanCoordinate_2() const {return meanCoordinate();} inline double empiricalKroneckerDelta_2(const unsigned deg1, const unsigned deg2) const {return empiricalKroneckerDelta(deg1, deg2);} inline Matrix jacobiMatrix_2(const unsigned n) const {return jacobiMatrix(n);} inline double integratePoly_2(const unsigned degree, const unsigned power, const double xmin, const double xmax) const {return integratePoly(degree, power, xmin, xmax);} inline double integrateTripleProduct_2(const unsigned deg1, const unsigned deg2, const unsigned deg3, const double xmin, const double xmax) const {return integrateTripleProduct(deg1, deg2, deg3, xmin, xmax);} inline Matrix tripleProductMatrix_2( const std::vector >& pairs, const unsigned maxdeg, const double xmin, const double xmax) const {return tripleProductMatrix(pairs, maxdeg, xmin, xmax);} #endif }; } #include "npstat/nm/ContOrthoPoly1D.icc" #endif // NPSTAT_CONTORTHOPOLY1D_HH_ Index: trunk/npstat/nm/AbsClassicalOrthoPoly1D.hh =================================================================== --- trunk/npstat/nm/AbsClassicalOrthoPoly1D.hh (revision 729) +++ trunk/npstat/nm/AbsClassicalOrthoPoly1D.hh (revision 730) @@ -1,514 +1,514 @@ #ifndef NPSTAT_ABSCLASSICALORTHOPOLY1D_HH_ #define NPSTAT_ABSCLASSICALORTHOPOLY1D_HH_ /*! // \file AbsClassicalOrthoPoly1D.hh // // \brief Base class for classical continuous orthonormal polynomials // // Author: I. Volobouev // // May 2017 */ #include #include #include #include #include #include "geners/CPP11_auto_ptr.hh" #include "npstat/nm/Matrix.hh" #include "npstat/nm/SimpleFunctors.hh" #include "npstat/nm/AbsIntervalQuadrature1D.hh" namespace npstat { class StorablePolySeries1D; class AbsClassicalOrthoPoly1D { public: inline virtual ~AbsClassicalOrthoPoly1D() {} /** Virtual copy constructor */ virtual AbsClassicalOrthoPoly1D* clone() const = 0; /** The weight function should be normalized */ virtual long double weight(long double x) const = 0; //@{ /** Support of the polynomial */ virtual double xmin() const = 0; virtual double xmax() const = 0; //@} /** Maximum polynomial degree supported */ inline virtual unsigned maxDegree() const {return UINT_MAX - 1U;} /** Polynomial values */ long double poly(unsigned deg, long double x) const; /** // Values of two orthonormal polynomials. // Faster than calling "poly" two times. */ std::pair twopoly( unsigned deg1, unsigned deg2, long double x) const; /** // Values of all orthonormal polynomials up to some degree. // Faster than calling "poly" multiple times. 
The size of // the "values" array should be at least maxdeg + 1. */ void allpoly(long double x, long double* values, unsigned maxdeg) const; /** Polynomial series */ double series(const double* coeffs, unsigned maxdeg, double x) const; /** Integral of the series */ double integrateSeries(const double* coeffs, unsigned maxdeg) const; /** // Build the coefficients of the orthogonal polynomial series // for the given function. The length of the array "coeffs" // should be at least maxdeg + 1. Note that the coefficients // are returned in the order of increasing degree (same order // is used by the "series" function). */ template void calculateCoeffs(const Functor& fcn, const Quadrature& quad, double* coeffs, unsigned maxdeg) const; /** // Build the coefficients of the orthogonal polynomial series // for the given sample of points (empirical density function). // The length of the array "coeffs" should be at least maxdeg + 1. // Note that the coefficients are returned in the order of // increasing degree (same order is used by the "series" function). */ template void sampleCoeffs(const Numeric* coords, unsigned long lenCoords, double* coeffs, unsigned maxdeg) const; /** // Estimate the variances of the coefficients returned by // the "sampleCoeffs" function. The "coeffs" array should be // at least maxdeg + 1 elements long and should be filled // by a previous call to "sampleCoeffs". The "variances" array // should have at least maxdeg + 1 elements. It will contain // the respective variances upon return. */ template void sampleCoeffVars(const Numeric* coords, unsigned long lenCoords, const double* coeffs, unsigned maxdeg, double* variances) const; /** // Estimate the covariances of the coefficients returned by // the "sampleCoeffs" function. The "coeffs" array should be // at least maxdeg + 1 elements long and should be filled // by a previous call to "sampleCoeffs". The returned matrix // will be dimensioned (maxdeg + 1) x (maxdeg + 1). */ template npstat::Matrix sampleCoeffCovariance(const Numeric* coords, unsigned long lenCoords, const double* coeffs, unsigned maxdeg) const; /** // Build the coefficients of the orthogonal polynomial series // for the given sample of points (empirical density function). // The length of the array "coeffs" should be at least maxdeg + 1. // Note that the coefficients are returned in the order of // increasing degree (same order is used by the "series" function). // Before calculating the coefficients, the coordinates are shifted // and scaled according to x_new = (x_original - location)/scale. // The resulting coefficients are also divided by scale. */ template void sampleCoeffs(const Numeric* coords, unsigned long lenCoords, Numeric location, Numeric scale, double* coeffs, unsigned maxdeg) const; /** // Estimate the variances of the coefficients returned by // the "sampleCoeffs" function. The "coeffs" array should be // at least maxdeg + 1 elements long and should be filled // by a previous call to "sampleCoeffs" with the same location // and scale. The "variances" array should have at least maxdeg + 1 // elements. It will contain the respective variances upon return. */ template void sampleCoeffVars(const Numeric* coords, unsigned long lenCoords, Numeric location, Numeric scale, const double* coeffs, unsigned maxdeg, double* variances) const; /** // Estimate the covariances of the coefficients returned by // the "sampleCoeffs" function. 
The "coeffs" array should be // at least maxdeg + 1 elements long and should be filled // by a previous call to "sampleCoeffs" with the same location // and scale. The returned matrix will be dimensioned // (maxdeg + 1) x (maxdeg + 1). */ template npstat::Matrix sampleCoeffCovariance(const Numeric* coords, unsigned long lenCoords, Numeric location, Numeric scale, const double* coeffs, unsigned maxdeg) const; /** // Build the coefficients of the orthogonal polynomial series for // the given sample of weighted points (empirical density function). // The first element of the pair will be treated as the coordinate // and the second element will be treated as weight. Weights must // not be negative. The length of the array "coeffs" should be // at least maxdeg + 1. Note that the coefficients are returned // in the order of increasing degree (same order is used by the // "series" function). */ template void weightedSampleCoeffs(const Pair* points, unsigned long numPoints, double* coeffs, unsigned maxdeg) const; /** // Estimate the variances of the coefficients returned by the // "weightedSampleCoeffs" function. The "coeffs" array should // be at least maxdeg + 1 elements long and should be filled // by a previous call to "weightedSampleCoeffs". The "variances" // array should have at least maxdeg + 1 elements. It will contain // the respective variances upon return. This code assumes that // weights and coordinates are statistically independent from // each other. */ template void weightedSampleCoeffVars(const Pair* points, unsigned long nPoints, const double* coeffs, unsigned maxdeg, double* variances) const; /** // Estimate the covariances of the coefficients returned by the // "weightedSampleCoeffs" function. The "coeffs" array should // be at least maxdeg + 1 elements long and should be filled // by a previous call to "weightedSampleCoeffs". The returned // matrix will be dimensioned (maxdeg + 1) x (maxdeg + 1). This // code assumes that weights and coordinates are statistically // independent from each other. */ template npstat::Matrix weightedSampleCoeffCovariance(const Pair* points, unsigned long nPoints, const double* coeffs, unsigned maxdeg) const; /** // Build the coefficients of the orthogonal polynomial series for // the given sample of weighted points (empirical density function). // The first element of the pair will be treated as the coordinate // and the second element will be treated as weight. Weights must // not be negative. The length of the array "coeffs" should be // at least maxdeg + 1. Note that the coefficients are returned // in the order of increasing degree (same order is used by the // "series" function). Before calculating the coefficients, the // coordinates are shifted and scaled according to // x_new = (x_original - location)/scale. The resulting coefficients // are also divided by scale. */ template void weightedSampleCoeffs(const Pair* points, unsigned long numPoints, typename Pair::first_type location, typename Pair::first_type scale, double* coeffs, unsigned maxdeg) const; /** // Estimate the variances of the coefficients returned by the // "weightedSampleCoeffs" function. The "coeffs" array should // be at least maxdeg + 1 elements long and should be filled // by a previous call to "weightedSampleCoeffs" with the same // location and scale. The "variances" array should have at least // maxdeg + 1 elements. It will contain the respective variances // upon return. This code assumes that weights and coordinates // are statistically independent from each other. 
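//
// A usage sketch (illustrative only; "points" is a caller-supplied
// std::vector<std::pair<double,double> > of (coordinate, weight) entries
// and "location", "scale" are the same shift and scale in both calls):
//
//   poly.weightedSampleCoeffs(&points[0], points.size(), location, scale,
//                             &coeffs[0], maxdeg);
//   poly.weightedSampleCoeffVars(&points[0], points.size(), location, scale,
//                                &coeffs[0], maxdeg, &vars[0]);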
*/ template void weightedSampleCoeffVars(const Pair* points, unsigned long nPoints, typename Pair::first_type location, typename Pair::first_type scale, const double* coeffs, unsigned maxdeg, double* variances) const; /** // Estimate the covariances of the coefficients returned by the // "weightedSampleCoeffs" function. The "coeffs" array should // be at least maxdeg + 1 elements long and should be filled // by a previous call to "weightedSampleCoeffs" with the same // location and scale. The returned matrix will be dimensioned // (maxdeg + 1) x (maxdeg + 1). This code assumes that weights // and coordinates are statistically independent from each other. */ template npstat::Matrix weightedSampleCoeffCovariance(const Pair* points, unsigned long nPoints, typename Pair::first_type location, typename Pair::first_type scale, const double* coeffs, unsigned maxdeg) const; /** // This method is useful for testing the numerical precision // of the orthonormalization procedure. It returns the scalar // products between various polynomials. */ template double empiricalKroneckerDelta(const Quadrature& quad, unsigned deg1, unsigned deg2) const; /** // A measure-weighted average of a product of four orthonormal // poly values */ template double jointAverage(const Quadrature& q, unsigned deg1, unsigned deg2, unsigned deg3, unsigned deg4) const; /** // A measure-weighted average of a product of multiple orthonormal // poly values. "checkedForZeros" argument should be set to "true" // if we are sure that there are no zero elements in the "degrees" // array. */ template double jointAverage(const Quadrature& q, const unsigned* degrees, unsigned nDegrees, bool checkedForZeros = false) const; /** // An unweighted integral of the orthonormal polynomial with the // given degree to some power. For this method, it is assumed // that the polynomials are supported on a closed interval (without // such an assumption unweighted integrals do not make much sense) // and that Gauss-Legendre quadratures can be used. */ double integratePoly(unsigned degree, unsigned power) const; /** // An unweighted integral of a product of multiple orthonormal // polynomials. For this method, it is assumed that the polynomials // are supported on a closed interval (without such an assumption // unweighted integrals do not make much sense) and that Gauss-Legendre // quadratures can be used. */ double jointIntegral(const unsigned* degrees, unsigned nDegrees) const; /** // Unweighted averages of the polynomial values for the given sample. // The length of array "averages" should be at least maxdeg + 1. */ template void sampleAverages(const Numeric* coords, unsigned long lenCoords, double* averages, unsigned maxdeg) const; /** // Unweighted averages of the pairwise products of the polynomial // values for the given sample. The returned matrix will be symmetric // and will have the dimensions (maxdeg + 1) x (maxdeg + 1). */ template Matrix sampleProductAverages(const Numeric* coords, unsigned long lenCoords, unsigned maxdeg) const; /** // Pairwise products of the polynomial values weighted by - // a different weight function. The returned matrix will be + // an external weight function. The returned matrix will be // symmetric and will have the dimensions (maxdeg + 1) x (maxdeg + 1). 
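//
// A usage sketch (illustrative only; "extWeight" stands for a caller-
// supplied weight functor, e.g. an oracle density, and the 128-point
// Gauss-Legendre rule mirrors the unit tests):
//
//   npstat::GaussLegendreQuadrature quad(128);
//   // Symmetric (maxdeg + 1) x (maxdeg + 1) matrix of quadrature-based
//   // integrals of P_j(x)*P_k(x)*extWeight(x) over the support
//   poly.extWeightProductAverages(extWeight, quad, maxdeg);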
*/ template - Matrix altWeightProductAverages(const Functor& altWeight, + Matrix extWeightProductAverages(const Functor& extWeight, const AbsIntervalQuadrature1D& quad, unsigned maxdeg) const; /** // Unweighted average of a product of polynomial values for the // given sample. "degrees" is the collection of polynomial degrees. // Polynomials of these degrees will be included in the product. */ template double jointSampleAverage(const Numeric* coords, unsigned long lenCoords, const unsigned* degrees, unsigned lenDegrees) const; // Similar to the previous method but calculates two averages // simultaneously template std::pair twoJointSampleAverages( const Numeric* coords, unsigned long lenCoords, const unsigned* degrees1, unsigned lenDegrees1, const unsigned* degrees2, unsigned lenDegrees2) const; CPP11_auto_ptr makeStorablePolySeries( unsigned maxPolyDeg, const double *coeffs=0, unsigned maxdeg=0) const; protected: inline static int kdelta(const unsigned i, const unsigned j) {return i == j ? 1 : 0;} // Recurrence relationship function should return alpha // as the first element of the pair and the square root // of beta as the second virtual std::pair recurrenceCoeffs(unsigned deg) const = 0; private: // Helper classes class ProdFcn : public Functor1 { public: inline ProdFcn(const AbsClassicalOrthoPoly1D& p, const unsigned d1, const unsigned d2) : poly(p), deg1(d1), deg2(d2) {} inline long double operator()(const long double& x) const { const std::pair& p = poly.twopoly(deg1, deg2, x); return p.first*p.second*poly.weight(x); } private: const AbsClassicalOrthoPoly1D& poly; unsigned deg1; unsigned deg2; }; class MultiProdFcn; friend class MultiProdFcn; class MultiProdFcn : public Functor1 { public: inline MultiProdFcn(const AbsClassicalOrthoPoly1D& p, const unsigned* degrees, unsigned lenDegrees, const bool includeWeight=true) : poly(p), degs(degrees, degrees+lenDegrees), maxdeg(0), weighted(includeWeight) { if (lenDegrees) maxdeg = *std::max_element(degrees, degrees+lenDegrees); } inline long double operator()(const long double& x) const { long double w = 1.0L, p = 1.0L; if (weighted) w = poly.weight(x); if (maxdeg) p = poly.normpolyprod(°s[0], degs.size(), maxdeg, x); return w*p; } private: const AbsClassicalOrthoPoly1D& poly; std::vector degs; unsigned maxdeg; bool weighted; }; // For calling the function below, maxdeg should be calculated as // *std::max_element(degrees, degrees+nDegrees) long double normpolyprod(const unsigned* degrees, unsigned nDegrees, unsigned maxdeg, long double x) const; #ifdef SWIG public: inline double weight2(const double x) const {return weight(x);} inline double poly2(const unsigned deg, const double x) const {return poly(deg, x);} inline std::vector allpoly2(const unsigned maxdeg, const double x) const { std::vector p(maxdeg+1); allpoly(x, &p[0], p.size()); return std::vector(p.begin(), p.end()); } inline StorablePolySeries1D* makeStorablePolySeries_2( unsigned maxPolyDeg, const std::vector& coeffs) const { CPP11_auto_ptr ptr; if (coeffs.empty()) ptr = this->makeStorablePolySeries(maxPolyDeg); else ptr = this->makeStorablePolySeries(maxPolyDeg, &coeffs[0], coeffs.size() - 1); return ptr.release(); } #endif // SWIG }; /** // A functor for the weight function of the given ortho poly system. // The poly system is not copied, only a reference is used. It is // a responsibility of the user to make sure that the lifetime of // the poly system object exceeds the lifetime of the functor. 
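//
// For example (illustrative only, mirroring the unit tests):
//
//   npstat::JacobiOrthoPoly1D jac(3, 4);
//   npstat::OrthoPoly1DWeight w(jac);   // functor returning jac.weight(x)
//   const long double value = w(0.5L);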
*/ class OrthoPoly1DWeight : public Functor1 { public: inline explicit OrthoPoly1DWeight(const AbsClassicalOrthoPoly1D& fcn, const long double normfactor=1.0L) : fcn_(fcn), norm_(normfactor) {} inline virtual ~OrthoPoly1DWeight() {} inline virtual long double operator()(const long double& a) const {return norm_*fcn_.weight(a);} private: OrthoPoly1DWeight(); const AbsClassicalOrthoPoly1D& fcn_; long double norm_; }; /** // A functor for a particular degree polynomial of the given ortho // poly system. The poly system is not copied, only a reference is used. // It is a responsibility of the user to make sure that the lifetime of // the poly system object exceeds the lifetime of the functor. */ class OrthoPoly1DDeg : public Functor1 { public: inline OrthoPoly1DDeg(const AbsClassicalOrthoPoly1D& fcn, const unsigned degree, const long double normfactor=1.0L) : fcn_(fcn), norm_(normfactor), deg_(degree) { if (deg_ > fcn_.maxDegree()) throw std::invalid_argument( "In npstat::OrthoPoly1DDeg::constructor: " "degree argument is out of range"); } inline virtual ~OrthoPoly1DDeg() {} inline virtual long double operator()(const long double& a) const {return norm_*fcn_.poly(deg_, a);} private: OrthoPoly1DDeg(); const AbsClassicalOrthoPoly1D& fcn_; long double norm_; unsigned deg_; }; } #include "npstat/nm/AbsClassicalOrthoPoly1D.icc" #endif // NPSTAT_ABSCLASSICALORTHOPOLY1D_HH_ Index: trunk/npstat/nm/ContOrthoPoly1D.icc =================================================================== --- trunk/npstat/nm/ContOrthoPoly1D.icc (revision 729) +++ trunk/npstat/nm/ContOrthoPoly1D.icc (revision 730) @@ -1,337 +1,385 @@ #include #include #include #include #include #include "npstat/nm/PairCompare.hh" namespace npstat { template ContOrthoPoly1D::ContOrthoPoly1D(const unsigned maxDegree, const std::vector >& inMeasure, const OrthoPolyMethod m) : wsum_(precise_zero), wsumsq_(precise_zero), minX_(DBL_MAX), maxX_(-DBL_MAX), maxdeg_(maxDegree), allWeightsEqual_(true) { if (inMeasure.empty()) throw std::invalid_argument( "In npstat::ContOrthoPoly1D constructor: empty measure"); const unsigned long mSize = inMeasure.size(); measure_.reserve(mSize); for (unsigned long i=0; i& p(inMeasure[i]); measure_.push_back(MeasurePoint(p.first, p.second)); } // From time to time, we expect all weights to be equal. // Check if this is indeed the case. If not, sort the // weights in the increasing order, hopefully reducing // the round-off error of subsequent calculations. 
const double w0 = measure_[0].second; for (unsigned long i = 1; i < mSize && allWeightsEqual_; ++i) allWeightsEqual_ = (w0 == measure_[i].second); if (!allWeightsEqual_) std::sort(measure_.begin(), measure_.end(), LessBySecond()); if (measure_[0].second < 0.0) throw std::invalid_argument( "In npstat::ContOrthoPoly1D constructor: negative measure entry found"); // Make sure that we don't have too many points with 0 weights unsigned long nZeroWeights = 0; if (allWeightsEqual_) { if (w0 == 0.0) nZeroWeights = mSize; } else { for (unsigned long i = 0; i < mSize; ++i) { if (measure_[i].second == 0.0) ++nZeroWeights; else break; } } if (mSize <= maxDegree + nZeroWeights) throw std::invalid_argument( "In npstat::ContOrthoPoly1D constructor: insufficient number " "of measure points with non-zero weights"); calcMeanXandWeightSums(); calcRecurrenceCoeffs(m); } template ContOrthoPoly1D::ContOrthoPoly1D(const unsigned maxDegree, const std::vector& inCoords, const OrthoPolyMethod m) : wsum_(precise_zero), wsumsq_(precise_zero), minX_(DBL_MAX), maxX_(-DBL_MAX), maxdeg_(maxDegree), allWeightsEqual_(true) { if (inCoords.empty()) throw std::invalid_argument( "In npstat::ContOrthoPoly1D constructor: empty list of coordinates"); const unsigned long sz = inCoords.size(); if (sz <= maxDegree) throw std::invalid_argument( "In npstat::ContOrthoPoly1D constructor: insufficient number of points"); // Fill the measure, converting the input type as needed measure_.reserve(sz); for (unsigned long i = 0; i < sz; ++i) measure_.push_back(MeasurePoint(inCoords[i], 1.0)); calcMeanXandWeightSums(); calcRecurrenceCoeffs(m); } template void ContOrthoPoly1D::calculateCoeffs(const Functor& fcn, Numeric* coeffs, const unsigned maxdeg) const { if (maxdeg > maxdeg_) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::calculateCoeffs: " "maximum degree is out of range"); assert(coeffs); std::vector scalarProducts(maxdeg+1U, precise_zero); const unsigned long mSize = measure_.size(); for (unsigned long i = 0; i < mSize; ++i) { const double x = measure_[i].first; const double w = measure_[i].second; const precise_type f = fcn(x + meanX_); precise_type polyk = 1.0, polykm1 = precise_zero; for (unsigned k=0; k double ContOrthoPoly1D::weightedSquaredError(const Functor& fcn, const double *coeffs, const unsigned maxdeg) const { if (maxdeg > maxdeg_) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::weightedSquaredError: " "maximum degree is out of range"); assert(coeffs); precise_type sum = precise_zero; const unsigned long mSize = measure_.size(); for (unsigned long i = 0; i < mSize; ++i) { const double x = measure_[i].first; const precise_type delta = normseries(coeffs, maxdeg, x) - fcn(x + meanX_); sum += delta*delta*measure_[i].second; } return sum/wsum_; } template double ContOrthoPoly1D::integratedSquaredError( const Functor& infcn, const double *coeffs, const unsigned maxdeg, const double xmin, const double xmax, unsigned integrationRulePoints) const { if (maxdeg > maxdeg_) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::integratedSquaredError: " "maximum degree is out of range"); assert(coeffs); if (!integrationRulePoints) { integrationRulePoints = PreciseQuadrature::minimalExactRule(2*maxdeg); if (!integrationRulePoints) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::integratedSquaredError: " "order of requested integration rule is too high"); } DeltaSquaredSerFcn fcn(*this, infcn, coeffs, maxdeg); PreciseQuadrature glq(integrationRulePoints); return glq.integrate(fcn, xmin, xmax); } template 
double ContOrthoPoly1D::integrateEWPolyProduct( const Functor& weight, const unsigned deg1, const unsigned deg2, const double xmin, const double xmax, const unsigned integrationRulePoints) const { if (deg1 > maxdeg_ || deg2 > maxdeg_) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::integrateEWPolyProduct: " "degree argument is out of range"); PreciseQuadrature glq(integrationRulePoints); EWPolyProductFcn fcn(*this, weight, deg1, deg2); // The shift of the poly argument will be performed inside "fcn" return glq.integrate(fcn, xmin, xmax); } + template + Matrix ContOrthoPoly1D::extWeightProductAverages( + const Functor& fcn, const AbsIntervalQuadrature1D& quad, + const unsigned maxdeg, const double i_xmin, const double i_xmax) const + { + if (maxdeg > maxdeg_) throw std::invalid_argument( + "In npstat::ContOrthoPoly1D::extWeightProductAverages: " + "degree argument is out of range"); + if (i_xmin >= i_xmax) throw std::invalid_argument( + "In npstat::ContOrthoPoly1D::extWeightProductAverages: " + "please provide reasonable xmin and xmax"); + + const unsigned nInteg = quad.npoints(); + const unsigned long nsums = (maxdeg + 1)*(unsigned long)(maxdeg + 2)/2; + std::vector membuf(nsums + (maxdeg + 1U) + 2U*nInteg, 0.0L); + long double* poly = &membuf[0]; + long double* sums = poly + (maxdeg + 1U); + long double* absc = sums + nsums; + long double* weights = absc + nInteg; + + quad.getAllAbscissae(absc, nInteg); + quad.getAllWeights(weights, nInteg); + const long double midpoint = (static_cast(i_xmin) + i_xmax)/2.0L; + const long double unit = (static_cast(i_xmax) - i_xmin)/2.0L; + + for (unsigned long i=0; i mat(maxdeg + 1U, maxdeg + 1U); + unsigned long isum = 0; + for (unsigned k=0; k<=maxdeg; ++k) + for (unsigned j=0; j<=k; ++j, ++isum) + { + const double v = sums[isum]; + mat[k][j] = v; + if (j != k) + mat[j][k] = v; + } + return mat; + } + template void ContOrthoPoly1D::getAllPolys(const double x, const unsigned maxdeg, Numeric *polyValues) const { if (maxdeg > maxdeg_) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::getAllPolys: " "maximum polynomial degree is out of range"); assert(polyValues); precise_type polyk = 1.0, polykm1 = precise_zero; polyValues[0] = polyk; for (unsigned k=0; k double ContOrthoPoly1D::constrainedCoeffs( const Functor& fcn, double *coeffs, const unsigned maxdeg, const double xmin, const double xmax, const double integralValue) const { const unsigned nPolys = maxdeg + 1U; // Calculate the unconstrained coefficients std::vector coeffs0(nPolys); calculateCoeffs(fcn, &coeffs0[0], maxdeg); // Construct the normalization constraint matrix npstat::Matrix constrMat(1, nPolys); lapack_double* constrData = constrMat[0]; for (unsigned i=0; i fitted(nPolys); npstat::Matrix U(nPolys, nPolys, 1); const bool status = U.constrainedLeastSquares( &coeffs0[0], nPolys, constrMat, &constraint, 1U, &fitted[0], nPolys, &chisq); assert(status); // Fill out the result buffer for (unsigned i=0; i void ContOrthoPoly1D::sampleAverages(const Numeric* xValues, const unsigned long sz, double *polyValues, const unsigned maxdeg) const { if (!sz) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::sampleAverages: empty sample"); assert(xValues); assert(polyValues); std::vector buffer(2U*(maxdeg + 1U), precise_zero); precise_type* sums = &buffer[0]; precise_type* tmp = sums + (maxdeg + 1U); for (unsigned long i=0; i(xValues[i]) - meanX_, maxdeg, tmp); for (unsigned deg=0; deg<=maxdeg; ++deg) sums[deg] += tmp[deg]; } const precise_type ldsz = sz; for (unsigned deg=0; 
deg<=maxdeg; ++deg) polyValues[deg] = sums[deg]/ldsz; } template Matrix ContOrthoPoly1D::sampleProductAverages( const Numeric* xValues, const unsigned long sz, const unsigned maxdeg) const { if (!sz) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::sampleProductAverages: empty sample"); assert(xValues); const unsigned maxdegp1 = maxdeg+1U; Matrix acc(maxdegp1, maxdegp1, 0); std::vector buf(maxdegp1); precise_type* tmp = &buf[0]; for (unsigned long i=0; i(xValues[i]) - meanX_, maxdeg, tmp); for (unsigned deg=0; deg<=maxdeg; ++deg) { precise_type* row = acc[deg]; for (unsigned k=0; k<=deg; ++k) row[k] += tmp[deg]*tmp[k]; } } for (unsigned deg=1; deg<=maxdeg; ++deg) { precise_type* row = acc[deg]; for (unsigned k=0; k(sz); return acc; } template ContOrthoPoly1D::precise_type ContOrthoPoly1D::normseries( const Numeric* coeffs, const unsigned maxdeg, const precise_type x) const { assert(coeffs); assert(maxdeg <= maxdeg_); precise_type sum = coeffs[0]; precise_type polyk = 1.0, polykm1 = precise_zero; for (unsigned k=0; k double ContOrthoPoly1D::series(const Numeric* coeffs, const unsigned deg, const double x) const { if (deg > maxdeg_) throw std::invalid_argument( "In npstat::ContOrthoPoly1D::series: degree argument is out of range"); return normseries(coeffs, deg, x - meanX_); } } Index: trunk/tests/test_ClassicalOrthoPolys1D.cc =================================================================== --- trunk/tests/test_ClassicalOrthoPolys1D.cc (revision 729) +++ trunk/tests/test_ClassicalOrthoPolys1D.cc (revision 730) @@ -1,433 +1,433 @@ #include #include #include #include #include "UnitTest++.h" #include "test_utils.hh" #include "npstat/nm/ClassicalOrthoPolys1D.hh" #include "npstat/nm/GaussLegendreQuadrature.hh" #include "npstat/nm/FejerQuadrature.hh" #include "npstat/nm/StorablePolySeries1D.hh" #include "npstat/nm/ClassicalOrthoPoly1DFromWeight.hh" #include "npstat/rng/MersenneTwister.hh" #include "npstat/stat/Distributions1D.hh" #include "npstat/stat/DensityOrthoPoly1D.hh" using namespace npstat; using namespace std; namespace { TEST(AbsOrthoPoly1D_cloning) { const unsigned ntries = 100U; const unsigned degs[3] = {3, 4, 5}; AbsClassicalOrthoPoly1D *poly1 = 0, *poly2 = 0; { HermiteProbOrthoPoly1D He; const AbsClassicalOrthoPoly1D& ref(He); poly1 = ref.clone(); poly2 = ref.clone(); const double x = test_rng()*10 - 5; for (unsigned ideg=0; idegpoly(degs[ideg], x), He.poly(degs[ideg], x)); } for (unsigned i=0; ipoly(degs[ideg], x), poly2->poly(degs[ideg], x)); CHECK_EQUAL(poly1->weight(x), poly2->weight(x)); } } delete poly2; delete poly1; } TEST(LegendreOrthoPoly1D_values) { const double eps = 1.0e-14; LegendreOrthoPoly1D leg; CPP11_auto_ptr leg2(leg.makeStorablePolySeries(8)); CHECK(leg.weight(0.123) == 0.5); CHECK(leg.weight(10.0) == 0.0); unsigned deg = 0; CHECK_CLOSE(1.0, leg.poly(deg, 0.3), eps); CHECK_CLOSE(1.0, leg2->poly(deg, 0.3), eps); deg = 3; CHECK_CLOSE(-71/343.0L*sqrtl(2*deg+1.0L), leg.poly(deg, 1/7.0L), eps); CHECK_CLOSE(-71/343.0L*sqrtl(2*deg+1.0L), leg2->poly(deg, 1/7.0L), eps); deg = 7; CHECK_CLOSE(-326569/1771561.0L*sqrtl(2*deg+1.0L), leg.poly(deg, 1/11.0L), eps); CHECK_CLOSE(-326569/1771561.0L*sqrtl(2*deg+1.0L), leg2->poly(deg, 1/11.0L), eps); GaussLegendreQuadrature glq(10); for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) CHECK_CLOSE((i == j ? 
1.0 : 0.0), leg.empiricalKroneckerDelta(glq, i, j), eps); const unsigned maxdeg = 8; double coeffs[maxdeg+1]; for (unsigned i=0; i<=maxdeg; ++i) coeffs[i] = test_rng(); leg2->setCoeffs(coeffs, maxdeg); for (unsigned i=0; i<=maxdeg; ++i) { const double x = test_rng()*2 - 1; CHECK_CLOSE(leg.series(coeffs, maxdeg, x), (*leg2)(x), eps); } CHECK_CLOSE(3374/715.0L, leg.integratePoly(3, 4), eps); CHECK_CLOSE(3374/715.0L, leg2->integratePoly(3, 4), eps); unsigned degs[3] = {3, 4, 5}; CHECK_CLOSE(1.051943782704350297, leg.jointIntegral(degs, 3), eps); std::ostringstream os; CHECK(leg2->classId().write(os)); CHECK(leg2->write(os)); std::istringstream is(os.str()); gs::ClassId clid(is, 1); StorablePolySeries1D* rb = StorablePolySeries1D::read(clid, is); assert(rb); CHECK(*rb == *leg2); delete rb; } TEST(JacobiOrthoPoly1D_values) { const double eps = 1.0e-15; const unsigned densMaxDeg = 10; JacobiOrthoPoly1D jac(3, 4); const Beta1D b1(-1.0, 2.0, 5, 4); const DensityOrthoPoly1D dpoly_00(b1, densMaxDeg, 2*densMaxDeg+5+4, OPOLY_STIELTJES); const DensityOrthoPoly1D dpoly_01(b1, densMaxDeg, 2*densMaxDeg+5+4, OPOLY_LANCZOS); OrthoPoly1DWeight kernel(jac); const ClassicalOrthoPoly1DFromWeight wpoly_00( kernel, densMaxDeg, 2*densMaxDeg+5+4, -1, 1, OPOLY_STIELTJES); const ClassicalOrthoPoly1DFromWeight wpoly_01( kernel, densMaxDeg, 2*densMaxDeg+5+4, -1, 1, OPOLY_LANCZOS); CHECK_EQUAL(densMaxDeg, dpoly_00.maxDegree()); CHECK_EQUAL(densMaxDeg, dpoly_01.maxDegree()); CHECK_EQUAL(densMaxDeg, wpoly_00.maxDegree()); CHECK_EQUAL(densMaxDeg, wpoly_01.maxDegree()); long double x = 1/3.0L; CHECK_CLOSE(35/32.0L*powl(1-x,3)*powl(1+x,4), jac.weight(x), eps); for (unsigned i=0; i<100; ++i) { const double x = test_rng()*2.0 - 1.0; const double w = jac.weight(x); CHECK_CLOSE(w, dpoly_00.weight(x), eps); CHECK_CLOSE(w, dpoly_01.weight(x), eps); CHECK_CLOSE(w, wpoly_00.weight(x), eps); CHECK_CLOSE(w, wpoly_01.weight(x), eps); for (unsigned deg=0; deg<=densMaxDeg; ++deg) { const double poly = jac.poly(deg, x); CHECK_CLOSE(poly, dpoly_00.poly(deg, x), eps*20*deg); CHECK_CLOSE(poly, dpoly_01.poly(deg, x), eps*20*deg); CHECK_CLOSE(poly, wpoly_00.poly(deg, x), eps*20*deg); CHECK_CLOSE(poly, wpoly_01.poly(deg, x), eps*20*deg); } } GaussLegendreQuadrature ptest(128); - const Matrix& m00 = dpoly_00.altWeightProductAverages( + const Matrix& m00 = dpoly_00.extWeightProductAverages( DensityFunctor1D(b1), ptest, densMaxDeg); CHECK_EQUAL(densMaxDeg+1U, m00.nRows()); CHECK_EQUAL(densMaxDeg+1U, m00.nColumns()); Matrix diag(densMaxDeg+1U, densMaxDeg+1U, 1); CHECK((m00 - diag).maxAbsValue() < eps); - const Matrix& m01 = dpoly_01.altWeightProductAverages( + const Matrix& m01 = dpoly_01.extWeightProductAverages( DensityFunctor1D(b1), ptest, densMaxDeg); CHECK((m01 - diag).maxAbsValue() < eps); GaussLegendreQuadrature glq(10); for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) { CHECK_CLOSE((i == j ? 1.0 : 0.0), jac.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), dpoly_00.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), dpoly_01.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), wpoly_00.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), wpoly_01.empiricalKroneckerDelta(glq, i, j), eps); unsigned degs[2]; degs[0] = i; degs[1] = j; CHECK_CLOSE((i == j ? 1.0 : 0.0), jac.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), dpoly_00.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 
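// Illustrative sketch, not npstat source: the Jacobi(3,4) weight checked above,
// w(x) = 35/32*(1-x)^3*(1+x)^4, integrates to 1 on [-1, 1] because
// int_{-1}^{1} (1-x)^a (1+x)^b dx = 2^(a+b+1)*a!*b!/(a+b+1)!, which equals
// 32/35 for a=3, b=4. A quick stand-alone numerical confirmation with the
// trapezoidal rule:
#include <cmath>

inline double jacobi34WeightNormSketch()
{
    const unsigned n = 200000;
    const double h = 2.0/n;
    double sum = 0.0;
    for (unsigned i=0; i<=n; ++i)
    {
        const double x = -1.0 + h*i;
        const double w = 35.0/32.0*std::pow(1.0 - x, 3)*std::pow(1.0 + x, 4);
        sum += (i == 0 || i == n) ? 0.5*w : w;
    }
    return sum*h;   // expected to be very close to 1.0
}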
1.0 : 0.0), dpoly_01.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), wpoly_00.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), wpoly_01.jointAverage(glq, degs, 2), eps); } JacobiOrthoPoly1D jac2(2, 5); const Beta1D b2(-1.0, 2.0, 6, 3); const DensityOrthoPoly1D dpoly_20(b2, densMaxDeg, 2*densMaxDeg+6+3, OPOLY_STIELTJES); const DensityOrthoPoly1D dpoly_21(b2, densMaxDeg, 2*densMaxDeg+6+3, OPOLY_LANCZOS); OrthoPoly1DWeight kernel2(jac2); const ClassicalOrthoPoly1DFromWeight wpoly_20( kernel2, densMaxDeg, 2*densMaxDeg+6+3, -1, 1, OPOLY_STIELTJES); const ClassicalOrthoPoly1DFromWeight wpoly_21( kernel2, densMaxDeg, 2*densMaxDeg+6+3, -1, 1, OPOLY_LANCZOS); for (unsigned i=0; i<100; ++i) { const double x = test_rng()*2.0 - 1.0; const double w = jac2.weight(x); CHECK_CLOSE(w, dpoly_20.weight(x), eps); CHECK_CLOSE(w, dpoly_21.weight(x), eps); CHECK_CLOSE(w, wpoly_20.weight(x), eps); CHECK_CLOSE(w, wpoly_21.weight(x), eps); for (unsigned deg=0; deg<=densMaxDeg; ++deg) { const double poly = jac2.poly(deg, x); CHECK_CLOSE(poly, dpoly_20.poly(deg, x), eps*100*deg); CHECK_CLOSE(poly, dpoly_21.poly(deg, x), eps*100*deg); CHECK_CLOSE(poly, wpoly_20.poly(deg, x), eps*100*deg); CHECK_CLOSE(poly, wpoly_21.poly(deg, x), eps*100*deg); } } for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) { CHECK_CLOSE((i == j ? 1.0 : 0.0), jac2.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), dpoly_20.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), dpoly_21.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), wpoly_20.empiricalKroneckerDelta(glq, i, j), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), wpoly_21.empiricalKroneckerDelta(glq, i, j), eps); unsigned degs[2]; degs[0] = i; degs[1] = j; CHECK_CLOSE((i == j ? 1.0 : 0.0), jac2.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), dpoly_20.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), dpoly_21.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 1.0 : 0.0), wpoly_20.jointAverage(glq, degs, 2), eps); CHECK_CLOSE((i == j ? 
1.0 : 0.0), wpoly_21.jointAverage(glq, degs, 2), eps); } JacobiOrthoPoly1D jac3(0, 0); CHECK_CLOSE(0.5, jac3.weight(0.123), eps); CHECK(jac3.weight(10.0) == 0.0); unsigned deg = 7; CHECK_CLOSE(-326569/1771561.0L*sqrtl(2*deg+1.0L), jac3.poly(deg, 1/11.0L), eps); MersenneTwister rng; const unsigned npoints = 64; const unsigned maxdeg = 7; const unsigned ntries = 5; std::vector points(npoints); for (unsigned itry=0; itry avemat(jac.sampleProductAverages(&points[0], npoints, maxdeg)); std::vector averages(maxdeg + 1); jac.sampleAverages(&points[0], npoints, &averages[0], maxdeg); CHECK_EQUAL(maxdeg + 1, avemat.nRows()); CHECK_EQUAL(maxdeg + 1, avemat.nColumns()); for (unsigned i=0; i<=maxdeg; ++i) { CHECK_CLOSE(averages[i], avemat[i][0], eps); CHECK_CLOSE(averages[i], avemat[0][i], eps); for (unsigned j=0; j<=maxdeg; ++j) { unsigned degs[2]; degs[0] = i; degs[1] = j; const double ave = jac.jointSampleAverage(&points[0], npoints, degs, 2); CHECK_CLOSE(ave, avemat[i][j], eps); CHECK_CLOSE(ave, avemat[j][i], eps); } } // Test "twoJointSampleAverages" method const double degmax = maxdeg + 0.99; unsigned degs0[3]; for (unsigned i=0; i<3; ++i) degs0[i] = rng()*degmax; unsigned degs1[4]; for (unsigned i=0; i<4; ++i) degs1[i] = rng()*degmax; const double ave0 = jac.jointSampleAverage(&points[0], npoints, degs0, 3); const double ave1 = jac.jointSampleAverage(&points[0], npoints, degs1, 4); const std::pair& ave2 = jac.twoJointSampleAverages( &points[0], npoints, degs0, 3, degs1, 4); CHECK_CLOSE(ave0, ave2.first, eps); CHECK_CLOSE(ave1, ave2.second, eps); } } TEST(ChebyshevOrthoPoly2nd_values) { const double epsVal = 1.0e-15; const double epsKron = 1.0e-7; ChebyshevOrthoPoly2nd cheb; unsigned deg = 3; CHECK_CLOSE(-0.54810495626822157434L, cheb.poly(deg, 1/7.0L), epsVal); deg = 7; CHECK_CLOSE(-0.66835314371696127673L, cheb.poly(deg, 1/11.0L), epsVal); GaussLegendreQuadrature glq(1024); for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) CHECK_CLOSE((i == j ? 1.0 : 0.0), cheb.empiricalKroneckerDelta(glq, i, j), epsKron); } TEST(ChebyshevOrthoPoly1st_values) { const double epsVal = 1.0e-15; const double epsKron = 1.0e-3; const double norm = 1.0/sqrt(2.0); ChebyshevOrthoPoly1st cheb; unsigned deg = 0; CHECK_CLOSE(1.0, cheb.poly(deg, 0.3), epsVal); deg = 3; CHECK_CLOSE(-0.41690962099125364431L/norm, cheb.poly(deg, 1/7.0L), epsVal); deg = 7; CHECK_CLOSE(-0.59498215518301758629L/norm, cheb.poly(deg, 1/11.0L), epsVal); FejerQuadrature fej(1000); for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) CHECK_CLOSE((i == j ? 1.0 : 0.0), cheb.empiricalKroneckerDelta(fej, i, j), epsKron); } TEST(HermiteProbOrthoPoly1D_values) { const double eps = 1.0e-15; const double epsKron = 1.0e-7; HermiteProbOrthoPoly1D He; long double x = 1/3.0L; CHECK_CLOSE(He.weight(x), exp(-x*x/2)/sqrt(2.0*M_PI), eps); CHECK_CLOSE(He.poly(0, x), 1.0L, eps); CHECK_CLOSE(He.poly(1, x), 1/3.0L, eps); CHECK_CLOSE(He.poly(2, x), -0.62853936105470891058L, eps); CHECK_CLOSE(He.poly(3, x), -0.39312798340964586761L, eps); CHECK_CLOSE(He.poly(4, x), 0.47880972338354304389, eps); GaussLegendreQuadrature glq(1024); for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) CHECK_CLOSE((i == j ? 
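// Illustrative sketch, not npstat source: the HermiteProbOrthoPoly1D values
// checked above follow the orthonormal convention P_k(x) = He_k(x)/sqrt(k!),
// with the probabilists' Hermite polynomials He_2 = x^2 - 1, He_3 = x^3 - 3x,
// He_4 = x^4 - 6x^2 + 3. A stand-alone spot check at x = 1/3 reproducing the
// constants used in the tests:
#include <cmath>

inline void hermiteNormalizationSketch(long double* p2, long double* p3,
                                       long double* p4)
{
    const long double x = 1.0L/3.0L;
    *p2 = (x*x - 1.0L)/std::sqrt(2.0L);                  // ~ -0.628539361...
    *p3 = (x*x*x - 3.0L*x)/std::sqrt(6.0L);              // ~ -0.393127983...
    *p4 = (x*x*x*x - 6.0L*x*x + 3.0L)/std::sqrt(24.0L);  // ~  0.478809723...
}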
1.0 : 0.0), He.empiricalKroneckerDelta(glq, i, j), epsKron); } TEST(HermiteProbOrthoPoly1D_dens_1) { const double eps = 1.0e-15; const double epsKron = 1.0e-7; Gauss1D g(0.0, 1.0); const DensityOrthoPoly1D He(g, 5, 1000, OPOLY_STIELTJES); long double x = 1/3.0L; CHECK_CLOSE(He.weight(x), exp(-x*x/2)/sqrt(2.0*M_PI), eps); CHECK_CLOSE(He.poly(0, x), 1.0L, eps); CHECK_CLOSE(He.poly(1, x), 1/3.0L, eps); CHECK_CLOSE(He.poly(2, x), -0.62853936105470891058L, eps); CHECK_CLOSE(He.poly(3, x), -0.39312798340964586761L, eps); CHECK_CLOSE(He.poly(4, x), 0.47880972338354304389, eps); GaussLegendreQuadrature glq(1024); for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) CHECK_CLOSE((i == j ? 1.0 : 0.0), He.empiricalKroneckerDelta(glq, i, j), epsKron); } TEST(HermiteProbOrthoPoly1D_dens_2) { const double eps = 1.0e-15; const double epsKron = 1.0e-7; Gauss1D g(0.0, 1.0); const DensityOrthoPoly1D He(g, 5, 1000, OPOLY_LANCZOS); long double x = 1/3.0L; CHECK_CLOSE(He.weight(x), exp(-x*x/2)/sqrt(2.0*M_PI), eps); CHECK_CLOSE(He.poly(0, x), 1.0L, eps); CHECK_CLOSE(He.poly(1, x), 1/3.0L, eps); CHECK_CLOSE(He.poly(2, x), -0.62853936105470891058L, eps); CHECK_CLOSE(He.poly(3, x), -0.39312798340964586761L, eps); CHECK_CLOSE(He.poly(4, x), 0.47880972338354304389, eps); GaussLegendreQuadrature glq(1024); for (unsigned i=0; i<5; ++i) for (unsigned j=0; j<5; ++j) CHECK_CLOSE((i == j ? 1.0 : 0.0), He.empiricalKroneckerDelta(glq, i, j), epsKron); } TEST(AbsClassicalOrthoPoly1D_sampleCoeffVars) { typedef std::pair Point; const unsigned maxdeg = 10; const unsigned nPseudo = 100; const unsigned perPseudo = 100; const double loc = 2.0; const double scale = 8.0; ShiftedLegendreOrthoPoly1D leg; std::vector points(perPseudo); std::vector wPoints(perPseudo); std::vector coeffs(maxdeg + 1U, 100000.0); std::vector vars(maxdeg + 1U, -100000.0); for (unsigned ips=0; ips 0.0); leg.weightedSampleCoeffs(&wPoints[0], perPseudo, loc, scale, &coeffs[0], maxdeg); leg.weightedSampleCoeffVars(&wPoints[0], perPseudo, loc, scale, &coeffs[0], maxdeg, &vars[0]); for (unsigned i=1; i<=maxdeg; ++i) CHECK(vars[i] > 0.0); } } } Index: trunk/tests/test_ContOrthoPoly1D.cc =================================================================== --- trunk/tests/test_ContOrthoPoly1D.cc (revision 729) +++ trunk/tests/test_ContOrthoPoly1D.cc (revision 730) @@ -1,367 +1,399 @@ #include "UnitTest++.h" #include "test_utils.hh" #include "npstat/nm/ContOrthoPoly1D.hh" #include "npstat/nm/ClassicalOrthoPolys1D.hh" #include "npstat/nm/FejerQuadrature.hh" #include "npstat/nm/StorablePolySeries1D.hh" #include "npstat/rng/MersenneTwister.hh" +#include "npstat/stat/Distributions1D.hh" + using namespace npstat; using namespace std; inline static int kdelta(const unsigned i, const unsigned j) { return i == j ? 
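// Illustrative sketch, not the npstat API: the sampleCoeffs/sampleCoeffVars
// pair exercised in the test above estimates c_k = E[P_k((X - location)/scale)]
// by a sample mean, together with the variance of that mean estimated from the
// same sample. A generic version for any callable poly(k, x), using the usual
// 1/(n-1) unbiasing factor (the exact estimator used by npstat may differ):
#include <vector>
#include <cstddef>

template <class PolyFcn>
void sampleCoeffAndVarSketch(const PolyFcn& poly, const unsigned k,
                             const std::vector<double>& sample,
                             const double location, const double scale,
                             double* coeff, double* variance)
{
    const std::size_t n = sample.size();
    long double sum = 0.0L, sumsq = 0.0L;
    for (std::size_t i=0; i<n; ++i)
    {
        const long double p = poly(k, (sample[i] - location)/scale);
        sum += p;
        sumsq += p*p;
    }
    const long double mean = sum/n;
    *coeff = mean;
    // Unbiased estimate of the variance of the sample mean
    *variance = (sumsq/n - mean*mean)/(n - 1U);
}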
1 : 0; } namespace { TEST(ContOrthoPoly1D_orthonormalization) { const double eps = 1.0e-15; const OrthoPolyMethod method[] = {OPOLY_STIELTJES, OPOLY_LANCZOS}; const unsigned nMethods = sizeof(method)/sizeof(method[0]); const unsigned npoints = 64; const unsigned maxdeg = 10; const unsigned ntries = 10; std::vector points(2U*npoints); std::vector measure; measure.reserve(npoints); std::vector coeffs(maxdeg + 1); MersenneTwister rng; for (unsigned imeth=0; imeth(kdelta(i, j)), d, eps); } measure.clear(); for (unsigned i=0; i(kdelta(i, 0)), coeffs[i], eps); } } #ifdef USE_LAPACK_QUAD TEST(ContOrthoPoly1D_orthonormalization_quad_STIELTJES) { const lapack_double eps = FLT128_EPSILON*100.0; const OrthoPolyMethod method = OPOLY_STIELTJES; const unsigned npoints = 64; const unsigned maxdeg = 10; const unsigned ntries = 5; const double xmin = -1.0; const double xmax = 1.0; const double range = xmax - xmin; std::vector points(2U*npoints); std::vector measure; measure.reserve(npoints); MersenneTwister rng; for (unsigned itry=0; itry(kdelta(i, j)), d, eps); } } } TEST(ContOrthoPoly1D_orthonormalization_quad_LANCZOS) { const lapack_double eps = FLT128_EPSILON*100.0; const OrthoPolyMethod method = OPOLY_LANCZOS; const unsigned npoints = 64; const unsigned maxdeg = 10; const unsigned ntries = 5; const double xmin = -1.0; const double xmax = 1.0; const double range = xmax - xmin; std::vector points(2U*npoints); std::vector measure; measure.reserve(npoints); MersenneTwister rng; for (unsigned itry=0; itry(kdelta(i, j)), d, eps); } } } #endif TEST(ContOrthoPoly1D_weightCoeffs) { const double eps = 1.0e-7; const unsigned maxdeg = 10; const unsigned ntries = 10; const unsigned npoints = maxdeg + 1U; std::vector points(2U*npoints); std::vector measure; measure.reserve(npoints); std::vector coeffs(maxdeg + 1); MersenneTwister rng; for (unsigned itry=0; itry poly2( poly.makeStorablePolySeries(xmin, xmax)); for (unsigned deg=0; deg<=maxdeg; ++deg) for (unsigned ipow=0; ipow<4; ++ipow) { const double i1 = poly.integratePoly(deg, ipow, xmin, xmax); const double i2 = poly2->integratePoly(deg, ipow); if (fabs(i2) > 1.0e-10) CHECK_CLOSE(1.0, i1/i2, 1.0e-10); } for (unsigned i=0; iseries(&coeffs[0], maxdeg, x); CHECK_CLOSE(w, fvalue, 1000*eps); CHECK_CLOSE(fvalue, f2, eps); } } } TEST(ContOrthoPoly1D_cov8) { const double eps = 1.0e-12; MersenneTwister rng; const unsigned npoints = 64; const unsigned maxdeg = 6; const unsigned ntries = 5; const unsigned degtries = 100; std::vector points(npoints); for (unsigned itry=0; itry points(npoints); + Beta1D beta(0.0, 1.0, 2.0, 2.0); + GaussLegendreQuadrature glq(nInteg); + + for (unsigned itry=0; itry& mat = poly.extWeightProductAverages( + DensityFunctor1D(beta), glq, maxdeg, 0.0, 1.0); + for (unsigned i=0; i<=maxdeg; ++i) + for (unsigned j=0; j<=maxdeg; ++j) + { + const double integ = poly.integrateEWPolyProduct( + DensityFunctor1D(beta), i, j, 0.0, 1.0, nInteg); + CHECK_CLOSE(integ, mat[i][j], eps); + } + } + } + TEST(ContOrthoPoly1D_epsCovariance) { const double eps = 1.0e-15; const unsigned npoints = 64; const unsigned maxdeg = 6; const unsigned ntries = 5; MersenneTwister rng; std::vector points(npoints); for (unsigned itry=0; itry r(nrandom); for (unsigned i=0; i result(maxdeg + 1); poly.sampleAverages(&r[0], r.size(), &result[0], maxdeg); Matrix mat(maxdeg + 1, maxdeg + 1, 0); std::vector acc(maxdeg + 1, 0.0L); std::vector tmp(maxdeg + 1); for (unsigned i=0; i(acc[deg]), result[deg], eps); } // Test "pairwiseSampleAverages" method mat /= nrandom; Matrix averages = 
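// Illustrative sketch, not part of the test suite: typical use of the
// extWeightProductAverages method introduced in this revision, mirroring the
// new ContOrthoPoly1D_extWeightProductAverages test above. The matrix of
// quadrature averages of w_ext(x)*P_i(x)*P_j(x) should agree element by
// element with integrateEWPolyProduct. The calls are taken from the test;
// the include list and the Matrix element type (double) are assumptions.
#include <cmath>
#include "npstat/nm/ContOrthoPoly1D.hh"
#include "npstat/nm/GaussLegendreQuadrature.hh"
#include "npstat/stat/Distributions1D.hh"

inline double extWeightAveragesCrossCheckSketch(
    const npstat::ContOrthoPoly1D& poly,
    const unsigned maxdeg, const unsigned nInteg)
{
    const npstat::Beta1D beta(0.0, 1.0, 2.0, 2.0);
    npstat::GaussLegendreQuadrature glq(nInteg);
    const npstat::Matrix<double>& mat = poly.extWeightProductAverages(
        npstat::DensityFunctor1D(beta), glq, maxdeg, 0.0, 1.0);
    double maxDiff = 0.0;
    for (unsigned i=0; i<=maxdeg; ++i)
        for (unsigned j=0; j<=maxdeg; ++j)
        {
            const double integ = poly.integrateEWPolyProduct(
                npstat::DensityFunctor1D(beta), i, j, 0.0, 1.0, nInteg);
            const double diff = std::abs(integ - mat[i][j]);
            if (diff > maxDiff)
                maxDiff = diff;
        }
    return maxDiff;   // expected to be tiny, as in the unit test
}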
poly.sampleProductAverages( &r[0], r.size(), maxdeg); Matrix refmat(mat); CHECK((averages - refmat).maxAbsValue() < eps); } TEST(ContOrthoPoly1D_jacobi_2) { typedef ContOrthoPoly1D::PreciseQuadrature Quadrature; const double eps = 1.0e-13; const unsigned alpha = 3; const unsigned beta = 4; const unsigned maxdeg = 5; const unsigned npoints = 64; JacobiOrthoPoly1D jac(alpha, beta); OrthoPoly1DWeight weight(jac); const unsigned npt = Quadrature::minimalExactRule(2*maxdeg+alpha+beta); Quadrature glq(npt); ContOrthoPoly1D poly(maxdeg, glq.weightedIntegrationPoints(weight, -1.0, 1.0), OPOLY_LANCZOS); for (unsigned i=0; i
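// Illustrative sketch, not part of the test suite: the "jacobi_2" test above
// builds a ContOrthoPoly1D from the weighted integration points of a classical
// Jacobi weight and then compares it against JacobiOrthoPoly1D. A condensed
// version of that construction, using only calls that appear in the test; the
// include list is an assumption, and OrthoPoly1DWeight is used exactly as in
// the test, without any template parameters that may have been elided.
#include "npstat/nm/ContOrthoPoly1D.hh"
#include "npstat/nm/ClassicalOrthoPolys1D.hh"

inline void jacobiFromWeightedPointsSketch(const unsigned maxdeg)
{
    typedef npstat::ContOrthoPoly1D::PreciseQuadrature Quadrature;

    const unsigned alpha = 3;
    const unsigned beta = 4;
    const npstat::JacobiOrthoPoly1D jac(alpha, beta);
    const npstat::OrthoPoly1DWeight weight(jac);

    // A rule exact for degree 2*maxdeg polynomials times the weight
    const unsigned npt = Quadrature::minimalExactRule(2*maxdeg + alpha + beta);
    Quadrature glq(npt);

    const npstat::ContOrthoPoly1D poly(
        maxdeg, glq.weightedIntegrationPoints(weight, -1.0, 1.0),
        npstat::OPOLY_LANCZOS);

    // At this point poly.poly(deg, x) should reproduce jac.poly(deg, x)
    // for deg <= maxdeg, which is what the test checks.
    (void)poly;
}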