
diff --git a/inc/Lau1DCubicSpline.hh b/inc/Lau1DCubicSpline.hh
index 87cd324..accc701 100644
--- a/inc/Lau1DCubicSpline.hh
+++ b/inc/Lau1DCubicSpline.hh
@@ -1,228 +1,235 @@
/*
Copyright 2015 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file Lau1DCubicSpline.hh
\brief File containing declaration of Lau1DCubicSpline class.
*/
/*! \class Lau1DCubicSpline
\brief Class for defining a 1D cubic spline based on a set of knots.
Class for defining a 1D cubic spline based on a set of knots.
- Interpolation between the knots is performed either by one of
+ Interpolation between the knots is performed either by one of
two types of cubic spline (standard or Akima) or by linear interpolation.
The splines are defined by a piecewise cubic function which, between knots i and i+1, has the form
f_i(x) = (1 - t)*y_i + t*y_i+1 + t*(1 - t)(a*(1 - t) + b*t)
where t = (x - x_i)/(x_i+1 - x_i),
a = k_i *(x_i+1 - x_i) - (y_i+1 - y_i),
b = -k_i+1*(x_i+1 - x_i) + (y_i+1 - y_i)
and k_i is (by construction) the first derivative at knot i.
f(x) and f'(x) are continuous at the internal knots by construction.
-
- For the standard splines, f''(x) is required to be continuous
+
+ For the standard splines, f''(x) is required to be continuous
at all internal knots placing n-2 constraints on the n parameters, k_i.
The final two constraints are set by the boundary conditions.
At each boundary, the function may be:
(i) Clamped : f'(x) = C at the last knot
(ii) Natural : f''(x) = 0 at the last knot
(iii) Not a knot : f'''(x) continuous at the second last knot
The algorithms used in these splines can be found on:
http://en.wikipedia.org/wiki/Spline_interpolation#Algorithm_to_find_the_interpolating_cubic_spline
http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
For the Akima splines, the values of k_i are determined from the slopes of the four nearest segments (a_i-1, a_i, a_i+1 and a_i+2) as
k_i = ( | a_i+2 - a_i+1 | a_i + | a_i - a_i-1 | a_i+1 ) / ( | a_i+2 - a_i+1 | + | a_i - a_i-1 | )
and as
k_i = ( a_i + a_i+1 ) / 2
in the special case a_i-1 == a_i != a_i+1 == a_i+2.
Boundary conditions are specified by the relations
- a_2 - a_1 = a_1 - a_0 = a_0 - a_-1 and
+ a_2 - a_1 = a_1 - a_0 = a_0 - a_-1 and
a_n-1 - a_n-2 = a_n - a_n-1 = a_n+1 - a_n
The algorithms used in these splines can be found in:
J.ACM vol. 17 no. 4 pp 589-602
*/
#ifndef LAU_1DCUBICSPLINE
#define LAU_1DCUBICSPLINE
#include <vector>
#include <array>
#include "LauParameter.hh"
#include "Rtypes.h"
#include "TF1.h"
class Lau1DCubicSpline {
public:
//! Define the allowed interpolation types
enum LauSplineType {
StandardSpline, /*!< standard cubic splines with f''(x) continuous at all internal knots */
AkimaSpline, /*!< Akima cubic splines with f'(x) at each knot defined locally by the positions of only five knots */
LinearInterpolation /*!< Linear interpolation between each pair of knots */
};
//! Define the allowed boundary condition types
/*!
These are only supported by standard splines
*/
enum LauSplineBoundaryType {
Clamped, /*!< clamped boundary - f'(x) = C */
Natural, /*!< natural boundary - f''(x) = 0 */
NotAKnot /*!< 'not a knot' boundary - f'''(x) continuous at second last knot */
};
//! Constructor
/*!
\param [in] xs the x-values of the knots
\param [in] ys the y-values of the knots
\param [in] type the type of interpolation to perform
\param [in] leftBound the left-hand boundary condition
\param [in] rightBound the right-hand boundary condition
\param [in] dydx0 the gradient at the left-hand boundary of a clamped spline
\param [in] dydxn the gradient at the right-hand boundary of a clamped spline
*/
Lau1DCubicSpline(const std::vector<Double_t>& xs, const std::vector<Double_t>& ys,
LauSplineType type = Lau1DCubicSpline::StandardSpline,
- LauSplineBoundaryType leftBound = Lau1DCubicSpline::NotAKnot,
+ LauSplineBoundaryType leftBound = Lau1DCubicSpline::NotAKnot,
LauSplineBoundaryType rightBound = Lau1DCubicSpline::NotAKnot,
Double_t dydx0 = 0.0, Double_t dydxn = 0.0);
//! Destructor
virtual ~Lau1DCubicSpline();
//! Evaluate the function at given point
/*!
\param [in] x the x-coordinate
\return the value of the spline at x
*/
Double_t evaluate(Double_t x) const;
//! Update the y-values of the knots
/*!
\param [in] ys the y-values of the knots
*/
void updateYValues(const std::vector<Double_t>& ys);
void updateYValues(const std::vector<LauParameter*>& ys);
//! Update the type of interpolation to perform
/*!
\param [in] type the type of interpolation
*/
void updateType(LauSplineType type);
//! Update the boundary conditions for the spline
/*!
\param [in] leftBound the left-hand boundary condition
\param [in] rightBound the right-hand boundary condition
\param [in] dydx0 the gradient at the left-hand boundary of a clamped spline
\param [in] dydxn the gradient at the right-hand boundary of a clamped spline
*/
- void updateBoundaryConditions(LauSplineBoundaryType leftBound,
- LauSplineBoundaryType rightBound,
+ void updateBoundaryConditions(LauSplineBoundaryType leftBound,
+ LauSplineBoundaryType rightBound,
Double_t dydx0 = 0.0,
Double_t dydxn = 0.0);
//! Return the number of knots
UInt_t getnKnots() const {return nKnots_;}
//! Get y values
- std::vector<Double_t> getYValues(){return y_;}
+ const std::vector<Double_t>& getYValues() const {return y_;}
+
+ //! Get x values
+ const std::vector<Double_t>& getXValues() const {return x_;}
//! Get the coefficients of spline section i in the form a + bx + cx^2 + dx^3
/*!
\param [in] i the index of the left-hand knot of the section (i = 0 gives the coefficients between x_0 and x_1)
\param [in] normalise whether to divide the coefficients by the integral of the spline
\return coefficients {a, b, c, d}
*/
- std::array<Double_t,4> getCoefficients(const UInt_t i) const;
+ std::array<Double_t,4> getCoefficients(const UInt_t i, const bool normalise = false) const;
+
+ //! Get the integral over all the spline segments
+ Double_t integral() const;
//! Make a TF1 showing the spline with its current knot values
/*!
+ \param [in] normalise whether or not the spline should be normalised to unit area
\return 1D function object
*/
- TF1* makeTF1() const;
+ TF1* makeTF1(const bool normalise = false) const;
private:
//! Copy constructor - not implemented
Lau1DCubicSpline( const Lau1DCubicSpline& rhs );
//! Copy assignment operator - not implemented
Lau1DCubicSpline& operator=(const Lau1DCubicSpline& rhs);
//! Initialise the class
void init();
//! Calculate the first derivative at each knot
void calcDerivatives();
//! Calculate the first derivatives according to the standard method
void calcDerivativesStandard();
//! Calculate the first derivatives according to the Akima method
void calcDerivativesAkima();
//! The number of knots in the spline
const UInt_t nKnots_;
//! The x-value at each knot
std::vector<Double_t> x_;
//! The y-value at each knot
std::vector<Double_t> y_;
//! The first derivative at each knot
std::vector<Double_t> dydx_;
//! The 'a' coefficients used to determine the derivatives
std::vector<Double_t> a_;
//! The 'b' coefficients used to determine the derivatives
std::vector<Double_t> b_;
//! The 'c' coefficients used to determine the derivatives
std::vector<Double_t> c_;
//! The 'd' coefficients used to determine the derivatives
std::vector<Double_t> d_;
//! The type of interpolation to be performed
LauSplineType type_;
//! The left-hand boundary condition on the spline
LauSplineBoundaryType leftBound_;
//! The right-hand boundary condition on the spline
LauSplineBoundaryType rightBound_;
//! The gradient at the left boundary for a clamped spline
Double_t dydx0_;
//! The gradient at the right boundary for a clamped spline
Double_t dydxn_;
ClassDef(Lau1DCubicSpline, 0); // Class for defining a 1D cubic spline
};
#endif
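For reference, a minimal usage sketch of the interface declared above. It exercises only the public methods of Lau1DCubicSpline, including the integral() and makeTF1(normalise) additions in this patch; the knot values are placeholders.

// Illustrative sketch only, using the Lau1DCubicSpline public interface declared above.
#include <vector>
#include "Lau1DCubicSpline.hh"
#include "TF1.h"

void splineUsageSketch()
{
	// five knots describing a smooth acceptance shape (placeholder values)
	const std::vector<Double_t> xs { 0.0, 2.5, 5.0, 7.5, 10.0 };
	const std::vector<Double_t> ys { 0.1, 0.5, 0.8, 0.9, 1.0 };

	Lau1DCubicSpline spline( xs, ys, Lau1DCubicSpline::StandardSpline,
				 Lau1DCubicSpline::NotAKnot, Lau1DCubicSpline::NotAKnot );

	const Double_t value = spline.evaluate( 3.0 );  // interpolated value at x = 3
	const Double_t area  = spline.integral();       // area under the spline between the end knots

	// TF1 whose coefficients are divided by the integral, so it has unit area
	TF1* normalisedFunc = spline.makeTF1( true );

	(void)value; (void)area; (void)normalisedFunc;
}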
diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh
index 2b32219..c03334e 100644
--- a/inc/LauDecayTimePdf.hh
+++ b/inc/LauDecayTimePdf.hh
@@ -1,625 +1,672 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauDecayTimePdf.hh
\brief File containing declaration of LauDecayTimePdf class.
*/
/*! \class LauDecayTimePdf
\brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time.
LauDecayTimePdf is a class that provides the PDFs for describing the
time-dependence of the various terms in a particle/antiparticle decay to a
common final state. The various terms have the form of exponentially
decaying trigonometric or hyperbolic functions convolved with a N-Gaussian
resolution function.
*/
#ifndef LAU_DECAYTIME_PDF
#define LAU_DECAYTIME_PDF
#include <vector>
+#include <array>
#include <complex>
#include "TString.h"
#include "LauAbsRValue.hh"
#include "LauFitDataTree.hh"
#include "LauComplex.hh"
class TH1;
class Lau1DHistPdf;
class Lau1DCubicSpline;
// TODO - Should this have Pdf in the name?
// - Audit function names and public/private access category
// - Audit what should be given to constructor and what can be set later (maybe different constructors for different scenarios, e.g. smeared with per-event error/smeared with avg error/not smeared)
class LauDecayTimePdf final {
public:
// TODO - can we think of better names?
//! The functional form of the decay time PDF
enum FuncType {
Hist, //!< Hist PDF for fixed background
Delta, //!< Delta function - for prompt background
Exp, //!< Exponential function - for non-prompt background or charged B's
DeltaExp, //!< Delta + Exponential function - for background with prompt and non-prompt parts
ExpTrig, //!< Exponential function with Delta m driven mixing - for neutral B_d's
ExpHypTrig //!< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's
};
//! How is the decay time measured - absolute or difference?
enum TimeMeasurementMethod {
DecayTime, //!< Absolute measurement of decay time, e.g. LHCb scenario
DecayTimeDiff //!< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario
};
//! How is the TD efficiency information going to be given?
enum EfficiencyMethod {
Spline, //!< As a cubic spline
Binned, //!< As a histogram (TH1D/TH1F)
Flat //!< As a flat distribution (constant)
};
//! Constructor
/*!
\param [in] theVarName the name of the decay time variable in the input data
\param [in] theVarErrName the name of the decay time error variable in the input data
\param [in] params the parameters of the PDF
\param [in] minAbscissaVal the minimum value of the abscissa
\param [in] maxAbscissaVal the maximum value of the abscissa
\param [in] minAbscissaErr the minimum value of the abscissa error
\param [in] maxAbscissaErr the maximum value of the abscissa error
\param [in] type the functional form of the PDF
\param [in] nGauss the number of Gaussians in the resolution function
\param [in] scale controls whether the Gaussian parameters are scaled by the per-event error
\param [in] method set the type of the time measurement used in the given experiment
\param [in] effMethod the method by which the decay-time efficiency is provided
*/
LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
const Double_t minAbscissaVal, const Double_t maxAbscissaVal,
const Double_t minAbscissaErr, const Double_t maxAbscissaErr,
const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline);
//! Constructor
/*!
\param [in] theVarName the name of the decay time variable in the input data
\param [in] theVarErrName the name of the decay time error variable in the input data
\param [in] params the parameters of the PDF
\param [in] minAbscissaVal the minimum value of the abscissa
\param [in] maxAbscissaVal the maximum value of the abscissa
\param [in] minAbscissaErr the minimum value of the abscissa error
\param [in] maxAbscissaErr the maximum value of the abscissa error
\param [in] type the functional form of the PDF
\param [in] nGauss the number of Gaussians in the resolution function
\param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error
\param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error
\param [in] method set the type of the time measurement used in the given experiment
\param [in] effMethod the method by which the decay-time efficiency is provided
*/
LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
const Double_t minAbscissaVal, const Double_t maxAbscissaVal,
const Double_t minAbscissaErr, const Double_t maxAbscissaErr,
const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scaleMeans,
const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline);
//! Copy constructor (deleted)
LauDecayTimePdf(const LauDecayTimePdf& other) = delete;
//! Copy assignment operator (deleted)
LauDecayTimePdf& operator=(const LauDecayTimePdf& other) = delete;
//! Move constructor (deleted)
LauDecayTimePdf(LauDecayTimePdf&& other) = delete;
//! Move assignment operator (deleted)
LauDecayTimePdf& operator=(LauDecayTimePdf&& other) = delete;
//! Destructor
~LauDecayTimePdf();
// TODO - Do we need this?
// - If so, should it be a hist or a LauAbsPdf?
// - Or should there be a dedicated constructor for this scenario?
//! Set the Histogram PDF in case of fixed background PDF
void setHistoPdf(const TH1* hist);
// TODO - should this be a LauAbsPdf instead?
//! Set the histogram to be used for generation of per-event decay time errors
/*!
If not set will fall back to using Landau distribution
\param [in] hist the histogram of the distribution
*/
void setErrorHisto(const TH1* hist);
// TODO - do we still want this option?
//! Set the parameters of the Landau distribution used to generate the per-event decay time errors
/*!
\param [in] mpv the MPV (most probable value) of the distribution
\param [in] sigma the width of the distribution
*/
void setErrorDistTerms(const Double_t mpv, const Double_t sigma) {
errorDistMPV_ = mpv;
errorDistSigma_ = sigma;
}
// TODO - should we remove the EfficiencyMethod argument from the constructor, default to Flat and have these functions modify it?
//! Set the efficiency function in the form of a histogram
/*!
\param [in] hist the histogram of efficiencies
*/
void setEffiHist(const TH1* hist);
//! Set the efficiency function in the form of spline
/*!
\param [in] spline the efficiency spline function
*/
void setEffiSpline(Lau1DCubicSpline* spline);
//! Retrieve the name of the decay time variable
const TString& varName() const {return varName_;}
//! Retrieve the name of the error variable
const TString& varErrName() const {return varErrName_;}
// TODO - this should probably be set at construction time
//! Turn on or off the resolution function
void doSmearing(Bool_t smear) {smear_ = smear;}
//! Determine if the resolution function is turned on or off
Bool_t doSmearing() const {return smear_;}
// TODO - we don't use this at the moment
//! Calculate single effective decay time resolution from multiple Gaussian resolution functions
/*!
\return effective resolution
*/
Double_t effectiveResolution() const;
//! Cache information from data
/*!
\param [in] inputData the dataset to be used to calculate everything
*/
void cacheInfo(const LauFitDataTree& inputData);
//! Calculate the likelihood (and all associated information) given value of the abscissa
/*!
\param [in] abscissa the value of the abscissa
*/
void calcLikelihoodInfo(const Double_t abscissa);
//! Calculate the likelihood (and all associated information) given value of the abscissa and its error
/*!
\param [in] abscissa the value of the abscissa
\param [in] abscissaErr the error on the abscissa
*/
void calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr);
//! Retrieve the likelihood (and all associated information) given the event number
/*!
\param [in] iEvt the event number
*/
void calcLikelihoodInfo(const UInt_t iEvt);
//! Get FuncType from model
FuncType getFuncType() const {return type_;}
// TODO - should maybe do away with exp term (and its norm) since it's just the cosh term when DG=0 and it's confusing to have both
// - counter argument is to keep it for backgrounds that have a lifetime-like behaviour
//! Get the exponential term
Double_t getExpTerm() const {return expTerm_;}
//! Get the cos(Dm*t) term (multiplied by the exponential)
Double_t getCosTerm() const {return cosTerm_;}
//! Get the sin(Dm*t) term (multiplied by the exponential)
Double_t getSinTerm() const {return sinTerm_;}
//! Get the cosh(DG/2*t) term (multiplied by the exponential)
Double_t getCoshTerm() const {return coshTerm_;}
//! Get the sinh(DG/2*t) term (multiplied by the exponential)
Double_t getSinhTerm() const {return sinhTerm_;}
//! Get the normalisation related to the exponential term only
Double_t getNormTermExp() const {return normTermExp_;}
//! Get the normalisation related to the cos term only
Double_t getNormTermCos() const {return normTermCos_;}
//! Get the normalisation related to the sin term only
Double_t getNormTermSin() const {return normTermSin_;}
//! Get the first term in the normalisation (from integrating the cosh)
Double_t getNormTermCosh() const {return normTermCosh_;}
//! Get the second term in the normalisation (from integrating the sinh)
Double_t getNormTermSinh() const {return normTermSinh_;}
//! Get error probability density from Error distribution
Double_t getErrTerm() const{return errTerm_;}
//! Get efficiency probability density from efficiency distribution
Double_t getEffiTerm() const{return effiTerm_;}
//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
/*!
\return the parameters of the PDF
*/
const std::vector<LauAbsRValue*>& getParameters() const { return param_; }
//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
/*!
\return the parameters of the PDF
*/
std::vector<LauAbsRValue*>& getParameters() { return param_; }
//! Update the pulls for all parameters
void updatePulls();
//! Calculate the normalisation of all terms
/*!
\param [in] abscissaErr the per-event decay-time error (if used)
*/
void calcNorm(const Double_t abscissaErr = 0.0);
//! Calculate the normalisation integrals in the given range
/*!
This form is to be used for the case where decay time resolution is neglected
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] weight the weight for this range, typically the efficiency value
*/
void calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight);
//! Calculate the normalisation integrals in the given range
/*!
This form is to be used for the case where decay time resolution is accounted for
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] weight the weight for this range, typically the efficiency value
\param [in] means the mean values of each Gaussian in the resolution function
\param [in] sigmas the width values of each Gaussian in the resolution function
\param [in] fractions the fractional weight of each Gaussian in the resolution function
*/
void calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions);
+ //! Calculate the normalisation integrals in the given range for the case of spline efficiency acceptance
+ /*!
+ This form is to be used for the case where decay time resolution is accounted for
+
+ \param [in] splineIndex the index of the spline segment (knot interval) over which to integrate
+ \param [in] means the mean values of each Gaussian in the resolution function
+ \param [in] sigmas the width values of each Gaussian in the resolution function
+ \param [in] fractions the fractional weight of each Gaussian in the resolution function
+ */
+ void calcSmearedSplinePartialIntegrals(const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions);
+
//! Calculate normalisation for non-smeared cos and sin terms
/*!
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\return pair of {cosTermIntegral, sinTermIntegral}
*/
std::pair<Double_t, Double_t> nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs);
//! Calculate normalisation for decay-time resolution smeared cos and sin terms
/*!
Uses the Faddeeva function method from
(https://arxiv.org/abs/1407.0748)
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] sigma width of the Gaussian resolution function
\param [in] mu mean of the Gaussian resolution function
\return pair of {cosTermIntegral, sinTermIntegral}
*/
std::pair<Double_t, Double_t> smearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);
//! Calculate normalisation for non-smeared cosh and sinh terms
/*!
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\return pair of {coshTermIntegral, sinhTermIntegral}
*/
std::pair<Double_t, Double_t> nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs);
//! Calculate normalisation for decay-time resolution smeared cosh and sinh terms
/*!
Uses the Faddeeva function method from
(https://arxiv.org/abs/1407.0748)
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] sigma width of the Gaussian resolution function
\param [in] mu mean of the Gaussian resolution function
\return pair of {coshTermIntegral, sinhTermIntegral}
*/
std::pair<Double_t, Double_t> smearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);
//! Calculate normalisation for non-smeared exponential term
/*!
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\return integral
*/
Double_t nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs);
//! Calculate normalisation for decay-time resolution smeared exponential term
/*!
Uses the Faddeeva function method from
(https://arxiv.org/abs/1407.0748)
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] sigma width of the Gaussian resolution function
\param [in] mu mean of the Gaussian resolution function
\return integral
*/
Double_t smearedExpIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);
//! Calculate decay-time resolution smeared cos and sin terms
/*!
Uses the Faddeeva function method from
(https://arxiv.org/abs/1407.0748)
\param [in] t decay time
\param [in] sigma width of the Gaussian resolution function
\param [in] mu mean of the Gaussian resolution function
\return pair of {cosTerm, sinTerm}
*/
std::pair<Double_t, Double_t> smearedCosSinTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);
//! Calculate decay-time resolution smeared cosh and sinh terms
/*!
Uses the Faddeeva function method from
(https://arxiv.org/abs/1407.0748)
\param [in] t decay time
\param [in] sigma width of the Gaussian resolution function
\param [in] mu mean of the Gaussian resolution function
\return pair of {coshTerm, sinhTerm}
*/
std::pair<Double_t, Double_t> smearedCoshSinhTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);
//! Calculate decay-time resolution smeared exponential term
/*!
Uses the Faddeeva function method from
(https://arxiv.org/abs/1407.0748)
\param [in] t decay time
\param [in] sigma width of the Gaussian resolution function
\param [in] mu mean of the Gaussian resolution function
\return exponential term convolved with resolution function
*/
Double_t smearedExpTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);
+ //! Generate the K vector used in eqn 31 from arXiv:1407.0748
+ /*!
+ \param [in] z the z value, which differs between the exp, sin, sinh, etc. terms
+ \return size 4 array of vector values
+ */
+ std::array<std::complex<double>,4> generateKvector(const std::complex<double> z);
+
+ //! Generate the M vector used in eqn 31 from arXiv:1407.0748
+ /*!
+ Uses the Faddeeva function method from
+ (https://arxiv.org/abs/1407.0748)
+
+ \param [in] minAbs lower bound for the integral domain
+ \param [in] maxAbs upper bound for the integral domain
+ \param [in] z The z value, changing for exp, sin, sinh, etc
+ \param [in] sigma width of the Gaussian resolution function
+ \param [in] mu mean of the Gaussian resolution function
+ \return size 4 array of vector values
+ */
+ std::array<std::complex<double>,4> generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<double> z, const Double_t sigma, const Double_t mu = 0.);
+
+
+ //! Normalise the spline; call this for each term with its corresponding z value
+ /*!
+ \param [in] splineIndex the index of the spline segment (knot interval) over which to integrate
+ \param [in] z varying z value for each term
+ \param [in] sigma width of the Gaussian resolution function
+ \param [in] mu mean of the Gaussian resolution function
+ \return real and imaginary parts of the normalisation
+ */
+ std::pair<Double_t,Double_t> splineNormalise(const UInt_t splineIndex, const std::complex<double> z, const Double_t sigma, const Double_t mu);
+
//! Generate the value of the error
/*!
If scaling by the per-event error, this should be called before calling generate
\param [in] forceNew forces generation of a new value
*/
Double_t generateError(const Bool_t forceNew = kFALSE);
//TODO not clear that this is really needed, perhaps for background? commented out for now
//! Generate an event from the PDF
/*!
\param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence
*/
//LauFitData generate(const LauKinematics* kinematics);
//! Retrieve the decay time minimum value
Double_t minAbscissa() const {return minAbscissa_;}
//! Retrieve the decay time maximum value
Double_t maxAbscissa() const {return maxAbscissa_;}
//! Retrieve the decay time error minimum value
Double_t minAbscissaError() const {return minAbscissaError_;}
//! Retrieve the decay time error maximum value
Double_t maxAbscissaError() const {return maxAbscissaError_;}
// TODO - can we delete this?
// NB calcPDFHeight only calculates the gaussian information for the (type_ == Delta) case
//! Calculate the maximum height of the PDF
//void calcPDFHeight( const LauKinematics* kinematics );
//! Get efficiency parameters to float in the fit
std::vector<LauParameter*>& getEffiPars() {return effiPars_;}
//! Update spline Y values when floating the decay time acceptance
/*!
\param [in] params the vector of LauParameters describing the Y values
*/
void updateEffiSpline(std::vector<LauParameter*> params);
protected:
//! Set up the initial state correctly - called by the constructors
void initialise();
//! Calculate the pure physics terms with no resolution function applied
void calcNonSmearedTerms(const Double_t abscissa);
//! Retrieve the number of PDF parameters
/*!
\return the number of PDF parameters
*/
UInt_t nParameters() const {return param_.size();}
//! Retrieve the specified parameter
/*!
\param [in] parName the parameter to retrieve
*/
LauAbsRValue* findParameter(const TString& parName);
//! Retrieve the specified parameter
/*!
\param [in] parName the parameter to retrieve
*/
const LauAbsRValue* findParameter(const TString& parName) const;
private:
//! Name of the variable
const TString varName_;
//! Name of the error variable
const TString varErrName_;
//! The parameters of the PDF
std::vector<LauAbsRValue*> param_;
// TODO - should probably set this at construction time (can then be const)
//! Smear with the resolution model or not
Bool_t smear_;
//! The minimum value of the decay time
const Double_t minAbscissa_;
//! The maximum value of the decay time
const Double_t maxAbscissa_;
//! The minimum value of the decay time error
const Double_t minAbscissaError_;
//! The maximum value of the decay time error
const Double_t maxAbscissaError_;
//! The current value of the decay time error
Double_t abscissaError_;
//! Flag whether a value for the decay time error has been generated
Bool_t abscissaErrorGenerated_;
//! Value of the MPV of the Landau dist used to generate the Delta t error
Double_t errorDistMPV_;
//! Value of the width of the Landau dist used to generate the Delta t error
Double_t errorDistSigma_;
//! The number of gaussians in the resolution model
const UInt_t nGauss_;
// Parameters of the gaussian(s) that accounts for the resolution:
//! mean (offset) of each Gaussian in the resolution function
std::vector<LauAbsRValue*> mean_;
//! spread (sigma) of each Gaussian in the resolution function
std::vector<LauAbsRValue*> sigma_;
//! fraction of each Gaussian in the resolution function
std::vector<LauAbsRValue*> frac_;
// Parameters of the physics decay time distribution
//! Lifetime parameter
LauAbsRValue* tau_;
//! Mass difference parameter
LauAbsRValue* deltaM_;
//! Width difference parameter
LauAbsRValue* deltaGamma_;
//! Parameter for the fraction of prompt events in DeltaExp
LauAbsRValue* fracPrompt_;
//! Which type of decay time function is this?
const FuncType type_;
//! Are we using absolute decay time or decay time difference?
const TimeMeasurementMethod method_;
//! Which method for eff(decaytime) input are we using?
const EfficiencyMethod effMethod_;
//! Scale the mean of each Gaussian by the per-event decay time error?
const std::vector<Bool_t> scaleMeans_;
//! Scale the sigma of each Gaussian by the per-event decay time error?
const std::vector<Bool_t> scaleWidths_;
//! Is anything being scaled by the per-event decay time error?
const Bool_t scaleWithPerEventError_;
//! The exp(-G*t) term
Double_t expTerm_;
//! The cos(Dm*t) term (multiplied by the exponential)
Double_t cosTerm_;
//! The sin(Dm*t) term (multiplied by the exponential)
Double_t sinTerm_;
//! The cosh(DG/2*t) term (multiplied by the exponential)
Double_t coshTerm_;
//! The sinh(DG/2*t) term (multiplied by the exponential)
Double_t sinhTerm_;
//! Normalisation of the exponential term
Double_t normTermExp_;
//! Normalisation of the cos term
Double_t normTermCos_;
//! Normalisation of the sin term
Double_t normTermSin_;
//! Normalisation of the cosh term
Double_t normTermCosh_;
//! Normalisation of the sinh term
Double_t normTermSinh_;
//! Error PDF (NB there is no equivalent cache since the PDF errHist_ keeps a cache)
Double_t errTerm_;
//! Efficiency
Double_t effiTerm_;
//TODO : to be deleted? or needed for backgrounds?
//! Hist PDF term (NB there is no equivalent cache since the PDF pdfHist_ keeps a cache)
Double_t pdfTerm_;
//! The cache of the decay times
std::vector<Double_t> abscissas_;
//! The cache of the per-event errors on the decay time
std::vector<Double_t> abscissaErrors_;
//! The cache of the exponential terms
std::vector<Double_t> expTerms_;
//! The cache of the exponential * cosh(DG/2*t) terms
std::vector<Double_t> coshTerms_;
//! The cache of the exponential * sinh(DG/2*t) terms
std::vector<Double_t> sinhTerms_;
//! The cache of the exponential * cos(Dm*t) terms
std::vector<Double_t> cosTerms_;
//! The cache of the exponential * sin(Dm*t) terms
std::vector<Double_t> sinTerms_;
//! The cache of the exponential normalisation
std::vector<Double_t> normTermsExp_;
//! The cache of the cosh term normalisation
std::vector<Double_t> normTermsCosh_;
//! The cache of the sinh term normalisation
std::vector<Double_t> normTermsSinh_;
//! The cache of the cos term normalisation
std::vector<Double_t> normTermsCos_;
//! The cache of the sin term normalisation
std::vector<Double_t> normTermsSin_;
//! The cache of the efficiency
std::vector<Double_t> effiTerms_;
//! Histogram PDF for abscissa error distribution
Lau1DHistPdf* errHist_;
//! Histogram PDF for abscissa distribution
Lau1DHistPdf* pdfHist_;
//! efficiency PDF in spline
Lau1DCubicSpline* effiFun_;
//! efficiency PDF as Histogram
TH1* effiHist_;
//! Vector of parameters to float acceptance
std::vector<LauParameter*> effiPars_;
ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF
};
#endif
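A hedged construction sketch for the class declared above. The parameter names follow the conventions checked in LauDecayTimePdf::initialise for an ExpTrig PDF with one resolution Gaussian; the LauParameter constructor arguments and all numerical values are assumptions for illustration only.

// Illustrative sketch only: building an ExpTrig decay-time PDF with a spline acceptance.
#include <vector>
#include "LauDecayTimePdf.hh"
#include "LauParameter.hh"
#include "Lau1DCubicSpline.hh"

LauDecayTimePdf* buildDecayTimePdf( Lau1DCubicSpline* effiSpline )
{
	// ExpTrig with nGauss = 1 requires: mean_0, sigma_0, tau and deltaM
	std::vector<LauAbsRValue*> params {
		new LauParameter( "mean_0",  0.0,  -1.0, 1.0, kTRUE ),
		new LauParameter( "sigma_0", 0.05,  0.0, 0.5, kTRUE ),
		new LauParameter( "tau",     1.52,  0.5, 5.0, kTRUE ),
		new LauParameter( "deltaM",  0.51,  0.0, 1.0, kTRUE )
	};

	// do not scale the resolution Gaussian by the per-event error
	const std::vector<Bool_t> scale { kFALSE };

	LauDecayTimePdf* pdf = new LauDecayTimePdf( "decayTime", "decayTimeErr", params,
						    0.0, 15.0,  // decay time range
						    0.0, 0.5,   // decay time error range
						    LauDecayTimePdf::ExpTrig, 1, scale,
						    LauDecayTimePdf::DecayTime,
						    LauDecayTimePdf::EfficiencyMethod::Spline );

	pdf->setEffiSpline( effiSpline );
	pdf->doSmearing( kTRUE );

	return pdf;
}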
diff --git a/src/Lau1DCubicSpline.cc b/src/Lau1DCubicSpline.cc
index 26c43e9..15daf3c 100644
--- a/src/Lau1DCubicSpline.cc
+++ b/src/Lau1DCubicSpline.cc
@@ -1,455 +1,479 @@
/*
Copyright 2015 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file Lau1DCubicSpline.cc
\brief File containing implementation of Lau1DCubicSpline class.
*/
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <TH2.h>
#include <TSystem.h>
#include "Lau1DCubicSpline.hh"
ClassImp(Lau1DCubicSpline)
Lau1DCubicSpline::Lau1DCubicSpline(const std::vector<Double_t>& xs, const std::vector<Double_t>& ys,
LauSplineType type,
LauSplineBoundaryType leftBound,
LauSplineBoundaryType rightBound,
Double_t dydx0, Double_t dydxn) :
nKnots_(xs.size()),
x_(xs),
y_(ys),
type_(type),
leftBound_(leftBound),
rightBound_(rightBound),
dydx0_(dydx0),
dydxn_(dydxn)
{
init();
}
Lau1DCubicSpline::~Lau1DCubicSpline()
{
}
Double_t Lau1DCubicSpline::evaluate(Double_t x) const
{
// do not attempt to extrapolate the spline
if( x<x_[0] || x>x_[nKnots_-1] ) {
std::cerr << "WARNING in Lau1DCubicSpline::evaluate : function is only defined between " << x_[0] << " and " << x_[nKnots_-1] << std::endl;
std::cerr << " value at " << x << " returned as 0" << std::endl;
return 0.;
}
// first determine which 'cell' of the spline x is in
// cell i runs from knot i to knot i+1
Int_t cell(0);
while( x > x_[cell+1] ) {
++cell;
}
// obtain x- and y-values of the neighbouring knots
Double_t xLow = x_[cell];
Double_t xHigh = x_[cell+1];
Double_t yLow = y_[cell];
Double_t yHigh = y_[cell+1];
if(type_ == Lau1DCubicSpline::LinearInterpolation) {
return yHigh*(x-xLow)/(xHigh-xLow) + yLow*(xHigh-x)/(xHigh-xLow);
}
// obtain t, the normalised x-coordinate within the cell,
// and the coefficients a and b, which are defined in cell i as:
//
// a_i = k_i *(x_i+1 - x_i) - (y_i+1 - y_i),
// b_i = -k_i+1*(x_i+1 - x_i) + (y_i+1 - y_i)
//
// where k_i is (by construction) the first derivative at knot i
Double_t t = (x - xLow) / (xHigh - xLow);
Double_t a = dydx_[cell] * (xHigh - xLow) - (yHigh - yLow);
Double_t b = -1.*dydx_[cell+1] * (xHigh - xLow) + (yHigh - yLow);
Double_t retVal = (1 - t) * yLow + t * yHigh + t * (1 - t) * ( a * (1 - t) + b * t );
return retVal;
}
void Lau1DCubicSpline::updateYValues(const std::vector<Double_t>& ys)
{
y_ = ys;
this->calcDerivatives();
}
void Lau1DCubicSpline::updateYValues(const std::vector<LauParameter*>& ys)
{
for (UInt_t i=0; i<nKnots_; ++i){
y_[i] = ys[i]->unblindValue();
}
this->calcDerivatives();
}
void Lau1DCubicSpline::updateType(LauSplineType type)
{
if(type_ != type) {
type_ = type;
this->calcDerivatives();
}
}
void Lau1DCubicSpline::updateBoundaryConditions(LauSplineBoundaryType leftBound,
LauSplineBoundaryType rightBound,
Double_t dydx0, Double_t dydxn)
{
Bool_t updateDerivatives(kFALSE);
if(leftBound_ != leftBound || rightBound_ != rightBound) {
leftBound_ = leftBound;
rightBound_ = rightBound;
updateDerivatives = kTRUE;
}
if(dydx0_ != dydx0) {
dydx0_ = dydx0;
if(leftBound_ == Lau1DCubicSpline::Clamped) updateDerivatives = kTRUE;
}
if(dydxn_ != dydxn) {
dydxn_ = dydxn;
if(rightBound_ == Lau1DCubicSpline::Clamped) updateDerivatives = kTRUE;
}
if(updateDerivatives) {
this->calcDerivatives();
}
}
-std::array<Double_t,4> Lau1DCubicSpline::getCoefficients(const UInt_t i) const
+std::array<Double_t,4> Lau1DCubicSpline::getCoefficients(const UInt_t i, const bool normalise) const
{
std::array<Double_t,4> result = {0.,0.,0.,0.};
if(i >= nKnots_)
{
std::cerr << "ERROR in Lau1DCubicSpline::getCoefficients requested for too high a knot value" << std::endl;
return result;
}
Double_t xL = x_[i] , xH = x_[i+1];
Double_t yL = y_[i] , yH = y_[i+1];
Double_t h = xH-xL; //This number comes up a lot
switch(type_)
{
case Lau1DCubicSpline::StandardSpline:
case Lau1DCubicSpline::AkimaSpline:
{
Double_t kL = dydx_[i], kH = dydx_[i+1];
//a and b based on definitions from https://en.wikipedia.org/wiki/Spline_interpolation#Algorithm_to_find_the_interpolating_cubic_spline
Double_t a = kL*h-(yH-yL);
Double_t b =-kH*h+(yH-yL);
Double_t denom = -h*h*h; //The terms have a common denominator
result[0] = -b*xL*xL*xH + a*xL*xH*xH + h*h*(xL*yH - xH*yL);
result[1] = -a*xH*(2*xL+xH) + b*xL*(xL+2*xH) + h*h*(yL-yH);
result[2] = -b*(2*xL+xH) + a*(xL+2*xH);
result[3] = -a+b;
for(auto& res : result){res /= denom;}
break;
}
/* case Lau1DCubicSpline::AkimaSpline: // Double check the Akima description of splines (in evaluate) right now they're the same except for the first derivatives
{
//using fomulae from https://asmquantmacro.com/2015/09/01/akima-spline-interpolation-in-excel/
std::function<Double_t(Int_t)> m = [&](Int_t j) //formula to get the straight line gradient
{
if(j < 0){return 2*m(j+1)-m(j+2);}
if(j >= nKnots_){return 2*m(j-1)-m(j-2);}
return (y_[j+1]-y_[j]) / (x_[j+1]-x_[j]);
};
auto t = [&](Int_t j)
{
Double_t res = 0.; //originally res was called 't' but that produced a shadow warning
Double_t denom = TMath::Abs(m(j+1)-m(j)) + TMath::Abs(m(j-1)-m(j-2));
if(denom == 0){res = (m(j)-m(j-1))/2;} //Special case for when denom = 0
else
{
res = TMath::Abs(m(j+1)-m(j))*m(j-1) + TMath::Abs(m(j-1)-m(j-2))*m(j);
res /= denom;
}
return res;
};
//These are the p's to get the spline in the form p_k(x-xL)^k
Double_t pDenom = x_[i+1]/x_[i]; //a denominator used for p[2] and p[3]
std::array<Double_t,4> p = {y_[i],t(i),0.,0.}; //we'll do the last 2 below
p[2] = 3*m(i)-2*t(i)-t(i+1);
p[2]/= pDenom;
p[3] = t(i)+t(i+1)-2*m(i);
p[3]/= pDenom*pDenom;
//Now finally rearranging the p's into the desired results
result[0] = p[0]-p[1]*xL+p[2]*xL*xL-p[3]*xL*xL*xL;
result[1] = p[1]-2*p[2]*xL+3*p[3]*xL*xL;
result[2] = p[2]-3*p[3]*xL;
result[3] = p[3];
break;
}*/
case Lau1DCubicSpline::LinearInterpolation:
{
result[0] = xH*yL-xL*yH;
result[1] = yH-yL;
for(auto& res : result){res /= h;}
break;
}
}
+ if(normalise)
+ {
+ Double_t integral = this->integral();
+ for(auto& res : result){res /= integral;}
+ }
+
return result;
}
-TF1* Lau1DCubicSpline::makeTF1() const
+Double_t Lau1DCubicSpline::integral() const
+{
+ Double_t integral = 0.;
+ for(UInt_t iKnot = 0; iKnot < nKnots_ -1; ++iKnot)
+ {
+ Double_t minAbs = x_[iKnot];
+ Double_t maxAbs = x_[iKnot+1];
+
+ std::array<Double_t,4> coeffs = this -> getCoefficients(iKnot, false);
+
+ auto integralFunc = [&coeffs](Double_t x){return coeffs[0]*x + coeffs[1]*x*x/2 + coeffs[2]*x*x*x/3 + coeffs[3]*x*x*x*x/4;};
+
+ integral += integralFunc(maxAbs);
+ integral -= integralFunc(minAbs);
+ }
+ return integral;
+}
+
+TF1* Lau1DCubicSpline::makeTF1(const bool normalise) const
{
TString functionString = ""; //make a long piecewise construction of all the spline pieces
for(UInt_t i = 0; i < nKnots_-1; ++i)
{
functionString += Form("(x>%f && x<= %f)*",x_[i],x_[i+1]);//get the bounds of this piece
- std::array<Double_t,4> coeffs = this->getCoefficients(i);
+ std::array<Double_t,4> coeffs = this->getCoefficients(i,normalise);
functionString += Form("(%f + %f*x + %f*x^2 + %f*x^3)",coeffs[0],coeffs[1],coeffs[2],coeffs[3]);
if(i < nKnots_ -2){functionString += " + \n";}//add to all lines except the last
}
TF1* func = new TF1("SplineFunction", functionString, x_.front(), x_.back());
return func;
}
void Lau1DCubicSpline::init()
{
if( y_.size() != x_.size()) {
std::cerr << "ERROR in Lau1DCubicSpline::init : The number of y-values given does not match the number of x-values" << std::endl;
std::cerr << " Found " << y_.size() << ", expected " << x_.size() << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
dydx_.insert(dydx_.begin(),nKnots_,0.);
a_.insert(a_.begin(),nKnots_,0.);
b_.insert(b_.begin(),nKnots_,0.);
c_.insert(c_.begin(),nKnots_,0.);
d_.insert(d_.begin(),nKnots_,0.);
this->calcDerivatives();
}
void Lau1DCubicSpline::calcDerivatives()
{
switch ( type_ ) {
case Lau1DCubicSpline::StandardSpline :
this->calcDerivativesStandard();
break;
case Lau1DCubicSpline::AkimaSpline :
this->calcDerivativesAkima();
break;
case Lau1DCubicSpline::LinearInterpolation :
//derivatives not needed for linear interpolation
break;
}
}
void Lau1DCubicSpline::calcDerivativesStandard()
{
// derivatives are determined such that the second derivative is continuous at internal knots
// derivatives, k_i, are the solutions to a set of linear equations of the form:
// a_i * k_i-1 + b_i * k_i + c_i * k_i+1 = d_i with a_0 = 0, c_n-1 = 0
// this is solved using the tridiagonal matrix algorithm as on en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
// first and last equations give boundary conditions
// - for natural boundary, require f''(x) = 0 at end knot
// - for 'not a knot' boundary, require f'''(x) continuous at second knot
// - for clamped boundary, require predefined value of f'(x) at end knot
// non-zero values of a_0 and c_n-1 would give cyclic boundary conditions
a_[0] = 0.;
c_[nKnots_-1] = 0.;
// set left boundary condition
if(leftBound_ == Lau1DCubicSpline::Natural) {
b_[0] = 2./(x_[1]-x_[0]);
c_[0] = 1./(x_[1]-x_[0]);
d_[0] = 3.*(y_[1]-y_[0])/((x_[1]-x_[0])*(x_[1]-x_[0]));
} else if(leftBound_ == Lau1DCubicSpline::NotAKnot) {
// define the width, h, and the 'slope', delta, of the first cell
Double_t h1(x_[1]-x_[0]), h2(x_[2]-x_[0]);
Double_t delta1((y_[1]-y_[0])/h1), delta2((y_[2]-y_[1])/h2);
// these coefficients can be determined by requiring f'''_0(x_1) = f'''_1(x_1)
// the requirement f''_0(x_1) = f''_1(x_1) has been used to remove the dependence on k_2
b_[0] = h2;
c_[0] = h1+h2;
d_[0] = delta1*(2.*h2*h2 + 3.*h1*h2)/(h1+h2) + delta2*5.*h1*h1/(h1+h2);
} else { //Clamped
b_[0] = 1.;
c_[0] = 0.;
d_[0] = dydx0_;
}
// set right boundary condition
if(rightBound_ == Lau1DCubicSpline::Natural) {
a_[nKnots_-1] = 1./(x_[nKnots_-1]-x_[nKnots_-2]);
b_[nKnots_-1] = 2./(x_[nKnots_-1]-x_[nKnots_-2]);
d_[nKnots_-1] = 3.*(y_[nKnots_-1]-y_[nKnots_-2])/((x_[nKnots_-1]-x_[nKnots_-2])*(x_[nKnots_-1]-x_[nKnots_-2]));
} else if(rightBound_ == Lau1DCubicSpline::NotAKnot) {
// define the width, h, and the 'slope', delta, of the last cell
Double_t hnm1(x_[nKnots_-1]-x_[nKnots_-2]), hnm2(x_[nKnots_-2]-x_[nKnots_-3]);
Double_t deltanm1((y_[nKnots_-1]-y_[nKnots_-2])/hnm1), deltanm2((y_[nKnots_-2]-y_[nKnots_-3])/hnm2);
// these coefficients can be determined by requiring f'''_n-3(x_n-2) = f'''_n-2(x_n-2)
// the requirement f''_n-3(x_n-2) = f''_n-2(x_n-2) has been used to remove
// the dependence on k_n-3
a_[nKnots_-1] = hnm2 + hnm1;
b_[nKnots_-1] = hnm1;
d_[nKnots_-1] = deltanm2*hnm1*hnm1/(hnm2+hnm1) + deltanm1*(2.*hnm2*hnm2 + 3.*hnm2*hnm1)/(hnm2+hnm1);
} else { //Clamped
a_[nKnots_-1] = 0.;
b_[nKnots_-1] = 1.;
d_[nKnots_-1] = dydxn_;
}
// the remaining equations ensure that f_i-1''(x_i) = f''_i(x_i) for all internal knots
for(UInt_t i=1; i<nKnots_-1; ++i) {
a_[i] = 1./(x_[i]-x_[i-1]);
b_[i] = 2./(x_[i]-x_[i-1]) + 2./(x_[i+1]-x_[i]);
c_[i] = 1./(x_[i+1]-x_[i]);
d_[i] = 3.*(y_[i]-y_[i-1])/((x_[i]-x_[i-1])*(x_[i]-x_[i-1])) + 3.*(y_[i+1]-y_[i])/((x_[i+1]-x_[i])*(x_[i+1]-x_[i]));
}
// forward sweep - replace c_i and d_i with
//
// c'_i = c_i / b_i for i = 0
// c_i / (b_i - a_i * c_i-1) otherwise
//
// and
//
// d'_i = d_i / b_i for i = 0
// (d_i - a_i * d_i-1) / (b_i - a_i * c_i-1) otherwise
c_[0] /= b_[0];
d_[0] /= b_[0];
for(UInt_t i=1; i<nKnots_-1; ++i) {
c_[i] = c_[i] / (b_[i] - a_[i]*c_[i-1]);
d_[i] = (d_[i] - a_[i]*d_[i-1]) / (b_[i] - a_[i]*c_[i-1]);
}
d_[nKnots_-1] = (d_[nKnots_-1] - a_[nKnots_-1]*d_[nKnots_-2]) / (b_[nKnots_-1] - a_[nKnots_-1]*c_[nKnots_-2]);
// back substitution - calculate k_i = dy/dx at each knot
//
// k_i = d'_i for i = n-1
// d'_i - c'_i * k_i+1 otherwise
dydx_[nKnots_-1] = d_[nKnots_-1];
for(Int_t i=nKnots_-2; i>=0; --i) {
dydx_[i] = d_[i] - c_[i]*dydx_[i+1];
}
}
void Lau1DCubicSpline::calcDerivativesAkima() {
//derivatives are calculated according to the Akima method
// J.ACM vol. 17 no. 4 pp 589-602
Double_t am1(0.), an(0.), anp1(0.);
// a[i] is the slope of the segment from point i-1 to point i
//
// n.b. segment 0 is before point 0 and segment n is after point n-1
// internal segments are numbered 1 - n-1
for(UInt_t i=1; i<nKnots_; ++i) {
a_[i] = (y_[i]-y_[i-1])/(x_[i]-x_[i-1]);
}
// calculate slopes between additional points on each end according to the Akima method
// method assumes that the additional points follow a quadratic defined by the last three points
// this leads to the relations a[2] - a[1] = a[1] - a[0] = a[0] - a[-1] and a[n-1] - a[n-2] = a[n] - a[n-1] = a[n+1] - a[n]
a_[0] = 2*a_[1] - a_[2];
am1 = 2*a_[0] - a_[1];
an = 2*a_[nKnots_-1] - a_[nKnots_-2];
anp1 = 2*an - a_[nKnots_-1];
// b[i] is the weight of a[i] towards dydx[i]
// c[i] is the weight of a[i+1] towards dydx[i]
// See Appendix A of J.ACM vol. 17 no. 4 pp 589-602 for a justification of these weights
b_[0] = TMath::Abs(a_[1] - a_[0]);
b_[1] = TMath::Abs(a_[2] - a_[1]);
c_[0] = TMath::Abs(a_[0] - am1 );
c_[1] = TMath::Abs(a_[1] - a_[0]);
for(UInt_t i=2; i<nKnots_-2; ++i) {
b_[i] = TMath::Abs(a_[i+2] - a_[i+1]);
c_[i] = TMath::Abs(a_[i] - a_[i-1]);
}
b_[nKnots_-2] = TMath::Abs(an - a_[nKnots_-1]);
b_[nKnots_-1] = TMath::Abs(anp1 - an );
c_[nKnots_-2] = TMath::Abs(a_[nKnots_-2] - a_[nKnots_-3]);
c_[nKnots_-1] = TMath::Abs(a_[nKnots_-1] - a_[nKnots_-2]);
// dy/dx calculated as the weighted average of a[i] and a[i+1]:
// dy/dx_i = ( | a_i+2 - a_i+1 | a_i + | a_i - a_i-1 | a_i+1 ) / ( | a_i+2 - a_i+1 | + | a_i - a_i-1 | )
// in the special case a_i-1 == a_i != a_i+1 == a_i+2 this function is undefined so dy/dx is then defined as (a_i + a_i+1) / 2
for(UInt_t i=0; i<nKnots_-2; ++i) {
if(b_[i]==0 && c_[i]==0) {
dydx_[i] = ( a_[i] + a_[i+1] ) / 2.;
} else {
dydx_[i] = ( b_[i] * a_[i] + c_[i] * a_[i+1] ) / ( b_[i] + c_[i] );
}
}
if(b_[nKnots_-1]==0 && c_[nKnots_-1]==0) {
dydx_[nKnots_-1] = ( a_[nKnots_-1] + an ) / 2.;
} else {
dydx_[nKnots_-1] = ( b_[nKnots_-1] * a_[nKnots_-1] + c_[nKnots_-1] * an ) / ( b_[nKnots_-1] + c_[nKnots_-1] );
}
}
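A small self-contained check of the normalisation logic added above (illustrative only): since each coefficient returned by getCoefficients(i, true) is the unnormalised one divided by integral(), a TF1 built with normalise = true should integrate to unity over the knot range.

// Illustrative sketch only: numerical check that makeTF1(true) has unit area.
#include <vector>
#include "Lau1DCubicSpline.hh"
#include "TF1.h"
#include "TMath.h"

Bool_t checkSplineNormalisation()
{
	const std::vector<Double_t> xs { 0.0, 1.0, 2.0, 3.0 };
	const std::vector<Double_t> ys { 0.2, 0.6, 0.5, 0.1 };

	Lau1DCubicSpline spline( xs, ys );

	TF1* normalisedFunc = spline.makeTF1( true );

	// TF1::Integral integrates numerically over the given range;
	// for the normalised coefficients this should be very close to 1
	const Double_t area = normalisedFunc->Integral( xs.front(), xs.back() );

	return TMath::Abs( area - 1.0 ) < 1e-3;
}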
diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc
index dd94338..5ee1e3e 100644
--- a/src/LauDecayTimePdf.cc
+++ b/src/LauDecayTimePdf.cc
@@ -1,1250 +1,1421 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauDecayTimePdf.cc
\brief File containing implementation of LauDecayTimePdf class.
*/
#include <complex>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>
#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"
#include "TH1.h"
#include "RooMath.h"
#include "Lau1DCubicSpline.hh"
#include "Lau1DHistPdf.hh"
#include "LauConstants.hh"
#include "LauComplex.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitDataTree.hh"
#include "LauParameter.hh"
#include "LauRandom.hh"
ClassImp(LauDecayTimePdf)
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
param_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
mean_(nGauss_,0),
sigma_(nGauss_,0),
frac_(nGauss_-1,0),
tau_(0),
deltaM_(0),
deltaGamma_(0),
fracPrompt_(0),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scale),
scaleWidths_(scale),
scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
effiPars_(0)
{
this->initialise();
}
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
param_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
mean_(nGauss_,0),
sigma_(nGauss_,0),
frac_(nGauss_-1,0),
tau_(0),
deltaM_(0),
deltaGamma_(0),
fracPrompt_(0),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scaleMeans),
scaleWidths_(scaleWidths),
scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or<Bool_t>() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
effiPars_(0)
{
this->initialise();
}
LauDecayTimePdf::~LauDecayTimePdf()
{
// Destructor
delete errHist_; errHist_ = nullptr;
delete pdfHist_; pdfHist_ = nullptr;
delete effiFun_; effiFun_ = nullptr;
delete effiHist_; effiHist_ = nullptr;
for( auto& par : effiPars_ ){ delete par; par = nullptr; }
effiPars_.clear();
}
void LauDecayTimePdf::initialise()
{
// The parameters are:
// - the mean and the sigma (bias and spread in resolution) of the gaussian(s)
// - the mean lifetime, denoted tau, of the exponential decay
// - the frequency of oscillation, denoted Delta m, of the cosine and sine terms
// - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms
//
// The next two arguments specify the range in which the PDF is defined,
// and the PDF will be normalised w.r.t. these limits.
//
// The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig ), the number of gaussians
// and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t
// First check whether the scale vector is nGauss in size
if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if (type_ == Hist) {
if (this->nParameters() != 0){
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else {
TString meanName("mean_");
TString sigmaName("sigma_");
TString fracName("frac_");
Bool_t foundParams(kTRUE);
for (UInt_t i(0); i<nGauss_; ++i) {
TString tempName(meanName); tempName += i;
TString tempName2(sigmaName); tempName2 += i;
TString tempName3(fracName); tempName3 += i;
mean_[i] = this->findParameter(tempName);
foundParams &= (mean_[i] != 0);
sigma_[i] = this->findParameter(tempName2);
foundParams &= (sigma_[i] != 0);
if (i!=0) {
frac_[i-1] = this->findParameter(tempName3);
foundParams &= (frac_[i-1] != 0);
}
}
if (type_ == Delta) {
if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == Exp) {
tau_ = this->findParameter("tau");
foundParams &= (tau_ != 0);
if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == DeltaExp) {
tau_ = this->findParameter("tau");
fracPrompt_ = this->findParameter("frac_prompt");
foundParams &= (tau_ != 0);
foundParams &= (fracPrompt_ != 0);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the fraction of the prompt part: \"frac_prompt\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
foundParams &= (tau_ != 0);
foundParams &= (deltaM_ != 0);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpHypTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
deltaGamma_ = this->findParameter("deltaGamma");
foundParams &= (tau_ != 0);
foundParams &= (deltaM_ != 0);
foundParams &= (deltaGamma_ != 0);
if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
std::cerr<<" - the width difference: \"deltaGamma\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
}
}
Double_t LauDecayTimePdf::effectiveResolution() const
{
Double_t dilution = 0.;
Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue();
// Might be cleaner to just append this to the vector in the init step,
// then the consistency can also be checked
Double_t fracSum = 0;
for (auto f : frac_) fracSum += f->unblindValue();
Double_t lastFrac = 1. - fracSum;
for (size_t i = 0; i < sigma_.size(); i++) {
Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue();
Double_t thisFrac = lastFrac;
if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue();
dilution += thisFrac * TMath::Exp(-dMSq * sigSq / 2.);
}
return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue();
}
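// A minimal standalone sketch (illustration only, hypothetical helper) of the dilution-based
// effective resolution computed above, assuming the full set of Gaussian fractions (summing
// to unity) is supplied explicitly:
//   D = sum_i f_i * exp( -deltaM^2 * sigma_i^2 / 2 ),  sigma_eff = sqrt( -2*ln(D) ) / deltaM
/*
#include <cmath>
#include <cstddef>
#include <vector>

double effectiveSigmaSketch( const std::vector<double>& fracs,
                             const std::vector<double>& sigmas,
                             const double deltaM )
{
	double dilution { 0.0 };
	for ( std::size_t i { 0 }; i < sigmas.size(); ++i ) {
		// each component dilutes the oscillation amplitude by exp(-dM^2 sigma^2 / 2)
		dilution += fracs[i] * std::exp( -0.5 * deltaM * deltaM * sigmas[i] * sigmas[i] );
	}
	// convert the overall dilution back into a single equivalent Gaussian width
	return std::sqrt( -2.0 * std::log( dilution ) ) / deltaM;
}
*/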
void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData)
{
// Check that the input data contains the decay time variable
Bool_t hasBranch = inputData.haveBranch(this->varName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<std::endl;
return;
}
// If we're scaling by the per-event error, also check that the input data contains the decay time error variable
if ( scaleWithPerEventError_ ) {
hasBranch = inputData.haveBranch(this->varErrName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<std::endl;
return;
}
}
// Pass the data to the decay-time error PDF for caching
if ( errHist_ ) {
errHist_->cacheInfo(inputData);
}
if (type_ == Hist) {
// Pass the data to the decay-time PDF for caching
if ( pdfHist_ ) {
pdfHist_->cacheInfo(inputData);
}
} else {
// determine whether we are caching our PDF value
//TODO
//Bool_t doCaching( this->nFixedParameters() == this->nParameters() );
//this->cachePDF( doCaching );
// clear the vectors and reserve enough space
const UInt_t nEvents = inputData.nEvents();
abscissas_.clear(); abscissas_.reserve(nEvents);
abscissaErrors_.clear(); abscissaErrors_.reserve(nEvents);
expTerms_.clear(); expTerms_.reserve(nEvents);
cosTerms_.clear(); cosTerms_.reserve(nEvents);
sinTerms_.clear(); sinTerms_.reserve(nEvents);
coshTerms_.clear(); coshTerms_.reserve(nEvents);
sinhTerms_.clear(); sinhTerms_.reserve(nEvents);
normTermsExp_.clear(); normTermsExp_.reserve(nEvents);
normTermsCos_.clear(); normTermsCos_.reserve(nEvents);
normTermsSin_.clear(); normTermsSin_.reserve(nEvents);
normTermsCosh_.clear(); normTermsCosh_.reserve(nEvents);
normTermsSinh_.clear(); normTermsSinh_.reserve(nEvents);
- effiTerms_.clear(); effiTerms_.reserve(nEvents);
+ effiTerms_.clear(); effiTerms_.reserve(nEvents);
// If we're not using per-event information for the decay time
// error, just calculate the normalisation terms once
if ( ! scaleWithPerEventError_ ) {
this->calcNorm();
}
for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {
const LauFitData& dataValues = inputData.getData(iEvt);
const Double_t abscissa { dataValues.at(this->varName()) };
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissas_.push_back( abscissa );
const Double_t abscissaErr { scaleWithPerEventError_ ? dataValues.at(this->varErrName()) : 0.0 };
if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissaErrors_.push_back(abscissaErr);
this->calcLikelihoodInfo(abscissa, abscissaErr);
// If we are using per-event information for the decay
// time error, need to calculate the normalisation
// terms for every event
if ( scaleWithPerEventError_ ) {
this->calcNorm(abscissaErr);
}
expTerms_.push_back(expTerm_);
cosTerms_.push_back(cosTerm_);
sinTerms_.push_back(sinTerm_);
coshTerms_.push_back(coshTerm_);
sinhTerms_.push_back(sinhTerm_);
normTermsExp_.push_back(normTermExp_);
normTermsCos_.push_back(normTermCos_);
normTermsSin_.push_back(normTermSin_);
normTermsCosh_.push_back(normTermCosh_);
normTermsSinh_.push_back(normTermSinh_);
effiTerms_.push_back(effiTerm_);
}
}
}
void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt)
{
// Extract all the terms and their normalisations
if (type_ == Hist) {
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(iEvt);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
} else {
expTerm_ = expTerms_[iEvt];
cosTerm_ = cosTerms_[iEvt];
sinTerm_ = sinTerms_[iEvt];
coshTerm_ = coshTerms_[iEvt];
sinhTerm_ = sinhTerms_[iEvt];
normTermExp_ = normTermsExp_[iEvt];
normTermCos_ = normTermsCos_[iEvt];
normTermSin_ = normTermsSin_[iEvt];
normTermCosh_ = normTermsCosh_[iEvt];
normTermSinh_ = normTermsSinh_[iEvt];
}
// Extract the decay time error PDF value
if ( errHist_ ) {
errHist_->calcLikelihoodInfo(iEvt);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
// Extract the decay time efficiency
effiTerm_ = effiTerms_[iEvt];
// TODO - Parameters can change in some cases, so we'll need to update things!
// - For the moment do the blunt force thing and recalculate everything for every event!
// - Need to make this intelligent!
const Double_t abscissa = abscissas_[iEvt];
const Double_t abscissaErr = abscissaErrors_[iEvt];
this->calcLikelihoodInfo(abscissa,abscissaErr);
this->calcNorm(abscissaErr);
}
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa)
{
// Check whether any of the gaussians should be scaled - if any of them should we need the per-event error
if (scaleWithPerEventError_) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<<std::endl;
return;
}
this->calcLikelihoodInfo(abscissa, 0.0);
}
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr)
{
// Check that the decay time and the decay time error are in valid ranges
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
// Determine the decay time efficiency
switch( effMethod_ )
{
case EfficiencyMethod::Spline : effiTerm_ = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break;
case EfficiencyMethod::Binned : effiTerm_ = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break;
case EfficiencyMethod::Flat : effiTerm_ = 1.0 ; break;
}
if ( effiTerm_ > 1.0 ) { effiTerm_ = 1.0; }
else if ( effiTerm_ < 0.0 ) { effiTerm_ = 0.0; }
// For the histogram PDF just calculate that term and return
if (type_ == Hist){
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(abscissa);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
return;
}
// If we're not using the resolution function, calculate the simple terms and return
if (!this->doSmearing()) {
this->calcNonSmearedTerms(abscissa);
return;
}
// Get all the up to date parameter values for the resolution function
std::vector<Double_t> frac(nGauss_);
std::vector<Double_t> mean(nGauss_);
std::vector<Double_t> sigma(nGauss_);
Double_t fracPrompt(0.0);
// TODO - why do we do the fractions this way around?
frac[0] = 1.0;
for (UInt_t i(0); i<nGauss_; ++i) {
mean[i] = mean_[i]->unblindValue();
sigma[i] = sigma_[i]->unblindValue();
if (i != 0) {
frac[i] = frac_[i-1]->unblindValue();
frac[0] -= frac[i];
}
}
if (type_ == DeltaExp) {
fracPrompt = fracPrompt_->unblindValue();
}
// Scale the Gaussian parameters by the per-event decay-time error (if appropriate)
for (UInt_t i(0); i<nGauss_; ++i) {
if (scaleMeans_[i]) {
mean[i] *= abscissaErr;
}
if (scaleWidths_[i]) {
sigma[i] *= abscissaErr;
}
}
// Calculate term needed by every type
std::vector<Double_t> x(nGauss_);
const Double_t xMax = this->maxAbscissa();
const Double_t xMin = this->minAbscissa();
for (UInt_t i(0); i<nGauss_; ++i) {
x[i] = abscissa - mean[i];
}
Double_t value(0.0);
if (type_ == Delta || type_ == DeltaExp) {
// Calculate the gaussian function(s)
for (UInt_t i(0); i<nGauss_; ++i) {
if (TMath::Abs(sigma[i]) > 1e-10) {
Double_t exponent(0.0);
Double_t norm(0.0);
Double_t scale = LauConstants::root2*sigma[i];
Double_t scale2 = LauConstants::rootPiBy2*sigma[i];
exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]);
norm = scale2*(TMath::Erf((xMax - mean[i])/scale)
- TMath::Erf((xMin - mean[i])/scale));
value += frac[i]*TMath::Exp(exponent)/norm;
}
}
}
if (type_ != Delta) {
// Reset values of terms
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
// Calculate values of the PDF convolved with each Gaussian for a given value of the abscissa
for (UInt_t i(0); i<nGauss_; ++i) {
// We need this in all other cases
const Double_t expTerm = smearedExpTerm(abscissa, sigma[i], mean[i]);
expTerm_ += frac[i] * expTerm;
// Typical case (1): B0/B0bar
if ( type_ == ExpTrig ) {
// LHCb convention, i.e. convolution evaluated between 0 and inf
if (method_ == DecayTime) {
auto [cosTerm, sinTerm] = smearedCosSinTerm(abscissa, sigma[i], mean[i]);
coshTerm_ += frac[i] * expTerm;
cosTerm_ += frac[i] * cosTerm;
sinTerm_ += frac[i] * sinTerm;
} else {
// lhcb.web.cern.ch
}
}
// Typical case (2): B0s/B0sbar
else if ( type_ == ExpHypTrig ) {
// LHCb convention
if (method_ == DecayTime) {
auto [coshTerm, sinhTerm] = smearedCoshSinhTerm(abscissa, sigma[i], mean[i]);
auto [cosTerm, sinTerm] = smearedCosSinTerm(abscissa, sigma[i], mean[i]);
coshTerm_ += frac[i] * coshTerm;
sinhTerm_ += frac[i] * sinhTerm;
cosTerm_ += frac[i] * cosTerm;
sinTerm_ += frac[i] * sinTerm;
} else {
// lhcb.web.cern.ch
}
}
}
if (type_ == DeltaExp) {
value *= fracPrompt;
value += (1.0-fracPrompt)*expTerm_;
} else {
value = expTerm_;
}
}
// Calculate the decay time error PDF value
if ( errHist_ ) {
std::vector<Double_t> absErrVec = {abscissaErr}; //Otherwise seg fault
errHist_->calcLikelihoodInfo(absErrVec);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
}
void LauDecayTimePdf::calcNonSmearedTerms(Double_t abscissa)
{
// Reset values of terms
errTerm_ = 1.0;
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
if ( type_ == Hist || type_ == Delta ){
return;
}
const Double_t tau { tau_->unblindValue() };
const Double_t gamma { 1.0 / tau };
if (method_ == DecayTime) {
expTerm_ = TMath::Exp(-abscissa*gamma);
} else if (method_ == DecayTimeDiff) {
expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gamma);
}
// Calculate also the terms related to cosine and sine
if (type_ == ExpTrig) {
const Double_t deltaM = deltaM_->unblindValue();
coshTerm_ = expTerm_;
sinhTerm_ = 0.0;
cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_;
}
// Calculate also the terms related to cosh, sinh, cosine, and sine
else if (type_ == ExpHypTrig) {
const Double_t deltaM = deltaM_->unblindValue();
const Double_t deltaGamma = deltaGamma_->unblindValue();
coshTerm_ = TMath::CosH(0.5*deltaGamma*abscissa)*expTerm_;
sinhTerm_ = TMath::SinH(0.5*deltaGamma*abscissa)*expTerm_;
cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_;
}
}
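// For reference, the unsmeared building blocks set above are
//   expTerm  = exp(-Gamma*t)   (exp(-Gamma*|t|) for the DecayTimeDiff method)
//   cosTerm  = exp(-Gamma*t) * cos(deltaM*t),        sinTerm  = exp(-Gamma*t) * sin(deltaM*t)
//   coshTerm = exp(-Gamma*t) * cosh(deltaGamma*t/2), sinhTerm = exp(-Gamma*t) * sinh(deltaGamma*t/2)
// For ExpTrig (deltaGamma = 0) the cosh term reduces to the bare exponential and the sinh
// term vanishes. The smeared* helpers that follow evaluate the same quantities convolved
// with a Gaussian resolution of mean mu and width sigma, using the Faddeeva function.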
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCosSinTerm(Double_t t, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
const Double_t x = (t - mu) / (LauConstants::root2 * sigma);
const std::complex z = std::complex(gamma * sigma / LauConstants::root2, -this->deltaM_->unblindValue() * sigma / LauConstants::root2);
const std::complex arg1 = std::complex(0., 1.) * (z - x);
const std::complex arg2 { -(x*x) - (arg1 * arg1) };
const std::complex conv = arg1.imag() < -5.? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * TMath::Exp(-(x * x)) * RooMath::faddeeva(arg1) ;
const Double_t cos_conv = conv.real();
const Double_t sin_conv = conv.imag();
return {cos_conv, sin_conv};
}
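// The smeared helpers above and below use the standard result for a one-sided exponential
// convolved with a unit-area Gaussian:
//   Integral_0^inf ds exp(-(Gamma - i*deltaM)*s) * Gauss(t - s; mu, sigma)
//     = 0.5 * exp(-x^2) * w( i*(z - x) ),
// with x = (t - mu)/(sqrt(2)*sigma), z = (Gamma - i*deltaM)*sigma/sqrt(2) and w the Faddeeva
// function; the real and imaginary parts are the smeared cos and sin terms (deltaM = 0 gives
// the smeared exponential). Since w(i*k) = exp(k^2)*erfc(k), the branch taken when
// Im[i*(z - x)] < -5 evaluates the equivalent 0.5*exp(z^2 - 2*z*x)*erfc(z - x) form instead,
// which is numerically better behaved in that region.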
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCoshSinhTerm(Double_t t, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
Double_t gamma = 1. / this->tau_->unblindValue();
std::complex x((t - mu) / (LauConstants::root2 * sigma),0.);
Double_t xRe = x.real();
Double_t z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
Double_t z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
//Doing H
std::complex arg_H1(0., z_H - x.real());
std::complex arg_H2 = -(x*x) - (arg_H1 * arg_H1);
std::complex conv_H = arg_H1.imag() < -5. ? (0.5 * std::exp(arg_H2)) * RooMath::erfc(-1i * arg_H1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_H1);
//Doing L
std::complex arg_L1(0., z_L - x.real());
std::complex arg_L2 = -(x*x) - (arg_L1 * arg_L1);
std::complex conv_L = arg_L1.imag() < -5. ? (0.5 * std::exp(arg_L2)) * RooMath::erfc(-1i * arg_L1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_L1);
std::complex cosh_conv = 0.5 * (conv_H + conv_L);
std::complex sinh_conv = 0.5 * (conv_H - conv_L);
return {cosh_conv.real(), sinh_conv.real()};
}
Double_t LauDecayTimePdf::smearedExpTerm(Double_t t, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
const std::complex x((t - mu) / (LauConstants::root2 * sigma),0.);
const Double_t xRe = x.real();
const Double_t z = (gamma * sigma) / LauConstants::root2;
const std::complex arg1(0., z - x.real());
const std::complex arg2 = -(x * x) - (arg1 * arg1);
const std::complex conv = arg1.imag() < -5. ? 0.5 * (std::exp(arg2)) * RooMath::erfc(-1i * arg1) : 0.5 * TMath::Exp(-(xRe * xRe)) * RooMath::faddeeva(arg1) ;
return conv.real();
}
std::pair<Double_t, Double_t> LauDecayTimePdf::nonSmearedCosSinIntegral(Double_t minAbs, Double_t maxAbs)
{
// From 1407.0748, not clear whether complex is faster in this case
Double_t gamma = 1. / this->tau_->unblindValue();
LauComplex denom = LauComplex(gamma, -this->deltaM_->unblindValue());
LauComplex exponent = LauComplex(-gamma, this->deltaM_->unblindValue());
LauComplex num0 = -exponent.scale(minAbs).exp();
LauComplex num1 = -exponent.scale(maxAbs).exp();
LauComplex integral = (num1 - num0) / denom;
return {integral.re(), integral.im()};
}
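// An equivalent closed form written with std::complex (illustration only, hypothetical helper):
//   Integral_{t0}^{t1} exp(-Gamma*t) * [ cos(deltaM*t) + i*sin(deltaM*t) ] dt
//     = ( exp(-(Gamma - i*deltaM)*t0) - exp(-(Gamma - i*deltaM)*t1) ) / ( Gamma - i*deltaM )
/*
#include <complex>
#include <utility>

std::pair<double,double> expCosSinIntegralSketch( const double t0, const double t1,
                                                  const double gamma, const double deltaM )
{
	// k = Gamma - i*deltaM, so exp(-k*t) = exp(-Gamma*t) * [ cos(deltaM*t) + i*sin(deltaM*t) ]
	const std::complex<double> k { gamma, -deltaM };
	const std::complex<double> integral { ( std::exp( -k * t0 ) - std::exp( -k * t1 ) ) / k };
	// real part: integral of exp(-Gamma*t)*cos(deltaM*t); imaginary part: the sine analogue
	return { integral.real(), integral.imag() };
}
*/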
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCosSinIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
Double_t gamma = 1. / this->tau_->unblindValue();
Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
std::complex z = std::complex(gamma * sigma / LauConstants::root2, -this->deltaM_->unblindValue() * sigma / LauConstants::root2);
std::complex arg1 = std::complex(0., 1.) * (z - x1);
std::complex arg0 = std::complex(0., 1.) * (z - x0);
std::complex integral = 0. + 0i;
if(arg1.imag() < -5.)
{integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
else
{integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.)
{integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
else
{integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);}
integral *= (sigma / (2. * LauConstants::root2 * z));
Double_t cos_integral = integral.real();
Double_t sin_integral = integral.imag();
return {cos_integral, sin_integral};
}
Double_t LauDecayTimePdf::nonSmearedExpIntegral(Double_t minAbs, Double_t maxAbs)
{
const Double_t tau = tau_->unblindValue();
const Double_t Gamma = 1.0 / tau;
return tau * ( TMath::Exp(-minAbs*Gamma) - TMath::Exp(-maxAbs*Gamma) );
}
Double_t LauDecayTimePdf::smearedExpIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
const Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
const Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
const Double_t z = (gamma * sigma) / LauConstants::root2;
std::complex arg1(0., z - x1);
std::complex arg0(0., z - x0);
std::complex integral = 0. + 0i;
if(arg1.imag() < -5.)
{integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
else
{integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.)
{integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
else
{integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);}
integral *= (sigma / (2. * LauConstants::root2 * z));
return integral.real();
}
std::pair<Double_t, Double_t> LauDecayTimePdf::nonSmearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs)
{
// Use the exponential formulation rather than cosh, sinh.
// Fewer terms (reused for each), but not guaranteed to be faster.
Double_t gamma = 1. / this->tau_->unblindValue();
Double_t gammaH = gamma - 0.5 * deltaGamma_->unblindValue();
Double_t gammaL = gamma + 0.5 * deltaGamma_->unblindValue();
Double_t nL1 = -TMath::Exp(-gammaL * maxAbs) / gammaL;
Double_t nH1 = -TMath::Exp(-gammaH * maxAbs) / gammaH;
Double_t nL0 = -TMath::Exp(-gammaL * minAbs) / gammaL;
Double_t nH0 = -TMath::Exp(-gammaH * minAbs) / gammaH;
Double_t cosh_integral = 0.5 * ( (nH1 + nL1) - (nH0 + nL0) );
Double_t sinh_integral = 0.5 * ( (nH1 - nL1) - (nH0 - nL0) );
return {cosh_integral, sinh_integral};
}
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
Double_t gamma = 1. / this->tau_->unblindValue();
Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
Double_t z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
std::complex arg1_H(0., z_H - x1);
std::complex arg0_H(0., z_H - x0);
std::complex integral_H = 0. + 0i;
if(arg1_H.imag() < -5.)
{integral_H = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_H * arg1_H)) * RooMath::erfc(-1i * arg1_H);}
else
{integral_H = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_H);}
if(arg0_H.imag() < -5.)
{integral_H -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_H * arg0_H)) * RooMath::erfc(-1i * arg0_H);}
else
{integral_H -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_H);}
integral_H *= (sigma / (2. * LauConstants::root2 * z_H));
// Same for light (L)
Double_t z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
std::complex arg1_L(0., z_L - x1);
std::complex arg0_L(0., z_L - x0);
std::complex integral_L = 0. + 0i;
if(arg1_L.imag() < -5.)
{integral_L = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_L * arg1_L)) * RooMath::erfc(-1i * arg1_L);}
else
{integral_L = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_L);}
if(arg0_L.imag() < -5.)
{integral_L -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_L * arg0_L)) * RooMath::erfc(-1i * arg0_L);}
else
{integral_L -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_L);}
integral_L *= (sigma / (2. * LauConstants::root2 * z_L));
std::complex cosh_integral = 0.5 * (integral_H + integral_L);
std::complex sinh_integral = 0.5 * (integral_H - integral_L);
return {cosh_integral.real(), sinh_integral.real()};
}
void LauDecayTimePdf::calcNorm(const Double_t abscissaErr)
{
// first reset integrals to zero
normTermExp_ = 0.0;
normTermCos_ = 0.0;
normTermSin_ = 0.0;
normTermCosh_ = 0.0;
normTermSinh_ = 0.0;
// Get all the up to date parameter values
- std::vector<Double_t> frac(nGauss_);
- std::vector<Double_t> mean(nGauss_);
- std::vector<Double_t> sigma(nGauss_);
+ std::vector<Double_t> fracs(nGauss_);
+ std::vector<Double_t> means(nGauss_);
+ std::vector<Double_t> sigmas(nGauss_);
// TODO - why do we do the fractions this way around?
- frac[0] = 1.0;
+ fracs[0] = 1.0;
for (UInt_t i(0); i<nGauss_; ++i) {
- mean[i] = mean_[i]->unblindValue();
- sigma[i] = sigma_[i]->unblindValue();
+ means[i] = mean_[i]->unblindValue();
+ sigmas[i] = sigma_[i]->unblindValue();
if (i != 0) {
- frac[i] = frac_[i-1]->unblindValue();
- frac[0] -= frac[i];
+ fracs[i] = frac_[i-1]->unblindValue();
+ fracs[0] -= fracs[i];
}
}
// Scale the gaussian parameters by the per-event error on decay time (if appropriate)
for (UInt_t i(0); i<nGauss_; ++i) {
if (scaleMeans_[i]) {
- mean[i] *= abscissaErr;
+ means[i] *= abscissaErr;
}
if (scaleWidths_[i]) {
- sigma[i] *= abscissaErr;
+ sigmas[i] *= abscissaErr;
}
}
switch ( effMethod_ ) {
case EfficiencyMethod::Flat :
// No efficiency variation
// Simply calculate integrals over full range
if( this -> doSmearing() )
- {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1.0, mean, sigma, frac);}
+ {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1.0, means, sigmas, fracs);}
else
{this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, 1.0 );}
break;
case EfficiencyMethod::Binned :
// Efficiency varies as piecewise constant
// Total integral is sum of integrals in each bin, each weighted by efficiency in that bin
for ( Int_t bin{1}; bin <= effiHist_->GetNbinsX(); ++bin ) {
const Double_t loEdge {effiHist_->GetBinLowEdge(bin)};
const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)};
const Double_t effVal {effiHist_->GetBinContent(bin)};
if ( this -> doSmearing() )
- {this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, mean, sigma, frac );}
+ {this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );}
else
{this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );}
}
break;
case EfficiencyMethod::Spline :
// Efficiency varies as piecewise polynomial
// TODO - to be worked out what to do here
+ if ( ! effiFun_ ) { std::cerr << "ERROR in LauDecayTimePdf::calcNorm : no efficiency spline supplied!" << std::endl; gSystem->Exit(EXIT_FAILURE); }
+
+ for(size_t i = 0; i < effiFun_ -> getnKnots()-1; ++i)
+ {
+ if( this -> doSmearing() )
+ {this -> calcSmearedSplinePartialIntegrals( i, means, sigmas, fracs );}
+ else
+ {std::cerr << "Spline acceptance for non-smeared case not implemented!"<<std::endl; gSystem -> Exit(EXIT_FAILURE);}
+ }
+
+
+ /*
std::cerr << "WARNING in LauDecayTimePdf::calcNorm : normalisation integrals for spline acceptance not yet implemented - effect of acceptance will be neglected!" << std::endl;
if ( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1.0, mean, sigma, frac);}
else
{this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, 1.0 );}
+ */
break;
}
// TODO - should we check here that all terms we expect to use are now non-zero?
}
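// A minimal sketch (illustration only, not the Laura++ API) of what the Binned efficiency
// method above amounts to for the plain exponential term: a sum of per-bin exponential
// integrals weighted by the bin efficiencies,
//   N = sum_b eff_b * tau * ( exp(-tLo_b/tau) - exp(-tHi_b/tau) )
/*
#include <cmath>
#include <cstddef>
#include <vector>

double binnedExpNormSketch( const std::vector<double>& binEdges, // size nBins+1, ascending
                            const std::vector<double>& binEffs,  // size nBins
                            const double tau )
{
	double norm { 0.0 };
	for ( std::size_t b { 0 }; b < binEffs.size(); ++b ) {
		// exponential integral over this bin, weighted by its (piecewise constant) efficiency
		norm += binEffs[b] * tau * ( std::exp( -binEdges[b] / tau ) - std::exp( -binEdges[b+1] / tau ) );
	}
	return norm;
}
*/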
// TODO - Mildly concerned this is void rather than returning the integrals
// (but this would require refactoring for different return values).
// As long as it doesn't get called outside of calcNorm() it should be fine - DPO
void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight)
{
Double_t normTermExp {0.0};
if (method_ == DecayTime) {
normTermExp = weight * this -> nonSmearedExpIntegral(minAbs, maxAbs);
} else if (method_ == DecayTimeDiff) {
const Double_t tau = tau_->unblindValue();
const Double_t Gamma = 1.0 / tau;
// TODO - there should be some TMath::Abs here surely?
normTermExp = weight * tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma));
}
normTermExp_ += normTermExp;
// Normalisation factor for B0 decays
if ( type_ == ExpTrig ) {
normTermCosh_ += normTermExp;
auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
normTermCos_ += weight * cosIntegral;
normTermSin_ += weight * sinIntegral;
}
// Normalisation factor for Bs decays
else if ( type_ == ExpHypTrig ) {
auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs);
normTermCosh_ += weight * coshIntegral;
normTermSinh_ += weight * sinhIntegral;
auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
normTermCos_ += weight * cosIntegral;
normTermSin_ += weight * sinIntegral;
}
}
void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
for (UInt_t i(0); i<nGauss_; ++i)
{
Double_t normTermExp {0.0};
if (method_ == DecayTime) {
normTermExp = weight * this -> smearedExpIntegral(minAbs, maxAbs, sigmas[i], means[i]);
} else if (method_ == DecayTimeDiff) {
const Double_t tau = tau_->unblindValue();
const Double_t Gamma = 1.0 / tau;
// TODO - this is neglecting resolution at the moment
// TODO - there should be some TMath::Abs here surely?
normTermExp = weight * tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma));
}
normTermExp_ += fractions[i] * normTermExp;
// Normalisation factor for B0 decays
if ( type_ == ExpTrig ) {
normTermCosh_ += fractions[i] * normTermExp;
auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]);
normTermCos_ += fractions[i] * weight * cosIntegral;
normTermSin_ += fractions[i] * weight * sinIntegral;
}
// Normalisation factor for Bs decays
else if ( type_ == ExpHypTrig ) {
auto [coshIntegral, sinhIntegral] = this->smearedCoshSinhIntegral(minAbs, maxAbs, sigmas[i], means[i]);
normTermCosh_ += fractions[i] * weight * coshIntegral;
normTermSinh_ += fractions[i] * weight * sinhIntegral;
auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]);
normTermCos_ += fractions[i] * weight * cosIntegral;
normTermSin_ += fractions[i] * weight * sinIntegral;
}
}
}
+
+void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
+{
+ using namespace std::complex_literals;
+
+ const Double_t gamma = 1. / this->tau_->unblindValue();
+ for (UInt_t i(0); i<nGauss_; ++i)
+ {
+
+ Double_t normTermExp {0.0};
+ if (method_ == DecayTime) {
+ const std::complex<double> z = (gamma * sigmas[i]) / LauConstants::root2;
+ normTermExp = this -> splineNormalise(splineIndex, z, sigmas[i], means[i]).first;
+ } else if (method_ == DecayTimeDiff) {//TODO this isn't implemented at all
+ //const Double_t tau = tau_->unblindValue();
+ //const Double_t Gamma = 1.0 / tau;
+ // TODO - this is neglecting resolution at the moment
+ // TODO - there should be some TMath::Abs here surely?
+// normTermExp = tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma));
+//
+ ; // nop so the compiler doesn't complain
+ }
+ normTermExp_ += fractions[i] * normTermExp;
+
+ // Normalisation factor for B0 decays
+ if ( type_ == ExpTrig ) {
+
+ normTermCosh_ += fractions[i] * normTermExp;
+
+ const std::complex z = std::complex(gamma * sigmas[i] / LauConstants::root2, -this->deltaM_->unblindValue() * sigmas[i] / LauConstants::root2);
+ auto [cosIntegral, sinIntegral] = this -> splineNormalise(splineIndex, z, sigmas[i], means[i]);
+
+ normTermCos_ += fractions[i] * cosIntegral;
+ normTermSin_ += fractions[i] * sinIntegral;
+ }
+
+ // Normalisation factor for Bs decays
+ else if ( type_ == ExpHypTrig ) {
+
+ const std::complex z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigmas[i]) / LauConstants::root2;
+ const std::complex z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigmas[i]) / LauConstants::root2;
+
+ const Double_t N_H = this -> splineNormalise(splineIndex, z_H, sigmas[i], means[i]).first;
+ const Double_t N_L = this -> splineNormalise(splineIndex, z_L, sigmas[i], means[i]).first;
+
+ const Double_t coshIntegral = 0.5 * (N_H + N_L);
+ const Double_t sinhIntegral = 0.5 * (N_H - N_L);
+
+ normTermCosh_ += fractions[i] * coshIntegral;
+ normTermSinh_ += fractions[i] * sinhIntegral;
+
+ const std::complex z = std::complex(gamma * sigmas[i] / LauConstants::root2, -this->deltaM_->unblindValue() * sigmas[i] / LauConstants::root2);
+ auto [cosIntegral, sinIntegral] = this -> splineNormalise(splineIndex, z, sigmas[i], means[i]);
+ normTermCos_ += fractions[i] * cosIntegral;
+ normTermSin_ += fractions[i] * sinIntegral;
+ }
+ }
+}
+
+std::array<std::complex<double>,4> LauDecayTimePdf::generateKvector(const std::complex<double> z)
+{
+ std::array<std::complex<double>,4> K = {0.,0.,0.,0.};
+
+ const std::complex<double> zr = 1./z;
+
+ K[0] = 0.5*zr;
+ K[1] = 0.5*zr*zr;
+ K[2] = zr*(1.+zr*zr);
+ K[3] = 3.*zr*zr*(1.+zr*zr);
+
+ return K;
+}
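+
+// For reference, the factors above are K_0(z) = 1/(2z), K_1(z) = 1/(2z^2),
+// K_2(z) = (1/z)(1 + 1/z^2) and K_3(z) = (3/z^2)(1 + 1/z^2), the z-dependent
+// coefficients entering the spline-acceptance normalisation sum in splineNormalise below.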
+
+std::array<std::complex<double>,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<double> z, const Double_t sigma, const Double_t mu)
+{
+ using namespace std::complex_literals;
+
+ std::array<std::complex<double>,4> M0 = {0.,0.,0.,0.};
+ std::array<std::complex<double>,4> M1 = {0.,0.,0.,0.};
+ std::array<std::complex<double>,4> M;
+
+ const Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
+ const Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
+
+ //Values used a lot
+ const Double_t ex2_1 = TMath::Exp(-(x1*x1));
+ const Double_t ex2_0 = TMath::Exp(-(x0*x0));
+ const Double_t sqrtPir = 1/LauConstants::rootPi;
+
+ const std::complex arg1 = (0.+1.i) * (z-x1);
+ const std::complex arg0 = (0.+1.i) * (z-x0);
+
+ //fad = the faddeeva times the ex2 value (done in different ways depending on the domain)
+ std::complex<double> fad1;
+ std::complex<double> fad0;
+
+ if(arg1.imag() < -5.)
+ {fad1 = std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
+ else
+ {fad1 = ex2_1*RooMath::faddeeva(arg1);}
+
+ if(arg0.imag() < -5.)
+ {fad0 = std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
+ else
+ {fad0 = ex2_0*RooMath::faddeeva(arg0);}
+
+ //doing the actual functions for x1
+ M1[0] = RooMath::erf(x1) - fad1;
+ M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1);
+ M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1);
+ M1[3] = 4. * (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1);
+
+ //doing them again for x0
+ M0[0] = RooMath::erf(x0) - fad0;
+ M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0);
+ M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0);
+ M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0);
+
+ for(Int_t i = 0; i < 4; ++i){M[i] = M1[i] - M0[i];}
+
+ return M;
+}
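+
+// A compact way to read the expressions above: writing fad(x) = exp(-x*x) * w(i*(z - x))
+// (evaluated via erfc in the numerically delicate region), the building blocks are
+//   M_0(x) = erf(x) - fad(x)
+//   M_n(x) = -[ H_n(x)*fad(x) + 2*H_{n-1}(x)*exp(-x*x)/sqrt(pi) ]   for n = 1, 2, 3,
+// with the (physicists') Hermite polynomials H_1 = 2x, H_2 = 4x^2 - 2, H_3 = 8x^3 - 12x,
+// and each returned element is the difference M_n(x1) - M_n(x0).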
+
+std::pair<Double_t,Double_t> LauDecayTimePdf::splineNormalise(const UInt_t splineIndex, std::complex<double> z, const Double_t sigma, const Double_t mu)
+{
+ using namespace std::complex_literals;
+
+ const std::vector<Double_t>& xVals = effiFun_ -> getXValues();
+ const Double_t minAbs = xVals[splineIndex];
+ const Double_t maxAbs = xVals[splineIndex+1];
+
+ std::array<Double_t,4> coeffs = effiFun_ -> getCoefficients(splineIndex);
+ std::array<std::complex<double>,4> K = this -> generateKvector(z);
+ std::array<std::complex<double>,4> M = this -> generateMvector(minAbs, maxAbs, z, sigma, mu);
+
+//Double sum to get N (eqn 31 in https://arxiv.org/pdf/1407.0748.pdf)
+ std::complex<double> N = 0. + 0i;
+ for(Int_t i = 0; i < 4; ++i)
+ {
+ for(Int_t j = 0; j <= i ; ++j)
+ {
+ Int_t ij = i+j;
+ if(ij > 3){continue;} // sum component = 0
+ Double_t A = coeffs[ij] * TMath::Binomial(ij,j) / TMath::Power(2,ij);
+
+ N += A * M[i] * K[j];
+ }
+ }
+ return std::make_pair( N.real(), N.imag() );
+}
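+
+// In words: on the knot segment [x_i, x_{i+1}] the acceptance is a cubic whose four
+// coefficients are returned by getCoefficients, so its product with the Gaussian-smeared
+// exponential integrates to a fixed combination of the K_j(z) factors and the M_i
+// differences (eqn 31 of https://arxiv.org/pdf/1407.0748.pdf). For a purely real z only
+// the real part is meaningful (the exp term, and the cosh/sinh terms via z_H and z_L);
+// for the complex z carrying -i*deltaM the real and imaginary parts are the cos and sin
+// normalisation terms respectively.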
+
Double_t LauDecayTimePdf::generateError(Bool_t forceNew)
{
if (errHist_ && (forceNew || !abscissaErrorGenerated_)) {
LauFitData errData = errHist_->generate(nullptr);
abscissaError_ = errData.at(this->varErrName());
abscissaErrorGenerated_ = kTRUE;
} else {
while (forceNew || !abscissaErrorGenerated_) {
abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_);
if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) {
abscissaErrorGenerated_ = kTRUE;
forceNew = kFALSE;
}
}
}
return abscissaError_;
}
/*
LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
// generateError SHOULD have been called before this
// function but will call it here just to make sure
// (has no effect if it has already been called)
abscissaError_ = this->generateError();
// If the PDF is scaled by the per-event error then need to update the PDF height for each event
Bool_t scale(kFALSE);
for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
scale |= (*iter);
}
for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
scale |= (*iter);
}
if (scale || (!this->heightUpToDate() && !this->cachePDF())) {
this->calcPDFHeight(kinematics);
this->heightUpToDate(kTRUE);
}
// Generate the value of the abscissa.
Bool_t gotAbscissa(kFALSE);
Double_t genVal(0.0);
Double_t genPDFVal(0.0);
LauFitData genAbscissa;
const Double_t xMin = this->minAbscissa();
const Double_t xMax = this->maxAbscissa();
const Double_t xRange = xMax - xMin;
while (!gotAbscissa) {
genVal = LauRandom::randomFun()->Rndm()*xRange + xMin;
this->calcLikelihoodInfo(genVal, abscissaError_);
genPDFVal = this->getUnNormLikelihood();
if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;}
if (genPDFVal > this->getMaxHeight()) {
std::cerr<<"Warning in LauDecayTimePdf::generate()."
<<" genPDFVal = "<<genPDFVal<<" is larger than the specified PDF height "
<<this->getMaxHeight()<<" for the abscissa = "<<genVal<<". Need to reset height to be larger than "
<<genPDFVal<<" by using the setMaxHeight(Double_t) function"
<<" and re-run the Monte Carlo generation!"<<std::endl;
}
}
genAbscissa[this->varName()] = genVal;
// mark that we need a new error to be generated next time
abscissaErrorGenerated_ = kFALSE;
return genAbscissa;
}
*/
void LauDecayTimePdf::setErrorHisto(const TH1* hist)
{
if ( errHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<std::endl;
return;
}
errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError());
}
void LauDecayTimePdf::setHistoPdf(const TH1* hist)
{
if ( pdfHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<std::endl;
return;
}
pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa());
}
void LauDecayTimePdf::setEffiHist(const TH1* hist)
{
if ( effiHist_ != nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl;
return;
}
if ( hist == nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl;
return;
}
// Check boundaries of histogram align with our abscissa's range
const Double_t axisMin {hist->GetXaxis()->GetXmin()};
const Double_t axisMax {hist->GetXaxis()->GetXmax()};
if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n"
<< " : histogram range: " << axisMin << " - " << axisMax << "\n"
<< " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n"
<< " : Disregarding this histogram." << std::endl;
return;
}
effiHist_ = dynamic_cast<TH1*>( hist->Clone() );
+
+ // Normalise the histogram if the (relative) efficiencies exceed unity
+ if ( effiHist_->GetMaximum() > 1. )
+ {
+ effiHist_->Scale( 1. / effiHist_->Integral() ); // normalise to unit integral
+ std::cout << "INFO in LauDecayTimePdf::setEffiHist : supplied decay-time acceptance histogram has bin contents above 1: scaling it to unit integral." << std::endl;
+ }
}
void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline)
{
if ( effiFun_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<std::endl;
return;
}
effiFun_ = spline;
std::vector<Double_t> effis = effiFun_->getYValues();
effiPars_.resize( effis.size() );
size_t index = 0;
for( Double_t& effi : effis )
{
effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data() ,index ), effi, 0.0, 1.0, kTRUE );
++index;
}
}
LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName)
{
for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return 0;
}
const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const
{
for ( std::vector<LauAbsRValue*>::const_iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return 0;
}
void LauDecayTimePdf::updatePulls()
{
for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
std::vector<LauParameter*> params = (*iter)->getPars();
for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) {
if (!(*iter)->fixed()) {
(*params_iter)->updatePull();
}
}
}
}
void LauDecayTimePdf::updateEffiSpline(std::vector<LauParameter*> effiPars)
{
if (effiPars.size() != effiFun_->getnKnots()){
std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
effiFun_->updateYValues(effiPars);
}
