diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh
index 5f20811..90cfa95 100644
--- a/inc/LauDecayTimePdf.hh
+++ b/inc/LauDecayTimePdf.hh
@@ -1,453 +1,444 @@
// Copyright University of Warwick 2006 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauDecayTimePdf.hh
\brief File containing declaration of LauDecayTimePdf class.
*/
/*! \class LauDecayTimePdf
\brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time.
LauDecayTimePdf is a class that provides the PDFs for describing the
time-dependence of the various terms in a particle/antiparticle decay to a
common final state. The various terms have the form of exponentially
decaying trigonometric or hyperbolic functions convolved with an N-Gaussian
resolution function.
*/
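/*
   Added illustration (an editor's sketch, not part of the original Doxygen
   comment): before convolution with the resolution function the unnormalised
   terms have the schematic forms

     Exp        : exp(-t/tau)
     ExpTrig    : exp(-t/tau) * { cos(deltaM*t), sin(deltaM*t) }
     ExpHypTrig : exp(-t/tau) * { cos(deltaM*t), sin(deltaM*t),
                                  cosh(deltaGamma*t/2), sinh(deltaGamma*t/2) }

   each of which is then smeared with sum_i frac_i * Gauss(t; mean_i, sigma_i).
*/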
#ifndef LAU_DECAYTIME_PDF
#define LAU_DECAYTIME_PDF
#include <vector>
#include "TString.h"
#include "LauAbsRValue.hh"
#include "LauFitDataTree.hh"
#include "LauComplex.hh"
class TH1;
class Lau1DHistPdf;
class Lau1DCubicSpline;
class LauDecayTimePdf {
public:
//! The functional form of the decay time PDF
enum FuncType {
Hist, /*< Hist PDF for fixed background */
Delta, /*< Delta function - for prompt background */
Exp, /*< Exponential function - for non-prompt background or charged B's */
DeltaExp, /*< Delta + Exponential function - for background with prompt and non-prompt parts */
ExpTrig, /*< Exponential function with Delta m driven mixing - for neutral B_d's */
ExpHypTrig /*< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's */
};
//! State of complex error function calculation
enum State {
Good, /*< All OK */
Overflow1, /*< Overflow in term 1 */
Overflow2 /*< Overflow in term 2 */
};
//! How is the decay time measured - absolute or difference
enum TimeMeasurementMethod {
DecayTime, /*< Absolute measurement of decay time, e.g. LHCb scenario */
DecayTimeDiff /*< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario */
};
//! Constructor
/*!
\param [in] theVarName the name of the decay time variable in the input data
\param [in] theVarErrName the name of the decay time error variable in the input data
\param [in] params the parameters of the PDF
\param [in] minAbscissaVal the minimum value of the abscissa
\param [in] maxAbscissaVal the maximum value of the abscissa
\param [in] minAbscissaErr the minimum value of the abscissa error
\param [in] maxAbscissaErr the maximum value of the abscissa error
\param [in] type the functional form of the PDF
\param [in] nGauss the number of Gaussians in the resolution function
\param [in] scale controls whether the Gaussian parameters are scaled by the per-event error
\param [in] method set the type of the time measurement used in the given experiment
*/
LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method);
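// Hedged usage sketch (not from the original header; the values and the
// (name, value, min, max, fixed) LauParameter constructor are assumptions).
// The parameter names must follow the convention checked in initialise():
// "mean_i" and "sigma_i" per Gaussian, "frac_i" for i != 0, plus "tau" and
// "deltaM" for an ExpTrig PDF with a single Gaussian:
//
//   std::vector<LauAbsRValue*> params;
//   params.push_back( new LauParameter( "mean_0",  0.0,  -1.0, 1.0, kTRUE ) );
//   params.push_back( new LauParameter( "sigma_0", 0.05,  0.0, 0.5, kTRUE ) );
//   params.push_back( new LauParameter( "tau",     1.52,  0.5, 5.0, kTRUE ) );
//   params.push_back( new LauParameter( "deltaM",  0.51,  0.0, 1.0, kTRUE ) );
//
//   LauDecayTimePdf dtPdf( "decayTime", "decayTimeErr", params,
//                          0.0, 15.0, 0.0, 0.5,
//                          LauDecayTimePdf::ExpTrig, 1,
//                          std::vector<Bool_t>( 1, kTRUE ),
//                          LauDecayTimePdf::DecayTime );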
//! Constructor
/*!
\param [in] theVarName the name of the decay time variable in the input data
\param [in] theVarErrName the name of the decay time error variable in the input data
\param [in] params the parameters of the PDF
\param [in] minAbscissaVal the minimum value of the abscissa
\param [in] maxAbscissaVal the maximum value of the abscissa
\param [in] minAbscissaErr the minimum value of the abscissa error
\param [in] maxAbscissaErr the maximum value of the abscissa error
\param [in] type the functional form of the PDF
\param [in] nGauss the number of Gaussians in the resolution function
\param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error
\param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error
\param [in] method set the type of the time measurement used in the given experiment
*/
LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
const Double_t minAbscissaVal, const Double_t maxAbscissaVal,
const Double_t minAbscissaErr, const Double_t maxAbscissaErr,
const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scaleMeans,
const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method);
//! Destructor
virtual ~LauDecayTimePdf();
//! Set the histogram to be used for generation of per-event decay time errors
/*!
If not set, generation will fall back to using a Landau distribution
\param [in] hist the histogram of the distribution
*/
void setErrorHisto(const TH1* hist);
//! Set the histogram PDF to be used in the case of a fixed background PDF
void setHistoPdf(const TH1* hist);
//! Set the efficiency function in the form of a spline
void setEffiSpline(Lau1DCubicSpline* spline);
//! Set the parameters of the Landau distribution used to generate the per-event decay time errors
/*!
\param [in] mpv the MPV (most probable value) of the distribution
\param [in] sigma the width of the distribution
*/
void setErrorDistTerms(const Double_t mpv, const Double_t sigma) {
errorDistMPV_ = mpv;
errorDistSigma_ = sigma;
}
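// Hedged example (dtPdf as in the sketch above; the values are the
// signal-like numbers quoted in the constructor initialiser lists):
//
//   dtPdf.setErrorDistTerms( 0.234, 0.073 );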
//! Retrieve the name of the decay time variable
const TString& varName() const {return varName_;}
//! Retrieve the name of the error variable
const TString& varErrName() const {return varErrName_;}
//! Turn on or off the resolution function
void doSmearing(Bool_t smear) {smear_ = smear;}
//! Determine if the resolution function is turned on or off
Bool_t doSmearing() const {return smear_;}
//! Cache information from data
/*!
Will cache, for every event, the abscissa values and, if all parameters are fixed, the PDF value.
\param [in] inputData the data to be used to calculate everything
*/
virtual void cacheInfo(const LauFitDataTree& inputData);
//! Calculate the likelihood (and all associated information) given value of the abscissa
/*!
\param [in] abscissa the value of the abscissa
*/
virtual void calcLikelihoodInfo(Double_t abscissa);
//! Calculate the likelihood (and all associated information) given value of the abscissa and its error
/*!
\param [in] abscissa the value of the abscissa
\param [in] abscissaErr the error on the abscissa
*/
virtual void calcLikelihoodInfo(Double_t abscissa, Double_t abscissaErr);
//! Retrieve the likelihood (and all associated information) given the event number
/*!
\param [in] iEvt the event number
*/
virtual void calcLikelihoodInfo(UInt_t iEvt);
//! Evaluate the error function of a complex number
LauComplex ComplexErf(Double_t x, Double_t y);
LauComplex Erfi(Double_t x, Double_t y);
LauComplex ComplexErfc(Double_t x, Double_t y);
//! Get the exponential term
Double_t getExpTerm() const {return expTerm_;}
//! Get the cos(Dm*t) term (multiplied by the exponential)
Double_t getCosTerm() const {return cosTerm_;}
//! Get the sin(Dm*t) term (multiplied by the exponential)
Double_t getSinTerm() const {return sinTerm_;}
//! Get the cosh(DG/2*t) term (multiplied by the exponential)
Double_t getCoshTerm() const {return coshTerm_;}
//! Get the sinh(DG/2*t) term (multiplied by the exponential)
Double_t getSinhTerm() const {return sinhTerm_;}
//! Get the normalisation related to the exponential term only
Double_t getNormTermExp() const {return normTermExp_;}
//! Get the first term in the normalisation (from integrating the cosh)
Double_t getNormTermCosh() const {return normTermCosh_;}
//! Get the second term in the normalisation (from integrating the sinh)
Double_t getNormTermSinh() const {return normTermSinh_;}
//! Get the error probability density from the error distribution
Double_t getErrTerm() const {return errTerm_;}
//! Get the efficiency probability density from the efficiency distribution
Double_t getEffiTerm() const {return effiTerm_;}
//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
/*!
\return the parameters of the PDF
*/
const std::vector<LauAbsRValue*>& getParameters() const { return param_; }
//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
/*!
\return the parameters of the PDF
*/
std::vector<LauAbsRValue*>& getParameters() { return param_; }
//! Update the pulls for all parameters
void updatePulls();
//! Calculate the normalisation for the non-smeared hyperbolic terms
Double_t normExpHypTerm(Double_t Abs);
Double_t normExpHypTermDep(Double_t Abs);
//! Store the normalisation of the decay time cosh and sinh terms
void storeIntegralTimeDep(Double_t minAbs, Double_t maxAbs);
//! Generate the value of the error
/*!
If scaling by the per-event error, this should be called before calling generate
\param [in] forceNew forces generation of a new value
*/
Double_t generateError(const Bool_t forceNew = kFALSE);
//! Generate an event from the PDF - TODO not clear that this is really needed, perhaps for background? commented out for now
/*!
\param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence
*/
//virtual LauFitData generate(const LauKinematics* kinematics);
//! Determine the state of the calculation
State state() const {return state_;}
//! Retrieve the decay time error
Double_t abscissaError() const {return abscissaError_;}
//! Retrieve the decay time minimum value
Double_t minAbscissa() const {return minAbscissa_;}
//! Retrieve the decay time maximum value
Double_t maxAbscissa() const {return maxAbscissa_;}
//! Retrieve the decay time error minimum value
Double_t minAbscissaError() const {return minAbscissaError_;}
//! Retrieve the decay time error maximum value
Double_t maxAbscissaError() const {return maxAbscissaError_;}
virtual void checkPositiveness() {}; // Nothing to check here.
// NB calcNorm and calcPDFHeight only calculate the gaussian information for the (type_ == Delta) case
// TODO - can we delete these?
//! Calculate the normalisation factor of the PDF
//virtual void calcNorm();
//! Calculate the maximum height of the PDF
//virtual void calcPDFHeight( const LauKinematics* kinematics );
protected:
//! Set up the initial state correctly - called by the constructors
void initialise();
//! Calculate the pure physics terms with no resolution function applied
void calcNonSmearedTerms(const Double_t abscissa);
- //! Calculate the complex error function - called by LauDecayTimePdf::evalExpCerf
- void complexErrFunc(Double_t, Double_t, Double_t&, Double_t&);
-
- //! Calculate an approximate complex error function - called by LauDecayTimePdf::evalExpCerf
- void approxExpComplexErrFunc(Double_t, Double_t, Double_t, Double_t&, Double_t&);
-
- //! Evaluate the convolution of exp*cos or exp*sin with a Gaussian
- void evalExpCerf(Double_t, Double_t, Double_t, Double_t&, Double_t&);
-
inline void state(State s) {state_ = s;}
//! Calculate exponential auxiliary term for the convolution
void calcTrigExponent(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reTerm, Double_t& imTerm);
//! Calculate convolution between exponential*sin or cos with a Gaussian
void calcTrigConv(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reOutTerm, Double_t& imOutTerm, Bool_t trig);
//! Retrieve the number of PDF parameters
/*!
\return the number of PDF parameters
*/
UInt_t nParameters() const {return param_.size();}
//! Retrieve the specified parameter
/*!
\param [in] parName the parameter to retrieve
*/
LauAbsRValue* findParameter(const TString& parName);
//! Retrieve the specified parameter
/*!
\param [in] parName the parameter to retrieve
*/
const LauAbsRValue* findParameter(const TString& parName) const;
private:
//! Copy constructor (not implemented)
LauDecayTimePdf(const LauDecayTimePdf& other);
//! Copy assignment operator (not implemented)
LauDecayTimePdf& operator=(const LauDecayTimePdf& other);
//! Name of the variable
TString varName_;
//! Name of the error variable
TString varErrName_;
//! The parameters of the PDF
std::vector<LauAbsRValue*> param_;
//! Smear with the resolution model or not
Bool_t smear_;
//! The minimum value of the decay time
Double_t minAbscissa_;
//! The maximum value of the decay time
Double_t maxAbscissa_;
//! The minimum value of the decay time error
Double_t minAbscissaError_;
//! The maximum value of the decay time error
Double_t maxAbscissaError_;
//! The current value of the decay time error
Double_t abscissaError_;
//! Flag whether a value for the decay time error has been generated
Bool_t abscissaErrorGenerated_;
//! Value of the MPV of the Landau dist used to generate the Delta t error
Double_t errorDistMPV_;
//! Value of the width of the Landau dist used to generate the Delta t error
Double_t errorDistSigma_;
//! The number of Gaussians in the resolution model
const UInt_t nGauss_;
// Parameters of the Gaussian(s) that account for the resolution:
//! mean (offset) of each Gaussian in the resolution function
std::vector<LauAbsRValue*> mean_;
//! spread (sigma) of each Gaussian in the resolution function
std::vector<LauAbsRValue*> sigma_;
//! fraction of each Gaussian in the resolution function
std::vector<LauAbsRValue*> frac_;
// Parameters of the exponential: the mean life (tau) and the frequency of oscillation.
//! Lifetime parameter
LauAbsRValue* tau_;
//! Mass difference parameter
LauAbsRValue* deltaM_;
//! Width difference parameter
LauAbsRValue* deltaGamma_;
//! Parameter for the fraction of prompt events in DeltaExp
LauAbsRValue* fracPrompt_;
// Which type of Delta t PDF is this?
const FuncType type_;
// Which type of time measurement method is this?
const TimeMeasurementMethod method_;
// Scale the mean and sigma by the per-event error on Delta t?
const std::vector<Bool_t> scaleMeans_;
const std::vector<Bool_t> scaleWidths_;
// The values of the Exp, ExpCos and ExpSin terms
// (NB the Delta terms, i.e. just the gaussian, are stored as the PDF value)
//! The exponential term
Double_t expTerm_;
//! The cos(Dm*t) term (multiplied by the exponential)
Double_t cosTerm_;
//! The sin(Dm*t) term (multiplied by the exponential)
Double_t sinTerm_;
//! The cosh(DG/2*t) term (multiplied by the exponential)
Double_t coshTerm_;
//! The sinh(DG/2*t) term (multiplied by the exponential)
Double_t sinhTerm_;
// Normalisation that is used in the amplitude independent of cosh/sinh term
Double_t normTermExp_;
//! The first term in the normalisation (from integrating the cosh)
Double_t normTermCosh_;
//! The second term in the normalisation (from integrating the sinh)
Double_t normTermSinh_;
//! Error histogram term
Double_t errTerm_;
// Hist PDF term
Double_t pdfTerm_;
// Efficiency PDF term
Double_t effiTerm_;
//! The cache of the decay times of each event
std::vector<Double_t> abscissas_;
//! The cache of the per-event errors on the decay time
std::vector<Double_t> abscissaErrors_;
//! The cache of the exponential terms
std::vector<Double_t> expTerms_;
//! The cache of the exponential * cos(Dm*t) terms
std::vector<Double_t> cosTerms_;
//! The cache of the exponential * sin(Dm*t) terms
std::vector<Double_t> sinTerms_;
//! The cache of the exponential * cosh(DG/2*t) terms
std::vector<Double_t> coshTerms_;
//! The cache of the exponential * sinh(DG/2*t) terms
std::vector<Double_t> sinhTerms_;
//! The cache of the exponential normalisation
std::vector<Double_t> normTermsExp_;
//! The cache of the first term in the normalisation
std::vector<Double_t> normTermsCosh_;
//! The cache of the second term in the normalisation
std::vector<Double_t> normTermsSinh_;
// To be deleted once all the code has been modified
std::vector<Double_t> expNorms_;
//! The cache of the efficiency terms
std::vector<Double_t> effiTerms_;
//! The state of the complex error function calculation
State state_;
//! Histogram PDF for abscissa error distribution
Lau1DHistPdf* errHist_;
//! Histogram PDF for abscissa distribution
Lau1DHistPdf* pdfHist_;
//! Efficiency function in the form of a spline
Lau1DCubicSpline* effiFun_;
ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF
};
#endif
diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc
index 7cebc52..b6ae255 100644
--- a/src/LauDecayTimePdf.cc
+++ b/src/LauDecayTimePdf.cc
@@ -1,1303 +1,923 @@
// Copyright University of Warwick 2006 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauDecayTimePdf.cc
\brief File containing implementation of LauDecayTimePdf class.
*/
#include <iostream>
#include <vector>
using std::cout;
using std::cerr;
using std::endl;
#include <complex>
using std::complex;
#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"
#include "Lau1DCubicSpline.hh"
#include "Lau1DHistPdf.hh"
#include "LauConstants.hh"
#include "LauComplex.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitDataTree.hh"
#include "LauParameter.hh"
#include "LauRandom.hh"
ClassImp(LauDecayTimePdf)
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale, TimeMeasurementMethod method) :
varName_(theVarName),
varErrName_(theVarErrName),
param_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
mean_(nGauss_,0),
sigma_(nGauss_,0),
frac_(nGauss_-1,0),
tau_(0),
deltaM_(0),
deltaGamma_(0),
fracPrompt_(0),
type_(type),
method_(method),
scaleMeans_(scale),
scaleWidths_(scale),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
pdfTerm_(0.0),
effiTerm_(0.0),
state_(Good),
errHist_(0),
pdfHist_(0),
effiFun_(0)
{
this->initialise();
// Calculate the integrals of the decay time PDF that are independent of t
this->storeIntegralTimeDep(minAbscissa_, maxAbscissa_);
}
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths, TimeMeasurementMethod method) :
varName_(theVarName),
varErrName_(theVarErrName),
param_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
mean_(nGauss_,0),
sigma_(nGauss_,0),
frac_(nGauss_-1,0),
tau_(0),
deltaM_(0),
deltaGamma_(0),
fracPrompt_(0),
type_(type),
method_(method),
scaleMeans_(scaleMeans),
scaleWidths_(scaleWidths),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
pdfTerm_(0.0),
effiTerm_(0.0),
state_(Good),
errHist_(0),
pdfHist_(0),
effiFun_(0)
{
this->initialise();
// Calculate the integrals of the decay time PDF that are independent of t
this->storeIntegralTimeDep(minAbscissa_, maxAbscissa_);
}
LauDecayTimePdf::~LauDecayTimePdf()
{
// Destructor
delete errHist_; errHist_ = 0;
delete pdfHist_; pdfHist_ = 0;
delete effiFun_; effiFun_ = 0;
}
void LauDecayTimePdf::initialise()
{
// The parameters are:
// - the mean and the sigma (bias and spread in resolution) of the gaussian(s)
// - the mean lifetime, denoted tau, of the exponential decay
// - the frequency of oscillation, denoted Delta m, of the cosine and sine terms
// - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms
//
// The next two arguments specify the range in which the PDF is defined,
// and the PDF will be normalised w.r.t. these limits.
//
// The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig ), the number of gaussians
// and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t
// First check whether the scale vector is nGauss in size
if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) {
cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
if (type_ == Hist){
if (this->nParameters() != 0){
cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<endl;
gSystem->Exit(EXIT_FAILURE);
}
}else{
TString meanName("mean_");
TString sigmaName("sigma_");
TString fracName("frac_");
Bool_t foundParams(kTRUE);
for (UInt_t i(0); i<nGauss_; ++i) {
TString tempName(meanName); tempName += i;
TString tempName2(sigmaName); tempName2 += i;
TString tempName3(fracName); tempName3 += i;
mean_[i] = this->findParameter(tempName);
foundParams &= (mean_[i] != 0);
sigma_[i] = this->findParameter(tempName2);
foundParams &= (sigma_[i] != 0);
if (i!=0) {
frac_[i-1] = this->findParameter(tempName3);
foundParams &= (frac_[i-1] != 0);
}
}
if (type_ == Delta) {
if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) {
cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<endl;
cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<endl;
cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == Exp) {
tau_ = this->findParameter("tau");
foundParams &= (tau_ != 0);
if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) {
cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<endl;
cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<endl;
cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<endl;
cerr<<" - the lifetime of the exponential decay: \"tau\"."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
foundParams &= (tau_ != 0);
foundParams &= (deltaM_ != 0);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<endl;
cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<endl;
cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<endl;
cerr<<" - the lifetime of the exponential decay: \"tau\""<<endl;
cerr<<" - the oscillation frequency: \"deltaM\"."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpHypTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
deltaGamma_ = this->findParameter("deltaGamma");
foundParams &= (tau_ != 0);
foundParams &= (deltaM_ != 0);
foundParams &= (deltaGamma_ != 0);
if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) {
cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<endl;
cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<endl;
cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<endl;
cerr<<" - the lifetime of the exponential decay: \"tau\""<<endl;
cerr<<" - the oscillation frequency: \"deltaM\"."<<endl;
cerr<<" - the width difference: \"deltaGamma\"."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == DeltaExp) {
tau_ = this->findParameter("tau");
fracPrompt_ = this->findParameter("frac_prompt");
foundParams &= (tau_ != 0);
foundParams &= (fracPrompt_ != 0);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<endl;
cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<endl;
cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<endl;
cerr<<" - the lifetime of the exponential decay: \"tau\""<<endl;
cerr<<" - the fraction of the prompt part: \"frac_prompt\"."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
}
}
// Cache the normalisation factor
//this->calcNorm();
}
void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData)
{
Bool_t hasBranch = inputData.haveBranch(this->varName());
if (!hasBranch) {
cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<endl;
return;
}
hasBranch = inputData.haveBranch(this->varErrName());
if (!hasBranch) {
cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<endl;
return;
}
// Pass the data to the decay-time error PDF for caching
if ( errHist_ ) {
errHist_->cacheInfo(inputData);
}
if (type_ == Hist){
// Pass the data to the decay-time PDF for caching
if ( pdfHist_ ) {
pdfHist_->cacheInfo(inputData);
}
}else{
// determine whether we are caching our PDF value
//TODO
//Bool_t doCaching( this->nFixedParameters() == this->nParameters() );
//this->cachePDF( doCaching );
// clear the vectors and reserve enough space
const UInt_t nEvents = inputData.nEvents();
abscissas_.clear(); abscissas_.reserve(nEvents);
abscissaErrors_.clear(); abscissaErrors_.reserve(nEvents);
expTerms_.clear(); expTerms_.reserve(nEvents);
cosTerms_.clear(); cosTerms_.reserve(nEvents);
sinTerms_.clear(); sinTerms_.reserve(nEvents);
coshTerms_.clear(); coshTerms_.reserve(nEvents);
sinhTerms_.clear(); sinhTerms_.reserve(nEvents);
normTermsExp_.clear(); normTermsExp_.reserve(nEvents);
normTermsCosh_.clear(); normTermsCosh_.reserve(nEvents);
normTermsSinh_.clear(); normTermsSinh_.reserve(nEvents);
effiTerms_.clear(); effiTerms_.reserve(nEvents);
for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {
const LauFitData& dataValues = inputData.getData(iEvt);
LauFitData::const_iterator iter = dataValues.find(this->varName());
const Double_t abscissa = iter->second;
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissas_.push_back( abscissa );
iter = dataValues.find(this->varErrName());
Double_t abscissaErr = iter->second;
if (abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError()) {
cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissaErrors_.push_back(abscissaErr);
this->calcLikelihoodInfo(abscissa, abscissaErr);
expTerms_.push_back(expTerm_);
cosTerms_.push_back(cosTerm_);
sinTerms_.push_back(sinTerm_);
coshTerms_.push_back(coshTerm_);
sinhTerms_.push_back(sinhTerm_);
normTermsExp_.push_back(normTermExp_);
normTermsCosh_.push_back(normTermCosh_);
normTermsSinh_.push_back(normTermSinh_);
if(effiFun_) effiTerms_.push_back(effiFun_->evaluate(abscissa));
else effiTerms_.push_back(1.0);
}
}
}
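// Hedged usage note (illustrative, not from the original source): cacheInfo
// would typically be called once on the fit data, after which per-event
// lookups go through the cached terms:
//
//   dtPdf.cacheInfo( fitDataTree );  // cache abscissas, errors and terms
//   dtPdf.calcLikelihoodInfo( 0u );  // load the cached terms for event 0
//   Double_t cosh0 = dtPdf.getCoshTerm();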
void LauDecayTimePdf::calcLikelihoodInfo(UInt_t iEvt)
{
if (type_ == Hist){
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(iEvt);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
}else{
expTerm_ = expTerms_[iEvt];
cosTerm_ = cosTerms_[iEvt];
sinTerm_ = sinTerms_[iEvt];
coshTerm_ = coshTerms_[iEvt];
sinhTerm_ = sinhTerms_[iEvt];
normTermExp_ = normTermsExp_[iEvt];
normTermCosh_ = normTermsCosh_[iEvt];
normTermSinh_ = normTermsSinh_[iEvt];
}
if ( errHist_ ) {
errHist_->calcLikelihoodInfo(iEvt);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
if ( effiFun_ ) {
effiTerm_ = effiTerms_[iEvt];
} else {
effiTerm_ = 1.0;
}
// TODO need a check in here that none of the floating parameter values have changed
// If they have, then we need to recalculate all or some of the terms
/*
if ( parsChanged ) {
const Double_t abscissa = abscissas_[iEvt][0];
const Double_t abscissaErr = abscissaErrors_[iEvt];
this->calcLikelihoodInfo(abscissa, abscissaErr);
}
*/
}
void LauDecayTimePdf::calcLikelihoodInfo(Double_t abscissa)
{
// Check whether any of the gaussians should be scaled - if any of them should we need the per-event error
Bool_t scale(kFALSE);
for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
scale |= (*iter);
}
for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
scale |= (*iter);
}
if (scale) {
cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on Delta t not provided, cannot calculate anything."<<endl;
return;
} else {
this->calcLikelihoodInfo(abscissa, 0.0);
}
}
void LauDecayTimePdf::calcLikelihoodInfo(Double_t abscissa, Double_t abscissaErr)
{
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
if (abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError()) {
cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Initialise the various terms to zero
if (type_ == Hist){
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(abscissa);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
}else{
// TODO: check if we need this
expTerm_ = 0.0; cosTerm_ = 0.0; sinTerm_ = 0.0;
// Reset the state to Good
this->state(Good);
// If we're not using the resolution function calculate the simple terms and return
if (!this->doSmearing()) {
this->calcNonSmearedTerms(abscissa);
return;
}
// Get all the up to date parameter values
std::vector<Double_t> frac(nGauss_);
std::vector<Double_t> mean(nGauss_);
std::vector<Double_t> sigma(nGauss_);
Double_t tau(0.0);
Double_t deltaM(0.0);
Double_t fracPrompt(0.0);
frac[0] = 1.0;
for (UInt_t i(0); i<nGauss_; ++i) {
mean[i] = mean_[i]->value();
sigma[i] = sigma_[i]->value();
if (i != 0) {
frac[i] = frac_[i-1]->value();
frac[0] -= frac[i];
}
}
if (type_ != Delta) {
tau = tau_->value();
if (type_ == ExpTrig) {
deltaM = deltaM_->value();
}
if (type_ == DeltaExp) {
fracPrompt = fracPrompt_->value();
}
}
// Scale the gaussian parameters by the per-event error on Delta t (if appropriate)
for (UInt_t i(0); i<nGauss_; ++i) {
if (scaleMeans_[i]) {
mean[i] *= abscissaErr;
}
if (scaleWidths_[i]) {
sigma[i] *= abscissaErr;
}
}
// Calculate term needed by every type
std::vector<Double_t> x(nGauss_);
const Double_t xMax = this->maxAbscissa();
const Double_t xMin = this->minAbscissa();
for (UInt_t i(0); i<nGauss_; ++i) {
x[i] = abscissa - mean[i];
}
// TODO, what to do with this
Double_t value(0.0);
if (type_ == Delta || type_ == DeltaExp) {
// Calculate the gaussian function(s)
for (UInt_t i(0); i<nGauss_; ++i) {
if (TMath::Abs(sigma[i]) > 1e-10) {
Double_t exponent(0.0);
Double_t norm(0.0);
Double_t scale = LauConstants::root2*sigma[i];
Double_t scale2 = LauConstants::rootPiBy2*sigma[i];
exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]);
norm = scale2*(TMath::Erf((xMax - mean[i])/scale)
- TMath::Erf((xMin - mean[i])/scale));
value += frac[i]*TMath::Exp(exponent)/norm;
}
}
}
if (type_ != Delta) {
std::vector<Double_t> expTerms(nGauss_);
std::vector<Double_t> cosTerms(nGauss_);
std::vector<Double_t> sinTerms(nGauss_);
std::vector<Double_t> expTermsNorm(nGauss_);
// Calculate values of the PDF convolved with each Gaussian for a given value of the abscissa
for (UInt_t i(0); i<nGauss_; ++i) {
// Typical case (1): B0/B0bar
if (type_ == ExpTrig) {
// LHCb convention, i.e. convolution evaluated between 0 and inf
if (method_ == DecayTime) {
// Exponential term
Double_t termExponent = (pow(sigma[i], 2) - 2.0 * tau * x[i])/(2.0 * pow(tau, 2));
Double_t termErfc = (pow(sigma[i], 2) - tau * x[i])/(LauConstants::root2 * tau * sigma[i]);
expTerms[i] = (1.0/2.0) * TMath::Exp(termExponent) * TMath::Erfc(termErfc);
Double_t exponentTermRe, exponentTermIm;
this->calcTrigExponent(deltaM, tau, x[i], sigma[i], exponentTermRe, exponentTermIm);
// Elements related to the trigonometric function, i.e. convolution of Exp*Sin or Cos with Gauss
Double_t sinTrigTermRe, sinTrigTermIm, cosTrigTermRe, cosTrigTermIm;
this->calcTrigConv(deltaM, tau, x[i], sigma[i], sinTrigTermRe, sinTrigTermIm, kFALSE);
this->calcTrigConv(deltaM, tau, x[i], sigma[i], cosTrigTermRe, cosTrigTermIm, kTRUE);
// Combining elements of the full pdf
LauComplex zExp(exponentTermRe, exponentTermIm);
LauComplex zTrigSin(sinTrigTermRe, sinTrigTermIm);
LauComplex zTrigCos(cosTrigTermRe, cosTrigTermIm);
LauComplex sinConv = zExp * zTrigSin;
LauComplex cosConv = zExp * zTrigCos;
sinConv.scale(1.0/4.0);
cosConv.scale(1.0/4.0);
// Cosine*Exp and Sine*Exp terms
cosTerms[i] = cosConv.re();
sinTerms[i] = sinConv.im();
// Normalisation
Double_t umax = xMax - mean[i];
Double_t umin = xMin - mean[i];
expTermsNorm[i] = (1.0/2.0) * tau * (-1.0 + TMath::Erf(umax/(LauConstants::root2 * sigma[i])) + TMath::Erfc(umin/(LauConstants::root2 * sigma[i])) +
TMath::Exp((pow(sigma[i], 2) - 2.0 * tau * (xMax + xMin - mean[i]))/(2.0 * pow(tau, 2))) *
(TMath::Exp(xMax/tau) * TMath::Erfc((pow(sigma[i], 2) - xMin)/(LauConstants::root2 * tau))) +
(TMath::Exp(xMin/tau) * TMath::Erfc((pow(sigma[i], 2) - xMax)/(LauConstants::root2 * tau))));
} else {
}
}
// Typical case (2): B0s/B0sbar
if (type_ == ExpHypTrig) {
// TODO: add calculation
}
}
for (UInt_t i(0); i<nGauss_; ++i) {
expTerm_ += frac[i]*expTerms[i];
cosTerm_ += frac[i]*cosTerms[i];
sinTerm_ += frac[i]*sinTerms[i];
normTermExp_ += frac[i]*expTermsNorm[i];
}
if (type_ == DeltaExp) {
value *= fracPrompt;
value += (1.0-fracPrompt)*expTerm_;
} else {
value = expTerm_;
}
}
}
if ( errHist_ ) {
errHist_->calcLikelihoodInfo(abscissaErr);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
if ( effiFun_ ) {
effiTerm_ = effiFun_->evaluate(abscissa);
} else {
effiTerm_ = 1.0;
}
}
void LauDecayTimePdf::calcTrigExponent(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reTerm, Double_t& imTerm)
{
Double_t exponentTerm = TMath::Exp(-(2.0 * tau * x + pow(sigma, 2) * (pow(deltaM, 2) * pow(tau, 2) - 1.0))/(2.0 * pow(tau,2)));
reTerm = exponentTerm * TMath::Cos(deltaM * (x - pow(sigma,2)/tau));
imTerm = - exponentTerm * TMath::Sin(deltaM * (x - pow(sigma,2)/tau));
}
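// Added cross-check (editor's note): writing z = 1/tau + i*deltaM, the pair
// (reTerm, imTerm) computed above equals exp( sigma^2*z^2/2 - z*x ), i.e. the
// exponential prefactor obtained by completing the square when convolving
// exp(-t/tau)*exp(i*deltaM*t) with a Gaussian of width sigma.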
void LauDecayTimePdf::calcTrigConv(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reOutTerm, Double_t& imOutTerm, Bool_t trig)
{
Double_t reExpTerm, imExpTerm;
LauComplex zExp;
LauComplex zTrig1;
LauComplex zTrig2;
// Calculation for the sine or cosine term
if (!trig) {
reExpTerm = TMath::Sin(2.0 * deltaM * (x + pow(sigma,2)/tau));
imExpTerm = 2.0 * TMath::Sin(pow(deltaM * (x + pow(sigma,2)/tau), 2));
} else {
reExpTerm = TMath::Cos(2.0 * deltaM * (x + pow(sigma,2)/tau));
imExpTerm = TMath::Sin(2.0 * deltaM * (x + pow(sigma,2)/tau));
}
// Exponential term in front of Erfc/Erfi terms
zExp.setRealPart(reExpTerm);
zExp.setImagPart(imExpTerm);
// Nominal Erfc term (common to both sine and cosine expressions)
zTrig1.setRealPart(-(tau * x - pow(sigma,2))/(LauConstants::root2 * tau * sigma));
zTrig1.setImagPart(-(deltaM * sigma)/ LauConstants::root2);
// Second term for sine (Erfi) or cosine (Erfc) - notice the re-im swap and sign change
zTrig2.setRealPart(-zTrig1.im());
zTrig2.setImagPart(-zTrig1.re());
// Calculation of Erfc and Erfi (if necessary)
LauComplex term1 = ComplexErfc(zTrig1.re(), zTrig1.im());
LauComplex term2;
if (!trig) {
term2 = Erfi(zTrig2.re(), zTrig2.im());
} else {
term2 = ComplexErfc(zTrig2.re(), zTrig2.im());
}
// Multiplying all elements of the convolution
LauComplex output = zExp * term1 + term2;
reOutTerm = output.re();
imOutTerm = output.im();
}
LauComplex LauDecayTimePdf::ComplexErf(Double_t x, Double_t y)
{
// Evaluate Erf(z) where z is a complex number (z = x + iy)
// From Abramowitz & Stegun (http://people.math.sfu.ca/~cbm/aands/page_299.htm)
int n = 10;
LauComplex FirstTerm(TMath::Erf(x),0.);
LauComplex SecondTerm(1-cos(2*x*y), sin(2*x*y));
SecondTerm.rescale(TMath::Exp(-x*x)/(2*TMath::Pi()*x));
LauComplex firstPart = FirstTerm + SecondTerm;
LauComplex Sum(0,0);
for (int k = 1; k<=n; k++){
Double_t ExpPart = TMath::Exp(-0.25*k*k)/(k*k + 4*x*x);
Double_t f_k = 2*x*(1 - cos(2*x*y)*cosh(k*y)) + k*sin(2*x*y)*sinh(k*y);
Double_t g_k = 2*x*sin(2*x*y)*cosh(k*y) + k*cos(2*x*y)*sinh(k*y);
LauComplex fplusg(f_k, g_k);
fplusg.rescale(ExpPart);
Sum += fplusg;
}
Sum.rescale((2/TMath::Pi())*TMath::Exp(-x*x));
LauComplex result = firstPart + Sum;
return result;
}
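// Hedged sanity check (illustrative, not in the original source; dtPdf as in
// the earlier sketches): for purely real arguments every y-dependent term
// above vanishes, so the series reduces to the real error function:
//
//   LauComplex z = dtPdf.ComplexErf( 0.5, 0.0 );
//   std::cout << z.re() - TMath::Erf( 0.5 ) << std::endl;  // expect ~0
//   std::cout << z.im() << std::endl;                      // expect 0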
LauComplex LauDecayTimePdf::Erfi(Double_t x, Double_t y)
{
// Erfi(z) = -I*Erf(I*z) where z = x + iy
if (y==0){
y = 1e-100;
}
double x_prime = -y;
double y_prime = x;
LauComplex a = ComplexErf(x_prime, y_prime);
LauComplex result(a.im(), -a.re());
return result;
}
LauComplex LauDecayTimePdf::ComplexErfc(Double_t x, Double_t y)
{
// Erfc(z) = 1 - Erf(z) (z = x + iy)
LauComplex one(1., 0.);
LauComplex result = one - ComplexErf(x,y);
return result;
}
-
-void LauDecayTimePdf::evalExpCerf(Double_t Zre, Double_t Zim, Double_t xOverSigmaRoot2, Double_t& outRe, Double_t& outIm) {
-
- // Evaluate exp(-x*x*0.5/sigma^2) * complexErrFunc
- // See whether the exp term is large (very negative x),
- //and cancel it appropriately with another exp that
- //goes inside complexErrFunc.
-
- if (Zim>-4.0) {
- Double_t tempOutRe(0.0), tempOutIm(0.0);
- Double_t exponential = TMath::Exp(-xOverSigmaRoot2*xOverSigmaRoot2);
- this->complexErrFunc( Zre, Zim, tempOutRe, tempOutIm);
- outRe = exponential*tempOutRe;
- outIm = exponential*tempOutIm;
- } else {
- this->approxExpComplexErrFunc(Zre, Zim, xOverSigmaRoot2, outRe, outIm);
- }
-}
-
-void LauDecayTimePdf::approxExpComplexErrFunc(Double_t Zre, Double_t Zim, Double_t xOverSigmaRoot2, Double_t& outRe, Double_t& outIm) {
-
- // (Copied from RooFit)
- // Use the asymptotic approximation erf(z) = exp(-z*z)/(sqrt(pi)*z)
- // (see Abramowitz & Stegun, p.298, eqn 7.1.23)
- // to explicitly cancel the divergent exp(y*y) behaviour of
- // complexErrFunc for z = x + i y with large negative y
- // use the approximation: erf(z) = exp(-z*z)/(sqrt(pi)*z)
- // to explicitly cancel the divergent exp(y*y) behaviour of
- // CWERF for z = x + i y with large negative y
-
- /* static Double_t rootpi= TMath::Sqrt(atan2(0.,-1.));
- RooComplex z(swt*c,u+c);
- RooComplex zc(u+c,-swt*c);
- RooComplex zsq= z*z;
- RooComplex v= -zsq - u*u;
-
- return v.exp()*(-zsq.exp()/(zc*rootpi) + 1)*2 ;*/
-
- /*
- Double_t ZsqRe = Zre*Zre - Zim*Zim;
- Double_t ZsqIm = 2.0*Zre*Zim;
- Double_t ZsqReMinusDivergence = ZsqRe + xOverSigmaRoot2*xOverSigmaRoot2;
- Double_t exp1Re = TMath::Exp( -ZsqReMinusDivergence)*TMath::Cos(ZsqIm);
- Double_t exp1Im = -TMath::Exp( -ZsqReMinusDivergence)*TMath::Sin(ZsqIm);
- Double_t exp2Re = TMath::Exp( ZsqRe )*TMath::Cos(ZsqIm);
- Double_t exp2Im = TMath::Exp( ZsqRe )*TMath::Sin(ZsqIm);
- Double_t denominator = LauConstants::rootPi*(Zim*Zim+Zre*Zre);
- Double_t fractionRe = Zim/denominator;
- Double_t fractionIm = Zre/denominator;
- Double_t product1Re = exp2Re*fractionRe - exp2Im*fractionIm;
- Double_t product1Im = exp2Re*fractionIm + exp2Im*fractionRe;
- Double_t product2Re = 2.0*( exp1Re*(1+product1Re) - exp1Im*product1Im);
- Double_t product2Im = 2.0*( exp1Re*product1Im + exp1Im*(1+product1Re));
-
- outRe = product2Re;
- outIm = product2Im;
- */
-
- Double_t rootpi = LauConstants::rootPi;
- LauComplex z(Zre,Zim);
- LauComplex zc(Zim,-Zre);
- LauComplex zsq= z*z;
- LauComplex v= -zsq - LauComplex(xOverSigmaRoot2*xOverSigmaRoot2,0.0);
-
- LauComplex answer( v.exp()*(-zsq.exp()/(zc.scale(rootpi)) + LauComplex(1.0,0.0))*LauComplex(2.0,0.0) );
- outRe = answer.re();
- outIm = answer.im();
-}
-
-
-// Following code has been translated from FORTRAN
-// ALGORITHM 680, COLLECTED ALGORITHMS FROM ACM.
-// THIS WORK PUBLISHED IN TRANSACTIONS ON MATHEMATICAL SOFTWARE,
-// VOL. 16, NO. 1, PP. 47.
-// SUBROUTINE WOFZ (XI, YI, U, V, FLAG)
-
-void LauDecayTimePdf::complexErrFunc(Double_t Zre, Double_t Zim, Double_t& outRe, Double_t& outIm)
-{
- // Return CERNlib complex error function
- //
- // This code is translated from the fortran version in the CERN mathlib.
- // (see ftp://asisftp.cern.ch/cernlib/share/pro/src/mathlib/gen/c/cwerf64.F)
-
- LauComplex Z(Zre,Zim);
-
- LauComplex ZH,S,T,V;
- static LauComplex R[38];
-
- static const Double_t Z1= 1, HF= Z1/2, Z10= 10;
- static const Double_t C1= 74/Z10, C2= 83/Z10, C3= Z10/32, C4 = 16/Z10;
- static const Double_t C = 1.12837916709551257, P = pow(2*C4,33);
- static const LauComplex zero(0.0,0.0);
-
- Double_t X(Z.re()),Y(Z.im()), XA(TMath::Abs(X)), YA(TMath::Abs(Y));
- int N;
- if((YA < C1) && (XA < C2)) {
- ZH= LauComplex(YA+C4,XA);
- R[37]=zero;
- N= 36;
- while(N > 0) {
- T=ZH+R[N+1].conj();
- T.rescale(N);
- R[N--]=T.scale(HF/T.abs2());
- }
- Double_t XL=P;
- S=zero;
- N= 33;
- while(N > 0) {
- XL=C3*XL;
- S=R[N--]*(S+LauComplex(XL,0.0));
- }
- V=S.scale(C);
- }
- else {
- ZH=LauComplex(YA,XA);
- R[1]=zero;
- N= 9;
- while(N > 0) {
- T=ZH+R[1].conj();
- T.rescale(N);
- R[1]=T.scale(HF/T.abs2());
- N--;
- }
- V=R[1].scale(C);
- }
- if(YA==0) V=LauComplex(exp(-(XA*XA)),V.im());
-
- if(Y < 0) {
- LauComplex tmp(XA,YA);
- tmp= -tmp*tmp;
- V=tmp.exp().scale(2)-V;
- if(X > 0) V= V.conj();
- }
- else {
- if(X < 0) V= V.conj();
- }
-
- outRe = V.re();
- outIm = V.im();
-
-
- /*
- // THE CODE CONTAINS A FEW COMPILER-DEPENDENT PARAMETERS :
- // RMAXREAL = THE MAXIMUM VALUE OF RMAXREAL EQUALS THE ROOT OF
- // RMAX = THE LARGEST NUMBER WHICH CAN STILL BE
- // IMPLEMENTED ON THE COMPUTER IN DOUBLE PRECISION
- // FLOATING-POINT ARITHMETIC
- // RMAXEXP = LN(RMAX) - LN(2)
- // RMAXGONI = THE LARGEST POSSIBLE ARGUMENT OF A DOUBLE PRECISION
- // GONIOMETRIC FUNCTION (DCOS, DSIN, ...)
- // FURTHERMORE THE PARAMETER FACTOR EQUALS 2/SQRT(PI)
- //
- // THE ROUTINE IS NOT UNDERFLOW-PROTECTED BUT ANY VARIABLE CAN BE
- // PUT TO 0 UPON UNDERFLOW;
- //
- // REFERENCE - GPM POPPE, CMJ WIJERS; MORE EFFICIENT COMPUTATION OF
- // THE COMPLEX ERROR-FUNCTION, ACM TRANS. MATH. SOFTWARE.
- //
-
- static const Double_t Factor = 1.12837916709551257388; // 2.0/rootPi
- static const Double_t RmaxReal = 0.5e+154;
- static const Double_t RmaxExp = 708.5030614616060;
- static const Double_t RmaxTrig = 3.53711887601422e+15;
-
- Double_t Xabs = TMath::Abs(Zre);
- Double_t Yabs = TMath::Abs(Zim);
- Double_t X = Xabs/6.3;
- Double_t Y = Yabs/4.4;
-
- Double_t Qrho, Xquad, Yquad, U2 ,V2;
-
-
- // THE FOLLOWING IF-STATEMENT PROTECTS QRHO = (X**2 + Y**2) AGAINST OVERFLOW
- if ((Xabs > RmaxReal) || (Yabs > RmaxReal))
- {
- cerr<<"1st Overflow in complexErrFunc."<<endl;
- this->state(Overflow1);
- return;
- }
-
- Qrho = X*X+Y*Y;
- Double_t XabsQ = Xabs*Xabs;
- Xquad = XabsQ - Yabs*Yabs;
- Yquad = 2.0*Xabs*Yabs;
-
- Bool_t A = Qrho < 0.085264;
-
- //IF (QRHO.LT.0.085264D0) THEN THE FADDEEVA-FUNCTION IS EVALUATED
- //USING A POWER-SERIES (ABRAMOWITZ/STEGUN, EQUATION (7.1.5), P.297)
- //N IS THE MINIMUM NUMBER OF TERMS NEEDED TO OBTAIN THE REQUIRED ACCURACY
- if (A)
- {
- Qrho = (1.0-0.85*Y)*TMath::Sqrt(Qrho);
- Int_t N = TMath::Nint(6.0 + 72.0*Qrho);
- Int_t J = 2*N+1;
- Double_t Xsum = 1.0/J;
- Double_t Ysum = 0.0, Xaux = 0.0;
- while ( N > 0 )
- {
- J -= 2;
- Xaux = (Xsum*Xquad-Ysum*Yquad)/N;
- Ysum = (Xsum*Yquad+Ysum*Xquad)/N--;
- Xsum = Xaux + 1.0/J;
- }
- Double_t Daux = TMath::Exp(-Xquad);
- Double_t U1 = -Factor*(Xsum*Yabs+Ysum*Xabs)+1.0;
- Double_t V1 = Factor*(Xsum*Xabs-Ysum*Yabs);
- U2 = Daux*TMath::Cos(Yquad);
- V2 = -Daux*TMath::Sin(Yquad);
-
- outRe = U1*U2 - V1*V2;
- outIm = U1*V2 + V1*U2;
- }
- else
- {
- // IF (QRHO.GT.1.O) THEN W(Z) IS EVALUATED USING THE LAPLACE CONTINUED FRACTION
- // NU IS THE MINIMUM NUMBER OF TERMS NEEDED TO OBTAIN THE REQUIRED ACCURACY
- //
- // IF ((QRHO.GT.0.085264D0).AND.(QRHO.LT.1.0)) THEN W(Z) IS EVALUATED
- // BY A TRUNCATED TAYLOR EXPANSION, WHERE THE LAPLACE CONTINUED FRACTION
- // IS USED TO CALCULATE THE DERIVATIVES OF W(Z)
- // KAPN IS THE MINIMUM NUMBER OF TERMS IN THE TAYLOR EXPANSION NEEDED
- // TO OBTAIN THE REQUIRED ACCURACY
- // NU IS THE MINIMUM NUMBER OF TERMS OF THE CONTINUED FRACTION NEEDED
- // TO CALCULATE THE DERIVATIVES WITH THE REQUIRED ACCURACY
- Double_t H, H2=0.0, Qlambda;
- Int_t kapN, Nu;
-
- if (Qrho > 1.0)
- {
- H = 0.0;
- kapN = 0;
- Qrho = TMath::Sqrt(Qrho);
- Nu = (Int_t)(3.0 + (1442.0/(26.0*Qrho+77.0)));//Careful! We're truncating the Double_t here
- }
- else
- {
- Qrho = (1.0-Y)*TMath::Sqrt(1.0-Qrho);
- H = 1.88*Qrho;
- H2 = 2.0*H;
- kapN = TMath::Nint(7.0+34.0*Qrho);
- Nu = TMath::Nint(16.0+26.0*Qrho);
- }
-
- if ( H > 0.0 ) Qlambda = TMath::Power(H2,(Double_t)kapN);
-
- Double_t Rx = 0.0, Ry = 0.0, Sx = 0.0, Sy = 0.0;
-
- while ( Nu >= 0 )
- {
- Int_t Np1 = Nu+1;
- Double_t Tx = Yabs + H + Np1*Rx;
- Double_t Ty = Xabs - Np1*Ry;
- Double_t C = 0.5/(Tx*Tx+Ty*Ty);
- Rx = C*Tx;
- Ry = C*Ty;
- if ((H > 0.0) && (Nu<=kapN))
- {
- Tx = Qlambda + Sx;
- Sx = Rx*Tx - Ry*Sy;
- Sy = Ry*Tx + Rx*Sy;
- Qlambda /= H2;
- }
- Nu--;
- }
-
- if ( H == 0.0 )
- {
- outRe = Factor*Rx;
- outIm = Factor*Ry;
- }
- else
- {
- outRe = Factor*Sx;
- outIm = Factor*Sy;
- }
-
- if (Yabs == 0.0) outRe = TMath::Exp(-Xabs*Xabs);
- }
-
- // EVALUATION OF W(Z) IN THE OTHER QUADRANTS
- if (Zim < 0.0)
- {
- if (A)
- {
- U2 *= 2.0;
- V2 *= 2.0;
- }
- else
- {
- Xquad = -Xquad;
- // THE FOLLOWING IF-STATEMENT PROTECTS 2*EXP(-Z**2) AGAINST OVERFLOW
- if ((Yquad > RmaxTrig) || (Xquad > RmaxExp))
- {
- cerr<<"2nd Overflow in complexErrFunc."<<endl;
- this->state(Overflow2);
- return;
- }
- Double_t W1 = 2.0*TMath::Exp(Xquad);
- U2 = W1*TMath::Cos(Yquad);
- V2 = -W1*TMath::Sin(Yquad);
-
- }
-
- outRe = U2 - outRe;
- outIm = V2 - outIm;
- if (Zre > 0.0) outIm = -outIm;
- }
- else if (Zre < 0.0) outIm = -outIm;
-
- return;
- */
-
-
- /* // Return CERNlib complex error function
- //
- // This code is translated from the fortran version in the CERN mathlib.
- // (see ftp://asisftp.cern.ch/cernlib/share/pro/src/mathlib/gen/c/cwerf64.F)
-
- LauComplex ZH,S,T,V;
- static LauComplex R[38];
-
- static const Double_t Z1= 1, HF= Z1/2, Z10= 10;
- static const Double_t C1= 74/Z10, C2= 83/Z10, C3= Z10/32, C4 = 16/Z10;
- static const Double_t C = 1.12837916709551257, P = TMath::Power(2*C4,33);
- static const LauComplex zero(0,0);
-
- Double_t X(Zre),Y(Zim), XA(TMath::Abs(X)), YA(TMath::Abs(Y));
- int N;
- if((YA < C1) && (XA < C2)) {
- ZH= LauComplex(YA+C4,XA);
- R[37]=zero;
- N= 36;
- while(N > 0) {
- T=ZH+R[N+1].conj()*N;
- R[N--]=(T*HF)/T.abs2();
- }
- Double_t XL=P;
- S=zero;
- N= 33;
- while(N > 0) {
- XL=C3*XL;
- S=R[N--]*(S+XL);
- }
- V=S*C;
- }
- else {
- ZH=LauComplex(YA,XA);
- R[1]=zero;
- N= 9;
- while(N > 0) {
- T=ZH+R[1].conj()*N;
- R[1]=(T*HF)/T.abs2();
- N--;
- }
- V=R[1]*C;
- }
- if(YA==0) V=LauComplex(exp(-(XA*XA)),V.im());
-
- if(Y < 0) {
- LauComplex tmp(XA,YA);
- tmp= -tmp*tmp;
- V=tmp.exp()*2-V;
- if(X > 0) V= V.conj();
- }
- else {
- if(X < 0) V= V.conj();
- }
-
- outRe = V.re(); outIm = V.im();
- return kFALSE;
-
- */
-}
-
void LauDecayTimePdf::calcNonSmearedTerms(Double_t abscissa)
{
if (type_ == Hist ){
- cerr<<"It is a histgrammed PDF"<<endl;
+ cerr << "It is a histogrammed PDF" << endl;
return;
}
if (type_ == Delta) {
return;
}
- cosTerm_ = 0.0; sinTerm_ = 0.0; coshTerm_ = 0.0; sinhTerm_ = 0.0, expTerm_ = 0.0;
-
Double_t tau = tau_->value();
+ Double_t deltaM = deltaM_->value();
+ Double_t deltaGamma = deltaGamma_->value();
- // Exponential term included in all the components
- // TODO: Revisite abs in line below
- expTerm_ = TMath::Exp(-TMath::Abs(abscissa)/tau)/(2.0*tau);
-
+ // Calculate the terms related to cosine and sine, not normalised
if (type_ == ExpTrig) {
- Double_t deltaM = deltaM_->value();
-
- coshTerm_ = expTerm_;
+ if (method_ == DecayTime) {
+ expTerm_ = TMath::Exp(-abscissa/tau)/(2.0*tau);
+ }
+ if (method_ == DecayTimeDiff) {
+ expTerm_ = TMath::Exp(-TMath::Abs(abscissa)/tau)/(2.0*tau);
+ }
cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_;
- // TODO - what's this?
- expTerm_ /= normTermCosh_;
}
+ // Calculate the terms related to cosine, sine, cosh and sinh, not normalised (no DecayTimeDiff implemented)
if (type_ == ExpHypTrig) {
- Double_t deltaM = deltaM_->value();
- Double_t deltaGamma = deltaGamma_->value();
-
+ expTerm_ = TMath::Exp(-abscissa/tau)/(2.0*tau);
cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_;
coshTerm_ = TMath::CosH(deltaGamma*abscissa/2.0)*expTerm_;
sinhTerm_ = TMath::SinH(deltaGamma*abscissa/2.0)*expTerm_;
- // TODO - what's this?
- expTerm_ /= normTermCosh_;
}
-
- //this->setUnNormPDFVal(expTerm_);
}
Double_t LauDecayTimePdf::normExpHypTerm(Double_t Abs)
{
Double_t tau = tau_->value();
Double_t deltaGamma = deltaGamma_->value();
Double_t y = tau*deltaGamma/2;
Double_t nonTrigTerm = -(TMath::Exp(-Abs/tau))/(1 - y*y);
Double_t cosHTerm = TMath::CosH(deltaGamma*Abs/2);
Double_t sinHTerm = TMath::SinH(deltaGamma*Abs/2);
Double_t normTerm = nonTrigTerm*(cosHTerm + y*sinHTerm);
return normTerm;
}
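// Added cross-check (editor's note): with y = tau*deltaGamma/2, differentiating
//   F(t) = -exp(-t/tau) * ( cosh(deltaGamma*t/2) + y*sinh(deltaGamma*t/2) ) / (1 - y*y)
// gives F'(t) = exp(-t/tau) * cosh(deltaGamma*t/2) / tau, so the difference
// F(maxAbs) - F(minAbs) taken in storeIntegralTimeDep is the integral of the
// exponential-cosh term (up to the overall 1/tau scale) over the fit range.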
Double_t LauDecayTimePdf::normExpHypTermDep(Double_t Abs)
{
Double_t tau = tau_->value();
Double_t deltaGamma = deltaGamma_->value();
Double_t y = tau*deltaGamma/2;
Double_t nonTrigTerm = -(TMath::Exp(-Abs/tau))/(1 - y*y);
Double_t cosHTerm = TMath::CosH(deltaGamma*Abs/2);
Double_t sinHTerm = TMath::SinH(deltaGamma*Abs/2);
Double_t normTerm = nonTrigTerm*(sinHTerm + y*cosHTerm);
return normTerm;
}
void LauDecayTimePdf::storeIntegralTimeDep(Double_t minAbs, Double_t maxAbs)
{
- normTermCosh_ = 0.0; normTermSinh_ = 0.0;
-
- Double_t tau = tau_->value();
+ Double_t tau = tau_->value();
// Normalisation factor for B0 decays
if (type_ == ExpTrig) {
- normTermCosh_ = - ( TMath::Exp(-maxAbs/tau) - TMath::Exp(-minAbs/tau));
- normTermSinh_ = 0.0;
+ if (method_ == DecayTime) {
+ normTermExp_ = tau * (TMath::Exp(-minAbs/tau) - TMath::Exp(-maxAbs/tau));
+ }
+ if (method_ == DecayTimeDiff) {
+ normTermExp_ = tau * (2.0 - TMath::Exp(-maxAbs/tau) - TMath::Exp(-minAbs/tau));
+ }
}
// Normalisation factor for Bs decays
if (type_ == ExpHypTrig) {
normTermCosh_ = normExpHypTerm(maxAbs) - normExpHypTerm(minAbs);
normTermSinh_ = normExpHypTermDep( maxAbs) - normExpHypTermDep( minAbs);
}
}
Double_t LauDecayTimePdf::generateError(Bool_t forceNew)
{
if (errHist_ && (forceNew || !abscissaErrorGenerated_)) {
LauFitData errData = errHist_->generate(0);
abscissaError_ = errData.find(this->varErrName())->second;
abscissaErrorGenerated_ = kTRUE;
} else {
while (forceNew || !abscissaErrorGenerated_) {
abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_);
if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) {
abscissaErrorGenerated_ = kTRUE;
forceNew = kFALSE;
}
}
}
return abscissaError_;
}
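// Hedged usage sketch (names illustrative): in a toy-generation loop the
// per-event error would be drawn before evaluating the terms at a candidate
// decay time:
//
//   Double_t err = dtPdf.generateError( kTRUE );  // force a fresh value
//   dtPdf.calcLikelihoodInfo( tCandidate, err );  // terms at (t, sigma_t)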
/*
LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
// generateError SHOULD have been called before this
// function but will call it here just to make sure
// (has no effect if it has already been called)
abscissaError_ = this->generateError();
// If the PDF is scaled by the per-event error then need to update the PDF height for each event
Bool_t scale(kFALSE);
for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
scale |= (*iter);
}
for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
scale |= (*iter);
}
if (scale || (!this->heightUpToDate() && !this->cachePDF())) {
this->calcPDFHeight(kinematics);
this->heightUpToDate(kTRUE);
}
// Generate the value of the abscissa.
Bool_t gotAbscissa(kFALSE);
Double_t genVal(0.0);
Double_t genPDFVal(0.0);
LauFitData genAbscissa;
const Double_t xMin = this->minAbscissa();
const Double_t xMax = this->maxAbscissa();
const Double_t xRange = xMax - xMin;
while (!gotAbscissa) {
genVal = LauRandom::randomFun()->Rndm()*xRange + xMin;
this->calcLikelihoodInfo(genVal, abscissaError_);
genPDFVal = this->getUnNormLikelihood();
if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;}
if (genPDFVal > this->getMaxHeight()) {
cerr<<"Warning in LauDecayTimePdf::generate()."
<<" genPDFVal = "<<genPDFVal<<" is larger than the specified PDF height "
<<this->getMaxHeight()<<" for the abscissa = "<<genVal<<". Need to reset height to be larger than "
<<genPDFVal<<" by using the setMaxHeight(Double_t) function"
<<" and re-run the Monte Carlo generation!"<<endl;
}
}
genAbscissa[this->varName()] = genVal;
// mark that we need a new error to be generated next time
abscissaErrorGenerated_ = kFALSE;
return genAbscissa;
}
*/
void LauDecayTimePdf::setErrorHisto(const TH1* hist)
{
if ( errHist_ != 0 ) {
cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<endl;
return;
}
errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError());
}
void LauDecayTimePdf::setHistoPdf(const TH1* hist)
{
if ( pdfHist_ != 0 ) {
cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<endl;
return;
}
pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa());
}
void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline)
{
if ( effiFun_ != 0 ) {
cerr<<"WARNING in LauDecayTimePdf::setEffiPdf : efficiency function already set, not doing it again."<<endl;
return;
}
effiFun_ = spline;
}
LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName)
{
for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return 0;
}
const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const
{
for ( std::vector<LauAbsRValue*>::const_iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return 0;
}
void LauDecayTimePdf::updatePulls()
{
for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
std::vector<LauParameter*> params = (*iter)->getPars();
for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) {
if (!(*iter)->fixed()) {
(*params_iter)->updatePull();
}
}
}
}