diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh index 01a4298..cc05761 100644 --- a/inc/LauDecayTimePdf.hh +++ b/inc/LauDecayTimePdf.hh @@ -1,526 +1,525 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauDecayTimePdf.hh \brief File containing declaration of LauDecayTimePdf class. */ /*! \class LauDecayTimePdf \brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time. LauDecayTimePdf is a class that provides the PDFs for describing the time-dependence of the various terms in a particle/antiparticle decay to a common final state. The various terms have the form of exponentially decaying trigonometric or hyperbolic functions convolved with an N-Gaussian resolution function. */ #ifndef LAU_DECAYTIME_PDF #define LAU_DECAYTIME_PDF #include <map> #include <vector> #include "TString.h" #include "LauAbsRValue.hh" #include "LauFitDataTree.hh" #include "LauComplex.hh" class TH1; class Lau1DHistPdf; class Lau1DCubicSpline; class LauDecayTimePdf { public: //! The functional form of the decay time PDF enum FuncType { - Hist, /*Hist PDF for fixed background*/ - Delta, /*< Delta function - for prompt background */ - Exp, /*< Exponential function - for non-prompt background or charged B's */ - DeltaExp, /*< Delta + Exponential function - for background with prompt and non-prompt parts */ - ExpTrig, /*< Exponential function with Delta m driven mixing - for neutral B_d's */ - ExpHypTrig, /*< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's */ - SimFitNormBd, /*< Exponential function with Delta m driven mixing - for neutral B_d's for flavour specific normalisation mode */ - SimFitNormBs, /*< Exponential function with Delta m driven mixing - for neutral B_d's for CP signal modes */ - SimFitSigBd, /*< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's for flavour specific normalisation mode */ - SimFitSigBs /*< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's for CP signal modes */ + Hist, //!< Hist PDF for fixed background + Delta, //!< Delta function - for prompt background + Exp, //!< Exponential function - for non-prompt background or charged B's + DeltaExp, //!< Delta + Exponential function - for background with prompt and non-prompt parts + ExpTrig, //!< Exponential function with Delta m driven mixing - for neutral B_d's + ExpHypTrig, //!< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's + SimFitNormBd, //!< Exponential function with Delta m driven mixing - for neutral B_d's for flavour specific normalisation mode + SimFitNormBs, //!< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's for flavour specific normalisation mode + SimFitSigBd, //!< Exponential function with Delta m driven mixing - for neutral B_d's for CP signal modes + SimFitSigBs //!< Exponential function with both Delta m and Delta Gamma
driven mixing - for neutral B_s's for CP signal modes }; //! State of complex error function calculation enum State { - Good, /*< All OK */ - Overflow1, /*< Overflow in term 1 */ - Overflow2 /*< Overflow in term 2 */ + Good, //!< All OK + Overflow1, //!< Overflow in term 1 + Overflow2 //!< Overflow in term 2 }; //! How is the decay time measured - absolute or difference enum TimeMeasurementMethod { - DecayTime, /*< Absolute measurement of decay time, e.g. LHCb scenario */ - DecayTimeDiff /*< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario */ + DecayTime, //!< Absolute measurement of decay time, e.g. LHCb scenario + DecayTimeDiff //!< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario }; //! How is the decay-time efficiency information going to be given? enum EfficiencyMethod { - Spline, /*< As a cubic spline, e.g. BaBar*/ - dtHist, /*< As a histogram (TH1D/TH1F)*/ - Flat /*< As a flat distribution (constant)*/ + Spline, //!< As a cubic spline + Binned, //!< As a histogram (TH1D/TH1F) + Flat //!< As a flat distribution (constant) }; //! Constructor /*! \param [in] theVarName the name of the decay time variable in the input data \param [in] theVarErrName the name of the decay time error variable in the input data \param [in] params the parameters of the PDF \param [in] minAbscissaVal the minimum value of the abscissa \param [in] maxAbscissaVal the maximum value of the abscissa \param [in] minAbscissaErr the minimum value of the abscissa error \param [in] maxAbscissaErr the maximum value of the abscissa error \param [in] type the functional form of the PDF \param [in] nGauss the number of Gaussians in the resolution function \param [in] scale controls whether the Gaussian parameters are scaled by the per-event error \param [in] method set the type of the time measurement used in the given experiment \param [in] effMethod the method by which the decay-time efficiency is modelled */ LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params, Double_t minAbscissaVal, Double_t maxAbscissaVal, Double_t minAbscissaErr, Double_t maxAbscissaErr, const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline);
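For orientation, the ExpTrig and ExpHypTrig shapes correspond schematically, before the resolution and efficiency are applied, to

\[
P(t) \;\propto\; e^{-t/\tau}\left[\cosh\!\frac{\Delta\Gamma\, t}{2} \;+\; b\,\sinh\!\frac{\Delta\Gamma\, t}{2} \;+\; c\,\cos(\Delta m\, t) \;+\; d\,\sin(\Delta m\, t)\right],
\]

where the coefficients \(b\), \(c\), \(d\) are supplied by the fit model that combines the terms returned by this class (ExpTrig is the \(\Delta\Gamma = 0\) special case). The full PDF is this expression convolved with the resolution function, multiplied by the decay-time efficiency \(\varepsilon(t)\), and divided by the matching combination of the normalisation terms.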
//! Constructor /*! \param [in] theVarName the name of the decay time variable in the input data \param [in] theVarErrName the name of the decay time error variable in the input data \param [in] params the parameters of the PDF \param [in] minAbscissaVal the minimum value of the abscissa \param [in] maxAbscissaVal the maximum value of the abscissa \param [in] minAbscissaErr the minimum value of the abscissa error \param [in] maxAbscissaErr the maximum value of the abscissa error \param [in] type the functional form of the PDF \param [in] nGauss the number of Gaussians in the resolution function \param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error \param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error \param [in] method set the type of the time measurement used in the given experiment \param [in] effMethod the method by which the decay-time efficiency is modelled */ LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const Double_t minAbscissaErr, const Double_t maxAbscissaErr, const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline); //! Destructor virtual ~LauDecayTimePdf(); //! Set the histogram to be used for generation of per-event decay time errors /*! If not set, generation will fall back to using a Landau distribution \param [in] hist the histogram of the distribution */ void setErrorHisto(const TH1* hist); //! Set the Histogram PDF in case of fixed background PDF void setHistoPdf(const TH1* hist); //! Set the efficiency PDF in the form of a spline /*! \param [in] spline the efficiency spline function */ void setEffiSpline(Lau1DCubicSpline* spline); //! Set the efficiency PDF in the form of a spline /*! \param [in] spline the efficiency spline function \param [in] effiPars vector of LauParameters to float the y values */ void setEffiSpline(Lau1DCubicSpline* spline, std::vector<LauParameter*> effiPars); //! Set the parameters of the Landau distribution used to generate the per-event decay time errors /*! \param [in] mpv the MPV (most probable value) of the distribution \param [in] sigma the width of the distribution */ void setErrorDistTerms(const Double_t mpv, const Double_t sigma) { errorDistMPV_ = mpv; errorDistSigma_ = sigma; } //! Set the efficiency PDF in the form of a Histogram /*! \param [in] hist the histogram of efficiencies */ - void setEffiHist(TH1* hist) { - effiHist_ = hist; - } + void setEffiHist(const TH1* hist); //! Retrieve the name of the decay time variable const TString& varName() const {return varName_;} //! Retrieve the name of the decay time error variable const TString& varErrName() const {return varErrName_;} //! Turn on or off the resolution function void doSmearing(Bool_t smear) {smear_ = smear;} //! Determine if the resolution function is turned on or off Bool_t doSmearing() const {return smear_;} //! Cache information from data /*! Will cache, for every event, the abscissa values and, if all parameters are fixed, the PDF value. \param [in] inputData the data to be used to calculate everything */ virtual void cacheInfo(const LauFitDataTree& inputData); //! Calculate the likelihood (and all associated information) given value of the abscissa /*! \param [in] abscissa the value of the abscissa */ virtual void calcLikelihoodInfo(Double_t abscissa);
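A minimal construction sketch, assuming a single-Gaussian resolution and hypothetical parameter objects meanPar ("mean_0"), sigmaPar ("sigma_0"), tauPar ("tau") and deltaMPar ("deltaM") created elsewhere (the naming scheme is the one expected by initialise()), plus a hypothetical efficiency histogram effHist:

// Hypothetical usage sketch - names and ranges are illustrative only
std::vector<LauAbsRValue*> params { meanPar, sigmaPar, tauPar, deltaMPar };
std::vector<Bool_t> scale { kTRUE }; // scale the single Gaussian by the per-event error
LauDecayTimePdf dtPdf( "decayTime", "decayTimeErr", params,
                       0.0, 15.0, 0.0, 0.15, // decay-time and decay-time-error ranges
                       LauDecayTimePdf::ExpTrig, 1, scale,
                       LauDecayTimePdf::DecayTime,
                       LauDecayTimePdf::EfficiencyMethod::Binned );
dtPdf.setEffiHist( effHist ); // TH1 spanning exactly the decay-time range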
//! Calculate the likelihood (and all associated information) given value of the abscissa and its error /*! \param [in] abscissa the value of the abscissa \param [in] abscissaErr the error on the abscissa */ virtual void calcLikelihoodInfo(Double_t abscissa, Double_t abscissaErr); //! Retrieve the likelihood (and all associated information) given the event number /*! \param [in] iEvt the event number */ virtual void calcLikelihoodInfo(UInt_t iEvt); //! Evaluate the complex error function LauComplex ComplexErf(Double_t x, Double_t y); //! Compute the imaginary error function: Erfi(z) = -I*Erf(iz) LauComplex Erfi(Double_t x, Double_t y); //! Compute the complementary complex error function LauComplex ComplexErfc(Double_t x, Double_t y); //! Get FuncType from model FuncType getFuncType() const {return type_;} //! Get the exponential term Double_t getExpTerm() const {return expTerm_;} //! Get the cos(Dm*t) term (multiplied by the exponential) Double_t getCosTerm() const {return cosTerm_;} //! Get the sin(Dm*t) term (multiplied by the exponential) Double_t getSinTerm() const {return sinTerm_;} //! Get the cosh(DG/2*t) term (multiplied by the exponential) Double_t getCoshTerm() const {return coshTerm_;} //! Get the sinh(DG/2*t) term (multiplied by the exponential) Double_t getSinhTerm() const {return sinhTerm_;} //! Get the normalisation related to the exponential term only Double_t getNormTermExp() const {return normTermExp_;} //! Get the normalisation related to the cos term only Double_t getNormTermCos() const {return normTermCos_;} //! Get the normalisation related to the sin term only Double_t getNormTermSin() const {return normTermSin_;} //! Get the first term in the normalisation (from integrating the cosh) Double_t getNormTermCosh() const {return normTermCosh_;} //! Get the second term in the normalisation (from integrating the sinh) Double_t getNormTermSinh() const {return normTermSinh_;} //! Get error probability density from the error distribution Double_t getErrTerm() const {return errTerm_;} //! Get efficiency probability density from the efficiency distribution Double_t getEffiTerm() const {return effiTerm_;} //! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit /*! \return the parameters of the PDF */ const std::vector<LauAbsRValue*>& getParameters() const { return param_; } //! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit /*! \return the parameters of the PDF */ std::vector<LauAbsRValue*>& getParameters() { return param_; } //! Update the pulls for all parameters void updatePulls(); // Calculate the normalisation for the non-smeared hyperbolic terms Double_t normExpHypTerm(Double_t Abs); Double_t normExpHypTermDep(Double_t Abs); // Calculate normalisation for non-smeared cos and sin terms using the // complex number method LauComplex nonSmearedCosSinIntegral(Double_t minAbs, Double_t maxAbs); - // Store the normalisation decaytime cosh and sinh terms - void storeIntegralTimeDep(Double_t minAbs, Double_t maxAbs); + // Calculate and store the normalisation terms + void calcNorm(); + void calcPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight = 1.0); //! Generate the value of the error /*! If scaling by the error, this should be called before calling generate \param [in] forceNew forces generation of a new value */ Double_t generateError(const Bool_t forceNew = kFALSE);
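In the absence of resolution and for the DecayTime measurement method, the exponential normalisation accessed through getNormTermExp() is simply

\[
\int_{t_{\min}}^{t_{\max}} e^{-t/\tau}\,\mathrm{d}t \;=\; \tau\left(e^{-t_{\min}/\tau} - e^{-t_{\max}/\tau}\right),
\]

which is the expression evaluated (per efficiency bin, with an optional weight) by calcPartialIntegrals in the implementation file.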
//! Generate an event from the PDF - TODO not clear that this is really needed, perhaps for background? commented out for now /*! \param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence */ //virtual LauFitData generate(const LauKinematics* kinematics); //! Determine the state of the calculation State state() const {return state_;} //! Retrieve the decay time error Double_t abscissaError() const {return abscissaError_;} //! Retrieve the decay time minimum value Double_t minAbscissa() const {return minAbscissa_;} //! Retrieve the decay time maximum value Double_t maxAbscissa() const {return maxAbscissa_;} //! Retrieve the decay time error minimum value Double_t minAbscissaError() const {return minAbscissaError_;} //! Retrieve the decay time error maximum value Double_t maxAbscissaError() const {return maxAbscissaError_;} virtual void checkPositiveness() {} // Nothing to check here. // NB calcNorm and calcPDFHeight only calculate the gaussian information for the (type_ == Delta) case // TODO - can we delete these? //! Calculate the normalisation factor of the PDF //virtual void calcNorm(); //! Calculate the maximum height of the PDF //virtual void calcPDFHeight( const LauKinematics* kinematics ); //! Get efficiency parameters to float in the fit std::vector<LauParameter*>& getEffiPars() {return effiPars_;} //! Update spline Y values when floating the decay time acceptance /*! \param [in] params the vector of LauParameters describing the Y values */ void updateEffiSpline(std::vector<LauParameter*> params); protected: typedef std::map< Int_t, LauParameter> LauTagCatParamMap; //! Set up the initial state correctly - called by the constructors void initialise(); //! Calculate the pure physics terms with no resolution function applied void calcNonSmearedTerms(const Double_t abscissa); //! Set the state of the calculation inline void state(State s) {state_ = s;} //! Calculate the exponential auxiliary term for the convolution void calcTrigExponent(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reTerm, Double_t& imTerm); //! Calculate the convolution of exponential*sin or exponential*cos with a Gaussian void calcTrigConv(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reOutTerm, Double_t& imOutTerm, Bool_t trig); //! Retrieve the number of PDF parameters /*! \return the number of PDF parameters */ UInt_t nParameters() const {return param_.size();} //! Retrieve the specified parameter /*! \param [in] parName the parameter to retrieve */ LauAbsRValue* findParameter(const TString& parName); //! Retrieve the specified parameter /*! \param [in] parName the parameter to retrieve */ const LauAbsRValue* findParameter(const TString& parName) const; private: //! Copy constructor (not implemented) LauDecayTimePdf(const LauDecayTimePdf& other); //! Copy assignment operator (not implemented) LauDecayTimePdf& operator=(const LauDecayTimePdf& other); //! Name of the variable TString varName_; //! Name of the error variable TString varErrName_; //! The parameters of the PDF std::vector<LauAbsRValue*> param_; //! Smear with the resolution model or not Bool_t smear_; //! The minimum value of the decay time Double_t minAbscissa_; //! The maximum value of the decay time Double_t maxAbscissa_; //! The minimum value of the decay time error Double_t minAbscissaError_; //! The maximum value of the decay time error Double_t maxAbscissaError_; //! The current value of the decay time error Double_t abscissaError_; //! Flag whether a value for the decay time error has been generated Bool_t abscissaErrorGenerated_; //! Value of the MPV of the Landau dist used to generate the Delta t error Double_t errorDistMPV_;
//! Value of the width of the Landau dist used to generate the Delta t error Double_t errorDistSigma_; //! The number of gaussians in the resolution model const UInt_t nGauss_; // Parameters of the gaussian(s) that account for the resolution: //! mean (offset) of each Gaussian in the resolution function std::vector<LauAbsRValue*> mean_; //! spread (sigma) of each Gaussian in the resolution function std::vector<LauAbsRValue*> sigma_; //! fraction of each Gaussian in the resolution function std::vector<LauAbsRValue*> frac_; // Parameters of the exponential: the mean life (tau) and the frequency of oscillation. //! Lifetime parameter LauAbsRValue* tau_; //! Mass difference parameter LauAbsRValue* deltaM_; //! Width difference parameter LauAbsRValue* deltaGamma_; //! Parameter for the fraction of prompt events in DeltaExp LauAbsRValue* fracPrompt_; // Which type of Delta t PDF is this? const FuncType type_; // Which type of time measurement method is this? const TimeMeasurementMethod method_; // Which method for eff/dt input are we using? const EfficiencyMethod effMethod_; // Scale the mean and sigma by the per-event error on Delta t? const std::vector<Bool_t> scaleMeans_; const std::vector<Bool_t> scaleWidths_; // The values of the Exp, ExpCos and ExpSin terms // (NB the Delta terms, i.e. just the gaussian, are stored as the PDF value) //! The exponential term Double_t expTerm_; //! The cos(Dm*t) term (multiplied by the exponential) Double_t cosTerm_; //! The sin(Dm*t) term (multiplied by the exponential) Double_t sinTerm_; //! The cosh(DG/2*t) term (multiplied by the exponential) Double_t coshTerm_; //! The sinh(DG/2*t) term (multiplied by the exponential) Double_t sinhTerm_; // Normalisation that is used in the amplitude independent of cosh/sinh term Double_t normTermExp_; // Normalisation that is used in the amplitude for cos term Double_t normTermCos_; // Normalisation that is used in the amplitude for sin term Double_t normTermSin_; //! The first term in the normalisation (from integrating the cosh) Double_t normTermCosh_; //! The second term in the normalisation (from integrating the sinh) Double_t normTermSinh_; //! Error Hist term Double_t errTerm_; //! Hist PDF term Double_t pdfTerm_; //! Efficiency PDF term Double_t effiTerm_; //! The cache of the decay times std::vector<Double_t> abscissas_; //! The cache of the per-event errors on the decay time std::vector<Double_t> abscissaErrors_; //! The cache of the exponential terms std::vector<Double_t> expTerms_; //! The cache of the exponential * cos(Dm*t) terms std::vector<Double_t> cosTerms_; //! The cache of the exponential * sin(Dm*t) terms std::vector<Double_t> sinTerms_; //! The cache of the exponential * cosh(DG/2*t) terms std::vector<Double_t> coshTerms_; //! The cache of the exponential * sinh(DG/2*t) terms std::vector<Double_t> sinhTerms_; //! The cache of the exponential normalisation std::vector<Double_t> normTermsExp_; //! The cache of the first term in the normalisation std::vector<Double_t> normTermsCosh_; //! The cache of the second term in the normalisation std::vector<Double_t> normTermsSinh_; // To be deleted once all the code has been modified std::vector<Double_t> expNorms_; // cache of the efficiency terms std::vector<Double_t> effiTerms_; //! The state of the complex error function calculation State state_; //! Histogram PDF for abscissa error distribution Lau1DHistPdf* errHist_; //! Histogram PDF for abscissa distribution Lau1DHistPdf* pdfHist_; //! efficiency PDF as a spline Lau1DCubicSpline* effiFun_; //! efficiency PDF as a histogram TH1* effiHist_;
//! Vector of parameters to float the acceptance std::vector<LauParameter*> effiPars_; ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF }; #endif diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc index 1627915..5e0d5c7 100644 --- a/src/LauDecayTimePdf.cc +++ b/src/LauDecayTimePdf.cc @@ -1,1166 +1,1240 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauDecayTimePdf.cc \brief File containing implementation of LauDecayTimePdf class. */ #include <cstdlib> #include <iostream> using std::cout; using std::cerr; using std::endl; #include <complex> using std::complex; #include "TMath.h" #include "TRandom.h" #include "TSystem.h" #include "TH1.h" #include "Lau1DCubicSpline.hh" #include "Lau1DHistPdf.hh" #include "LauConstants.hh" #include "LauComplex.hh" #include "LauDecayTimePdf.hh" #include "LauFitDataTree.hh" #include "LauParameter.hh" #include "LauRandom.hh" ClassImp(LauDecayTimePdf) LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params, Double_t minAbscissaVal, Double_t maxAbscissaVal, Double_t minAbscissaErr, Double_t maxAbscissaErr, FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : varName_(theVarName), varErrName_(theVarErrName), param_(params), smear_(kTRUE), minAbscissa_(minAbscissaVal), maxAbscissa_(maxAbscissaVal), minAbscissaError_(minAbscissaErr), maxAbscissaError_(maxAbscissaErr), abscissaError_(0.0), abscissaErrorGenerated_(kFALSE), errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 nGauss_(nGauss), mean_(nGauss_,0), sigma_(nGauss_,0), frac_(nGauss_-1,0), tau_(0), deltaM_(0), deltaGamma_(0), fracPrompt_(0), type_(type), method_(method), effMethod_(effMethod), scaleMeans_(scale), scaleWidths_(scale), expTerm_(0.0), cosTerm_(0.0), sinTerm_(0.0), coshTerm_(0.0), sinhTerm_(0.0), normTermExp_(0.0), normTermCos_(0.0), normTermSin_(0.0), normTermCosh_(0.0), normTermSinh_(0.0), errTerm_(0.0), pdfTerm_(0.0), effiTerm_(0.0), state_(Good), errHist_(nullptr), pdfHist_(nullptr), effiFun_(nullptr), effiHist_(nullptr), effiPars_(0) { this->initialise(); // Calculate the decay-time integrals that do not depend on the per-event information - this->storeIntegralTimeDep(minAbscissa_, maxAbscissa_); + // TODO - this is almost certainly the wrong place to do this + this->calcNorm(); }
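Both constructors set up the same N-Gaussian resolution model. Schematically, with per-event error \(\sigma_t\) and the scaling applied only to those Gaussians whose scaleMeans/scaleWidths flags are set,

\[
R(\Delta t) \;=\; \sum_{i=1}^{N} f_i\, \frac{1}{\sqrt{2\pi}\,\tilde\sigma_i} \exp\!\left(-\frac{(\Delta t - \tilde\mu_i)^2}{2\tilde\sigma_i^2}\right), \qquad \tilde\mu_i = \mu_i\,\sigma_t \ \text{or}\ \mu_i, \quad \tilde\sigma_i = \sigma_i\,\sigma_t \ \text{or}\ \sigma_i,
\]

where only \(N-1\) fractions are stored and the first fraction is recovered as \(f_1 = 1 - \sum_{i=2}^{N} f_i\), as implemented in calcLikelihoodInfo.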
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params, Double_t minAbscissaVal, Double_t maxAbscissaVal, Double_t minAbscissaErr, Double_t maxAbscissaErr, FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : varName_(theVarName), varErrName_(theVarErrName), param_(params), smear_(kTRUE), minAbscissa_(minAbscissaVal), maxAbscissa_(maxAbscissaVal), minAbscissaError_(minAbscissaErr), maxAbscissaError_(maxAbscissaErr), abscissaError_(0.0), abscissaErrorGenerated_(kFALSE), errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 nGauss_(nGauss), mean_(nGauss_,0), sigma_(nGauss_,0), frac_(nGauss_-1,0), tau_(0), deltaM_(0), deltaGamma_(0), fracPrompt_(0), type_(type), method_(method), effMethod_(effMethod), scaleMeans_(scaleMeans), scaleWidths_(scaleWidths), expTerm_(0.0), cosTerm_(0.0), sinTerm_(0.0), coshTerm_(0.0), sinhTerm_(0.0), normTermExp_(0.0), normTermCos_(0.0), normTermSin_(0.0), normTermCosh_(0.0), normTermSinh_(0.0), errTerm_(0.0), pdfTerm_(0.0), effiTerm_(0.0), state_(Good), errHist_(nullptr), pdfHist_(nullptr), effiFun_(nullptr), effiHist_(nullptr), effiPars_(0) { this->initialise(); // Calculate the decay-time integrals that do not depend on the per-event information - this->storeIntegralTimeDep(minAbscissa_, maxAbscissa_); + // TODO - this is almost certainly the wrong place to do this + this->calcNorm(); } LauDecayTimePdf::~LauDecayTimePdf() { // Destructor delete errHist_; errHist_ = 0; delete pdfHist_; pdfHist_ = 0; delete effiFun_; effiFun_ = 0; + delete effiHist_; effiHist_ = 0; } void LauDecayTimePdf::initialise() { // The parameters are: // - the mean and the sigma (bias and spread in resolution) of the gaussian(s) // - the mean lifetime, denoted tau, of the exponential decay // - the frequency of oscillation, denoted Delta m, of the cosine and sine terms // - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms // // The next two arguments specify the range in which the PDF is defined, // and the PDF will be normalised w.r.t. these limits. // // The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig), the number of gaussians // and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t // First check whether the scale vector is nGauss in size if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) { cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<endl; gSystem->Exit(EXIT_FAILURE); } if (type_ == Hist){ if (this->nParameters() != 0){ cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<endl; gSystem->Exit(EXIT_FAILURE); } }else{ TString meanName("mean_"); TString sigmaName("sigma_"); TString fracName("frac_"); Bool_t foundParams(kTRUE); for (UInt_t i(0); i<nGauss_; ++i) { TString tempName(meanName); tempName += i; TString tempName2(sigmaName); tempName2 += i; TString tempName3(fracName); tempName3 += i; mean_[i] = this->findParameter(tempName); foundParams &= (mean_[i] != 0); sigma_[i] = this->findParameter(tempName2); foundParams &= (sigma_[i] != 0); if (i!=0) { frac_[i-1] = this->findParameter(tempName3); foundParams &= (frac_[i-1] != 0); } } if (type_ == Delta) { if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == Exp) { tau_ = this->findParameter("tau"); foundParams &= (tau_ != 0); if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == ExpTrig) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == ExpHypTrig) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); deltaGamma_ = this->findParameter("deltaGamma"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0);
foundParams &= (deltaGamma_ != 0); if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == DeltaExp) { tau_ = this->findParameter("tau"); fracPrompt_ = this->findParameter("frac_prompt"); foundParams &= (tau_ != 0); foundParams &= (fracPrompt_ != 0); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == SimFitNormBd) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : SimFitNormBd type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == SimFitSigBd) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : SimFitSigBd type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == SimFitNormBs) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); deltaGamma_ = this->findParameter("deltaGamma"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0); foundParams &= (deltaGamma_ != 0); if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : SimFitNormBs type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } else if (type_ == SimFitSigBs) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); deltaGamma_ = this->findParameter("deltaGamma"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0); foundParams &= (deltaGamma_ != 0); if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) { cerr<<"ERROR in LauDecayTimePdf::initialise : SimFitSigBs type PDF requires:"<<endl; gSystem->Exit(EXIT_FAILURE); } } } // Cache the normalisation factor //this->calcNorm(); } void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData) { Bool_t hasBranch = inputData.haveBranch(this->varName()); if (!hasBranch) { cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<endl; return; } hasBranch = inputData.haveBranch(this->varErrName()); if (!hasBranch) { cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<endl; return; } // Pass the data to the error PDF for caching if ( errHist_ ) { errHist_->cacheInfo(inputData); } if (type_ == Hist){ // Pass the data to the decay-time PDF for caching if ( pdfHist_ ) { pdfHist_->cacheInfo(inputData); } }else{ // determine whether we are caching our PDF value //TODO //Bool_t doCaching( this->nFixedParameters() == this->nParameters() ); //this->cachePDF( doCaching ); // clear the vectors and reserve enough space const UInt_t nEvents = inputData.nEvents(); abscissas_.clear(); abscissas_.reserve(nEvents); abscissaErrors_.clear(); abscissaErrors_.reserve(nEvents); expTerms_.clear(); expTerms_.reserve(nEvents); cosTerms_.clear(); cosTerms_.reserve(nEvents); sinTerms_.clear(); sinTerms_.reserve(nEvents); coshTerms_.clear(); coshTerms_.reserve(nEvents); sinhTerms_.clear(); sinhTerms_.reserve(nEvents); normTermsExp_.clear(); normTermsExp_.reserve(nEvents); normTermsCosh_.clear(); normTermsCosh_.reserve(nEvents); normTermsSinh_.clear(); normTermsSinh_.reserve(nEvents); effiTerms_.clear(); effiTerms_.reserve(nEvents); for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues =
inputData.getData(iEvt); LauFitData::const_iterator iter = dataValues.find(this->varName()); const Double_t abscissa = iter->second; if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<" is outside the allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<endl; gSystem->Exit(EXIT_FAILURE); } abscissas_.push_back( abscissa ); iter = dataValues.find(this->varErrName()); Double_t abscissaErr = iter->second; if (abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError()) { cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<" is outside the allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<endl; gSystem->Exit(EXIT_FAILURE); } abscissaErrors_.push_back(abscissaErr); this->calcLikelihoodInfo(abscissa, abscissaErr); expTerms_.push_back(expTerm_); cosTerms_.push_back(cosTerm_); sinTerms_.push_back(sinTerm_); coshTerms_.push_back(coshTerm_); sinhTerms_.push_back(sinhTerm_); normTermsExp_.push_back(normTermExp_); normTermsCosh_.push_back(normTermCosh_); normTermsSinh_.push_back(normTermSinh_); effiTerms_.push_back(effiTerm_); } } }
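The TODO in calcLikelihoodInfo(UInt_t) below asks for a check that no floating parameter has changed before the cached terms are reused. A minimal sketch of one way to do this, assuming a new member std::vector<Double_t> cachedPars_ (hypothetical, not part of the class as it stands):

// Hypothetical helper: returns kTRUE if any parameter value has changed since
// the last call, refreshing the cached copies as a side effect.
Bool_t LauDecayTimePdf::parsChanged()
{
	Bool_t changed = ( cachedPars_.size() != param_.size() );
	cachedPars_.resize( param_.size() );
	for ( std::size_t i = 0; i < param_.size(); ++i ) {
		const Double_t val = param_[i]->value();
		if ( cachedPars_[i] != val ) {
			changed = kTRUE;
			cachedPars_[i] = val;
		}
	}
	return changed;
}

calcLikelihoodInfo(UInt_t) could then fall back to the full recalculation branch whenever this returns kTRUE.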
void LauDecayTimePdf::calcLikelihoodInfo(UInt_t iEvt) { if (type_ == Hist){ if ( pdfHist_ ) { pdfHist_->calcLikelihoodInfo(iEvt); pdfTerm_ = pdfHist_->getLikelihood(); } else { pdfTerm_ = 1.0; } }else{ expTerm_ = expTerms_[iEvt]; cosTerm_ = cosTerms_[iEvt]; sinTerm_ = sinTerms_[iEvt]; coshTerm_ = coshTerms_[iEvt]; sinhTerm_ = sinhTerms_[iEvt]; normTermExp_ = normTermsExp_[iEvt]; normTermCosh_ = normTermsCosh_[iEvt]; normTermSinh_ = normTermsSinh_[iEvt]; } if ( errHist_ ) { errHist_->calcLikelihoodInfo(iEvt); errTerm_ = errHist_->getLikelihood(); } else { errTerm_ = 1.0; } const Double_t abscissa = abscissas_[iEvt]; // Parameters will change in some cases - update things! if (type_ == SimFitNormBd || type_ == SimFitSigBd || type_ == SimFitNormBs || type_ == SimFitSigBs){ const Double_t abscissaErr = abscissaErrors_[iEvt]; this->calcLikelihoodInfo(abscissa,abscissaErr); } switch( effMethod_ ) /* If you're going to add an effMethod, extend this switch */ { case EfficiencyMethod::Spline : if ( effiFun_ ) { this->updateEffiSpline(effiPars_); effiTerm_ = effiFun_->evaluate(abscissa); //EDITED XXX if (effiTerm_>1.0){effiTerm_=1.0;} if (effiTerm_<0.0){effiTerm_=0.0;} } else { effiTerm_ = 1.0; } break; default : effiTerm_ = effiTerms_[iEvt]; break; } // TODO need a check in here that none of the floating parameter values have changed // If they have, then we need to recalculate all or some of the terms /* if ( parsChanged ) { const Double_t abscissa = abscissas_[iEvt]; const Double_t abscissaErr = abscissaErrors_[iEvt]; this->calcLikelihoodInfo(abscissa, abscissaErr); } */ } void LauDecayTimePdf::calcLikelihoodInfo(Double_t abscissa) { // Check whether any of the gaussians should be scaled - if any of them should, we need the per-event error Bool_t scale(kFALSE); for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) { scale |= (*iter); } for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) { scale |= (*iter); } if (scale) { cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on Delta t not provided, cannot calculate anything."<<endl; return; } else { this->calcLikelihoodInfo(abscissa, 0.0); } } void LauDecayTimePdf::calcLikelihoodInfo(Double_t abscissa, Double_t abscissaErr) { if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<" is outside the allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<endl; gSystem->Exit(EXIT_FAILURE); } if (abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError()) { cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the Delta t error: "<<abscissaErr<<" is outside the allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<endl; gSystem->Exit(EXIT_FAILURE); } switch( effMethod_ ) { case EfficiencyMethod::Spline : effiTerm_ = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break; - case EfficiencyMethod::dtHist : effiTerm_ = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break; + case EfficiencyMethod::Binned : effiTerm_ = effiHist_ ?
effiHist_->GetBinContent(effiHist_->FindFixBin(abscissa)) : 1.0 ; break; case EfficiencyMethod::Flat : effiTerm_ = 1.0 ; break; // default : cerr << "Warning: EFFICIENCY INPUT METHOD NOT SET" << endl; effiTerms_.push_back( 1.0 ); } // Initialise the various terms to zero if (type_ == Hist){ if ( pdfHist_ ) { pdfHist_->calcLikelihoodInfo(abscissa); pdfTerm_ = pdfHist_->getLikelihood(); } else { pdfTerm_ = 1.0; } }else{ // Reset the state to Good this->state(Good); // If we're not using the resolution function, calculate the simple terms and return if (!this->doSmearing()) { this->calcNonSmearedTerms(abscissa); return; } //TODO how much to be added below for SimFitNormBd/SimFitNormBs/SimFitSigBd/SimFitSigBs // Get all the up-to-date parameter values std::vector<Double_t> frac(nGauss_); std::vector<Double_t> mean(nGauss_); std::vector<Double_t> sigma(nGauss_); Double_t tau(0.0); Double_t deltaM(0.0); Double_t fracPrompt(0.0); Double_t Delta_gamma(0.0); frac[0] = 1.0; for (UInt_t i(0); i<nGauss_; ++i) { mean[i] = mean_[i]->value(); sigma[i] = sigma_[i]->value(); if (i != 0) { frac[i] = frac_[i-1]->value(); frac[0] -= frac[i]; } } if (type_ != Delta) { tau = tau_->value(); if (type_ == ExpTrig) { deltaM = deltaM_->value(); } if (type_ == DeltaExp) { fracPrompt = fracPrompt_->value(); } if (type_ == ExpHypTrig){ deltaM = deltaM_->value(); Delta_gamma = deltaGamma_->value(); } } // Scale the gaussian parameters by the per-event error on Delta t (if appropriate) for (UInt_t i(0); i<nGauss_; ++i) { if (scaleMeans_[i]) { mean[i] *= abscissaErr; } if (scaleWidths_[i]) { sigma[i] *= abscissaErr; } } std::vector<Double_t> x(nGauss_); const Double_t xMax = this->maxAbscissa(); const Double_t xMin = this->minAbscissa(); for (UInt_t i(0); i<nGauss_; ++i) { x[i] = abscissa - mean[i]; } // For a delta function or delta+exponential, compute the Gaussian value directly if (type_ == Delta || type_ == DeltaExp) { Double_t value(0.0); for (UInt_t i(0); i<nGauss_; ++i) { if (sigma[i] > 1e-10) { Double_t exponent(0.0); Double_t norm(0.0); Double_t scale = LauConstants::root2*sigma[i]; Double_t scale2 = LauConstants::rootPiBy2*sigma[i]; exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]); norm = scale2*(TMath::Erf((xMax - mean[i])/scale) - TMath::Erf((xMin - mean[i])/scale)); value += frac[i]*TMath::Exp(exponent)/norm; } } } if (type_ != Delta) { std::vector<Double_t> expTerms(nGauss_); std::vector<Double_t> cosTerms(nGauss_); std::vector<Double_t> sinTerms(nGauss_); std::vector<Double_t> coshTerms(nGauss_); std::vector<Double_t> sinhTerms(nGauss_); std::vector<Double_t> expTermsNorm(nGauss_); // TODO - TEL changed this name to make it compile - please check! std::vector<Double_t> SinhTermsNorm(nGauss_); // Calculate values of the PDF convolved with each Gaussian for a given value of the abscissa for (UInt_t i(0); i<nGauss_; ++i) { // Typical case (1): B0/B0bar if (type_ == ExpTrig) { if (method_ == DecayTime) { Double_t exponentTermRe, exponentTermIm; this->calcTrigExponent(deltaM, tau, x[i], sigma[i], exponentTermRe, exponentTermIm); // Elements related to the trigonometric function, i.e.
convolution of Exp*Sin or Cos with Gauss Double_t sinTrigTermRe, sinTrigTermIm, cosTrigTermRe, cosTrigTermIm; this->calcTrigConv(deltaM, tau, x[i], sigma[i], sinTrigTermRe, sinTrigTermIm, kFALSE); this->calcTrigConv(deltaM, tau, x[i], sigma[i], cosTrigTermRe, cosTrigTermIm, kTRUE); // Combining elements of the full pdf LauComplex zExp(exponentTermRe, exponentTermIm); LauComplex zTrigSin(sinTrigTermRe, sinTrigTermIm); LauComplex zTrigCos(cosTrigTermRe, cosTrigTermIm); LauComplex sinConv = zExp * zTrigSin; LauComplex cosConv = zExp * zTrigCos; sinConv.scale(1.0/4.0); cosConv.scale(1.0/4.0); // Cosine*Exp and Sine*Exp terms cosTerms[i] = cosConv.re(); sinTerms[i] = sinConv.im(); // Normalisation Double_t umax = xMax - mean[i]; Double_t umin = xMin - mean[i]; expTermsNorm[i] = (1.0/2.0) * tau * (-1.0 + TMath::Erf(umax/(LauConstants::root2 * sigma[i])) + TMath::Erfc(umin/(LauConstants::root2 * sigma[i])) + TMath::Exp((pow(sigma[i], 2) - 2.0 * tau * (xMax + xMin - mean[i]))/(2.0 * pow(tau, 2))) * (TMath::Exp(xMax/tau) * TMath::Erfc((pow(sigma[i], 2) - xMin)/(LauConstants::root2 * tau))) + (TMath::Exp(xMin/tau) * TMath::Erfc((pow(sigma[i], 2) - xMax)/(LauConstants::root2 * tau)))); } else { } } // Typical case (2): B0s/B0sbar if (type_ == ExpHypTrig) { // LHCb convention if (method_ == DecayTime) { // Convolution of Exp*cosh (Exp*sinh) with a gaussian //Double_t OverallExpFactor = 0.25*TMath::Exp(-(x[i]-mean[i])*(x[i]-mean[i])/(2*sigma[i]*sigma[i])); //Double_t ExpFirstTerm = TMath::Exp((2*(x[i]-mean[i])*tau+sigma[i]*sigma[i]*(-2+Delta_gamma*tau))*(2*(x[i]-mean[i])*tau+sigma[i]*sigma[i]*(-2+Delta_gamma*tau))/(8*sigma[i]*sigma[i]*tau*tau)); //Double_t ExpSecondTerm = TMath::Exp((2*(-x[i]+mean[i])*tau+sigma[i]*sigma[i]*(2+Delta_gamma*tau))*(2*(-x[i]+mean[i])*tau+sigma[i]*sigma[i]*(2+Delta_gamma*tau))/(8*sigma[i]*sigma[i]*tau*tau)); //Double_t ErfFirstTerm = TMath::Erf((2*(x[i]-mean[i])*tau+sigma[i]*sigma[i]*(-2+Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); //Double_t ErfSecondTerm = TMath::Erf((2*(-x[i]+mean[i])*tau+sigma[i]*sigma[i]*(2+Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); //Double_t sinhConv = OverallExpFactor*(ExpFirstTerm*(1+ErfFirstTerm) + ExpSecondTerm*(-1+ErfSecondTerm)); //Double_t coshConv = OverallExpFactor*(ExpFirstTerm*(1+ErfFirstTerm) - ExpSecondTerm*(-1+ErfSecondTerm)); //cosTerms[i] = sinhConv; // sinTerms[i] = coshConv; //TODO: check this formula and try to simplify it! 
double OverallExpTerm_max = (1/(2*(-4 + Delta_gamma*Delta_gamma*tau*tau)))*tau*TMath::Exp(-0.5*Delta_gamma*(xMax + mean[i]) - xMax/tau); double ErfTerm_max = -2*Delta_gamma*tau*TMath::Exp(0.5*Delta_gamma*(xMax+mean[i])+xMax/tau)*TMath::Erf((xMax-mean[i])/(TMath::Sqrt(2)*sigma[i])); double ExpFirstTerm_max = TMath::Exp(xMax*Delta_gamma+(sigma[i]*sigma[i]*(-2 + Delta_gamma*tau)*(-2 + Delta_gamma*tau))/(8*tau*tau)); double ErfcFirstTerm_max = TMath::Erfc((2*(-xMax + mean[i])*tau + sigma[i]*sigma[i]*(2 - Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); double ExpSecondTerm_max = TMath::Exp(Delta_gamma*mean[i] + (sigma[i]*sigma[i]*(2 + Delta_gamma*tau)*(2 + Delta_gamma*tau))/(8*tau*tau)); double ErfcSecondTerm_max = TMath::Erfc((2*(-xMax + mean[i])*tau + sigma[i]*sigma[i]*(2 + Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); double MaxVal= OverallExpTerm_max*(ErfTerm_max + TMath::Exp(mean[i]/tau)*(ExpFirstTerm_max*(2+Delta_gamma*tau)* ErfcFirstTerm_max + ExpSecondTerm_max*(-2+Delta_gamma*tau)* ErfcSecondTerm_max)); double OverallExpTerm_min = (1/(2*(-4 + Delta_gamma*Delta_gamma*tau*tau)))*tau*TMath::Exp(-0.5*Delta_gamma*(xMin + mean[i]) - xMin/tau); double ErfTerm_min = -2*Delta_gamma*tau*TMath::Exp(0.5*Delta_gamma*(xMin+mean[i])+xMin/tau)*TMath::Erf((xMin-mean[i])/(TMath::Sqrt(2)*sigma[i])); double ExpFirstTerm_min = TMath::Exp(xMin*Delta_gamma+(sigma[i]*sigma[i]*(-2 + Delta_gamma*tau)*(-2 + Delta_gamma*tau))/(8*tau*tau)); double ErfcFirstTerm_min = TMath::Erfc((2*(-xMin + mean[i])*tau + sigma[i]*sigma[i]*(2 - Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); // TODO - TEL added this (currently identical to ExpSecondTerm_max) to get this to compile - please check!! double ExpSecondTerm_min = TMath::Exp(Delta_gamma*mean[i] + (sigma[i]*sigma[i]*(2 + Delta_gamma*tau)*(2 + Delta_gamma*tau))/(8*tau*tau)); double ErfcSecondTerm_min = TMath::Erfc((2*(-xMin + mean[i])*tau + sigma[i]*sigma[i]*(2 + Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); double minVal= OverallExpTerm_min*(ErfTerm_min + TMath::Exp(mean[i]/tau)*(ExpFirstTerm_min*(2+Delta_gamma*tau)* ErfcFirstTerm_min + ExpSecondTerm_min*(-2+Delta_gamma*tau)* ErfcSecondTerm_min)); SinhTermsNorm[i] = MaxVal - minVal; } else { } } } for (UInt_t i(0); icalcLikelihoodInfo(abscissaErr); errTerm_ = errHist_->getLikelihood(); } else { errTerm_ = 1.0; } } void LauDecayTimePdf::calcTrigExponent(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reTerm, Double_t& imTerm) { Double_t exponentTerm = TMath::Exp(-(2.0 * tau * x + pow(sigma, 2) * (pow(deltaM, 2) * pow(tau, 2) - 1.0))/(2.0 * pow(tau,2))); reTerm = exponentTerm * TMath::Cos(deltaM * (x - pow(sigma,2)/tau)); imTerm = - exponentTerm * TMath::Sin(deltaM * (x - pow(sigma,2)/tau)); } void LauDecayTimePdf::calcTrigConv(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reOutTerm, Double_t& imOutTerm, Bool_t trig) { Double_t reExpTerm, imExpTerm; LauComplex zExp; LauComplex zTrig1; LauComplex zTrig2; // Calculation for the sine or cosine term if (!trig) { reExpTerm = TMath::Sin(2.0 * deltaM * (x + pow(sigma,2)/tau)); imExpTerm = 2.0 * TMath::Sin(pow(deltaM * (x + pow(sigma,2)/tau), 2)); } else { reExpTerm = TMath::Cos(2.0 * deltaM * (x + pow(sigma,2)/tau)); imExpTerm = TMath::Sin(2.0 * deltaM * (x + pow(sigma,2)/tau)); } // Exponential term in front of Erfc/Erfi terms zExp.setRealPart(reExpTerm); zExp.setImagPart(imExpTerm); // Nominal Erfc term (common to both sine and cosine expressions zTrig1.setRealPart(-(tau * x - pow(sigma,2))/(LauConstants::root2 * tau 
* sigma)); zTrig1.setImagPart(-(deltaM * sigma)/ LauConstants::root2); // Second term for sine (Erfi) or cosine (Erfc) - notice the re-im swap and sign change zTrig2.setRealPart(-zTrig1.im()); zTrig2.setImagPart(-zTrig1.re()); // Calculation of Erfc and Erfi (if necessary) LauComplex term1 = ComplexErfc(zTrig1.re(), zTrig1.im()); LauComplex term2; if (!trig) { term2 = Erfi(zTrig2.re(), zTrig2.im()); } else { term2 = ComplexErfc(zTrig2.re(), zTrig2.im()); } // Multiplying all elements of the convolution LauComplex output = zExp * term1 + term2; reOutTerm = output.re(); imOutTerm = output.im(); } LauComplex LauDecayTimePdf::ComplexErf(Double_t x, Double_t y) { // Evaluate Erf(x + iy) using an infinite series approximation // From Abramowitz & Stegun (http://people.math.sfu.ca/~cbm/aands/page_299.htm) if (x==0){ // cout << "WARNING: Set x value to 1e-100 to avoid division by 0." << endl; x = 1e-100; } int n = 20; // this controls the number of iterations of the sum LauComplex ErfTerm(TMath::Erf(x),0.); LauComplex CosSineTerm(1-cos(2*x*y), sin(2*x*y)); CosSineTerm.rescale(TMath::Exp(-x*x)/(2*TMath::Pi()*x)); LauComplex firstPart = ErfTerm + CosSineTerm; LauComplex SumTerm(0,0); for (int k = 1; k<=n; k++){ Double_t f_k = 2*x*(1 - cos(2*x*y)*cosh(k*y)) + k*sin(2*x*y)*sinh(k*y); Double_t g_k = 2*x*sin(2*x*y)*cosh(k*y) + k*cos(2*x*y)*sinh(k*y); LauComplex fgTerm(f_k, g_k); fgTerm.rescale(TMath::Exp(-0.25*k*k)/(k*k + 4*x*x)); SumTerm += fgTerm; } SumTerm.rescale((2/TMath::Pi())*TMath::Exp(-x*x)); LauComplex result = firstPart + SumTerm; return result; } LauComplex LauDecayTimePdf::Erfi(Double_t x, Double_t y) { // Erfi(z) = -I*Erf(I*z) where z = x + iy double x_prime = -y; double y_prime = x; LauComplex a = ComplexErf(x_prime, y_prime); LauComplex result(a.im(), -a.re()); return result; } LauComplex LauDecayTimePdf::ComplexErfc(Double_t x, Double_t y) { // Erfc(z) = 1 - Erf(z) (z = x + iy) LauComplex one(1., 0.); LauComplex result = one - ComplexErf(x,y); return result; }
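The series evaluated by ComplexErf above is the Abramowitz & Stegun expansion 7.1.29, truncated at \(n = 20\) terms:

\[
\operatorname{erf}(x+iy) \;\approx\; \operatorname{erf}(x) \;+\; \frac{e^{-x^2}}{2\pi x}\bigl[(1-\cos 2xy) + i\sin 2xy\bigr] \;+\; \frac{2}{\pi}\,e^{-x^2} \sum_{k=1}^{n} \frac{e^{-k^2/4}}{k^2+4x^2}\,\bigl[f_k(x,y) + i\,g_k(x,y)\bigr],
\]

with \(f_k = 2x\,(1-\cos 2xy\cosh ky) + k\sin 2xy\sinh ky\) and \(g_k = 2x\sin 2xy\cosh ky + k\cos 2xy\sinh ky\); the substitution of \(x = 10^{-100}\) at \(x = 0\) simply guards the \(1/x\) factor.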
void LauDecayTimePdf::calcNonSmearedTerms(Double_t abscissa) { if (type_ == Hist ){ cerr << "It is a histogrammed PDF" << endl; return; } if (type_ == Delta) { return; } Double_t tau = tau_->value(); Double_t deltaM = deltaM_->value(); // Calculate the terms related to cosine and sine, not normalised if (type_ == ExpTrig) { if (method_ == DecayTime) { expTerm_ = TMath::Exp(-abscissa/tau)/(2.0*tau); } if (method_ == DecayTimeDiff) { expTerm_ = TMath::Exp(-TMath::Abs(abscissa)/tau)/(2.0*tau); } cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_; sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_; coshTerm_ = expTerm_; sinhTerm_ = 0.0; } // Calculate the terms related to cosine, not normalised if (type_ == SimFitNormBd || type_ == SimFitNormBs) { if (method_ == DecayTime) { expTerm_ = TMath::Exp(-abscissa/tau)/(2.0*tau); } if (method_ == DecayTimeDiff) { expTerm_ = TMath::Exp(-TMath::Abs(abscissa)/tau)/(2.0*tau); } cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_; sinTerm_ = 0.0; coshTerm_ = expTerm_; sinhTerm_ = 0.0; if (type_ == SimFitNormBs){ Double_t deltaGamma = deltaGamma_->value(); coshTerm_ *= TMath::CosH(deltaGamma*abscissa/2.0); } } // Calculate the terms related to cosine and sine, not normalised if (type_ == SimFitSigBd || type_ == SimFitSigBs) { if (method_ == DecayTime) { expTerm_ = TMath::Exp(-abscissa/tau)/(2.0*tau); } if (method_ == DecayTimeDiff) { expTerm_ = TMath::Exp(-TMath::Abs(abscissa)/tau)/(2.0*tau); } cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_; sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_; coshTerm_ = expTerm_; sinhTerm_ = 0.0; if (type_ == SimFitSigBs){ Double_t deltaGamma = deltaGamma_->value(); coshTerm_ *= TMath::CosH(deltaGamma*abscissa/2.0); sinhTerm_ = TMath::SinH(deltaGamma*abscissa/2.0)*expTerm_; } } // Calculate the terms related to cosine, sine, cosh and sinh, not normalised (no DecayTimeDiff implemented) if (type_ == ExpHypTrig) { Double_t deltaGamma = deltaGamma_->value(); expTerm_ = TMath::Exp(-abscissa/tau)/(2.0*tau); cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_; sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_; coshTerm_ = TMath::CosH(deltaGamma*abscissa/2.0)*expTerm_; sinhTerm_ = TMath::SinH(deltaGamma*abscissa/2.0)*expTerm_; } } Double_t LauDecayTimePdf::normExpHypTerm(Double_t Abs) { Double_t tau = tau_->value(); Double_t deltaGamma = deltaGamma_->value(); Double_t y = tau*deltaGamma/2; Double_t nonTrigTerm = -(TMath::Exp(-Abs/tau))/(1 - y*y); Double_t cosHTerm = TMath::CosH(deltaGamma*Abs/2); Double_t sinHTerm = TMath::SinH(deltaGamma*Abs/2); Double_t normTerm = nonTrigTerm*(cosHTerm + y*sinHTerm); return normTerm; } Double_t LauDecayTimePdf::normExpHypTermDep(Double_t Abs) { Double_t tau = tau_->value(); Double_t deltaGamma = deltaGamma_->value(); Double_t y = tau*deltaGamma/2; Double_t nonTrigTerm = -(TMath::Exp(-Abs/tau))/(1 - y*y); Double_t cosHTerm = TMath::CosH(deltaGamma*Abs/2); Double_t sinHTerm = TMath::SinH(deltaGamma*Abs/2); Double_t normTerm = nonTrigTerm*(sinHTerm + y*cosHTerm); return normTerm; } LauComplex LauDecayTimePdf::nonSmearedCosSinIntegral(Double_t minAbs, Double_t maxAbs) { // From 1407.0748, not clear whether complex is faster in this case Double_t gamma = 1. / this->tau_->value(); LauComplex denom = LauComplex(gamma, -this->deltaM_->value()); LauComplex exponent = LauComplex(-gamma, this->deltaM_->value()); LauComplex num0 = -exponent.scale(minAbs).exp(); LauComplex num1 = -exponent.scale(maxAbs).exp(); return (num1 - num0) / denom; } -void LauDecayTimePdf::storeIntegralTimeDep(Double_t minAbs, Double_t maxAbs) +void LauDecayTimePdf::calcNorm() { - Double_t tau = tau_->value(); + // first reset integrals to zero + normTermExp_ = 0.0; + normTermCos_ = 0.0; + normTermSin_ = 0.0; + normTermCosh_ = 0.0; + normTermSinh_ = 0.0; + + switch ( effMethod_ ) { + case EfficiencyMethod::Flat : + // No efficiency variation + // Simply calculate integrals over full range + this->calcPartialIntegrals( minAbscissa_, maxAbscissa_ ); + break; + + case EfficiencyMethod::Binned : + // Efficiency varies as piecewise constant + // Total integral is sum of integrals in each bin, each weighted by efficiency in that bin + for ( Int_t bin{1}; bin <= effiHist_->GetNbinsX(); ++bin ) { + const Double_t loEdge {effiHist_->GetBinLowEdge(bin)}; + const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)}; + const Double_t effVal {effiHist_->GetBinContent(bin)}; + this->calcPartialIntegrals( loEdge, hiEdge, effVal ); + } + break; + + case EfficiencyMethod::Spline : + // Efficiency varies as piecewise polynomial + // TODO - to be worked out what to do here + std::cerr << "WARNING in LauDecayTimePdf::calcNorm : normalisation integrals for spline acceptance not yet implemented - effect of acceptance will be neglected!" << std::endl; + this->calcPartialIntegrals( minAbscissa_, maxAbscissa_ ); + break; + } + + // TODO - should we check here that all terms we expect to use are now non-zero? +}
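The closed forms used in calcNorm/calcPartialIntegrals: nonSmearedCosSinIntegral evaluates (cf. arXiv:1407.0748)

\[
\int_{t_1}^{t_2} e^{-\Gamma t}\, e^{i\Delta m\, t}\,\mathrm{d}t \;=\; \frac{e^{(i\Delta m-\Gamma)t_1} - e^{(i\Delta m-\Gamma)t_2}}{\Gamma - i\Delta m},
\]

whose real and imaginary parts feed normTermCos_ and normTermSin_, while normExpHypTerm is an antiderivative of the cosh term: with \(y = \tau\Delta\Gamma/2\),

\[
\int \frac{e^{-t/\tau}}{\tau} \cosh\!\frac{\Delta\Gamma\, t}{2}\,\mathrm{d}t \;=\; -\frac{e^{-t/\tau}}{1-y^2}\left(\cosh\!\frac{\Delta\Gamma\, t}{2} + y\,\sinh\!\frac{\Delta\Gamma\, t}{2}\right) + C,
\]

and normExpHypTermDep is the analogous antiderivative for the sinh term (cosh and sinh swapped inside the bracket).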
+ +void LauDecayTimePdf::calcPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight) +{ + const Double_t tau = tau_->value(); + const Double_t Gamma = 1.0 / tau; + + // TODO - this is all neglecting resolution at the moment + + // Normalisation factor for B0 decays if (type_ == ExpTrig || type_ == SimFitNormBd || type_ == SimFitSigBd ) { - if (method_ == DecayTime) { + if (method_ == DecayTime) { - normTermExp_ = tau * (TMath::Exp(-minAbs/tau) - TMath::Exp(-maxAbs/tau)); + normTermExp_ += weight * tau * ( TMath::Exp(-minAbs*Gamma) - TMath::Exp(-maxAbs*Gamma) ); - LauComplex cosSinIntegral = nonSmearedCosSinIntegral(minAbs, maxAbs); - this->normTermCos_ = cosSinIntegral.re(); - this->normTermSin_ = cosSinIntegral.im(); + LauComplex cosSinIntegral = this->nonSmearedCosSinIntegral(minAbs, maxAbs); + normTermCos_ += weight * cosSinIntegral.re(); + normTermSin_ += weight * cosSinIntegral.im(); - } - if (method_ == DecayTimeDiff) { - normTermExp_ = tau * (2.0 - TMath::Exp(-maxAbs/tau) - TMath::Exp(-minAbs/tau)); - } + } else if (method_ == DecayTimeDiff) { + + normTermExp_ += weight * tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma)); + + // TODO - the other terms + } } // Do more business here, with RooFit libs turned on: // std::complex<double> c(0.1, 0.1); // RooMath::faddeeva(c); // Normalisation factor for Bs decays + // TODO HACKATHON - to be replaced if (type_ == ExpHypTrig) { - normTermCosh_ = normExpHypTerm(maxAbs) - normExpHypTerm(minAbs); - normTermSinh_ = normExpHypTermDep( maxAbs) - normExpHypTermDep( minAbs); + normTermCosh_ += weight * ( normExpHypTerm(maxAbs) - normExpHypTerm(minAbs) ); + normTermSinh_ += weight * ( normExpHypTermDep( maxAbs) - normExpHypTermDep( minAbs) ); } } Double_t LauDecayTimePdf::generateError(Bool_t forceNew) { if (errHist_ && (forceNew || !abscissaErrorGenerated_)) { LauFitData errData = errHist_->generate(0); abscissaError_ = errData.find(this->varErrName())->second; abscissaErrorGenerated_ = kTRUE; } else { while (forceNew || !abscissaErrorGenerated_) { abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_); if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) { abscissaErrorGenerated_ = kTRUE; forceNew = kFALSE; } } } return abscissaError_; } /* LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics) { // generateError SHOULD have been called before this // function but will call it here just to make sure // (has no effect if it has already been called) abscissaError_ = this->generateError(); // If the PDF is scaled by the per-event error then need to update the PDF height for each event Bool_t scale(kFALSE); for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) { scale |= (*iter); } for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) { scale |= (*iter); } if (scale || (!this->heightUpToDate() && !this->cachePDF())) { this->calcPDFHeight(kinematics); this->heightUpToDate(kTRUE); } // Generate the value of the abscissa.
Bool_t gotAbscissa(kFALSE); Double_t genVal(0.0); Double_t genPDFVal(0.0); LauFitData genAbscissa; const Double_t xMin = this->minAbscissa(); const Double_t xMax = this->maxAbscissa(); const Double_t xRange = xMax - xMin; while (!gotAbscissa) { genVal = LauRandom::randomFun()->Rndm()*xRange + xMin; this->calcLikelihoodInfo(genVal, abscissaError_); genPDFVal = this->getUnNormLikelihood(); if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;} if (genPDFVal > this->getMaxHeight()) { cerr<<"Warning in LauDecayTimePdf::generate()."<<" genPDFVal = "<<genPDFVal<<" is larger than the maximum PDF height "<<this->getMaxHeight()<<" for the abscissa = "<<genVal<<"."<<endl; } } genAbscissa[this->varName()] = genVal; // mark that we need a new error to be generated next time abscissaErrorGenerated_ = kFALSE; return genAbscissa; } */ void LauDecayTimePdf::setErrorHisto(const TH1* hist) { if ( errHist_ != 0 ) { cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<endl; return; } errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError()); } void LauDecayTimePdf::setHistoPdf(const TH1* hist) { if ( pdfHist_ != 0 ) { cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<endl; return; } pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa()); } +void LauDecayTimePdf::setEffiHist(const TH1* hist) +{ + if ( effiHist_ != nullptr ) { + std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl; + return; + } + + if ( hist == nullptr ) { + std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl; + return; + } + + // Check that the histogram boundaries align with our abscissa's range + const Double_t axisMin {hist->GetXaxis()->GetXmin()}; + const Double_t axisMax {hist->GetXaxis()->GetXmax()}; + if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) { + std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n" + << " : histogram range: " << axisMin << " - " << axisMax << "\n" + << " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n" + << " : Disregarding this histogram." << std::endl; + return; + } + + effiHist_ = dynamic_cast<TH1*>( hist->Clone() ); +} + void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline) { if ( effiFun_ != 0 ) { cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<endl; return; } effiFun_ = spline; } void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline, std::vector<LauParameter*> effiPars) { if ( effiFun_ != 0 ) { cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<endl; return; } effiFun_ = spline; if (effiPars.size() != effiFun_->getnKnots()){ cerr<<"ERROR in LauDecayTimePdf::setEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<endl; gSystem->Exit(EXIT_FAILURE); } effiPars_ = effiPars; } LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) { for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) { if ((*iter)->name().Contains(parName)) { return (*iter); } } std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl; return 0; } const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const { for ( std::vector<LauAbsRValue*>::const_iterator iter = param_.begin(); iter != param_.end(); ++iter ) { if ((*iter)->name().Contains(parName)) { return (*iter); } } std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found."
<< std::endl; return 0; } void LauDecayTimePdf::updatePulls() { for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) { std::vector<LauParameter*> params = (*iter)->getPars(); for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) { if (!(*iter)->fixed()) { (*params_iter)->updatePull(); } } } } void LauDecayTimePdf::updateEffiSpline(std::vector<LauParameter*> effiPars) { if (effiPars.size() != effiFun_->getnKnots()){ cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<endl; gSystem->Exit(EXIT_FAILURE); } effiFun_->updateYValues(effiPars); }
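A sketch of how a fit model might consume the cached terms event by event (the pdf object dtPdf, the data inputData, the event count nEvents and the coefficients coshCoeff, sinhCoeff, cosCoeff, sinCoeff are all hypothetical here; cf. the coefficient roles in the ExpHypTrig form quoted alongside the header):

// Hypothetical per-event evaluation loop combining the cached terms
dtPdf.cacheInfo( inputData ); // once per dataset
for ( UInt_t iEvt = 0; iEvt < nEvents; ++iEvt ) {
	dtPdf.calcLikelihoodInfo( iEvt );
	const Double_t num = coshCoeff * dtPdf.getCoshTerm() + sinhCoeff * dtPdf.getSinhTerm()
	                   + cosCoeff * dtPdf.getCosTerm() + sinCoeff * dtPdf.getSinTerm();
	const Double_t den = coshCoeff * dtPdf.getNormTermCosh() + sinhCoeff * dtPdf.getNormTermSinh()
	                   + cosCoeff * dtPdf.getNormTermCos() + sinCoeff * dtPdf.getNormTermSin();
	const Double_t likelihood = ( num / den ) * dtPdf.getEffiTerm();
	// ... multiply into the total event likelihood ...
}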