diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh
index fb9fbcd..48d3e0c 100644
--- a/inc/LauDecayTimePdf.hh
+++ b/inc/LauDecayTimePdf.hh
@@ -1,769 +1,736 @@

/*
Copyright 2006 University of Warwick

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/

/*! \file LauDecayTimePdf.hh
    \brief File containing declaration of LauDecayTimePdf class.
*/

/*! \class LauDecayTimePdf
    \brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time.

    LauDecayTimePdf is a class that provides the PDFs for describing the
    time-dependence of the various terms in a particle/antiparticle decay to a
    common final state. The various terms have the form of exponentially
    decaying trigonometric or hyperbolic functions convolved with an
    N-Gaussian resolution function.
*/

#ifndef LAU_DECAYTIME_PDF
#define LAU_DECAYTIME_PDF

#include <array>
#include <complex>
#include <vector>

#include "TString.h"

#include "LauAbsRValue.hh"
#include "LauFitDataTree.hh"
#include "LauComplex.hh"

class TH1;
class Lau1DHistPdf;
class Lau1DCubicSpline;

// TODO - Should this have Pdf in the name?
//      - Audit function names and public/private access category
//      - Audit what should be given to constructor and what can be set later (maybe different constructors for different scenarios, e.g. smeared with per-event error/smeared with avg error/not smeared)

class LauDecayTimePdf final {

	public:
		// TODO - can we think of better names?
		//! The functional form of the decay time PDF
		enum FuncType {
			Hist,		//< Hist PDF for fixed background
			Delta,		//< Delta function - for prompt background
			Exp,		//< Exponential function - for non-prompt background or charged B's
			DeltaExp,	//< Delta + Exponential function - for background with prompt and non-prompt parts
			ExpTrig,	//< Exponential function with Delta m driven mixing - for neutral B_d's
			ExpHypTrig	//< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's
		};

		//! How is the decay time measured - absolute or difference?
		enum TimeMeasurementMethod {
			DecayTime,	//< Absolute measurement of decay time, e.g. LHCb scenario
			DecayTimeDiff	//< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario
		};

		//! How is the TD efficiency information going to be given?
		enum EfficiencyMethod {
			Spline,		//< As a cubic spline
			Binned,		//< As a histogram (TH1D/TH1F)
			Flat		//< As a flat distribution (constant)
		};
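For orientation, the ExpTrig and ExpHypTrig options build the signal density from an exponential multiplied by trigonometric and hyperbolic terms. A standalone sketch of that combination (the coefficient names cosCoeff, sinCoeff and sinhCoeff are illustrative, not members of this class):

#include <cmath>

// D(t) = e^{-t/tau} * [ cosh(DG*t/2) + sinhCoeff*sinh(DG*t/2)
//                     + cosCoeff*cos(Dm*t) + sinCoeff*sin(Dm*t) ]
double decayTimeDensitySketch(const double t, const double tau,
                              const double deltaM, const double deltaGamma,
                              const double cosCoeff, const double sinCoeff,
                              const double sinhCoeff)
{
	const double expTerm { std::exp(-t / tau) };
	return expTerm * ( std::cosh(0.5 * deltaGamma * t) + sinhCoeff * std::sinh(0.5 * deltaGamma * t)
	                 + cosCoeff * std::cos(deltaM * t) + sinCoeff * std::sin(deltaM * t) );
}

For ExpTrig the Delta Gamma terms collapse (cosh becomes 1, sinh becomes 0), leaving the familiar B_d mixing form.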
		//! Constructor
		/*!
		    \param [in] theVarName the name of the decay time variable in the input data
		    \param [in] theVarErrName the name of the decay time error variable in the input data
		    \param [in] params the parameters of the PDF
		    \param [in] minAbscissaVal the minimum value of the abscissa
		    \param [in] maxAbscissaVal the maximum value of the abscissa
		    \param [in] minAbscissaErr the minimum value of the abscissa error
		    \param [in] maxAbscissaErr the maximum value of the abscissa error
		    \param [in] type the functional form of the PDF
		    \param [in] nGauss the number of Gaussians in the resolution function
		    \param [in] scale controls whether the Gaussian parameters are scaled by the per-event error
		    \param [in] method set the type of the time measurement used in the given experiment
		*/
		LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName,
				const std::vector<LauAbsRValue*>& params,
				const Double_t minAbscissaVal, const Double_t maxAbscissaVal,
				const Double_t minAbscissaErr, const Double_t maxAbscissaErr,
				const FuncType type, const UInt_t nGauss,
				const std::vector<Bool_t>& scale,
				const TimeMeasurementMethod method,
				const EfficiencyMethod effMethod = EfficiencyMethod::Spline);

		//! Constructor
		/*!
		    \param [in] theVarName the name of the decay time variable in the input data
		    \param [in] theVarErrName the name of the decay time error variable in the input data
		    \param [in] params the parameters of the PDF
		    \param [in] minAbscissaVal the minimum value of the abscissa
		    \param [in] maxAbscissaVal the maximum value of the abscissa
		    \param [in] minAbscissaErr the minimum value of the abscissa error
		    \param [in] maxAbscissaErr the maximum value of the abscissa error
		    \param [in] type the functional form of the PDF
		    \param [in] nGauss the number of Gaussians in the resolution function
		    \param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error
		    \param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error
		    \param [in] method set the type of the time measurement used in the given experiment
		*/
		LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName,
				const std::vector<LauAbsRValue*>& params,
				const Double_t minAbscissaVal, const Double_t maxAbscissaVal,
				const Double_t minAbscissaErr, const Double_t maxAbscissaErr,
				const FuncType type, const UInt_t nGauss,
				const std::vector<Bool_t>& scaleMeans,
				const std::vector<Bool_t>& scaleWidths,
				const TimeMeasurementMethod method,
				const EfficiencyMethod effMethod = EfficiencyMethod::Spline);

		//! Copy constructor (deleted)
		LauDecayTimePdf(const LauDecayTimePdf& other) = delete;

		//! Copy assignment operator (deleted)
		LauDecayTimePdf& operator=(const LauDecayTimePdf& other) = delete;

		//! Move constructor (deleted)
		LauDecayTimePdf(LauDecayTimePdf&& other) = delete;

		//! Move assignment operator (deleted)
		LauDecayTimePdf& operator=(LauDecayTimePdf&& other) = delete;

		//! Destructor
		~LauDecayTimePdf();

		// TODO - Do we need this?
		//      - If so, should it be a hist or a LauAbsPdf?
		//      - Or should there be a dedicated constructor for this scenario?
		//! Set the Histogram PDF in case of fixed background PDF
		void setHistoPdf(const TH1* hist);

		// TODO - should this be a LauAbsPdf instead?
		//! Set the histogram to be used for generation of per-event decay time errors
		/*!
		    If not set will fall back to using Landau distribution

		    \param [in] hist the histogram of the distribution
		*/
		void setErrorHisto(const TH1* hist);
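A minimal sketch of the documented Landau fallback for per-event error generation (see setErrorDistTerms below for the MPV and width); the accept-reject loop to stay within the allowed error range is an assumption, not necessarily what generateError does:

#include "Rtypes.h"
#include "TRandom.h"

Double_t sampleDecayTimeError(TRandom* rng, const Double_t mpv, const Double_t sigma,
                              const Double_t minErr, const Double_t maxErr)
{
	Double_t err {0.0};
	do {
		err = rng->Landau(mpv, sigma);		// MPV and width as set via setErrorDistTerms
	} while (err < minErr || err > maxErr);	// keep within [minAbscissaError, maxAbscissaError]
	return err;
}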
		// TODO - do we still want this option?
		//! Set the parameters of the Landau distribution used to generate the per-event decay time errors
		/*!
		    \param [in] mpv the MPV (most probable value) of the distribution
		    \param [in] sigma the width of the distribution
		*/
		void setErrorDistTerms(const Double_t mpv, const Double_t sigma) {
			errorDistMPV_ = mpv;
			errorDistSigma_ = sigma;
		}

		// TODO - should we remove the EfficiencyMethod argument from the constructor, default to Flat and have these functions modify it?
		//! Set the efficiency function in the form of a histogram
		/*!
		    \param [in] hist the histogram of efficiencies
		*/
		void setEffiHist(const TH1* hist);

		//! Set the efficiency function in the form of spline
		/*!
		    \param [in] spline the efficiency spline function
		*/
		void setEffiSpline(Lau1DCubicSpline* spline);

		//! Retrieve the name of the decay time variable
		const TString& varName() const {return varName_;}

		//! Retrieve the name of the decay time error variable
		const TString& varErrName() const {return varErrName_;}

		// TODO - this should probably be set at construction time
		//! Turn on or off the resolution function
		void doSmearing(Bool_t smear) {smear_ = smear;}

		//! Determine if the resolution function is turned on or off
		Bool_t doSmearing() const {return smear_;}

		// TODO - we don't use this at the moment - remove it?
		//! Calculate single effective decay time resolution from multiple Gaussian resolution functions
		/*!
		    \return effective resolution
		*/
		Double_t effectiveResolution() const;

		//! Cache information from data
		/*!
		    \param [in] inputData the dataset to be used to calculate everything
		*/
		void cacheInfo(const LauFitDataTree& inputData);

		//! Calculate the likelihood (and all associated information) given value of the abscissa
		/*!
		    \param [in] abscissa the value of the abscissa
		*/
		void calcLikelihoodInfo(const Double_t abscissa);

		//! Calculate the likelihood (and all associated information) given value of the abscissa and its error
		/*!
		    \param [in] abscissa the value of the abscissa
		    \param [in] abscissaErr the error on the abscissa
		*/
		void calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr);

		//! Retrieve the likelihood (and all associated information) given the event number
		/*!
		    \param [in] iEvt the event number
		*/
		void calcLikelihoodInfo(const UInt_t iEvt);

		//! Determine the efficiency value for the given abscissa
		/*!
		    \param [in] abscissa the value of the abscissa
		    \return the corresponding efficiency value
		*/
		Double_t calcEffiTerm( const Double_t abscissa ) const;

		//! Get FuncType from model
		FuncType getFuncType() const {return type_;}

		// TODO - should maybe do away with exp term (and its norm) since it's just the cosh term when DG=0 and it's confusing to have both
		//      - counter argument is to keep it for backgrounds that have a lifetime-like behaviour
		//! Get the exponential term
		Double_t getExpTerm() const {return expTerm_;}

		//! Get the cos(Dm*t) term (multiplied by the exponential)
		Double_t getCosTerm() const {return cosTerm_;}

		//! Get the sin(Dm*t) term (multiplied by the exponential)
		Double_t getSinTerm() const {return sinTerm_;}

		//! Get the cosh(DG/2*t) term (multiplied by the exponential)
		Double_t getCoshTerm() const {return coshTerm_;}

		//! Get the sinh(DG/2*t) term (multiplied by the exponential)
		Double_t getSinhTerm() const {return sinhTerm_;}

		//! Get the normalisation related to the exponential term only
		Double_t getNormTermExp() const {return normTermExp_;}

		//! Get the normalisation related to the cos term only
		Double_t getNormTermCos() const {return normTermCos_;}

		//! Get the normalisation related to the sin term only
		Double_t getNormTermSin() const {return normTermSin_;}
		//! Get the first term in the normalisation (from integrating the cosh)
		Double_t getNormTermCosh() const {return normTermCosh_;}

		//! Get the second term in the normalisation (from integrating the sinh)
		Double_t getNormTermSinh() const {return normTermSinh_;}

		//! Get error probability density from Error distribution
		Double_t getErrTerm() const {return errTerm_;}

		//! Get efficiency probability density from efficiency distribution
		Double_t getEffiTerm() const {return effiTerm_;}

		//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
		/*!
		    \return the parameters of the PDF
		*/
		const std::vector<LauAbsRValue*>& getParameters() const { return params_; }

		//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
		/*!
		    \return the parameters of the PDF
		*/
		std::vector<LauAbsRValue*>& getParameters() { return params_; }

		//! Update the pulls for all parameters
		void updatePulls();

		//! Calculate the normalisation of all terms
		/*!
		    \param [in] abscissaErr the per-event decay-time error (if used)
		*/
		void calcNorm(const Double_t abscissaErr = 0.0);

-		//! Calculate the normalisation integrals in the given range
+		//! Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency
		/*!
		    This form to be used for case where decay time resolution is neglected

		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
+		    \param [in] maxAbs upper bound for the integral domain
		    \param [in] weight the weight for this range, typically the efficiency value
		*/
		void calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight);

-		//! Calculate the normalisation integrals in the given range
+		//! Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency
		/*!
		    This form to be used for case where decay time resolution is accounted for

		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
+		    \param [in] maxAbs upper bound for the integral domain
		    \param [in] weight the weight for this range, typically the efficiency value
		    \param [in] means the mean values of each Gaussian in the resolution function
		    \param [in] sigmas the width values of each Gaussian in the resolution function
		    \param [in] fractions the fractional weight of each Gaussian in the resolution function
		*/
		void calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions);

-		//! Calculate the normalisation integrals in the given range for the case of spline efficiency acceptance
+		//! Calculate the normalisation integrals in the given range for the case of spline efficiency
		/*!
		    This form to be used for case where decay time resolution is accounted for

-		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
+		    \param [in] splineIndex the index of the spline segment being integrated
		    \param [in] means the mean values of each Gaussian in the resolution function
		    \param [in] sigmas the width values of each Gaussian in the resolution function
		    \param [in] fractions the fractional weight of each Gaussian in the resolution function
		*/
		void calcSmearedSplinePartialIntegrals(const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions);
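For the binned efficiency case, the weight passed to the partial-integral functions above is just the bin's efficiency, so e.g. the exponential piece of the normalisation reduces to a weighted sum of analytic integrals. An illustrative standalone accumulation (binEdges/binEffs are hypothetical stand-ins for the TH1 contents):

#include <cmath>
#include <cstddef>
#include <vector>

// Accumulate sum_i eff_i * tau * ( e^{-lo_i/tau} - e^{-hi_i/tau} )
// over histogram bins [lo_i, hi_i) with contents eff_i.
double binnedExpNormSketch(const std::vector<double>& binEdges,
                           const std::vector<double>& binEffs, const double tau)
{
	double norm {0.0};
	for (std::size_t i {0}; i + 1 < binEdges.size(); ++i) {
		norm += binEffs[i] * tau * ( std::exp(-binEdges[i] / tau) - std::exp(-binEdges[i+1] / tau) );
	}
	return norm;
}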
+		//! Calculate the normalisation integrals in the given range for the case of spline efficiency
+		/*!
+		    This form to be used for case where decay time resolution is neglected
+
+		    \param [in] splineIndex the index of the spline segment being integrated
+		*/
		void calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex);

		//! Calculate normalisation for non-smeared cos and sin terms
		/*!
		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
+		    \param [in] maxAbs upper bound for the integral domain
		    \return pair of {cosTermIntegral, sinTermIntegral}
		*/
		std::pair<Double_t,Double_t> nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs);

-		//! Calculate normalisation for decay-time resolution smeared cos and sin terms
-		/*!
-		    Uses the using the Faddeeva function method from
-		    (https://arxiv.org/abs/1407.0748)
-
-		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
-		    \param [in] sigma width of the Gaussian resolution function
-		    \param [in] mu mean of the Gaussian resolution function
-		    \return pair of {cosTermIntegral, sinTermIntegral}
-		*/
-		std::pair<Double_t,Double_t> smearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);

		//! Calculate normalisation for non-smeared cosh and sinh terms
		/*!
		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
+		    \param [in] maxAbs upper bound for the integral domain
		    \return pair of {coshTermIntegral, sinhTermIntegral}
		*/
		std::pair<Double_t,Double_t> nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs);

-		//! Calculate normalisation for decay-time resolution smeared cosh and sinh terms
-		/*!
-		    Uses the using the Faddeeva function method from
-		    (https://arxiv.org/abs/1407.0748)
-
-		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
-		    \param [in] sigma width of the Gaussian resolution function
-		    \param [in] mu mean of the Gaussian resolution function
-		    \return pair of {coshTermIntegral, sinhTermIntegral}
-		*/
-		std::pair<Double_t,Double_t> smearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);

		//! Calculate normalisation for non-smeared exponential term
		/*!
		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
+		    \param [in] maxAbs upper bound for the integral domain
		    \return integral
		*/
		Double_t nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs);

-		//! Calculate normalisation for decay-time resolution smeared exponential term
+		//! Calculate normalisation for decay-time resolution smeared terms
		/*!
-		    Uses the using the Faddeeva function method from
-		    (https://arxiv.org/abs/1407.0748)
+		    Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748

+		    \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2)
		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
-		    \param [in] sigma width of the Gaussian resolution function
+		    \param [in] maxAbs upper bound for the integral domain
+		    \param [in] sigmaOverRoot2 width of the Gaussian resolution function, divided by sqrt(2)
		    \param [in] mu mean of the Gaussian resolution function
-		    \return integral
+		    \return complex integral
		*/
-		Double_t smearedExpIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);
+		std::complex<Double_t> smearedGeneralIntegral(const std::complex<Double_t>& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu);

-		//! Calculate decay-time resolution smeared cos and sin terms
+		//! Calculate decay-time resolution smeared terms
		/*!
-		    Uses the using the Faddeeva function method from
-		    (https://arxiv.org/abs/1407.0748)
+		    Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748

+		    \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2)
		    \param [in] t decay time
-		    \param [in] sigma width of the Gaussian resolution function
-		    \param [in] mu mean of the Gaussian resolution function
-		    \return pair of {cosTerm, sinTerm}
-		*/
-		std::pair<Double_t,Double_t> smearedCosSinTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);
-
-		//! Calculate decay-time resolution smeared cosh and sinh terms
-		/*!
-		    Uses the using the Faddeeva function method from
-		    (https://arxiv.org/abs/1407.0748)
-
-		    \param [in] t decay time
-		    \param [in] sigma width of the Gaussian resolution function
-		    \param [in] mu mean of the Gaussian resolution function
-		    \return pair of {coshTerm, sinhTerm}
-		*/
-		std::pair<Double_t,Double_t> smearedCoshSinhTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);
-
-		//! Calculate decay-time resolution smeared exponential term
-		/*!
-		    Uses the using the Faddeeva function method from
-		    (https://arxiv.org/abs/1407.0748)
-
-		    \param [in] t decay time
-		    \param [in] sigma width of the Gaussian resolution function
+		    \param [in] sigmaOverRoot2 width of the Gaussian resolution function, divided by sqrt(2)
		    \param [in] mu mean of the Gaussian resolution function
-		    \return exponential term convolved with resolution function
+		    \return complex smeared term
		*/
-		Double_t smearedExpTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);
+		std::complex<Double_t> smearedGeneralTerm(const std::complex<Double_t>& z, const Double_t t, const Double_t sigmaOverRoot2, const Double_t mu);

		//! Calculate and cache powers of means and sigmas for each Gaussian in the resolution function
		/*
		    \param [in] means mean of each Gaussian in the resolution function
		    \param [in] sigmas width of each Gaussian in the resolution function
		*/
		void calcMeanAndSigmaPowers( const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas );

		//! Calculate and cache K-vectors for each term and for each Gaussian in the resolution function
-		/*
-		    \param [in] sigmas widths of each Gaussian in the resolution function
-		*/
-		void calcKVectors( const std::vector<Double_t>& sigmas );
+		void calcKVectors();
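The essence of the new smearedGeneralTerm, written standalone: the convolution of an exponential (optionally oscillating) with a Gaussian reduces to the Faddeeva function w, evaluated here via RooMath::faddeeva. This sketch omits the erfc-based branch that the implementation switches to for numerical stability when Im(arg) < -5:

#include <complex>
#include "Rtypes.h"
#include "RooMath.h"

// conv(t) = 0.5 * exp(-x*x) * w( i*(z - x) ),  x = (t - mu) / (sqrt(2)*sigma)
std::complex<Double_t> smearedTermSketch(const std::complex<Double_t>& z, const Double_t t,
                                         const Double_t sigmaOverRoot2, const Double_t mu)
{
	const Double_t x { (t - mu) / (2.0 * sigmaOverRoot2) };
	const std::complex<Double_t> arg { std::complex<Double_t>(0.0, 1.0) * (z - x) };
	return 0.5 * std::exp(-x * x) * RooMath::faddeeva(arg);
}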
		//! Generate the K vector used in eqn 31 from arXiv:1407.0748
		/*
		    \param [in] z The z value, changing for exp, sin, sinh, etc
		    \return size 4 array of vector values
		*/
-		std::array<std::complex<Double_t>,4> generateKvector(const std::complex<Double_t>& z);
+		std::array<std::complex<long double>,4> generateKvector(const std::complex<long double>& z);

-		//! Generate the K vector used in eqn 31 from arXiv:1407.0748
+		//! Generate the M vector used in eqn 31 from arXiv:1407.0748
		/*
		    Uses the Faddeeva function method from
		    (https://arxiv.org/abs/1407.0748)

		    \param [in] minAbs lower bound for the integral domain
-		    \param [in] maxAbs lower bound for the integral domain
-		    \param [in] z The z value, changing for exp, sin, sinh, etc
+		    \param [in] maxAbs upper bound for the integral domain
+		    \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2)
		    \param [in] sigma width of the Gaussian resolution function
		    \param [in] mu mean of the Gaussian resolution function
		    \return size 4 array of vector values
		*/
-		std::array<std::complex<Double_t>,4> generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& z, const Double_t sigma, const Double_t mu = 0.);
+		std::array<std::complex<long double>,4> generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<long double>& z, const Double_t sigma, const Double_t mu = 0.);

		//! Calculate the normalisation of a given term in a particular spline segment
		/*
		    \param [in] coeffs spline coefficients in this segment
		    \param [in] K K-vector for this term
		    \param [in] M M-vector for this term
		    \param [in] sigmaPowers powers of the width of the Gaussian resolution function
		    \param [in] meanPowers powers of the mean of the Gaussian resolution function
-		    \return real and imaginary parts of the normalisation
+		    \return the complex normalisation
		*/
-		std::pair<Double_t,Double_t> smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<Double_t>,4>& K, const std::array<std::complex<Double_t>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const;
+		std::complex<Double_t> smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<long double>,4>& K, const std::array<std::complex<long double>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const;

-		std::complex<Double_t> calcIk(const Int_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& u);
+		//! Calculate integrals of each power of t within a given spline segment
+		/*
+		    \param [in] k the power of t
+		    \param [in] minAbs lower bound for the integral domain
+		    \param [in] maxAbs upper bound for the integral domain
+		    \param [in] u the complex expression with general form: (Gamma - i Delta_m)
+		    \return the complex integral
+		*/
+		std::complex<long double> calcIk(const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<long double>& u);

-		std::pair<Double_t,Double_t> nonSmearedSplineNormalise(const UInt_t splineIndex, const std::complex<Double_t>& u, std::vector<std::array<std::complex<Double_t>,4>>& cache);
+		//! Calculate the normalisation of a given term in a particular spline segment
+		/*
+		    \param [in] splineIndex the index of the spline segment being integrated
+		    \param [in] u the complex expression with general form: (Gamma - i Delta_m)
+		    \param [in] cache cached results of calcIk, to be used and/or updated as appropriate
+		    \return the complex normalisation
+		*/
+		std::complex<Double_t> nonSmearedSplineNormalise(const UInt_t splineIndex, const std::complex<long double>& u, std::vector<std::array<std::complex<long double>,4>>& cache);
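The integral that calcIk is documented to evaluate has a closed integration-by-parts recursion; a standalone sketch under that reading (not necessarily the in-class algorithm):

#include <cmath>
#include <complex>

// I_k = int_a^b t^k e^{-u t} dt, with
// I_0 = (e^{-u a} - e^{-u b}) / u and
// I_k = (a^k e^{-u a} - b^k e^{-u b}) / u + (k / u) * I_{k-1}
std::complex<double> calcIkSketch(const unsigned int k, const double a, const double b,
                                  const std::complex<double>& u)
{
	const std::complex<double> expA { std::exp(-u * a) };
	const std::complex<double> expB { std::exp(-u * b) };
	std::complex<double> ik { (expA - expB) / u };	// I_0
	for (unsigned int j {1}; j <= k; ++j) {
		ik = (std::pow(a, static_cast<double>(j)) * expA - std::pow(b, static_cast<double>(j)) * expB) / u
		   + (static_cast<double>(j) / u) * ik;
	}
	return ik;
}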
		//! Generate the value of the error
		/*!
		    If scaling by the error, this should be called before calling generate

		    \param [in] forceNew forces generation of a new value
		*/
		Double_t generateError(const Bool_t forceNew = kFALSE);

		//TODO not clear that this is really needed, perhaps for background? commented out for now
		//! Generate an event from the PDF
		/*!
		    \param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence
		*/
		//LauFitData generate(const LauKinematics* kinematics);

		//! Retrieve the decay time minimum value
		Double_t minAbscissa() const {return minAbscissa_;}

		//! Retrieve the decay time maximum value
		Double_t maxAbscissa() const {return maxAbscissa_;}

		//! Retrieve the decay time error minimum value
		Double_t minAbscissaError() const {return minAbscissaError_;}

		//! Retrieve the decay time error maximum value
		Double_t maxAbscissaError() const {return maxAbscissaError_;}

		// TODO - can we delete this?
		// NB calcPDFHeight only calculates the gaussian information for the (type_ == Delta) case
		//! Calculate the maximum height of the PDF
		//void calcPDFHeight( const LauKinematics* kinematics );

		//! Get efficiency parameters to float in the fit
		std::vector<LauAbsRValue*>& getEffiPars() {return effiPars_;}

		//! Propagate any updates necessary to the decay time Efficiency and recalculate normalisation if necessary
		void propagateParUpdates();

		// TODO - can we delete this?
		//! Update spline Y values when floating the decay time acceptance
		/*!
		    \param [in] params the vector of LauParameters describing the Y values
		*/
		//void updateEffiSpline(const std::vector<LauParameter*>& params);

		//! Set up the initial state correctly - called by the fit model's initialise function
		void initialise();

	protected:
		//! Calculate the pure physics terms with no resolution function applied
		void calcNonSmearedTerms(const Double_t abscissa);

		//! Retrieve the number of PDF parameters
		/*!
		    \return the number of PDF parameters
		*/
		UInt_t nParameters() const {return params_.size();}

		//! Retrieve the specified parameter
		/*!
		    \param [in] parName the parameter to retrieve
		*/
		LauAbsRValue* findParameter(const TString& parName);

		//! Retrieve the specified parameter
		/*!
		    \param [in] parName the parameter to retrieve
		*/
		const LauAbsRValue* findParameter(const TString& parName) const;

		//! Update the cache values for all events
		void updateCache();

	private:
		//! Name of the variable
		const TString varName_;

		//! Name of the error variable
		const TString varErrName_;

		//! The parameters of the PDF
		std::vector<LauAbsRValue*> params_;

		// TODO - should probably set this at construction time (can then be const)
		//! Smear with the resolution model or not
		Bool_t smear_;

		//! The minimum value of the decay time
		const Double_t minAbscissa_;
		//! The maximum value of the decay time
		const Double_t maxAbscissa_;

		//! The minimum value of the decay time error
		const Double_t minAbscissaError_;
		//! The maximum value of the decay time error
		const Double_t maxAbscissaError_;

		//! The current value of the decay time error
		Double_t abscissaError_;
		//! Flag whether a value for the decay time error has been generated
		Bool_t abscissaErrorGenerated_;

		//! Value of the MPV of the Landau dist used to generate the Delta t error
		Double_t errorDistMPV_;
		//! Value of the width of the Landau dist used to generate the Delta t error
		Double_t errorDistSigma_;

		//! The number of gaussians in the resolution model
		const UInt_t nGauss_;

		// Parameters of the gaussian(s) that accounts for the resolution:
		//! mean (offset) of each Gaussian in the resolution function
		std::vector<LauAbsRValue*> mean_;
		//! spread (sigma) of each Gaussian in the resolution function
		std::vector<LauAbsRValue*> sigma_;
		//! fraction of each Gaussian in the resolution function
		std::vector<LauAbsRValue*> frac_;

		// Parameters of the physics decay time distribution
		//! Lifetime parameter
		LauAbsRValue* tau_;
		//! Mass difference parameter
		LauAbsRValue* deltaM_;
		//! Width difference parameter
		LauAbsRValue* deltaGamma_;
		//! Parameter for the fraction of prompt events in DeltaExp
		LauAbsRValue* fracPrompt_;

		//! Which type of decay time function is this?
		const FuncType type_;

		//! Are we using absolute decay time or decay time difference?
		const TimeMeasurementMethod method_;

		//! Which method for eff(decaytime) input are we using?
		const EfficiencyMethod effMethod_;

		//! Scale the mean of each Gaussian by the per-event decay time error?
		const std::vector<Bool_t> scaleMeans_;
		//! Scale the sigma of each Gaussian by the per-event decay time error?
		const std::vector<Bool_t> scaleWidths_;
		//! Is anything being scaled by the per-event decay time error?
		const Bool_t scaleWithPerEventError_;

		//! The exp(-G*t) term
		Double_t expTerm_;
		//! The cos(Dm*t) term (multiplied by the exponential)
		Double_t cosTerm_;
		//! The sin(Dm*t) term (multiplied by the exponential)
		Double_t sinTerm_;
		//! The cosh(DG/2*t) term (multiplied by the exponential)
		Double_t coshTerm_;
		//! The sinh(DG/2*t) term (multiplied by the exponential)
		Double_t sinhTerm_;

		//! Normalisation of the exponential term
		Double_t normTermExp_;
		//! Normalisation of the cos term
		Double_t normTermCos_;
		//! Normalisation of the sin term
		Double_t normTermSin_;
		//! Normalisation of the cosh term
		Double_t normTermCosh_;
		//! Normalisation of the sinh term
		Double_t normTermSinh_;

		//! Error PDF (NB there is no equivalent cache since the PDF errHist_ keeps a cache)
		Double_t errTerm_;

		//! Efficiency
		Double_t effiTerm_;

		//TODO : to be deleted? or needed for backgrounds?
		//! Hist PDF term (NB there is no equivalent cache since the PDF pdfHist_ keeps a cache)
		Double_t pdfTerm_;

		//! The cache of the decay times
		std::vector<Double_t> abscissas_;
		//! The cache of the per-event errors on the decay time
		std::vector<Double_t> abscissaErrors_;
		//! The cache of the exponential terms
		std::vector<Double_t> expTerms_;
		//! The cache of the exponential * cosh(DG/2*t) terms
		std::vector<Double_t> coshTerms_;
		//! The cache of the exponential * sinh(DG/2*t) terms
		std::vector<Double_t> sinhTerms_;
		//! The cache of the exponential * cos(Dm*t) terms
		std::vector<Double_t> cosTerms_;
		//! The cache of the exponential * sin(Dm*t) terms
		std::vector<Double_t> sinTerms_;
		//! The cache of the exponential normalisation
		std::vector<Double_t> normTermsExp_;
		//! The cache of the cosh term normalisation
		std::vector<Double_t> normTermsCosh_;
		//! The cache of the sinh term normalisation
		std::vector<Double_t> normTermsSinh_;
		//! The cache of the cos term normalisation
		std::vector<Double_t> normTermsCos_;
		//! The cache of the sin term normalisation
		std::vector<Double_t> normTermsSin_;
		//! The cache of the efficiency
		std::vector<Double_t> effiTerms_;

		//! Histogram PDF for abscissa error distribution
		Lau1DHistPdf* errHist_;

		//! Histogram PDF for abscissa distribution
		Lau1DHistPdf* pdfHist_;

		//! efficiency PDF in spline
		Lau1DCubicSpline* effiFun_;

		//! efficiency PDF as Histogram
		TH1* effiHist_;

		//! Vector of parameters to float acceptance
		std::vector<LauAbsRValue*> effiPars_;

		// Caching / bookkeeping
		//! Binomial coefficients
		// TODO - would prefer this to use std::array but cling doesn't like it
		static constexpr Double_t binom_[4][4] = {
			{1., 0., 0., 0.},
			{1., 1., 0., 0.},
			{1., 2., 1., 0.},
			{1., 3., 3., 1.}
		};

		Bool_t nothingFloating_{kFALSE};
		Bool_t anyKnotFloating_{kTRUE};
		Bool_t nonKnotFloating_{kTRUE};

		Bool_t physicsParFloating_{kTRUE};
		Bool_t tauFloating_{kTRUE};
		Bool_t deltaMFloating_{kTRUE};
		Bool_t deltaGammaFloating_{kTRUE};

		Bool_t resoParFloating_{kTRUE};
		//std::vector<Bool_t> meansFloating_;
		//std::vector<Bool_t> sigmasFloating_;
		//std::vector<Bool_t> fracsFloating_;

		Bool_t nothingChanged_{kFALSE};
		Bool_t anyKnotChanged_{kTRUE};
		Bool_t nonKnotChanged_{kTRUE};

		Bool_t physicsParChanged_{kTRUE};
		Bool_t tauChanged_{kTRUE};
		Bool_t deltaMChanged_{kTRUE};
		Bool_t deltaGammaChanged_{kTRUE};

		Bool_t resoParChanged_{kTRUE};
		//std::vector<Bool_t> meansChanged_;
		//std::vector<Bool_t> sigmasChanged_;
		//std::vector<Bool_t> fracsChanged_;

		Double_t tauVal_{0.0};
+		Double_t gammaVal_{0.0};
		Double_t deltaMVal_{0.0};
		Double_t deltaGammaVal_{0.0};

		std::vector<Double_t> meanVals_;
		std::vector<Double_t> sigmaVals_;
		std::vector<Double_t> fracVals_;

		std::vector<Double_t> effiParVals_;

-		std::vector<std::array<std::complex<Double_t>,4>> expTermIkVals_;
-		std::vector<std::array<std::complex<Double_t>,4>> trigTermIkVals_;
-		std::vector<std::array<std::complex<Double_t>,4>> hypHTermIkVals_;
-		std::vector<std::array<std::complex<Double_t>,4>> hypLTermIkVals_;
+		std::vector<std::array<std::complex<long double>,4>> expTermIkVals_;
+		std::vector<std::array<std::complex<long double>,4>> trigTermIkVals_;
+		std::vector<std::array<std::complex<long double>,4>> hypHTermIkVals_;
+		std::vector<std::array<std::complex<long double>,4>> hypLTermIkVals_;

		// vector has nGauss_ entries, array has 0th - 3rd or 1st - 4th powers, respectively
		std::vector<std::array<Double_t,4>> meanPowerVals_;
		std::vector<std::array<Double_t,4>> sigmaPowerVals_;

		// vector has nGauss_ entries, array has 0th - 4th entries of the K-vector
-		std::vector<std::array<std::complex<Double_t>,4>> expTermKvecVals_;
-		std::vector<std::array<std::complex<Double_t>,4>> trigTermKvecVals_;
-		std::vector<std::array<std::complex<Double_t>,4>> hypHTermKvecVals_;
-		std::vector<std::array<std::complex<Double_t>,4>> hypLTermKvecVals_;
+		std::vector<std::array<std::complex<long double>,4>> expTermKvecVals_;
+		std::vector<std::array<std::complex<long double>,4>> trigTermKvecVals_;
+		std::vector<std::array<std::complex<long double>,4>> hypHTermKvecVals_;
+		std::vector<std::array<std::complex<long double>,4>> hypLTermKvecVals_;

		// outer vector has nSplineSegments entries, inner vector has nGauss_ entries, array has 0th - 4th entries of the M-vector
-		std::vector<std::vector<std::array<std::complex<Double_t>,4>>> expTermMvecVals_;
-		std::vector<std::vector<std::array<std::complex<Double_t>,4>>> trigTermMvecVals_;
-		std::vector<std::vector<std::array<std::complex<Double_t>,4>>> hypHTermMvecVals_;
-		std::vector<std::vector<std::array<std::complex<Double_t>,4>>> hypLTermMvecVals_;
+		std::vector<std::vector<std::array<std::complex<long double>,4>>> expTermMvecVals_;
+		std::vector<std::vector<std::array<std::complex<long double>,4>>> trigTermMvecVals_;
+		std::vector<std::vector<std::array<std::complex<long double>,4>>> hypHTermMvecVals_;
+		std::vector<std::vector<std::array<std::complex<long double>,4>>> hypLTermMvecVals_;

		ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF
};

#endif
diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc
index d2b9fb7..79fd49d 100644
--- a/src/LauDecayTimePdf.cc
+++ b/src/LauDecayTimePdf.cc
@@ -1,1824 +1,1645 @@

/*
Copyright 2006 University of Warwick

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/

/*! \file LauDecayTimePdf.cc
    \brief File containing implementation of LauDecayTimePdf class.
*/

#include <algorithm>
#include <array>
#include <complex>
#include <functional>
#include <iostream>
#include <numeric>

#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"
#include "TH1.h"

#include "RooMath.h"

#include "Lau1DCubicSpline.hh"
#include "Lau1DHistPdf.hh"
#include "LauConstants.hh"
#include "LauComplex.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitDataTree.hh"
#include "LauParameter.hh"
#include "LauParamFixed.hh"
#include "LauRandom.hh"

ClassImp(LauDecayTimePdf)

LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName,
		const std::vector<LauAbsRValue*>& params,
		Double_t minAbscissaVal, Double_t maxAbscissaVal,
		Double_t minAbscissaErr, Double_t maxAbscissaErr,
		FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale,
		const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
	varName_(theVarName),
	varErrName_(theVarErrName),
	params_(params),
	smear_(kTRUE),
	minAbscissa_(minAbscissaVal),
	maxAbscissa_(maxAbscissaVal),
	minAbscissaError_(minAbscissaErr),
	maxAbscissaError_(maxAbscissaErr),
	abscissaError_(0.0),
	abscissaErrorGenerated_(kFALSE),
	errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
	errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
	nGauss_(nGauss),
	mean_(nGauss_,nullptr),
	sigma_(nGauss_,nullptr),
	frac_(nGauss_-1,nullptr),
	tau_(nullptr),
	deltaM_(nullptr),
	deltaGamma_(nullptr),
	fracPrompt_(nullptr),
	type_(type),
	method_(method),
	effMethod_(effMethod),
	scaleMeans_(scale),
	scaleWidths_(scale),
	scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or<Bool_t>() ) ),
	expTerm_(0.0),
	cosTerm_(0.0),
	sinTerm_(0.0),
	coshTerm_(0.0),
	sinhTerm_(0.0),
	normTermExp_(0.0),
	normTermCosh_(0.0),
	normTermSinh_(0.0),
	errTerm_(0.0),
	effiTerm_(0.0),
	pdfTerm_(0.0),
	errHist_(nullptr),
	pdfHist_(nullptr),
	effiFun_(nullptr),
	effiHist_(nullptr),
	effiPars_(0),
	//meansFloating_(nGauss_,kTRUE),
	//sigmasFloating_(nGauss_,kTRUE),
	//fracsFloating_(nGauss_-1,kTRUE),
	//meansChanged_(nGauss_,kTRUE),
	//sigmasChanged_(nGauss_,kTRUE),
	//fracsChanged_(nGauss_-1,kTRUE),
	meanVals_(nGauss_,0.0),
	sigmaVals_(nGauss_,0.0),
	fracVals_(nGauss_-1,0.0)
{
}
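A side note on the scaleWithPerEventError_ initialiser above: accumulating with std::logical_or is simply "any element is true". An equivalent, short-circuiting formulation for comparison:

#include <algorithm>
#include <vector>

bool anyScaled(const std::vector<bool>& scale)
{
	return std::any_of(scale.begin(), scale.end(), [](const bool b){ return b; });
}

std::any_of stops at the first true element, whereas std::accumulate always traverses the full vector; for vectors of size nGauss_ the difference is immaterial.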
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName,
		const std::vector<LauAbsRValue*>& params,
		Double_t minAbscissaVal, Double_t maxAbscissaVal,
		Double_t minAbscissaErr, Double_t maxAbscissaErr,
		FuncType type, UInt_t nGauss,
		const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths,
		const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
	varName_(theVarName),
	varErrName_(theVarErrName),
	params_(params),
	smear_(kTRUE),
	minAbscissa_(minAbscissaVal),
	maxAbscissa_(maxAbscissaVal),
	minAbscissaError_(minAbscissaErr),
	maxAbscissaError_(maxAbscissaErr),
	abscissaError_(0.0),
	abscissaErrorGenerated_(kFALSE),
	errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
	errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
	nGauss_(nGauss),
	mean_(nGauss_,nullptr),
	sigma_(nGauss_,nullptr),
	frac_(nGauss_-1,nullptr),
	tau_(nullptr),
	deltaM_(nullptr),
	deltaGamma_(nullptr),
	fracPrompt_(nullptr),
	type_(type),
	method_(method),
	effMethod_(effMethod),
	scaleMeans_(scaleMeans),
	scaleWidths_(scaleWidths),
	scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or<Bool_t>() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or<Bool_t>() ) ),
	expTerm_(0.0),
	cosTerm_(0.0),
	sinTerm_(0.0),
	coshTerm_(0.0),
	sinhTerm_(0.0),
	normTermExp_(0.0),
	normTermCosh_(0.0),
	normTermSinh_(0.0),
	errTerm_(0.0),
	effiTerm_(0.0),
	pdfTerm_(0.0),
	errHist_(nullptr),
	pdfHist_(nullptr),
	effiFun_(nullptr),
	effiHist_(nullptr),
	effiPars_(0),
	//meansFloating_(nGauss_,kTRUE),
	//sigmasFloating_(nGauss_,kTRUE),
	//fracsFloating_(nGauss_-1,kTRUE),
	//meansChanged_(nGauss_,kTRUE),
	//sigmasChanged_(nGauss_,kTRUE),
	//fracsChanged_(nGauss_-1,kTRUE),
	meanVals_(nGauss_,0.0),
	sigmaVals_(nGauss_,0.0),
	fracVals_(nGauss_-1,0.0)
{
}

LauDecayTimePdf::~LauDecayTimePdf()
{
	// Destructor
	delete errHist_; errHist_ = nullptr;
	delete pdfHist_; pdfHist_ = nullptr;
	delete effiFun_; effiFun_ = nullptr;
	delete effiHist_; effiHist_ = nullptr;
	for( auto& par : effiPars_ ){ delete par; par = nullptr; }
	effiPars_.clear();
}

void LauDecayTimePdf::initialise()
{
	// The parameters are:
	// - the mean and the sigma (bias and spread in resolution) of the gaussian(s)
	// - the mean lifetime, denoted tau, of the exponential decay
	// - the frequency of oscillation, denoted Delta m, of the cosine and sine terms
	// - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms
	//
	// The next two arguments specify the range in which the PDF is defined,
	// and the PDF will be normalised w.r.t. these limits.
	//
	// The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig),
	// the number of gaussians and whether or not the gaussian parameters should be scaled by
	// the per-event errors on Delta t

	// First check whether the scale vector is nGauss in size
	if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) {
		std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}

	if (type_ == Hist) {
		if (this->nParameters() != 0){
			std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<std::endl;
			gSystem->Exit(EXIT_FAILURE);
		}
	} else {
		TString meanName("mean_");
		TString sigmaName("sigma_");
		TString fracName("frac_");
		Bool_t foundParams(kTRUE);

		for (UInt_t i(0); i<nGauss_; ++i) {
			TString tempName(meanName); tempName += i;
			TString tempName2(sigmaName); tempName2 += i;
			TString tempName3(fracName); tempName3 += i;

			mean_[i] = this->findParameter(tempName);
			foundParams &= (mean_[i] != nullptr);
			sigma_[i] = this->findParameter(tempName2);
			foundParams &= (sigma_[i] != nullptr);
			if (i!=0) {
				frac_[i-1] = this->findParameter(tempName3);
				foundParams &= (frac_[i-1] != nullptr);
			}
		}

		if (type_ == Delta) {
			if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) {
				std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<std::endl;
				gSystem->Exit(EXIT_FAILURE);
			}
		} else if (type_ == Exp) {
			tau_ = this->findParameter("tau");
			foundParams &= (tau_ != nullptr);
			if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) {
				std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<std::endl;
				gSystem->Exit(EXIT_FAILURE);
			}
		} else if (type_ == DeltaExp) {
			tau_ = this->findParameter("tau");
			fracPrompt_ = this->findParameter("frac_prompt");
			foundParams &= (tau_ != nullptr);
			foundParams &= (fracPrompt_ != nullptr);
			if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
				std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<std::endl;
				gSystem->Exit(EXIT_FAILURE);
			}
		} else if (type_ == ExpTrig) {
			tau_ = this->findParameter("tau");
			deltaM_ = this->findParameter("deltaM");
			foundParams &= (tau_ != nullptr);
			foundParams &= (deltaM_ != nullptr);
			if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
				std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<std::endl;
				gSystem->Exit(EXIT_FAILURE);
			}
		} else if (type_ == ExpHypTrig) {
			tau_ = this->findParameter("tau");
			deltaM_ = this->findParameter("deltaM");
			deltaGamma_ = this->findParameter("deltaGamma");
			foundParams &= (tau_ != nullptr);
			foundParams &= (deltaM_ != nullptr);
			foundParams &= (deltaGamma_ != nullptr);
			if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) {
				std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<std::endl;
				gSystem->Exit(EXIT_FAILURE);
			}
		}
	}

	// Setup the normalisation caches
	if ( effMethod_ == EfficiencyMethod::Spline ) {

		const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };

		if ( not this->doSmearing() ) {
			expTermIkVals_.clear();
			expTermIkVals_.resize(nSplineSegments);
			trigTermIkVals_.clear();
			trigTermIkVals_.resize(nSplineSegments);
			hypHTermIkVals_.clear();
			hypHTermIkVals_.resize(nSplineSegments);
			hypLTermIkVals_.clear();
			hypLTermIkVals_.resize(nSplineSegments);
		} else {
			meanPowerVals_.clear();
			meanPowerVals_.resize(nGauss_);
			sigmaPowerVals_.clear();
			sigmaPowerVals_.resize(nGauss_);

			expTermKvecVals_.clear();
			expTermKvecVals_.resize(nGauss_);
			trigTermKvecVals_.clear();
			trigTermKvecVals_.resize(nGauss_);
			hypHTermKvecVals_.clear();
			hypHTermKvecVals_.resize(nGauss_);
			hypLTermKvecVals_.clear();
			hypLTermKvecVals_.resize(nGauss_);

			expTermMvecVals_.clear();
			expTermMvecVals_.resize(nSplineSegments);
			for ( auto& innerVec : expTermMvecVals_ ) { innerVec.resize(nGauss_); }
			trigTermMvecVals_.clear();
			trigTermMvecVals_.resize(nSplineSegments);
			for ( auto& innerVec : trigTermMvecVals_ ) { innerVec.resize(nGauss_); }
			hypHTermMvecVals_.clear();
			hypHTermMvecVals_.resize(nSplineSegments);
			for ( auto& innerVec : hypHTermMvecVals_ ) { innerVec.resize(nGauss_); }
			hypLTermMvecVals_.clear();
			hypLTermMvecVals_.resize(nSplineSegments);
			for ( auto& innerVec : hypLTermMvecVals_ ) { innerVec.resize(nGauss_); }
		}
	}

	// Force calculation of all relevant info by faking that all parameter values have changed
	nothingFloating_ = nothingChanged_ = kFALSE;
	anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
	nonKnotFloating_ = nonKnotChanged_ = kTRUE;
	physicsParFloating_ = physicsParChanged_ = kTRUE;
	tauFloating_ = tauChanged_ = ( tau_ != nullptr );
	deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
	deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
	resoParFloating_ = resoParChanged_ = kTRUE;
}

Double_t LauDecayTimePdf::effectiveResolution() const
{
	Double_t dilution = 0.;

	Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue();

	// Might be cleaner to just append this to the vector in the init step,
	// then the consistency can also be checked
	Double_t fracSum = 0;
	for (auto f : frac_) fracSum += f->unblindValue();

	Double_t lastFrac = 1. - fracSum;

	for (size_t i = 0; i < sigma_.size(); i++) {

		Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue();

		Double_t thisFrac = lastFrac;
		if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue();

		dilution += thisFrac * TMath::Exp(-dMSq * 0.5 * sigSq);
	}

	return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue();
}
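The same dilution-weighted combination as effectiveResolution above, written standalone: D = sum_i f_i * exp(-DeltaM^2 * sigma_i^2 / 2), then sigma_eff = sqrt(-2 ln D) / DeltaM. In this sketch the fractions are assumed to be supplied in full (summing to unity), rather than deriving the last one as the member function does:

#include <cmath>
#include <cstddef>
#include <vector>

double effectiveResolutionSketch(const std::vector<double>& fracs,
                                 const std::vector<double>& sigmas, const double deltaM)
{
	double dilution {0.0};
	for (std::size_t i {0}; i < sigmas.size(); ++i) {
		dilution += fracs[i] * std::exp(-0.5 * deltaM * deltaM * sigmas[i] * sigmas[i]);
	}
	return std::sqrt(-2.0 * std::log(dilution)) / deltaM;
}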
void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData)
{
	// Check that the input data contains the decay time variable
	Bool_t hasBranch = inputData.haveBranch(this->varName());
	if (!hasBranch) {
		std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}

	if ( scaleWithPerEventError_ ) {
		hasBranch = inputData.haveBranch(this->varErrName());
		if (!hasBranch) {
			std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<std::endl;
			gSystem->Exit(EXIT_FAILURE);
		}
	}

	// Pass the data to the decay-time error PDF for caching
	if ( errHist_ ) {
		errHist_->cacheInfo(inputData);
	}

	if (type_ == Hist) {
		// Pass the data to the decay-time PDF for caching
		if ( pdfHist_ ) {
			pdfHist_->cacheInfo(inputData);
		}
	} else {
		// Clear the vectors and reserve enough space in the event-wise caches
		const UInt_t nEvents = inputData.nEvents();
		abscissas_.clear(); abscissas_.resize(nEvents);
		abscissaErrors_.clear(); abscissaErrors_.resize(nEvents);
		expTerms_.clear(); expTerms_.resize(nEvents);
		cosTerms_.clear(); cosTerms_.resize(nEvents);
		sinTerms_.clear(); sinTerms_.resize(nEvents);
		coshTerms_.clear(); coshTerms_.resize(nEvents);
		sinhTerms_.clear(); sinhTerms_.resize(nEvents);
		normTermsExp_.clear(); normTermsExp_.resize(nEvents);
		normTermsCos_.clear(); normTermsCos_.resize(nEvents);
		normTermsSin_.clear(); normTermsSin_.resize(nEvents);
		normTermsCosh_.clear(); normTermsCosh_.resize(nEvents);
		normTermsSinh_.clear(); normTermsSinh_.resize(nEvents);
		effiTerms_.clear(); effiTerms_.resize(nEvents);

		// Determine the abscissa and abscissa error values for each event
		for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {

			const LauFitData& dataValues = inputData.getData(iEvt);

			const Double_t abscissa { dataValues.at(this->varName()) };
			if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
				std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<" is outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
				gSystem->Exit(EXIT_FAILURE);
			}
			abscissas_[iEvt] = abscissa ;

			const Double_t abscissaErr { scaleWithPerEventError_ ? dataValues.at(this->varErrName()) : 0.0 };
			if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
				std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<" is outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
				gSystem->Exit(EXIT_FAILURE);
			}
			abscissaErrors_[iEvt] = abscissaErr;
		}

		// Force calculation of all info by faking that all parameter values have changed
		nothingFloating_ = nothingChanged_ = kFALSE;
		anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
		nonKnotFloating_ = nonKnotChanged_ = kTRUE;
		physicsParFloating_ = physicsParChanged_ = kTRUE;
		tauFloating_ = tauChanged_ = ( tau_ != nullptr );
		deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
		deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
		resoParFloating_ = resoParChanged_ = kTRUE;

		// Fill the rest of the cache
		this->updateCache();

		// Set the various "parameter-is-floating" flags, used to bookkeep the cache in propagateParUpdates
		LauParamFixed isFixed;
		nonKnotFloating_ = not std::all_of(params_.begin(), params_.end(), isFixed);
		anyKnotFloating_ = not std::all_of(effiPars_.begin(), effiPars_.end(), isFixed);
		nothingFloating_ = not (nonKnotFloating_ or anyKnotFloating_);

		std::cout << "INFO in LauDecayTimePdf::cacheInfo : nothing floating set to: " << (nothingFloating_ ? "True" : "False") << std::endl;
"True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : non-knot floating set to: " << (nonKnotFloating_ ? "True" : "False") << std::endl; tauFloating_ = tau_ ? not tau_->fixed() : kFALSE; deltaMFloating_ = deltaM_ ? not deltaM_->fixed() : kFALSE; deltaGammaFloating_ = deltaGamma_ ? not deltaGamma_->fixed() : kFALSE; physicsParFloating_ = ( tauFloating_ or deltaMFloating_ or deltaGammaFloating_ ); std::cout << "INFO in LauDecayTimePdf::cacheInfo : tau floating set to: " << (tauFloating_ ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaM floating set to: " << (deltaMFloating_ ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaGamma floating set to: " << (deltaGammaFloating_ ? "True" : "False") << std::endl; resoParFloating_ = kFALSE; for ( UInt_t i{0}; i < nGauss_; ++i ) { const Bool_t meanFloating { not mean_[i]->fixed() }; const Bool_t sigmaFloating { not sigma_[i]->fixed() }; resoParFloating_ |= (meanFloating or sigmaFloating); std::cout << "INFO in LauDecayTimePdf::cacheInfo : mean[" << i << "] floating set to: " << (meanFloating ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : sigma[" << i << "] floating set to: " << (sigmaFloating ? "True" : "False") << std::endl; if ( i < (nGauss_ - 1) ) { const Bool_t fracFloating { not frac_[i]->fixed() }; resoParFloating_ |= fracFloating; std::cout << "INFO in LauDecayTimePdf::cacheInfo : frac[" << i << "] floating set to: " << (fracFloating ? "True" : "False") << std::endl; } } } } void LauDecayTimePdf::updateCache() { // Get the updated values of all parameters static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();}; if ( anyKnotChanged_ ) { std::transform( effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), assignValue ); } if ( tauChanged_ ) { tauVal_ = tau_->unblindValue(); + gammaVal_ = 1.0 / tauVal_; } if ( deltaMChanged_ ) { deltaMVal_ = deltaM_->unblindValue(); } if ( deltaGammaChanged_ ) { deltaGammaVal_ = deltaGamma_->unblindValue(); } if ( resoParChanged_ ) { std::transform( mean_.begin(), mean_.end(), meanVals_.begin(), assignValue ); std::transform( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), assignValue ); std::transform( frac_.begin(), frac_.end(), fracVals_.begin(), assignValue ); } // If we're not using per-event information for the decay time // error, just calculate the normalisation terms once if ( ! 
	if ( ! scaleWithPerEventError_ ) {
		this->calcNorm();
	}

	const UInt_t nEvents = abscissas_.size();

	for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {

		const Double_t abscissa = abscissas_[iEvt];
		const Double_t abscissaErr = abscissaErrors_[iEvt];

		// If none of the physics or resolution parameters have changed
		// we only need to recalculate the efficiency, otherwise we
		// need to also update the other terms
		if ( not nonKnotChanged_ ) {
			effiTerm_ = this->calcEffiTerm( abscissa );
			effiTerms_[iEvt] = effiTerm_;
		} else {
			this->calcLikelihoodInfo(abscissa, abscissaErr);
			expTerms_[iEvt] = expTerm_;
			cosTerms_[iEvt] = cosTerm_;
			sinTerms_[iEvt] = sinTerm_;
			coshTerms_[iEvt] = coshTerm_;
			sinhTerms_[iEvt] = sinhTerm_;
			effiTerms_[iEvt] = effiTerm_;
		}

		// If we are using per-event information for the decay
		// time error, need to calculate the normalisation
		// terms for every event
		if ( scaleWithPerEventError_ ) {
			this->calcNorm(abscissaErr);
		}
		normTermsExp_[iEvt] = normTermExp_;
		normTermsCos_[iEvt] = normTermCos_;
		normTermsSin_[iEvt] = normTermSin_;
		normTermsCosh_[iEvt] = normTermCosh_;
		normTermsSinh_[iEvt] = normTermSinh_;
	}

	// reset the "parameter-has-changed" flags
	anyKnotChanged_ = kFALSE;
	tauChanged_ = kFALSE;
	deltaMChanged_ = kFALSE;
	deltaGammaChanged_ = kFALSE;
	physicsParChanged_ = kFALSE;
	resoParChanged_ = kFALSE;
	nonKnotChanged_ = kFALSE;
}

void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt)
{
	// Extract all the terms and their normalisations
	if (type_ == Hist) {
		if ( pdfHist_ ) {
			pdfHist_->calcLikelihoodInfo(iEvt);
			pdfTerm_ = pdfHist_->getLikelihood();
		} else {
			pdfTerm_ = 1.0;
		}
	} else {
		expTerm_ = expTerms_[iEvt];
		cosTerm_ = cosTerms_[iEvt];
		sinTerm_ = sinTerms_[iEvt];
		coshTerm_ = coshTerms_[iEvt];
		sinhTerm_ = sinhTerms_[iEvt];
		normTermExp_ = normTermsExp_[iEvt];
		normTermCos_ = normTermsCos_[iEvt];
		normTermSin_ = normTermsSin_[iEvt];
		normTermCosh_ = normTermsCosh_[iEvt];
		normTermSinh_ = normTermsSinh_[iEvt];
	}

	// Extract the decay time error PDF value
	if ( errHist_ ) {
		errHist_->calcLikelihoodInfo(iEvt);
		errTerm_ = errHist_->getLikelihood();
	} else {
		errTerm_ = 1.0;
	}

	// Extract the decay time efficiency
	effiTerm_ = effiTerms_[iEvt];
}

void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa)
{
	// Check whether any of the gaussians should be scaled - if any of them should we need the per-event error
	if (scaleWithPerEventError_) {
		std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<<std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}
	this->calcLikelihoodInfo(abscissa, 0.0);
}
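calcEffiTerm, which follows, reduces to "look up, then clamp to [0,1]" whichever efficiency method is configured. A generic mirror of that behaviour (std::function stands in for the spline or histogram lookup):

#include <algorithm>
#include <functional>

double effiTermSketch(const std::function<double(double)>& efficiency, const double t)
{
	return std::clamp(efficiency(t), 0.0, 1.0);
}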
Double_t LauDecayTimePdf::calcEffiTerm( const Double_t abscissa ) const
{
	Double_t effiTerm{1.0};

	switch( effMethod_ ) {
		case EfficiencyMethod::Spline : effiTerm = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break;
		case EfficiencyMethod::Binned : effiTerm = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break;
		case EfficiencyMethod::Flat   : effiTerm = 1.0 ; break;
	}

	if ( effiTerm > 1.0 ) { effiTerm = 1.0; }
	else if ( effiTerm < 0.0 ) { effiTerm = 0.0; }

	return effiTerm;
}

void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr)
{
	// Check that the decay time and the decay time error are in valid ranges
	if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
		std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<" is outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}
	if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
		std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<<abscissaErr<<" is outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}

	// Determine the decay time efficiency
	effiTerm_ = this->calcEffiTerm( abscissa );

	// For the histogram PDF just calculate that term and return
	if (type_ == Hist){
		if ( pdfHist_ ) {
			pdfHist_->calcLikelihoodInfo(abscissa);
			pdfTerm_ = pdfHist_->getLikelihood();
		} else {
			pdfTerm_ = 1.0;
		}
		return;
	}

	// If we're not using the resolution function, calculate the simple terms and return
	if (!this->doSmearing()) {
		this->calcNonSmearedTerms(abscissa);
		return;
	}

	// Get all the up to date parameter values for the resolution function
	std::vector<Double_t> frac(nGauss_);
	std::vector<Double_t> mean(nGauss_);
	std::vector<Double_t> sigma(nGauss_);
	Double_t fracPrompt(0.0);

	// TODO - why do we do the fractions this way around?
	frac[0] = 1.0;
	for (UInt_t i(0); i<nGauss_; ++i) {
		mean[i] = meanVals_[i];
		sigma[i] = sigmaVals_[i];
		if (i != 0) {
			frac[i] = fracVals_[i-1];
			frac[0] -= frac[i];
		}
	}
	if (type_ == DeltaExp) {
		fracPrompt = fracPrompt_->unblindValue();
	}

	// Scale the gaussian parameters by the per-event error on Delta t (if appropriate)
	for (UInt_t i(0); i<nGauss_; ++i) {
		if (scaleMeans_[i]) {
			mean[i] *= abscissaErr;
		}
		if (scaleWidths_[i]) {
			sigma[i] *= abscissaErr;
		}
	}

	Double_t value(0.0);
	std::vector<Double_t> x(nGauss_);
	const Double_t xMax = this->maxAbscissa();
	const Double_t xMin = this->minAbscissa();

	for (UInt_t i(0); i<nGauss_; ++i) {
		x[i] = abscissa - mean[i];
	}

	if (type_ == Delta || type_ == DeltaExp) {
		for (UInt_t i(0); i<nGauss_; ++i) {
			if (sigma[i] > 1e-10) {
				Double_t exponent(0.0);
				Double_t norm(0.0);
				Double_t scale = LauConstants::root2*sigma[i];
				Double_t scale2 = LauConstants::rootPiBy2*sigma[i];
				exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]);
				norm = scale2*(TMath::Erf((xMax - mean[i])/scale) - TMath::Erf((xMin - mean[i])/scale));
				value += frac[i]*TMath::Exp(exponent)/norm;
			}
		}
	}

	if (type_ != Delta) {

		// Reset values of terms
		expTerm_ = 0.0;
		cosTerm_ = 0.0;
		sinTerm_ = 0.0;
		coshTerm_ = 0.0;
		sinhTerm_ = 0.0;

		// Calculate values of the PDF convoluted with each Gaussian for a given value of the abscissa
		for (UInt_t i(0); i<nGauss_; ++i) {

-			const Double_t expTerm { this->smearedExpTerm(abscissa, sigma[i], mean[i]) };
+			const Double_t sigmaOverRoot2 { sigma[i] / LauConstants::root2 };
+			const std::complex z { gammaVal_ * sigmaOverRoot2 };
+			const Double_t expTerm { this->smearedGeneralTerm( z, abscissa, sigmaOverRoot2, mean[i] ).real() };

			expTerm_ += frac[i] * expTerm;

			// Typical case (1): B0/B0bar
-			if ( type_ == ExpTrig ) {
+			if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
+
+				const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
+				const std::complex trigTerm { this->smearedGeneralTerm( zTrig, abscissa, sigmaOverRoot2, mean[i] ) };
-				// LHCb convention, i.e. convolution evaluate between 0 and inf
-				if (method_ == DecayTime) {
+				const Double_t cosTerm { trigTerm.real() };
+				const Double_t sinTerm { trigTerm.imag() };
-					auto [cosTerm, sinTerm] = smearedCosSinTerm(abscissa, sigma[i], mean[i]);
+				cosTerm_ += frac[i] * cosTerm;
+				sinTerm_ += frac[i] * sinTerm;
+
+				if ( type_ == ExpTrig ) {
					coshTerm_ += frac[i] * expTerm;
-					cosTerm_ += frac[i] * cosTerm;
-					sinTerm_ += frac[i] * sinTerm;
				} else {
-					// lhcb.web.cern.ch
-				}
-			}
-			// Typical case (2): B0s/B0sbar
-			else if ( type_ == ExpHypTrig ) {
-				// LHCb convention
-				if (method_ == DecayTime) {
+					const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2 };
+					const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2 };
+
+					const Double_t termH { this->smearedGeneralTerm( zH, abscissa, sigmaOverRoot2, mean[i] ).real() };
+					const Double_t termL { this->smearedGeneralTerm( zL, abscissa, sigmaOverRoot2, mean[i] ).real() };
-					auto [coshTerm, sinhTerm] = smearedCoshSinhTerm(abscissa, sigma[i], mean[i]);
-					auto [cosTerm, sinTerm] = smearedCosSinTerm(abscissa, sigma[i], mean[i]);
+					const Double_t coshTerm { 0.5 * (termH + termL) };
+					const Double_t sinhTerm { 0.5 * (termH - termL) };

					coshTerm_ += frac[i] * coshTerm;
					sinhTerm_ += frac[i] * sinhTerm;
-					cosTerm_ += frac[i] * cosTerm;
-					sinTerm_ += frac[i] * sinTerm;
-
-				} else {
-					// lhcb.web.cern.ch
				}
			}
		}

		if (type_ == DeltaExp) {
			value *= fracPrompt;
			value += (1.0-fracPrompt)*expTerm_;
		} else {
			value = expTerm_;
		}
	}

	// Calculate the decay time error PDF value
	if ( errHist_ ) {
		std::vector<Double_t> absErrVec = {abscissaErr}; //Otherwise seg fault
		errHist_->calcLikelihoodInfo(absErrVec);
		errTerm_ = errHist_->getLikelihood();
	} else {
		errTerm_ = 1.0;
	}
}

void LauDecayTimePdf::calcNonSmearedTerms(Double_t abscissa)
{
	// Reset values of terms
	errTerm_ = 1.0;
	expTerm_ = 0.0;
	cosTerm_ = 0.0;
	sinTerm_ = 0.0;
	coshTerm_ = 0.0;
	sinhTerm_ = 0.0;

	if ( type_ == Hist || type_ == Delta ){
		return;
	}

-	const Double_t gamma { 1.0 / tauVal_ };
-
	if (method_ == DecayTime) {
-		expTerm_ = TMath::Exp(-abscissa*gamma);
+		expTerm_ = TMath::Exp(-abscissa*gammaVal_);
	} else if (method_ == DecayTimeDiff) {
-		expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gamma);
+		expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gammaVal_);
	}

	// Calculate also the terms related to cosine and sine
	if (type_ == ExpTrig) {
		coshTerm_ = expTerm_;
		sinhTerm_ = 0.0;
		cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
		sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
	}
	// Calculate also the terms related to cosh, sinh, cosine, and sine
	else if (type_ == ExpHypTrig) {
		coshTerm_ = TMath::CosH(0.5*deltaGammaVal_*abscissa)*expTerm_;
		sinhTerm_ = TMath::SinH(0.5*deltaGammaVal_*abscissa)*expTerm_;
		cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
		sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
	}
}

-std::pair<Double_t,Double_t> LauDecayTimePdf::smearedCosSinTerm(Double_t t, Double_t sigma, Double_t mu)
-{
-	using namespace std::complex_literals;
-
-	const Double_t gamma { 1.0 / tauVal_ };
-
-	const Double_t x = (t - mu) / (LauConstants::root2 * sigma);
-
-	const std::complex<Double_t> z = std::complex<Double_t>(gamma * sigma / LauConstants::root2, -deltaMVal_ * sigma / LauConstants::root2);
-
-	const std::complex<Double_t> arg1 = std::complex<Double_t>(0., 1.) * (z - x);
-
-	const std::complex<Double_t> arg2 { -(x*x) - (arg1 * arg1) };
-
-	const std::complex<Double_t> conv = arg1.imag() < -5. ?
-std::pair<Double_t,Double_t> LauDecayTimePdf::smearedCosSinTerm(Double_t t, Double_t sigma, Double_t mu)
-{
-	using namespace std::complex_literals;
-
-	const Double_t gamma { 1.0 / tauVal_ };
-
-	const Double_t x = (t - mu) / (LauConstants::root2 * sigma);
-
-	const std::complex<Double_t> z = std::complex<Double_t>(gamma * sigma / LauConstants::root2, -deltaMVal_ * sigma / LauConstants::root2);
-
-	const std::complex<Double_t> arg1 = std::complex<Double_t>(0., 1.) * (z - x);
-
-	const std::complex<Double_t> arg2 { -(x*x) - (arg1 * arg1) };
-
-	const std::complex<Double_t> conv = arg1.imag() < -5. ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * TMath::Exp(-(x * x)) * RooMath::faddeeva(arg1) ;
-
-	const Double_t cos_conv = conv.real();
-	const Double_t sin_conv = conv.imag();
-
-	return {cos_conv, sin_conv};
-}
-
-std::pair<Double_t,Double_t> LauDecayTimePdf::smearedCoshSinhTerm(Double_t t, Double_t sigma, Double_t mu)
+std::complex<Double_t> LauDecayTimePdf::smearedGeneralTerm( const std::complex<Double_t>& z, const Double_t t, const Double_t sigmaOverRoot2, const Double_t mu )
{
	using namespace std::complex_literals;

-	const Double_t gamma { 1.0 / tauVal_ };
-
-	std::complex<Double_t> x((t - mu) / (LauConstants::root2 * sigma),0.);
-	Double_t xRe = x.real();
+	const Double_t xRe { (t - mu) / (2.0 * sigmaOverRoot2) };
+	const std::complex<Double_t> x { xRe, 0.0 };

-	Double_t z_H = ((gamma - 0.5 * deltaGammaVal_) * sigma) / LauConstants::root2;
-	Double_t z_L = ((gamma + 0.5 * deltaGammaVal_) * sigma) / LauConstants::root2;
+	const std::complex<Double_t> arg1 { 1i * (z - x) };

-	//Doing H
-	std::complex<Double_t> arg_H1(0., z_H - x.real());
-	std::complex<Double_t> arg_H2 = -(x*x) - (arg_H1 * arg_H1);
+	const std::complex<Double_t> arg2 { -(x*x) - (arg1*arg1) };

-	std::complex<Double_t> conv_H = arg_H1.imag() < -5. ? (0.5 * std::exp(arg_H2)) * RooMath::erfc(-1i * arg_H1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_H1);
+	const std::complex<Double_t> conv { ( arg1.imag() < -5.0 ) ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * std::exp(-(xRe*xRe)) * RooMath::faddeeva(arg1) };

-	//Doing L
-	std::complex<Double_t> arg_L1(0., z_L - x.real());
-	std::complex<Double_t> arg_L2 = -(x*x) - (arg_L1 * arg_L1);
-
-	std::complex<Double_t> conv_L = arg_L1.imag() < -5. ? (0.5 * std::exp(arg_L2)) * RooMath::erfc(-1i * arg_L1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_L1);
-
-	std::complex<Double_t> cosh_conv = 0.5 * (conv_H + conv_L);
-	std::complex<Double_t> sinh_conv = 0.5 * (conv_H - conv_L);
-
-	return {cosh_conv.real(), sinh_conv.real()};
+	return conv;
}
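// [Editor's note] Usage summary, not part of the patch: the single
// smearedGeneralTerm() above now serves all three physics cases purely through
// the choice of the complex decay constant z (with sigmaOverRoot2 = sigma/sqrt(2)):
//
//   exp  term:  z = Gamma * sigmaOverRoot2                     -> take .real()
//   trig terms: z = (Gamma - i*DeltaM) * sigmaOverRoot2        -> .real() gives the
//               smeared cosine term, .imag() the smeared sine term
//   hyp  terms: z = (Gamma -/+ DeltaGamma/2) * sigmaOverRoot2  -> half-sum and
//               half-difference of the two .real() parts give cosh/sinh terms
//
// This replaces the three near-identical smeared*Term() functions removed above.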
-Double_t LauDecayTimePdf::smearedExpTerm(Double_t t, Double_t sigma, Double_t mu)
-{
-	using namespace std::complex_literals;
-
-	const Double_t gamma { 1.0 / tauVal_ };
-
-	const std::complex<Double_t> x((t - mu) / (LauConstants::root2 * sigma),0.);
-	const Double_t xRe = x.real();
-
-	const Double_t z = (gamma * sigma) / LauConstants::root2;
-
-	const std::complex<Double_t> arg1(0., z - x.real());
-
-	const std::complex<Double_t> arg2 = -(x * x) - (arg1 * arg1);
-
-	const std::complex<Double_t> conv = arg1.imag() < -5. ? 0.5 * (std::exp(arg2)) * RooMath::erfc(-1i * arg1) : 0.5 * TMath::Exp(-(xRe * xRe)) * RooMath::faddeeva(arg1) ;
-
-	return conv.real();
-}
-
-std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCosSinIntegral(Double_t minAbs, Double_t maxAbs)
+std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs)
{
	// From 1407.0748, not clear whether complex is faster in this case
-	const Double_t gamma { 1.0 / tauVal_ };
-
-	LauComplex denom = LauComplex(gamma, -deltaMVal_);
-	LauComplex exponent = LauComplex(-gamma, deltaMVal_);
+	const LauComplex denom { gammaVal_, -deltaMVal_ };
+	const LauComplex exponent { -gammaVal_, deltaMVal_ };

-	LauComplex num0 = -exponent.scale(minAbs).exp();
-	LauComplex num1 = -exponent.scale(maxAbs).exp();
+	const LauComplex num0 { -exponent.scale(minAbs).exp() };
+	const LauComplex num1 { -exponent.scale(maxAbs).exp() };

-	LauComplex integral = (num1 - num0) / denom;
+	const LauComplex integral { (num1 - num0) / denom };

	return {integral.re(), integral.im()};
}

-std::pair<Double_t,Double_t> LauDecayTimePdf::smearedCosSinIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
+Double_t LauDecayTimePdf::nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs)
{
-	using namespace std::complex_literals;
-
-	const Double_t gamma { 1.0 / tauVal_ };
-
-	Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
-	Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
-
-	std::complex<Double_t> z = std::complex<Double_t>(gamma * sigma / LauConstants::root2, -deltaMVal_ * sigma / LauConstants::root2);
-
-	std::complex<Double_t> arg1 = std::complex<Double_t>(0., 1.) * (z - x1);
-	std::complex<Double_t> arg0 = std::complex<Double_t>(0., 1.) * (z - x0);
-
-	std::complex<Double_t> integral = 0. + 0i;
-
-	if(arg1.imag() < -5.)
-	{integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
-	else
-	{integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);}
-
-	if(arg0.imag() < -5.)
-	{integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
-	else
-	{integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);}
-
-	integral *= (sigma / (2. * LauConstants::root2 * z));
-
-	Double_t cos_integral = integral.real();
-	Double_t sin_integral = integral.imag();
-
-	return {cos_integral, sin_integral};
+	return tauVal_ * ( TMath::Exp(-minAbs*gammaVal_) - TMath::Exp(-maxAbs*gammaVal_) );
}

-Double_t LauDecayTimePdf::nonSmearedExpIntegral(Double_t minAbs, Double_t maxAbs)
-{
-	const Double_t gamma { 1.0 / tauVal_ };
-
-	return tauVal_ * ( TMath::Exp(-minAbs*gamma) - TMath::Exp(-maxAbs*gamma) );
-}
-
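// [Editor's note] Worked check, not part of the patch:
//
//   \int_{t0}^{t1} e^{-Gamma t} dt = (1/Gamma) * ( e^{-Gamma t0} - e^{-Gamma t1} )
//                                  = tau * ( e^{-t0/tau} - e^{-t1/tau} )
//
// which is exactly what nonSmearedExpIntegral() returns, since gammaVal_ caches
// 1/tauVal_ in the patched code.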
-Double_t LauDecayTimePdf::smearedExpIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
-{
-	using namespace std::complex_literals;
-
-	const Double_t gamma { 1.0 / tauVal_ };
-
-	const Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
-	const Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
-
-	const Double_t z = (gamma * sigma) / LauConstants::root2;
-
-	std::complex<Double_t> arg1(0., z - x1);
-	std::complex<Double_t> arg0(0., z - x0);
-
-	std::complex<Double_t> integral = 0. + 0i;
-
-	if(arg1.imag() < -5.)
-	{integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
-	else
-	{integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);}
-
-	if(arg0.imag() < -5.)
-	{integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
-	else
-	{integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);}
-
-	integral *= (sigma / (2. * LauConstants::root2 * z));
-
-	return integral.real();
-}
-
-std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs)
+std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs)
{
	// Use exponential formulation rather than cosh, sinh.
	// Fewer terms (reused for each), but not guaranteed to be faster.
-	const Double_t gamma { 1.0 / tauVal_ };
+	const Double_t gammaH { gammaVal_ - 0.5 * deltaGammaVal_ };
+	const Double_t gammaL { gammaVal_ + 0.5 * deltaGammaVal_ };

-	Double_t gammaH = gamma - 0.5 * deltaGammaVal_;
-	Double_t gammaL = gamma + 0.5 * deltaGammaVal_;
+	const Double_t tauH { 1.0 / gammaH };
+	const Double_t tauL { 1.0 / gammaL };

-	Double_t nL1 = -TMath::Exp(-gammaL * maxAbs) / gammaL;
-	Double_t nH1 = -TMath::Exp(-gammaH * maxAbs) / gammaH;
-	Double_t nL0 = -TMath::Exp(-gammaL * minAbs) / gammaL;
-	Double_t nH0 = -TMath::Exp(-gammaH * minAbs) / gammaH;
+	const Double_t nL1 { -TMath::Exp(-gammaL * maxAbs) * tauL };
+	const Double_t nH1 { -TMath::Exp(-gammaH * maxAbs) * tauH };
+	const Double_t nL0 { -TMath::Exp(-gammaL * minAbs) * tauL };
+	const Double_t nH0 { -TMath::Exp(-gammaH * minAbs) * tauH };

-	Double_t cosh_integral = 0.5 * ( (nH1 + nL1) - (nH0 + nL0) );
-	Double_t sinh_integral = 0.5 * ( (nH1 - nL1) - (nH0 - nL0) );
+	const Double_t coshIntegral { 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ) };
+	const Double_t sinhIntegral { 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ) };

-	return {cosh_integral, sinh_integral};
+	return {coshIntegral, sinhIntegral};
}
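// [Editor's note] Worked equations, not part of the patch: the exponential
// formulation used above follows from
//
//   e^{-Gamma t} cosh(DeltaGamma t / 2) = ( e^{-Gamma_H t} + e^{-Gamma_L t} ) / 2
//   e^{-Gamma t} sinh(DeltaGamma t / 2) = ( e^{-Gamma_H t} - e^{-Gamma_L t} ) / 2
//
// with Gamma_H = Gamma - DeltaGamma/2 and Gamma_L = Gamma + DeltaGamma/2, so each
// cosh/sinh integral is just a half-sum/half-difference of two plain exponential
// integrals evaluated at the heavy and light widths.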
-std::pair<Double_t,Double_t> LauDecayTimePdf::smearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
+std::complex<Double_t> LauDecayTimePdf::smearedGeneralIntegral(const std::complex<Double_t>& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu)
{
	using namespace std::complex_literals;

-	const Double_t gamma { 1.0 / tauVal_ };
-
-	Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
-	Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
+	const Double_t x1 { (maxAbs - mu) / (2.0 * sigmaOverRoot2) };
+	const Double_t x0 { (minAbs - mu) / (2.0 * sigmaOverRoot2) };

-	Double_t z_H = ((gamma - 0.5 * deltaGammaVal_) * sigma) / LauConstants::root2;
+	const std::complex<Double_t> arg1 { 1i * (z - x1) };
+	const std::complex<Double_t> arg0 { 1i * (z - x0) };

-	std::complex<Double_t> arg1_H(0., z_H - x1);
-	std::complex<Double_t> arg0_H(0., z_H - x0);
+	std::complex<Double_t> integral = 0.0 + 0i;

-	std::complex<Double_t> integral_H = 0. + 0i;
-	if(arg1_H.imag() < -5.)
-	{integral_H = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_H * arg1_H)) * RooMath::erfc(-1i * arg1_H);}
+	if(arg1.imag() < -5.0)
+	{integral = RooMath::erf(x1) - std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);}
	else
-	{integral_H = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_H);}
+	{integral = RooMath::erf(x1) - TMath::Exp(-(x1*x1)) * RooMath::faddeeva(arg1);}

-	if(arg0_H.imag() < -5.)
-	{integral_H -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_H * arg0_H)) * RooMath::erfc(-1i * arg0_H);}
+	if(arg0.imag() < -5.0)
+	{integral -= RooMath::erf(x0) - std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);}
	else
-	{integral_H -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_H);}
+	{integral -= RooMath::erf(x0) - TMath::Exp(-(x0*x0)) * RooMath::faddeeva(arg0);}

-	integral_H *= (sigma / (2. * LauConstants::root2 * z_H));
+	integral *= (sigmaOverRoot2 / (2.0 * z));

-	// Same for light (L)
-
-	Double_t z_L = ((gamma + 0.5 * deltaGammaVal_) * sigma) / LauConstants::root2;
-
-	std::complex<Double_t> arg1_L(0., z_L - x1);
-	std::complex<Double_t> arg0_L(0., z_L - x0);
-
-	std::complex<Double_t> integral_L = 0. + 0i;
-	if(arg1_L.imag() < -5.)
-	{integral_L = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_L * arg1_L)) * RooMath::erfc(-1i * arg1_L);}
-	else
-	{integral_L = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_L);}
-
-	if(arg0_L.imag() < -5.)
-	{integral_L -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_L * arg0_L)) * RooMath::erfc(-1i * arg0_L);}
-	else
-	{integral_L -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_L);}
-
-	integral_L *= (sigma / (2. * LauConstants::root2 * z_L));
-
-	std::complex<Double_t> cosh_integral = 0.5 * (integral_H + integral_L);
-	std::complex<Double_t> sinh_integral = 0.5 * (integral_H - integral_L);
-
-	return {cosh_integral.real(), sinh_integral.real()};
+	return integral;
}
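// [Editor's note] Numerical-stability observation, not part of the patch: the
// branch on Im(arg) < -5 in the functions above exploits the identity
//
//   w(arg) = exp(-arg^2) * erfc(-i*arg)        (w = Faddeeva function)
//
// so exp(-x^2) * w(arg) == exp(-x^2 - arg^2) * erfc(-i*arg). For Im(arg) far
// below zero, w(arg) itself grows like exp(-arg^2) and can overflow, whereas the
// erfc form keeps the large cancelling exponentials inside a single std::exp
// call. Both branches therefore evaluate the same quantity.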

void LauDecayTimePdf::calcNorm(const Double_t abscissaErr)
{
	/*if( abscissaErr <= 0. and scaleWithPerEventError_) {
		std::cerr << "\033[1;31m IN CALCNORM: \33[0m" << std::endl;
		std::cerr << "\033[1;31m absErr: " << abscissaErr << "\033[0m" << std::endl; //DEBUG
	}*/

	// first reset integrals to zero
	normTermExp_  = 0.0;
	normTermCos_  = 0.0;
	normTermSin_  = 0.0;
	normTermCosh_ = 0.0;
	normTermSinh_ = 0.0;

	// Get all the up to date parameter values
	std::vector<Double_t> fracs(nGauss_);
	std::vector<Double_t> means(nGauss_);
	std::vector<Double_t> sigmas(nGauss_);

	// TODO - why do we do the fractions this way around?
	fracs[0] = 1.0;
	for (UInt_t i(0); i<nGauss_; ++i) {
		means[i]  = meanVals_[i];
		sigmas[i] = sigmaVals_[i];
		if (i != 0) {
			fracs[i] = fracVals_[i-1];
			fracs[0] -= fracs[i];
		}
	}

	// Scale the gaussian parameters by the per-event error (if appropriate)
	for (UInt_t i(0); i<nGauss_; ++i) {
		if (scaleMeans_[i])  { means[i]  *= abscissaErr; }
		if (scaleWidths_[i]) { sigmas[i] *= abscissaErr; }
	}

	switch ( effMethod_ ) {

		case EfficiencyMethod::Flat :
		{
			// Efficiency is constant: a single integral over the full range suffices
			const Double_t uniformEffVal {1.0};
			if ( this->doSmearing() )
				{this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , uniformEffVal, means, sigmas, fracs );}
			else
				{this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, uniformEffVal );}
			break;
		}

		case EfficiencyMethod::Binned :
		{
			// Efficiency varies as piecewise constant
			// Total integral is sum of integrals in each bin, each weighted by efficiency in that bin
			const Int_t nBins { effiHist_->GetNbinsX() };
			for ( Int_t bin{1}; bin <= nBins; ++bin ) {
				const Double_t loEdge {effiHist_->GetBinLowEdge(bin)};
				const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)};
				const Double_t effVal {effiHist_->GetBinContent(bin)};
				if ( this->doSmearing() )
					{this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );}
				else
					{this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );}
			}
			break;
		}

		case EfficiencyMethod::Spline :
		{
			// Efficiency varies as piecewise polynomial
			// Use methods from https://arxiv.org/abs/1407.0748 section 4 to calculate
			// TODO we shouldn't need the next line, let's try to ensure by this point that we don't need it
			if(not effiFun_){std::cerr << "FATAL : no spline defined!"; gSystem->Exit(EXIT_FAILURE);}
			const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
			if ( this->doSmearing() ) {
				if ( nonKnotChanged_ ) {
-					this->calcKVectors( sigmas );
					if ( resoParChanged_ ) {
						this->calcMeanAndSigmaPowers( means, sigmas );
					}
+					this->calcKVectors();
				}
				for(UInt_t i{0}; i < nSplineSegments; ++i) {
					this->calcSmearedSplinePartialIntegrals( i, means, sigmas, fracs );
				}
			} else {
				for(UInt_t i{0}; i < nSplineSegments; ++i) {
					this->calcNonSmearedSplinePartialIntegrals( i );
				}
			}
			break;
		}
	}

	// std::cout << "\033[1;34m In calcNorm: \033[0m" << std::endl; //DEBUG
	// std::cout << "\033[1;34m normTermExp : \033[0m" << normTermExp_ << std::endl; //DEBUG
	// std::cout << "\033[1;34m normTerm[Cos,Sin] : \033[0m[" << normTermCos_ << ", " << normTermSin_ << "]" << std::endl; //DEBUG
	// std::cout << "\033[1;34m normTerm[Cosh,Sinh]: \033[0m[" << normTermCosh_ << ", " << normTermSinh_ << "]" << std::endl; //DEBUG
}

// TODO - Mildly concerned this is void rather than returning the integrals
//        (but this would require refactoring for different return values).
//        As long as it doesn't get called outside of calcNorm() it should be fine - DPO
//        (TL: comment applies to all calc*PartialIntegrals functions.)
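// [Editor's note] Context sketch, not part of the patch: the normTerm* members
// accumulated by the calls above are the efficiency-weighted integrals of the
// corresponding decay-time terms over the fit range. Schematically, the
// normalised per-event likelihood that a time-dependent fit model would build
// from this class looks like
//
//   L(t) ~ ( a*coshTerm_ + b*sinhTerm_ + c*cosTerm_ + d*sinTerm_ )
//        / ( a*normTermCosh_ + b*normTermSinh_ + c*normTermCos_ + d*normTermSin_ )
//
// where the coefficients a..d carry the physics; the exact combination
// presumably lives in the fit-model code rather than in this class.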
void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight)
{
-	Double_t normTermExp {0.0};
-	if (method_ == DecayTime) {
-		normTermExp = weight * this->nonSmearedExpIntegral(minAbs, maxAbs);
-	} else if (method_ == DecayTimeDiff) {
-		const Double_t gamma { 1.0 / tauVal_ };
+	/* TODO - need to implement something for DecayTimeDiff everywhere
+	if (method_ == DecayTimeDiff) {
		// TODO - there should be some TMath::Abs here surely?
-		normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gamma) - TMath::Exp(-minAbs*gamma));
+		normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gammaVal_) - TMath::Exp(-minAbs*gammaVal_));
	}
-	normTermExp_ += normTermExp;
+	*/

-	// Normalisation factor for B0 decays
-	if ( type_ == ExpTrig ) {
+	const Double_t normTermExp { weight * this->nonSmearedExpIntegral(minAbs, maxAbs) };
+	normTermExp_ += normTermExp;

-		normTermCosh_ += normTermExp;
+	if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

		auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
		normTermCos_ += weight * cosIntegral;
		normTermSin_ += weight * sinIntegral;

-	}
-	// Normalisation factor for Bs decays
-	else if ( type_ == ExpHypTrig ) {
+		if ( type_ == ExpTrig ) {

-		auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs);
-		normTermCosh_ += weight * coshIntegral;
-		normTermSinh_ += weight * sinhIntegral;
+			normTermCosh_ += normTermExp;

-		auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
-		normTermCos_ += weight * cosIntegral;
-		normTermSin_ += weight * sinIntegral;
+		} else {
+
+			// The cos/sin integrals are already accumulated in the combined block
+			// above, so only the cosh/sinh integrals are needed here
+			auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs);
+			normTermCosh_ += weight * coshIntegral;
+			normTermSinh_ += weight * sinhIntegral;
+		}
	}
}
void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
	for (UInt_t i(0); i<nGauss_; ++i) {

+		const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };
+
+		const std::complex<Double_t> z { gammaVal_ * sigmaOverRoot2 };
+		const std::complex<Double_t> integral { this->smearedGeneralIntegral( z, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
+		const Double_t normTermExp { weight * integral.real() };

-		Double_t normTermExp {0.0};
-		if (method_ == DecayTime) {
-			normTermExp = weight * this->smearedExpIntegral(minAbs, maxAbs, sigmas[i], means[i]);
-		} else if (method_ == DecayTimeDiff) {
-			const Double_t gamma { 1.0 / tauVal_ };
-			// TODO - this is neglecting resolution at the moment
-			// TODO - there should be some TMath::Abs here surely?
-			normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gamma) - TMath::Exp(-minAbs*gamma));
-		}

		normTermExp_ += fractions[i] * normTermExp;

-		// Normalisation factor for B0 decays
-		if ( type_ == ExpTrig ) {
+		if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
+
+			const std::complex<Double_t> zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
+			const std::complex<Double_t> integralTrig { this->smearedGeneralIntegral( zTrig, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };

-			normTermCosh_ += fractions[i] * normTermExp;
+			const Double_t cosIntegral { integralTrig.real() };
+			const Double_t sinIntegral { integralTrig.imag() };

-			auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]);
			normTermCos_ += fractions[i] * weight * cosIntegral;
			normTermSin_ += fractions[i] * weight * sinIntegral;

-		}
-		// Normalisation factor for Bs decays
-		else if ( type_ == ExpHypTrig ) {
+			if ( type_ == ExpTrig ) {

-			auto [coshIntegral, sinhIntegral] = this->smearedCoshSinhIntegral(minAbs, maxAbs, sigmas[i], means[i]);
-			normTermCosh_ += fractions[i] * weight * coshIntegral;
-			normTermSinh_ += fractions[i] * weight * sinhIntegral;
+				normTermCosh_ += fractions[i] * normTermExp;

-			auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]);
-			normTermCos_ += fractions[i] * weight * cosIntegral;
-			normTermSin_ += fractions[i] * weight * sinIntegral;
+			} else {
+
+				// Heavy (H) eigenstate case
+				const std::complex<Double_t> zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
+				const std::complex<Double_t> integralH { this->smearedGeneralIntegral( zH, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
+
+				// Light (L) eigenstate case
+				const std::complex<Double_t> zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
+				const std::complex<Double_t> integralL { this->smearedGeneralIntegral( zL, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
+
+				const std::complex<Double_t> coshIntegral { 0.5 * (integralH + integralL) };
+				const std::complex<Double_t> sinhIntegral { 0.5 * (integralH - integralL) };
+
+				normTermCosh_ += fractions[i] * weight * coshIntegral.real();
+				normTermSinh_ += fractions[i] * weight * sinhIntegral.real();
+			}
		}
	}
}

void LauDecayTimePdf::calcMeanAndSigmaPowers( const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas )
{
	// Calculate powers of mu and sigma/sqrt(2) needed by all terms in the smeared spline normalisation
	for (UInt_t i(0); i<nGauss_; ++i) {
		const Double_t mu { means[i] };
		const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };

		meanPowerVals_[i]  = { 1.0, mu, mu*mu, mu*mu*mu };
		sigmaPowerVals_[i] = { 1.0, sigmaOverRoot2, sigmaOverRoot2*sigmaOverRoot2, sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2 };
	}
}

-void LauDecayTimePdf::calcKVectors( const std::vector<Double_t>& sigmas )
+void LauDecayTimePdf::calcKVectors()
{
	using namespace std::complex_literals;

-	const Double_t gamma { 1.0 / tauVal_ };
-
	std::complex<Double_t> z;

	for (UInt_t i(0); i<nGauss_; ++i) {

		// sigmaPowerVals_[i][1] caches sigma/sqrt(2) for this gaussian
		const Double_t sigmaOverRoot2 { sigmaPowerVals_[i][1] };

-		z.real( gamma * sigmaOverRoot2 );
+		z.real( gammaVal_ * sigmaOverRoot2 );
		z.imag( 0.0 );
		expTermKvecVals_[i] = this->generateKvector(z);

		if ( type_ == ExpTrig || type_ == ExpHypTrig ) {

-			z.real( gamma * sigmaOverRoot2 );
+			z.real( gammaVal_ * sigmaOverRoot2 );
			z.imag( -deltaMVal_ * sigmaOverRoot2 );
			trigTermKvecVals_[i] = this->generateKvector(z);

			if ( type_ == ExpHypTrig ) {

-				z.real( ( gamma - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
+				z.real( ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
				z.imag( 0.0 );
				hypHTermKvecVals_[i] = this->generateKvector(z);

-				z.real( ( gamma + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
+				z.real( ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
				z.imag( 0.0 );
				hypLTermKvecVals_[i] = this->generateKvector(z);
			}
		}
	}
}
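// [Editor's note] Worked equations, not part of the patch: generateKvector()
// further below implements the K_n(z) factors of arXiv:1407.0748 (section 4),
// which, writing zr = 1/z, are
//
//   K_0(z) = zr / 2
//   K_1(z) = zr^2 / 2
//   K_2(z) = zr * (1 + zr^2)
//   K_3(z) = 3 * zr^2 * (1 + zr^2)
//
// They depend only on z, i.e. only on the physics and resolution parameters,
// which is why the patch caches them per gaussian in calcKVectors() and
// recomputes them only when those parameters change.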

void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
	using namespace std::complex_literals;

-	const Double_t gamma { 1.0 / tauVal_ };
-
	const std::vector<Double_t>& xVals { effiFun_->getXValues() };
	const Double_t minAbs = xVals[splineIndex];
	const Double_t maxAbs = xVals[splineIndex+1];

	const std::array<Double_t,4> coeffs { effiFun_->getCoefficients(splineIndex) };

	std::complex<Double_t> z;

	for (UInt_t i(0); i<nGauss_; ++i) {

		const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };

		z.real( gammaVal_ * sigmaOverRoot2 );
		z.imag( 0.0 );

-		Double_t normTermExp {0.0};
-		if (method_ == DecayTime) {
-			if ( nonKnotChanged_ ) {
-				expTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
-			}
+		if ( nonKnotChanged_ ) {
+			expTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
+		}

-			normTermExp = this->smearedSplineNormalise(coeffs, expTermKvecVals_[i], expTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).first;
+		const Double_t normTermExp { this->smearedSplineNormalise(coeffs, expTermKvecVals_[i], expTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).real() };

-		} else if (method_ == DecayTimeDiff) {//TODO this isn't implemented at all
-			//const Double_t gamma { 1.0 / tauVal_ };
-			// TODO - this is neglecting resolution at the moment
-			// TODO - there should be some TMath::Abs here surely?
-			//normTermExp = tauVal_ * (2.0 - TMath::Exp(-maxAbs*gamma) - TMath::Exp(-minAbs*gamma));
-		}

		normTermExp_ += fractions[i] * normTermExp;

-		// Normalisation factor for B0 decays
-		if ( type_ == ExpTrig ) {
-
-			normTermCosh_ += fractions[i] * normTermExp;
+		if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

-			z.real( gamma * sigmaOverRoot2 );
+			z.real( gammaVal_ * sigmaOverRoot2 );
			z.imag( -deltaMVal_ * sigmaOverRoot2 );

			if ( nonKnotChanged_ ) {
				trigTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
			}

-			auto [cosIntegral, sinIntegral] = this->smearedSplineNormalise(coeffs, trigTermKvecVals_[i], trigTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]);
+			const std::complex<Double_t> integral { this->smearedSplineNormalise(coeffs, trigTermKvecVals_[i], trigTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]) };
+
+			const Double_t cosIntegral { integral.real() };
+			const Double_t sinIntegral { integral.imag() };

			normTermCos_ += fractions[i] * cosIntegral;
			normTermSin_ += fractions[i] * sinIntegral;

-		}
+			if ( type_ == ExpTrig ) {

-		// Normalisation factor for Bs decays
-		else if ( type_ == ExpHypTrig ) {
+				normTermCosh_ += fractions[i] * normTermExp;

-			const std::complex<Double_t> zH { ( gamma - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
-			const std::complex<Double_t> zL { ( gamma + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
+			} else {

-			if ( nonKnotChanged_ ) {
-				hypHTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
-				hypLTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
-			}
+				const std::complex<Double_t> zH { ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
+				const std::complex<Double_t> zL { ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };

-			const Double_t N_H = this->smearedSplineNormalise(coeffs, hypHTermKvecVals_[i], hypHTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).first;
-			const Double_t N_L = this->smearedSplineNormalise(coeffs, hypLTermKvecVals_[i], hypLTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).first;
+				if ( nonKnotChanged_ ) {
+					hypHTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, zH, sigmas[i], means[i]);
+					hypLTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, zL, sigmas[i], means[i]);
+				}

-			const Double_t coshIntegral = 0.5 * (N_H + N_L);
-			const Double_t sinhIntegral = 0.5 * (N_H - N_L);
+				const Double_t integralH { this->smearedSplineNormalise(coeffs, hypHTermKvecVals_[i], hypHTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).real() };
+				const Double_t integralL { this->smearedSplineNormalise(coeffs, hypLTermKvecVals_[i], hypLTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).real() };

-			normTermCosh_ += fractions[i] * coshIntegral;
-			normTermSinh_ += fractions[i] * sinhIntegral;
+				const Double_t coshIntegral { 0.5 * (integralH + integralL) };
+				const Double_t sinhIntegral { 0.5 * (integralH - integralL) };

-			z.real( gamma * sigmaOverRoot2 );
-			z.imag( -deltaMVal_ * sigmaOverRoot2 );
-
-			if ( nonKnotChanged_ ) {
-				trigTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
+				normTermCosh_ += fractions[i] * coshIntegral;
+				normTermSinh_ += fractions[i] * sinhIntegral;
			}
-
-			auto [cosIntegral, sinIntegral] = this->smearedSplineNormalise(coeffs, trigTermKvecVals_[i], trigTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]);
-
-			normTermCos_ += fractions[i] * cosIntegral;
-			normTermSin_ += fractions[i] * sinIntegral;
		}
	}
}

-std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateKvector(const std::complex<Double_t>& z)
+std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateKvector(const std::complex<Double_t>& z)
{
-	std::array<std::complex<Double_t>,4> K = {0.,0.,0.,0.};
+	std::array<std::complex<Double_t>,4> K {0.,0.,0.,0.};

-	const std::complex<Double_t> zr = 1./z;
+	const std::complex<Double_t> zr { 1./z };

	K[0] = 0.5*zr;
	K[1] = 0.5*zr*zr;
	K[2] = zr*(1.+zr*zr);
	K[3] = 3.*zr*zr*(1.+zr*zr);

	return K;
}
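// [Editor's note] Companion note, not part of the patch: generateMvector() below
// implements the corresponding M_n factors from the same reference, evaluated at
// the upper and lower integration limits (M = M1 - M0), with the scaled variable
// x = (t - mean) / (sqrt(2)*sigma). Each M_n combines a real gaussian/erf piece
// with the complex Faddeeva term, again falling back to the erfc form for
// numerical stability when Im(arg) < -5.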

std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& z, const Double_t sigma, const Double_t mean)
{
	using namespace std::complex_literals;

-	std::array<std::complex<Double_t>,4> M0 = {0.,0.,0.,0.};
-	std::array<std::complex<Double_t>,4> M1 = {0.,0.,0.,0.};
-	std::array<std::complex<Double_t>,4> M;
+	std::array<std::complex<Double_t>,4> M0 {0.,0.,0.,0.};
+	std::array<std::complex<Double_t>,4> M1 {0.,0.,0.,0.};
+	std::array<std::complex<Double_t>,4> M {0.,0.,0.,0.};

-	const Double_t x1 = (maxAbs - mean) / (LauConstants::root2 * sigma);
-	const Double_t x0 = (minAbs - mean) / (LauConstants::root2 * sigma);
+	const Double_t x1 { (maxAbs - mean) / (LauConstants::root2 * sigma) };
+	const Double_t x0 { (minAbs - mean) / (LauConstants::root2 * sigma) };

	//Values used a lot
-	const Double_t ex2_1 = TMath::Exp(-(x1*x1));
-	const Double_t ex2_0 = TMath::Exp(-(x0*x0));
-	const Double_t sqrtPir = 1/LauConstants::rootPi;
+	const Double_t ex2_1 { TMath::Exp(-(x1*x1)) };
+	const Double_t ex2_0 { TMath::Exp(-(x0*x0)) };
+	const Double_t sqrtPir { 1.0 / LauConstants::rootPi };

-	const std::complex<Double_t> arg1 = (0.+1.i) * (z-x1);
-	const std::complex<Double_t> arg0 = (0.+1.i) * (z-x0);
+	const std::complex<Double_t> arg1 { 1i * (z - x1) };
+	const std::complex<Double_t> arg0 { 1i * (z - x0) };

	//fad = the faddeeva term times the ex2 value (done in different ways depending on the domain)
	std::complex<Double_t> fad1;
	std::complex<Double_t> fad0;

-	if(arg1.imag() < -5.)
-	{fad1 = std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
+	if(arg1.imag() < -5.0)
+	{fad1 = std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);}
	else
	{fad1 = ex2_1*RooMath::faddeeva(arg1);}

-	if(arg0.imag() < -5.)
-	{fad0 = std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
+	if(arg0.imag() < -5.0)
+	{fad0 = std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);}
	else
	{fad0 = ex2_0*RooMath::faddeeva(arg0);}

	//doing the actual functions for x1
	M1[0] = RooMath::erf(x1) - fad1;
	M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1);
	M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1);
	M1[3] = 4. * (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1);

	//doing them again for x0
	M0[0] = RooMath::erf(x0) - fad0;
	M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0);
	M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0);
	M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0);

	for(Int_t i = 0; i < 4; ++i){M[i] = M1[i] - M0[i];}

	return M;
}

-std::pair<Double_t,Double_t> LauDecayTimePdf::smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<Double_t>,4>& K, const std::array<std::complex<Double_t>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const
+std::complex<Double_t> LauDecayTimePdf::smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<Double_t>,4>& K, const std::array<std::complex<Double_t>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const
{
	using namespace std::complex_literals;

	//Triple sum to get N (eqn 31 and 29 in https://arxiv.org/pdf/1407.0748.pdf)
	std::complex<Double_t> N = 0. + 0i;

	for(Int_t k = 0; k < 4; ++k){
		for(Int_t n = 0; n <= k; ++n){
			for(Int_t i = 0; i <= n; ++i){
-				const Double_t b = binom_[n][i]*binom_[k][n];//The binomial coefficient terms
+				//The binomial coefficient terms
+				const Double_t b { binom_[n][i]*binom_[k][n] };

				N += sigmaPowers[n]*coeffs[k]*meanPowers[k-n]*K[i]*M[n-i]*b;
			}}}

-	return std::make_pair( N.real(), N.imag() );
+	return N;
}
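// [Editor's note] Worked equation, not part of the patch: the triple sum above is
// eq. (29)/(31) of arXiv:1407.0748,
//
//   N = sum_{k=0}^{3} sum_{n=0}^{k} sum_{i=0}^{n}  C(k,n) * C(n,i)
//       * coeffs[k] * mu^{k-n} * (sigma/sqrt(2))^{n} * K_i(z) * M_{n-i}(x; z)
//
// i.e. each cubic-spline coefficient couples to the cached powers of the
// resolution mean and width and to the cached K and M vectors. Re-evaluating a
// spline segment after a knot-only change therefore costs just this sum, with no
// new error-function evaluations.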

void LauDecayTimePdf::calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex)
{
	using namespace std::complex_literals;

-	const Double_t gamma { 1.0 / tauVal_ };
-
-	Double_t normTermExp {0.0};
-	if (method_ == DecayTime) {
-		const std::complex<Double_t> u = gamma;
-		normTermExp = this->nonSmearedSplineNormalise(splineIndex, u, expTermIkVals_).first;
+	const std::complex<Double_t> u { gammaVal_ };
+	const Double_t normTermExp { this->nonSmearedSplineNormalise(splineIndex, u, expTermIkVals_).real() };

-	} else if (method_ == DecayTimeDiff) {//TODO this isn't implemented at all
-		//const Double_t gamma { 1.0 / tauVal_ };
-		// TODO - this is neglecting resolution at the moment
-		// TODO - there should be some TMath::Abs here surely?
-		//normTermExp = tauVal_ * (2.0 - TMath::Exp(-maxAbs*gamma) - TMath::Exp(-minAbs*gamma));
-	}

	normTermExp_ += normTermExp;

-	// Normalisation factor for B0 decays
-	if ( type_ == ExpTrig ) {
+	if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

-		normTermCosh_ += normTermExp;
+		const std::complex<Double_t> uTrig { gammaVal_, -deltaMVal_ };
+		const std::complex<Double_t> integral { this->nonSmearedSplineNormalise(splineIndex, uTrig, trigTermIkVals_) };

-		const std::complex<Double_t> u = std::complex<Double_t>(gamma, -deltaMVal_);
-		auto [cosIntegral, sinIntegral] = this->nonSmearedSplineNormalise(splineIndex, u, trigTermIkVals_);
+		const Double_t cosIntegral { integral.real() };
+		const Double_t sinIntegral { integral.imag() };

		normTermCos_ += cosIntegral;
		normTermSin_ += sinIntegral;

-	}
-	// Normalisation factor for Bs decays
-	else if ( type_ == ExpHypTrig ) {
+		if ( type_ == ExpTrig ) {

-		const std::complex<Double_t> u_H = (gamma - 0.5 * deltaGammaVal_);
-		const std::complex<Double_t> u_L = (gamma + 0.5 * deltaGammaVal_);
+			normTermCosh_ += normTermExp;

-		const Double_t N_H = this->nonSmearedSplineNormalise(splineIndex, u_H, hypHTermIkVals_).first;
-		const Double_t N_L = this->nonSmearedSplineNormalise(splineIndex, u_L, hypHTermIkVals_).first;
+		} else {

-		const Double_t coshIntegral = 0.5 * (N_H + N_L);
-		const Double_t sinhIntegral = 0.5 * (N_H - N_L);
+			const std::complex<Double_t> uH { gammaVal_ - 0.5 * deltaGammaVal_ };
+			const std::complex<Double_t> uL { gammaVal_ + 0.5 * deltaGammaVal_ };

-		normTermCosh_ += coshIntegral;
-		normTermSinh_ += sinhIntegral;
+			// NB the light-state integral must use the light-state cache
+			const Double_t integralH { this->nonSmearedSplineNormalise(splineIndex, uH, hypHTermIkVals_).real() };
+			const Double_t integralL { this->nonSmearedSplineNormalise(splineIndex, uL, hypLTermIkVals_).real() };

-		const std::complex<Double_t> u = std::complex<Double_t>(gamma, -deltaMVal_);
-		auto [cosIntegral, sinIntegral] = this->nonSmearedSplineNormalise(splineIndex, u, trigTermIkVals_);
+			const Double_t coshIntegral { 0.5 * (integralH + integralL) };
+			const Double_t sinhIntegral { 0.5 * (integralH - integralL) };

-		normTermCos_ += cosIntegral;
-		normTermSin_ += sinIntegral;
+			normTermCosh_ += coshIntegral;
+			normTermSinh_ += sinhIntegral;
+		}
	}
}

-std::complex<Double_t> LauDecayTimePdf::calcIk( const Int_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& u )
+std::complex<Double_t> LauDecayTimePdf::calcIk( const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& u )
{
	//Taking mu = 0, this does not have to be the case in general
	auto G = [&u](const Int_t n){return -TMath::Factorial(n)/std::pow(u,n+1);};//power of n+1 used rather than n, this is due to maths error in the paper
	auto H = [&u](const Int_t n, const Double_t t){return std::pow(t,n)*std::exp(-u*t);};

-	std::complex<Double_t> ans = 0;
-	for (Int_t j = 0; j <= k; ++j)
+	std::complex<Double_t> ans { 0.0, 0.0 };
+	for (UInt_t j = 0; j <= k; ++j)
	{ans += binom_[k][j]*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );}

	return ans;
}
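// [Editor's note] Worked equation, not part of the patch: calcIk() above evaluates
//
//   I_k = \int_{t0}^{t1} t^k e^{-u t} dt
//       = sum_{j=0}^{k} C(k,j) * G(j) * [ H(k-j, t1) - H(k-j, t0) ]
//
// with G(n) = -n! / u^{n+1} and H(n,t) = t^n e^{-u t}, which is the standard
// integration-by-parts result for polynomial-times-exponential integrands
// (appendix B.1 of arXiv:1407.0748, with the exponent fix noted in the comment).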

-std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedSplineNormalise( const UInt_t splineIndex, const std::complex<Double_t>& u, std::vector<std::array<std::complex<Double_t>,4>>& cache )
+std::complex<Double_t> LauDecayTimePdf::nonSmearedSplineNormalise( const UInt_t splineIndex, const std::complex<Double_t>& u, std::vector<std::array<std::complex<Double_t>,4>>& cache )
{
	// u = Gamma - iDeltam in general
	using namespace std::complex_literals;

	const std::vector<Double_t>& xVals = effiFun_->getXValues();
	const Double_t minAbs = xVals[splineIndex];
	const Double_t maxAbs = xVals[splineIndex+1];

	std::array<Double_t,4> coeffs = effiFun_->getCoefficients(splineIndex);

	//sum to get N (eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 with the corrected maths error)
	std::complex<Double_t> N = 0. + 0i;

	if ( nonKnotChanged_ ) {
-		for(Int_t i = 0; i < 4; ++i) {
+		for(UInt_t i = 0; i < 4; ++i) {
			cache[splineIndex][i] = calcIk(i, minAbs, maxAbs, u);
			N += cache[splineIndex][i] * coeffs[i];
		}
	} else {
-		for(Int_t i = 0; i < 4; ++i) {
+		for(UInt_t i = 0; i < 4; ++i) {
			N += cache[splineIndex][i] * coeffs[i];
		}
	}

-	return std::make_pair( N.real(), N.imag() );
+	return N;
}

Double_t LauDecayTimePdf::generateError(Bool_t forceNew)
{
	if (errHist_ && (forceNew || !abscissaErrorGenerated_)) {
		LauFitData errData = errHist_->generate(nullptr);
		abscissaError_ = errData.at(this->varErrName());
		abscissaErrorGenerated_ = kTRUE;
	} else {
		while (forceNew || !abscissaErrorGenerated_) {
			abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_);
			if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) {
				abscissaErrorGenerated_ = kTRUE;
				forceNew = kFALSE;
			}
		}
	}
	return abscissaError_;
}
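// [Editor's note] Behaviour note, not part of the patch: generateError() prefers
// the user-supplied error histogram (via setErrorHisto) and only falls back to
// sampling a Landau(errorDistMPV_, errorDistSigma_), truncated to the allowed
// error range by simple accept-reject. The abscissaErrorGenerated_ flag ensures
// that a single error value is generated per event unless forceNew is set.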

/*
LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
	// generateError SHOULD have been called before this
	// function but will call it here just to make sure
	// (has no effect if it has already been called)
	abscissaError_ = this->generateError();

	// If the PDF is scaled by the per-event error then need to update the PDF height for each event
	Bool_t scale(kFALSE);
	for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
		scale |= (*iter);
	}
	for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
		scale |= (*iter);
	}
	if (scale || (!this->heightUpToDate() && !this->cachePDF())) {
		this->calcPDFHeight(kinematics);
		this->heightUpToDate(kTRUE);
	}

	// Generate the value of the abscissa.
	Bool_t gotAbscissa(kFALSE);
	Double_t genVal(0.0);
	Double_t genPDFVal(0.0);
	LauFitData genAbscissa;

	const Double_t xMin = this->minAbscissa();
	const Double_t xMax = this->maxAbscissa();
	const Double_t xRange = xMax - xMin;

	while (!gotAbscissa) {
		genVal = LauRandom::randomFun()->Rndm()*xRange + xMin;
		this->calcLikelihoodInfo(genVal, abscissaError_);
		genPDFVal = this->getUnNormLikelihood();

		if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;}

		if (genPDFVal > this->getMaxHeight()) {
			std::cerr<<"Warning in LauDecayTimePdf::generate()."
				 <<" genPDFVal = "<<genPDFVal<<" exceeds the maximum PDF height "<<this->getMaxHeight()
				 <<" for the abscissa = "<<genVal<<"."<<std::endl;
		}
	}

	genAbscissa[this->varName()] = genVal;

	// mark that we need a new error to be generated next time
	abscissaErrorGenerated_ = kFALSE;

	return genAbscissa;
}
*/

void LauDecayTimePdf::setErrorHisto(const TH1* hist)
{
	if ( errHist_ != nullptr ) {
		std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<std::endl;
		return;
	}

	errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError());
}

void LauDecayTimePdf::setHistoPdf(const TH1* hist)
{
	if ( pdfHist_ != nullptr ) {
		std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<std::endl;
		return;
	}

	pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa());
}

void LauDecayTimePdf::setEffiHist(const TH1* hist)
{
	if ( effiHist_ != nullptr ) {
		std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl;
		return;
	}

	if ( hist == nullptr ) {
		std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl;
		return;
	}

	// Check boundaries of histogram align with our abscissa's range
	const Double_t axisMin {hist->GetXaxis()->GetXmin()};
	const Double_t axisMax {hist->GetXaxis()->GetXmax()};
	if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) {
		std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n"
			  << "                                           : histogram range: " << axisMin << " - " << axisMax << "\n"
			  << "                                           : abscissa range:  " << minAbscissa_ << " - " << maxAbscissa_ << "\n"
			  << "                                           : Disregarding this histogram." << std::endl;
		return;
	}

	effiHist_ = dynamic_cast<TH1*>( hist->Clone() );

	//Normalise the hist if the (relative) efficiencies have very large values
	if(effiHist_->GetMaximum() > 1.) {
		effiHist_->Scale( 1. / effiHist_->Integral() ); //Normalise
		std::cout << "INFO in LauDecayTimePdf::setEffiHist : Supplied histogram for Decay Time Acceptance has values too large: normalising..." << std::endl;
	}
}

void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline)
{
	if ( effiFun_ != nullptr ) {
		std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<std::endl;
		return;
	}

	effiFun_ = spline;

	std::vector<Double_t> effis = effiFun_->getYValues();
	effiPars_.resize( effis.size() );
	effiParVals_.resize( effis.size() );
	size_t index = 0;
	for( Double_t& effi : effis ) {
		effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data(), index ), effi, 0.0, 1.0, kTRUE );
		++index;
	}
}

LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName)
{
	for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
		if ((*iter)->name().Contains(parName)) {
			return (*iter);
		}
	}
	std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
	return nullptr;
}

const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const
{
	for ( std::vector<LauAbsRValue*>::const_iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
		if ((*iter)->name().Contains(parName)) {
			return (*iter);
		}
	}
	std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
	return nullptr;
}

void LauDecayTimePdf::updatePulls()
{
	for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
		std::vector<LauParameter*> params = (*iter)->getPars();
		for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) {
			if (!(*iter)->fixed()) {
				(*params_iter)->updatePull();
			}
		}
	}
}

void LauDecayTimePdf::propagateParUpdates()
{
	// If none of the parameters are floating there's nothing to do
	if ( nothingFloating_ ) {
		return;
	}

	// Otherwise, determine which of the floating parameters have changed (if any) and act accordingly
	static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;};

	anyKnotChanged_ = anyKnotFloating_ and not std::equal(effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), checkEquality);

	// Update the acceptance spline if any of the knot values have changed
	if ( anyKnotChanged_ ) {
		effiFun_->updateYValues( effiPars_ );
	}

	// Check also the physics and resolution parameters
	if ( nonKnotFloating_ ) {
		if ( physicsParFloating_ ) {
			tauChanged_ = tauFloating_ and not checkEquality(tau_, tauVal_);
			deltaMChanged_ = deltaMFloating_ and not checkEquality(deltaM_, deltaMVal_);
			deltaGammaChanged_ = deltaGammaFloating_ and not checkEquality(deltaGamma_, deltaGammaVal_);
			physicsParChanged_ = tauChanged_ || deltaMChanged_ || deltaGammaChanged_;
		}
		if ( resoParFloating_ ) {
			resoParChanged_ = kFALSE;
			resoParChanged_ |= not std::equal( mean_.begin(),  mean_.end(),  meanVals_.begin(),  checkEquality );
			resoParChanged_ |= not std::equal( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), checkEquality );
			resoParChanged_ |= not std::equal( frac_.begin(),  frac_.end(),  fracVals_.begin(),  checkEquality );
		}
		nonKnotChanged_ = physicsParChanged_ or resoParChanged_;
	}

	// If nothing has changed, there's nothing to do
	if ( not ( anyKnotChanged_ or nonKnotChanged_ ) ) {
		return;
	}

	// Otherwise we need to update the cache
	this->updateCache();
}

/*
void LauDecayTimePdf::updateEffiSpline(const std::vector<LauParameter*>& effiPars)
{
	if (effiPars.size() != effiFun_->getnKnots()){
		std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}
	effiFun_->updateYValues(effiPars);
}
*/
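// [Editor's note] Illustrative sketch, not part of the patch: the change-detection
// idiom used in propagateParUpdates() -- comparing each parameter's current value
// against a cached copy via std::equal and a binary predicate -- generalises to
// any parameter set. 'Par' below is a hypothetical stand-in for LauAbsRValue;
// only a value() accessor is assumed.

#include <algorithm>
#include <vector>

template <typename Par>
bool anyParChanged(const std::vector<Par*>& pars, const std::vector<double>& cache)
{
	// exact floating-point equality is appropriate here: the cache holds
	// bit-for-bit copies of the parameter values, not recomputed quantities
	return !std::equal(pars.begin(), pars.end(), cache.begin(),
	                   [](const Par* p, double v){ return p->value() == v; });
}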