diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh index 564eaa0..2b32219 100644 --- a/inc/LauDecayTimePdf.hh +++ b/inc/LauDecayTimePdf.hh @@ -1,599 +1,625 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauDecayTimePdf.hh \brief File containing declaration of LauDecayTimePdf class. */ /*! \class LauDecayTimePdf \brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time. LauDecayTimePdf is a class that provides the PDFs for describing the time-dependence of the various terms in a particle/antiparticle decay to a common final state. The various terms have the form of exponentially decaying trigonometric or hyperbolic functions convolved with a N-Gaussian resolution function. */ #ifndef LAU_DECAYTIME_PDF #define LAU_DECAYTIME_PDF #include #include #include "TString.h" #include "LauAbsRValue.hh" #include "LauFitDataTree.hh" #include "LauComplex.hh" class TH1; class Lau1DHistPdf; class Lau1DCubicSpline; // TODO - Should this have Pdf in the name? // - Audit function names and public/private access category -// - Audit what should be given to constructor and what can be set later +// - Audit what should be given to constructor and what can be set later (maybe different constructors for different scenarios, e.g. smeared with per-event error/smeared with avg error/not smeared) class LauDecayTimePdf final { public: // TODO - can we think of better names? //! The functional form of the decay time PDF enum FuncType { Hist, //< Hist PDF for fixed background Delta, //< Delta function - for prompt background Exp, //< Exponential function - for non-prompt background or charged B's DeltaExp, //< Delta + Exponential function - for background with prompt and non-prompt parts ExpTrig, //< Exponential function with Delta m driven mixing - for neutral B_d's ExpHypTrig //< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's }; - // TODO - can go? - //! State of complex error function calculation - enum State { - Good, //< All OK - Overflow1, //< Overflow in term 1 - Overflow2 //< Overflow in term 2 - }; - - //! How is the decay time measured - absolute or difference + //! How is the decay time measured - absolute or difference? enum TimeMeasurementMethod { DecayTime, //< Absolute measurement of decay time, e.g. LHCb scenario DecayTimeDiff //< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario }; //! How is the TD efficiency information going to be given? enum EfficiencyMethod { Spline, //< As a cubic spline Binned, //< As a histogram (TH1D/TH1F) Flat //< As a flat distribution (constant) }; //! Constructor /*! 
\param [in] theVarName the name of the decay time variable in the input data \param [in] theVarErrName the name of the decay time error variable in the input data \param [in] params the parameters of the PDF \param [in] minAbscissaVal the minimum value of the abscissa \param [in] maxAbscissaVal the maximum value of the abscissa \param [in] minAbscissaErr the minimum value of the abscissa error \param [in] maxAbscissaErr the maximum value of the abscissa error \param [in] type the functional form of the PDF \param [in] nGauss the number of Gaussians in the resolution function \param [in] scale controls whether the Gaussian parameters are scaled by the per-event error \param [in] method set the type of the time measurement used in the given experiment */ LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, - Double_t minAbscissaVal, Double_t maxAbscissaVal, - Double_t minAbscissaErr, Double_t maxAbscissaErr, + const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + const Double_t minAbscissaErr, const Double_t maxAbscissaErr, const FuncType type, const UInt_t nGauss, const std::vector& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline); //! Constructor /*! \param [in] theVarName the name of the decay time variable in the input data \param [in] theVarErrName the name of the decay time error variable in the input data \param [in] params the parameters of the PDF \param [in] minAbscissaVal the minimum value of the abscissa \param [in] maxAbscissaVal the maximum value of the abscissa \param [in] minAbscissaErr the minimum value of the abscissa error \param [in] maxAbscissaErr the maximum value of the abscissa error \param [in] type the functional form of the PDF \param [in] nGauss the number of Gaussians in the resolution function \param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error \param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error \param [in] method set the type of the time measurement used in the given experiment */ LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const Double_t minAbscissaErr, const Double_t maxAbscissaErr, const FuncType type, const UInt_t nGauss, const std::vector& scaleMeans, const std::vector& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline); //! Copy constructor (deleted) LauDecayTimePdf(const LauDecayTimePdf& other) = delete; //! Copy assignment operator (deleted) LauDecayTimePdf& operator=(const LauDecayTimePdf& other) = delete; //! Move constructor (deleted) LauDecayTimePdf(LauDecayTimePdf&& other) = delete; //! Move assignment operator (deleted) LauDecayTimePdf& operator=(LauDecayTimePdf&& other) = delete; //! Destructor ~LauDecayTimePdf(); + // TODO - Do we need this? + // - If so, should it be a hist or a LauAbsPdf? + // - Or should there be a dedicated constructor for this scenario? + //! Set the Histogram PDF in case of fixed background PDF + void setHistoPdf(const TH1* hist); + // TODO - should this be a LauAbsPdf instead? //! Set the histogram to be used for generation of per-event decay time errors /*! If not set will fall back to using Landau distribution \param [in] hist the histogram of the distribution */ void setErrorHisto(const TH1* hist); - // TODO - do we need this? 
If so, should it be a hist or a LauAbsPdf? - //! Set the Histogram PDF in case of fixed background PDF - void setHistoPdf(const TH1* hist); - - //! Set efficiency PDF in the form of Spline - /*! - \param [in] spline the efficiency spline function - */ - void setEffiSpline(Lau1DCubicSpline* spline); - // TODO - do we still want this option? //! Set the parameters of the Landau distribution used to generate the per-event decay time errors /*! \param [in] mpv the MPV (most probable value) of the distribution \param [in] sigma the width of the distribution */ void setErrorDistTerms(const Double_t mpv, const Double_t sigma) { errorDistMPV_ = mpv; errorDistSigma_ = sigma; } - //! Set the efficiency PDF in the form of a Histogram + // TODO - should we remove the EfficiencyMethod argument from the constructor, default to Flat and have these functions modify it? + //! Set the efficiency function in the form of a histogram /*! \param [in] hist the histogram of efficiencies */ void setEffiHist(const TH1* hist); + //! Set the efficiency function in the form of spline + /*! + \param [in] spline the efficiency spline function + */ + void setEffiSpline(Lau1DCubicSpline* spline); + //! Retrieve the name of the error variable const TString& varName() const {return varName_;} //! Retrieve the name of the error variable const TString& varErrName() const {return varErrName_;} + // TODO - this should probably be set at construction time //! Turn on or off the resolution function void doSmearing(Bool_t smear) {smear_ = smear;} //! Determine if the resolution function is turned on or off Bool_t doSmearing() const {return smear_;} - //! Calculate single effective decay time resolution from multiple - // Gaussian resolution functions + // TODO - we don't use this at the moment + //! Calculate single effective decay time resolution from multiple Gaussian resolution functions /*! \return effective resolution */ Double_t effectiveResolution() const; //! Cache information from data /*! - Will cache, for every event, the abscissa values and, if all parameters are fixed, the PDF value. - \param [in] inputData the data to be used to calculate everything + \param [in] inputData the dataset to be used to calculate everything */ void cacheInfo(const LauFitDataTree& inputData); //! Calculate the likelihood (and all associated information) given value of the abscissa /*! \param [in] abscissa the value of the abscissa */ - void calcLikelihoodInfo(Double_t abscissa); + void calcLikelihoodInfo(const Double_t abscissa); //! Calculate the likelihood (and all associated information) given value of the abscissa and its error /*! \param [in] abscissa the value of the abscissa \param [in] abscissaErr the error on the abscissa */ - void calcLikelihoodInfo(Double_t abscissa, Double_t abscissaErr); + void calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr); //! Retrieve the likelihood (and all associated information) given the event number /*! \param [in] iEvt the event number */ - void calcLikelihoodInfo(UInt_t iEvt); - - // TODO - these three can go now I think - //! Evaluate the complex error fonction - //LauComplex ComplexErf(Double_t x, Double_t y); - - //! Compute the imaginary error function: Erfi(z) = -I*Erf(iz) - //LauComplex Erfi(Double_t x, Double_t y); - - //! Compute the complementary complex error function - //LauComplex ComplexErfc(Double_t x, Double_t y); + void calcLikelihoodInfo(const UInt_t iEvt); //! 
Get FuncType from model
	FuncType getFuncType() const {return type_;}

	// TODO - should maybe do away with exp term (and its norm) since it's just the cosh term when DG=0 and it's confusing to have both
+	//      - counter argument is to keep it for backgrounds that have a lifetime-like behaviour
	//! Get the exponential term
	Double_t getExpTerm() const {return expTerm_;}

	//! Get the cos(Dm*t) term (multiplied by the exponential)
	Double_t getCosTerm() const {return cosTerm_;}

	//! Get the sin(Dm*t) term (multiplied by the exponential)
	Double_t getSinTerm() const {return sinTerm_;}

	//! Get the cosh(DG/2*t) term (multiplied by the exponential)
	Double_t getCoshTerm() const {return coshTerm_;}

	//! Get the sinh(DG/2*t) term (multiplied by the exponential)
	Double_t getSinhTerm() const {return sinhTerm_;}

	//! Get the normalisation related to the exponential term only
	Double_t getNormTermExp() const {return normTermExp_;}

	//! Get the normalisation related to the cos term only
	Double_t getNormTermCos() const {return normTermCos_;}

	//! Get the normalisation related to the sin term only
	Double_t getNormTermSin() const {return normTermSin_;}

	//! Get the first term in the normalisation (from integrating the cosh)
	Double_t getNormTermCosh() const {return normTermCosh_;}

	//! Get the second term in the normalisation (from integrating the sinh)
	Double_t getNormTermSinh() const {return normTermSinh_;}

	//! Get the error probability density from the error distribution
	Double_t getErrTerm() const {return errTerm_;}

	//! Get the efficiency probability density from the efficiency distribution
	Double_t getEffiTerm() const {return effiTerm_;}

	//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
	/*!
		\return the parameters of the PDF
	*/
	const std::vector<LauAbsRValue*>& getParameters() const { return param_; }

	//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
	/*!
		\return the parameters of the PDF
	*/
	std::vector<LauAbsRValue*>& getParameters() { return param_; }

	//! Update the pulls for all parameters
	void updatePulls();

-	// TODO - think these can go
-	// Calculate the normalisation for the non smeared Hyperbolic terms
-	//Double_t normExpHypTerm(Double_t Abs);
-	//Double_t normExpHypTermDep(Double_t Abs);
+	//! Calculate the normalisation of all terms
+	/*!
+		\param [in] abscissaErr the per-event decay-time error (if used)
+	*/
+	void calcNorm(const Double_t abscissaErr = 0.0);

-	//! Calculate normalisation for non-smeared cos and sin terms using the
-	// complex number method
+	//! Calculate the normalisation integrals in the given range
+	/*!
+		This form is to be used for the case where decay time resolution is neglected
+
+		\param [in] minAbs lower bound for the integral domain
+		\param [in] maxAbs upper bound for the integral domain
+		\param [in] weight the weight for this range, typically the efficiency value
+	*/
+	void calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight);
+
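	// For reference, a sketch of the closed forms that the non-smeared partial integrals
	// rest on, assuming Gamma = 1/tau and a constant weight over the range [t0,t1]
	// (compare nonSmearedExpIntegral and nonSmearedCosSinIntegral in LauDecayTimePdf.cc):
	//
	//   int_{t0}^{t1} exp(-Gamma*t) dt                 = ( exp(-Gamma*t0) - exp(-Gamma*t1) ) / Gamma
	//   int_{t0}^{t1} exp(-Gamma*t) exp(i*DeltaM*t) dt = ( exp((i*DeltaM - Gamma)*t0) - exp((i*DeltaM - Gamma)*t1) ) / ( Gamma - i*DeltaM )
	//
	// whose real and imaginary parts give the cos and sin normalisation integrals; the
	// cosh and sinh integrals follow as half the sum and difference of two such
	// exponential integrals with rates Gamma -/+ DeltaGamma/2, each range then being
	// scaled by its weight.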
+	//! Calculate the normalisation integrals in the given range
+	/*!
+		This form is to be used for the case where decay time resolution is accounted for
+
+		\param [in] minAbs lower bound for the integral domain
+		\param [in] maxAbs upper bound for the integral domain
+		\param [in] weight the weight for this range, typically the efficiency value
+		\param [in] means the mean values of each Gaussian in the resolution function
+		\param [in] sigmas the width values of each Gaussian in the resolution function
+		\param [in] fractions the fractional weight of each Gaussian in the resolution function
+	*/
+	void calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions);
+
+	//! Calculate normalisation for non-smeared cos and sin terms
	/*!
		\param [in] minAbs lower bound for the integral domain
		\param [in] maxAbs upper bound for the integral domain
		\return pair of {cosTermIntegral, sinTermIntegral}
	*/
-	std::pair<Double_t,Double_t> nonSmearedCosSinIntegral(Double_t minAbs, Double_t maxAbs);
+	std::pair<Double_t,Double_t> nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs);

-	//! Calculate normalisation for decay-time resolution smeared cos and
-	// sin terms using the using the Faddeeva function
-	// (https://arxiv.org/abs/1407.0748)
+	//! Calculate normalisation for decay-time resolution smeared cos and sin terms
	/*!
+		Uses the Faddeeva function method from
+		(https://arxiv.org/abs/1407.0748)
+
		\param [in] minAbs lower bound for the integral domain
		\param [in] maxAbs upper bound for the integral domain
+		\param [in] sigma width of the Gaussian resolution function
+		\param [in] mu mean of the Gaussian resolution function
		\return pair of {cosTermIntegral, sinTermIntegral}
	*/
-	std::pair<Double_t,Double_t> smearedCosSinIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu = 0.);
+	std::pair<Double_t,Double_t> smearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);

	//! Calculate normalisation for non-smeared cosh and sinh terms
	/*!
		\param [in] minAbs lower bound for the integral domain
		\param [in] maxAbs upper bound for the integral domain
		\return pair of {coshTermIntegral, sinhTermIntegral}
	*/
-	std::pair<Double_t,Double_t> nonSmearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs);
+	std::pair<Double_t,Double_t> nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs);

-
-	//! Calculate normalisation for decay-time resolution smeared cosh and
-	// sinh terms using the using the Faddeeva function
-	// (https://arxiv.org/abs/1407.0748)
+	//! Calculate normalisation for decay-time resolution smeared cosh and sinh terms
	/*!
+		Uses the Faddeeva function method from
+		(https://arxiv.org/abs/1407.0748)
+
		\param [in] minAbs lower bound for the integral domain
		\param [in] maxAbs upper bound for the integral domain
+		\param [in] sigma width of the Gaussian resolution function
+		\param [in] mu mean of the Gaussian resolution function
		\return pair of {coshTermIntegral, sinhTermIntegral}
	*/
-	std::pair<Double_t,Double_t> smearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu = 0.);
+	std::pair<Double_t,Double_t> smearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);
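	// For reference, a sketch of the Faddeeva-function relations (arXiv:1407.0748) that
	// the smeared terms and integrals use, assuming Gamma = 1/tau, a Gaussian resolution
	// function of mean mu and width sigma, x = (t - mu)/(sqrt(2)*sigma) and
	// z = (Gamma - i*DeltaM)*sigma/sqrt(2) (compare smearedCosSinTerm and
	// smearedCosSinIntegral in LauDecayTimePdf.cc):
	//
	//   (cos term) + i*(sin term)         = 0.5 * exp(-x^2) * w( i*(z - x) )
	//   (cos integral) + i*(sin integral) = sigma/(2*sqrt(2)*z) * [ erf(x) - exp(-x^2) * w( i*(z - x) ) ]_{x0}^{x1}
	//
	// where w is the Faddeeva function; the cosh/sinh versions use the same expressions
	// with DeltaM = 0 and Gamma replaced by Gamma -/+ DeltaGamma/2, combined as half the
	// sum and difference, and the plain exponential is the DeltaM = 0 case.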
-	//! Calculate normalisation for decay-time resolution non-smeared Exp
+	//! Calculate normalisation for non-smeared exponential term
	/*!
		\param [in] minAbs lower bound for the integral domain
		\param [in] maxAbs upper bound for the integral domain
		\return integral
	*/
-	Double_t nonSmearedExpIntegral(Double_t minAbs, Double_t maxAbs);
+	Double_t nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs);

-	//! Calculate normalisation for decay-time resolution smeared Exp
-	// using the using the Faddeeva function
-	// (https://arxiv.org/abs/1407.0748)
+	//! Calculate normalisation for decay-time resolution smeared exponential term
	/*!
+		Uses the Faddeeva function method from
+		(https://arxiv.org/abs/1407.0748)
+
		\param [in] minAbs lower bound for the integral domain
		\param [in] maxAbs upper bound for the integral domain
+		\param [in] sigma width of the Gaussian resolution function
+		\param [in] mu mean of the Gaussian resolution function
		\return integral
	*/
-	Double_t smearedExpIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu = 0.);
+	Double_t smearedExpIntegral(const Double_t minAbs, const Double_t maxAbs, const Double_t sigma, const Double_t mu = 0.0);
+
+	//! Calculate decay-time resolution smeared cos and sin terms
+	/*!
+		Uses the Faddeeva function method from
+		(https://arxiv.org/abs/1407.0748)
+
+		\param [in] t decay time
+		\param [in] sigma width of the Gaussian resolution function
+		\param [in] mu mean of the Gaussian resolution function
+		\return pair of {cosTerm, sinTerm}
+	*/
+	std::pair<Double_t,Double_t> smearedCosSinTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);

-	// Store the normalisation
-	void calcNorm();
-	void calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight = 1.0, const std::vector<Double_t>& means = {}, const std::vector<Double_t>& sigmas = {}, const std::vector<Double_t>& fractions = {});
-	void calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight = 1.0);

+	//! Calculate decay-time resolution smeared cosh and sinh terms
+	/*!
+		Uses the Faddeeva function method from
+		(https://arxiv.org/abs/1407.0748)

-	std::pair<Double_t,Double_t> smearedCosSinTerm(Double_t t, Double_t sigma, Double_t mu = 0.);

+		\param [in] t decay time
+		\param [in] sigma width of the Gaussian resolution function
+		\param [in] mu mean of the Gaussian resolution function
+		\return pair of {coshTerm, sinhTerm}
+	*/
+	std::pair<Double_t,Double_t> smearedCoshSinhTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);

-	std::pair<Double_t,Double_t> smearedCoshSinhTerm(Double_t t, Double_t sigma, Double_t mu = 0.);

+	//! Calculate decay-time resolution smeared exponential term
+	/*!
+		Uses the Faddeeva function method from
+		(https://arxiv.org/abs/1407.0748)

-	Double_t smearedExpTerm(Double_t t, Double_t sigma, Double_t mu = 0.);

+		\param [in] t decay time
+		\param [in] sigma width of the Gaussian resolution function
+		\param [in] mu mean of the Gaussian resolution function
+		\return exponential term convolved with resolution function
+	*/
+	Double_t smearedExpTerm(const Double_t t, const Double_t sigma, const Double_t mu = 0.0);

	//! Generate the value of the error
	/*!
		If scaling by the error, this should be called before calling generate
		\param [in] forceNew forces generation of a new value
	*/
	Double_t generateError(const Bool_t forceNew = kFALSE);

-	//! Generate an event from the PDF - TODO not clear that this is really needed, perhaps for background? commented out for now
+	//TODO not clear that this is really needed, perhaps for background? commented out for now
+	//! Generate an event from the PDF
	/*!
\param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence */ //LauFitData generate(const LauKinematics* kinematics); - //! Determine the state of the calculation - // TODO - this can surely go now? - State state() const {return state_;} - - //! Retrieve the decay time error - Double_t abscissaError() const {return abscissaError_;} - //! Retrieve the decay time minimum value Double_t minAbscissa() const {return minAbscissa_;} //! Retrieve the decay time maximum value Double_t maxAbscissa() const {return maxAbscissa_;} //! Retrieve the decay time error minimum value Double_t minAbscissaError() const {return minAbscissaError_;} //! Retrieve the decay time error maximum value Double_t maxAbscissaError() const {return maxAbscissaError_;} - // TODO - can we delete these? - //void checkPositiveness() {}; // Nothing to check here. - // NB calcNorm and calcPDFHeight only calculate the gaussian information for the (type_ == Delta) case - //! Calculate the normalisation factor of the PDF - //void calcNorm(); + // TODO - can we delete this? + // NB calcPDFHeight only calculates the gaussian information for the (type_ == Delta) case //! Calculate the maximum height of the PDF //void calcPDFHeight( const LauKinematics* kinematics ); //! Get efficiency parameters to float in the fit std::vector& getEffiPars() {return effiPars_;} //! Update spline Y values when floating the decay time acceptance /*! \param [in] params the vector of LauParameters describing the Y values */ void updateEffiSpline(std::vector params); protected: //! Set up the initial state correctly - called by the constructors void initialise(); //! Calculate the pure physics terms with no resolution function applied void calcNonSmearedTerms(const Double_t abscissa); - inline void state(State s) {state_ = s;} - - //! Calculate exponential auxiliary term for the convolution - //void calcTrigExponent(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reTerm, Double_t& imTerm); - - //! Calculate convolution between exponential*sin or cos with a Gaussian - //void calcTrigConv(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reOutTerm, Double_t& imOutTerm, Bool_t trig); - //! Retrieve the number of PDF parameters /*! \return the number of PDF parameters */ UInt_t nParameters() const {return param_.size();} //! Retrieve the specified parameter /*! \param [in] parName the parameter to retrieve */ LauAbsRValue* findParameter(const TString& parName); //! Retrieve the specified parameter /*! \param [in] parName the parameter to retrieve */ const LauAbsRValue* findParameter(const TString& parName) const; private: //! Name of the variable - TString varName_; + const TString varName_; //! Name of the error variable - TString varErrName_; + const TString varErrName_; //! The parameters of the PDF std::vector param_; + // TODO - should probably set this at construction time (can then be const) //! Smear with the resolution model or not Bool_t smear_; //! The minimum value of the decay time - Double_t minAbscissa_; + const Double_t minAbscissa_; //! The maximum value of the decay time - Double_t maxAbscissa_; + const Double_t maxAbscissa_; //! The minimum value of the decay time error - Double_t minAbscissaError_; + const Double_t minAbscissaError_; //! The maximum value of the decay time error - Double_t maxAbscissaError_; + const Double_t maxAbscissaError_; //! The current value of the decay time error Double_t abscissaError_; //! 
Flag whether a value for the decay time error has been generated Bool_t abscissaErrorGenerated_; //! Value of the MPV of the Landau dist used to generate the Delta t error Double_t errorDistMPV_; //! Value of the width of the Landau dist used to generate the Delta t error Double_t errorDistSigma_; - //! The number of gaussians in the resolution mode; + //! The number of gaussians in the resolution model const UInt_t nGauss_; // Parameters of the gaussian(s) that accounts for the resolution: //! mean (offset) of each Gaussian in the resolution function std::vector mean_; //! spread (sigma) of each Gaussian in the resolution function std::vector sigma_; //! fraction of each Gaussian in the resolution function std::vector frac_; - // Parameters of the exponential: the mean life (tau) and the frequency of oscillation. + // Parameters of the physics decay time distribution //! Lifetime parameter LauAbsRValue* tau_; //! Mass difference parameter LauAbsRValue* deltaM_; //! Width difference parameter LauAbsRValue* deltaGamma_; //! Parameter for the fraction of prompt events in DeltaExp LauAbsRValue* fracPrompt_; - // Which type of Delta t PDF is this? + //! Which type of decay time function is this? const FuncType type_; - // Which type of Delta t PDF is this? + //! Are we using absolute decay time or decay time difference? const TimeMeasurementMethod method_; - // Which method for eff/dt input are we using? + //! Which method for eff(decaytime) input are we using? const EfficiencyMethod effMethod_; - // Scale the mean and sigma by the per-event error on Delta t? + //! Scale the mean of each Gaussian by the per-event decay time error? const std::vector scaleMeans_; + //! Scale the sigma of each Gaussian by the per-event decay time error? const std::vector scaleWidths_; + //! Is anything being scaled by the per-event decay time error? + const Bool_t scaleWithPerEventError_; - //! The exponential term + //! The exp(-G*t) term Double_t expTerm_; //! The cos(Dm*t) term (multiplied by the exponential) Double_t cosTerm_; //! The sin(Dm*t) term (multiplied by the exponential) Double_t sinTerm_; //! The cosh(DG/2*t) term (multiplied by the exponential) Double_t coshTerm_; //! The sinh(DG/2*t) term (multiplied by the exponential) Double_t sinhTerm_; - // Normalisation that is used in the amplitude independent of cosh/sinh term + //! Normalisation of the exponential term Double_t normTermExp_; - // Normalisation that is used in the amplitude for cos term + //! Normalisation of the cos term Double_t normTermCos_; - // Normalisation that is used in the amplitude for sin term + //! Normalisation of the sin term Double_t normTermSin_; - //! The first term in the normalisation (from integrating the cosh) + //! Normalisation of the cosh term Double_t normTermCosh_; - //! The second term in the normalisation (from integrating the sinh) + //! Normalisation of the sinh term Double_t normTermSinh_; - //! Error + //! Error PDF (NB there is no equivalent cache since the PDF errHist_ keeps a cache) Double_t errTerm_; //! Efficiency Double_t effiTerm_; - //! Hist PDF term - TODO : to be deleted? or needed for backgrounds? if so we need to cache it? + //TODO : to be deleted? or needed for backgrounds? + //! Hist PDF term (NB there is no equivalent cache since the PDF pdfHist_ keeps a cache) Double_t pdfTerm_; - - //! The cache of the per-event errors on the decay time + //! The cache of the decay times std::vector abscissas_; //! The cache of the per-event errors on the decay time std::vector abscissaErrors_; //! 
The cache of the exponential terms std::vector expTerms_; //! The cache of the exponential * cosh(DG/2*t) terms std::vector coshTerms_; //! The cache of the exponential * sinh(DG/2*t) terms std::vector sinhTerms_; //! The cache of the exponential * cos(Dm*t) terms std::vector cosTerms_; //! The cache of the exponential * sin(Dm*t) terms std::vector sinTerms_; //! The cache of the exponential normalisation std::vector normTermsExp_; //! The cache of the cosh term normalisation std::vector normTermsCosh_; //! The cache of the sinh term normalisation std::vector normTermsSinh_; //! The cache of the cos term normalisation std::vector normTermsCos_; //! The cache of the sin term normalisation std::vector normTermsSin_; //! The cache of the efficiency std::vector effiTerms_; - //! The state of the complex error function calculation - State state_; - //! Histogram PDF for abscissa error distribution Lau1DHistPdf* errHist_; //! Histogram PDF for abscissa distribution Lau1DHistPdf* pdfHist_; //! efficiency PDF in spline Lau1DCubicSpline* effiFun_; //! efficiency PDF as Histogram TH1* effiHist_; //! Vector of parameters to float acceptance std::vector effiPars_; ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF }; #endif diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc index 40a3d13..c459ac4 100644 --- a/src/LauDecayTimePdf.cc +++ b/src/LauDecayTimePdf.cc @@ -1,1626 +1,1249 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauDecayTimePdf.cc \brief File containing implementation of LauDecayTimePdf class. 
*/ +#include +#include #include +#include #include -//using std::cerr; -//using std::endl; - -#include -//using std::complex; #include "TMath.h" #include "TRandom.h" #include "TSystem.h" #include "TH1.h" #include "RooMath.h" #include "Lau1DCubicSpline.hh" #include "Lau1DHistPdf.hh" #include "LauConstants.hh" #include "LauComplex.hh" #include "LauDecayTimePdf.hh" #include "LauFitDataTree.hh" #include "LauParameter.hh" #include "LauRandom.hh" ClassImp(LauDecayTimePdf) LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, Double_t minAbscissaVal, Double_t maxAbscissaVal, Double_t minAbscissaErr, Double_t maxAbscissaErr, FuncType type, UInt_t nGauss, const std::vector& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : varName_(theVarName), varErrName_(theVarErrName), param_(params), smear_(kTRUE), minAbscissa_(minAbscissaVal), maxAbscissa_(maxAbscissaVal), minAbscissaError_(minAbscissaErr), maxAbscissaError_(maxAbscissaErr), abscissaError_(0.0), abscissaErrorGenerated_(kFALSE), errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 nGauss_(nGauss), mean_(nGauss_,0), sigma_(nGauss_,0), frac_(nGauss_-1,0), tau_(0), deltaM_(0), deltaGamma_(0), fracPrompt_(0), type_(type), method_(method), effMethod_(effMethod), scaleMeans_(scale), scaleWidths_(scale), + scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or() ) ), expTerm_(0.0), cosTerm_(0.0), sinTerm_(0.0), coshTerm_(0.0), sinhTerm_(0.0), normTermExp_(0.0), normTermCosh_(0.0), normTermSinh_(0.0), errTerm_(0.0), effiTerm_(0.0), pdfTerm_(0.0), - state_(Good), errHist_(nullptr), pdfHist_(nullptr), effiFun_(nullptr), effiHist_(nullptr), effiPars_(0) { this->initialise(); } LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, Double_t minAbscissaVal, Double_t maxAbscissaVal, Double_t minAbscissaErr, Double_t maxAbscissaErr, FuncType type, UInt_t nGauss, const std::vector& scaleMeans, const std::vector& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : varName_(theVarName), varErrName_(theVarErrName), param_(params), smear_(kTRUE), minAbscissa_(minAbscissaVal), maxAbscissa_(maxAbscissaVal), minAbscissaError_(minAbscissaErr), maxAbscissaError_(maxAbscissaErr), abscissaError_(0.0), abscissaErrorGenerated_(kFALSE), errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 nGauss_(nGauss), mean_(nGauss_,0), sigma_(nGauss_,0), frac_(nGauss_-1,0), tau_(0), deltaM_(0), deltaGamma_(0), fracPrompt_(0), type_(type), method_(method), effMethod_(effMethod), scaleMeans_(scaleMeans), scaleWidths_(scaleWidths), + scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or() ) ), expTerm_(0.0), cosTerm_(0.0), sinTerm_(0.0), coshTerm_(0.0), sinhTerm_(0.0), normTermExp_(0.0), normTermCosh_(0.0), normTermSinh_(0.0), errTerm_(0.0), effiTerm_(0.0), pdfTerm_(0.0), - state_(Good), errHist_(nullptr), pdfHist_(nullptr), effiFun_(nullptr), effiHist_(nullptr), effiPars_(0) { this->initialise(); } LauDecayTimePdf::~LauDecayTimePdf() { // Destructor delete errHist_; errHist_ = nullptr; delete pdfHist_; pdfHist_ = nullptr; delete effiFun_; effiFun_ = nullptr; delete effiHist_; effiHist_ = nullptr; 
for( auto& par : effiPars_ ){ delete par; par = nullptr; } effiPars_.clear(); } void LauDecayTimePdf::initialise() { // The parameters are: // - the mean and the sigma (bias and spread in resolution) of the gaussian(s) // - the mean lifetime, denoted tau, of the exponential decay // - the frequency of oscillation, denoted Delta m, of the cosine and sine terms // - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms // // The next two arguments specify the range in which the PDF is defined, // and the PDF will be normalised w.r.t. these limits. // // The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig ), the number of gaussians // and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t // First check whether the scale vector is nGauss in size if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<Exit(EXIT_FAILURE); } - // TODO - need to sort out the categories - if (type_ == Hist) { if (this->nParameters() != 0){ std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<Exit(EXIT_FAILURE); } } else { TString meanName("mean_"); TString sigmaName("sigma_"); TString fracName("frac_"); Bool_t foundParams(kTRUE); for (UInt_t i(0); ifindParameter(tempName); foundParams &= (mean_[i] != 0); sigma_[i] = this->findParameter(tempName2); foundParams &= (sigma_[i] != 0); if (i!=0) { frac_[i-1] = this->findParameter(tempName3); foundParams &= (frac_[i-1] != 0); } } if (type_ == Delta) { if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == Exp) { tau_ = this->findParameter("tau"); foundParams &= (tau_ != 0); if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == DeltaExp) { tau_ = this->findParameter("tau"); fracPrompt_ = this->findParameter("frac_prompt"); foundParams &= (tau_ != 0); foundParams &= (fracPrompt_ != 0); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == ExpTrig) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == ExpHypTrig) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); deltaGamma_ = this->findParameter("deltaGamma"); foundParams &= (tau_ != 0); foundParams &= (deltaM_ != 0); foundParams &= (deltaGamma_ != 0); if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<Exit(EXIT_FAILURE); } } } } Double_t LauDecayTimePdf::effectiveResolution() const { Double_t dilution = 0.; Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue(); // Might be cleaner to just append this to the vector in the init step, // the the consistency can also be checked Double_t fracSum = 0; for (auto f : frac_) fracSum += f->unblindValue(); Double_t lastFrac = 1. 
- fracSum; for (size_t i = 0; i < sigma_.size(); i++) { Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue(); Double_t thisFrac = lastFrac; if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue(); dilution += thisFrac * TMath::Exp(-dMSq * sigSq / 2.); } return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue(); } void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData) { + // Check that the input data contains the decay time variable Bool_t hasBranch = inputData.haveBranch(this->varName()); if (!hasBranch) { std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<varName()<<"\"."<varErrName()); - if (!hasBranch) { - std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<varErrName()<<"\"."<varErrName()); + if (!hasBranch) { + std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<varErrName()<<"\"."<cacheInfo(inputData); } if (type_ == Hist) { // Pass the data to the decay-time PDF for caching if ( pdfHist_ ) { pdfHist_->cacheInfo(inputData); } } else { // determine whether we are caching our PDF value //TODO //Bool_t doCaching( this->nFixedParameters() == this->nParameters() ); //this->cachePDF( doCaching ); // clear the vectors and reserve enough space const UInt_t nEvents = inputData.nEvents(); abscissas_.clear(); abscissas_.reserve(nEvents); abscissaErrors_.clear(); abscissaErrors_.reserve(nEvents); expTerms_.clear(); expTerms_.reserve(nEvents); cosTerms_.clear(); cosTerms_.reserve(nEvents); sinTerms_.clear(); sinTerms_.reserve(nEvents); coshTerms_.clear(); coshTerms_.reserve(nEvents); sinhTerms_.clear(); sinhTerms_.reserve(nEvents); normTermsExp_.clear(); normTermsExp_.reserve(nEvents); normTermsCos_.clear(); normTermsCos_.reserve(nEvents); normTermsSin_.clear(); normTermsSin_.reserve(nEvents); normTermsCosh_.clear(); normTermsCosh_.reserve(nEvents); normTermsSinh_.clear(); normTermsSinh_.reserve(nEvents); effiTerms_.clear(); effiTerms_.reserve(nEvents); + // If we're not using per-event information for the decay time + // error, just calculate the normalisation terms once + if ( ! scaleWithPerEventError_ ) { + this->calcNorm(); + } + for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputData.getData(iEvt); - LauFitData::const_iterator iter = dataValues.find(this->varName()); - const Double_t abscissa = iter->second; + const Double_t abscissa { dataValues.at(this->varName()) }; if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<minAbscissa()<<","<maxAbscissa()<<"]."<Exit(EXIT_FAILURE); } abscissas_.push_back( abscissa ); - iter = dataValues.find(this->varErrName()); - Double_t abscissaErr = iter->second; + const Double_t abscissaErr { scaleWithPerEventError_ ? 
dataValues.at(this->varErrName()) : 0.0 }; - if (abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError()) { + if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) { std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<minAbscissaError()<<","<maxAbscissaError()<<"]."<Exit(EXIT_FAILURE); } abscissaErrors_.push_back(abscissaErr); this->calcLikelihoodInfo(abscissa, abscissaErr); + // If we are using per-event information for the decay + // time error, need to calculate the normalisation + // terms for every event + if ( scaleWithPerEventError_ ) { + this->calcNorm(abscissaErr); + } + expTerms_.push_back(expTerm_); cosTerms_.push_back(cosTerm_); sinTerms_.push_back(sinTerm_); coshTerms_.push_back(coshTerm_); sinhTerms_.push_back(sinhTerm_); normTermsExp_.push_back(normTermExp_); normTermsCos_.push_back(normTermCos_); normTermsSin_.push_back(normTermSin_); normTermsCosh_.push_back(normTermCosh_); normTermsSinh_.push_back(normTermSinh_); effiTerms_.push_back(effiTerm_); } } } -void LauDecayTimePdf::calcLikelihoodInfo(UInt_t iEvt) +void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt) { + // Extract all the terms and their normalisations if (type_ == Hist) { if ( pdfHist_ ) { pdfHist_->calcLikelihoodInfo(iEvt); pdfTerm_ = pdfHist_->getLikelihood(); } else { pdfTerm_ = 1.0; } } else { expTerm_ = expTerms_[iEvt]; cosTerm_ = cosTerms_[iEvt]; sinTerm_ = sinTerms_[iEvt]; coshTerm_ = coshTerms_[iEvt]; sinhTerm_ = sinhTerms_[iEvt]; normTermExp_ = normTermsExp_[iEvt]; normTermCos_ = normTermsCos_[iEvt]; normTermSin_ = normTermsSin_[iEvt]; normTermCosh_ = normTermsCosh_[iEvt]; normTermSinh_ = normTermsSinh_[iEvt]; } + // Extract the decay time error PDF value if ( errHist_ ) { errHist_->calcLikelihoodInfo(iEvt); errTerm_ = errHist_->getLikelihood(); } else { errTerm_ = 1.0; } - //TODO Parameters will change in some cases update things! Need to make this intelligent! + // Extract the decay time efficiency + effiTerm_ = effiTerms_[iEvt]; + + // TODO - Parameters can change in some cases, so we'll need to update things! + // - For the moment do the blunt force thing and recalculate everything for every event! + // - Need to make this intelligent! 
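	// One way to make this more intelligent (a sketch only, assuming LauAbsRValue
	// provides a fixed() method, as LauParameter does): determine once, e.g. in
	// cacheInfo(), whether any of the parameters are floating and skip the per-event
	// recalculation when none are, since the terms cached there remain valid:
	//
	//   const Bool_t anyFloating { std::any_of( param_.begin(), param_.end(),
	//           []( const LauAbsRValue* par ){ return ! par->fixed(); } ) };
	//   if ( ! anyFloating ) {
	//           return;
	//   }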
const Double_t abscissa = abscissas_[iEvt]; const Double_t abscissaErr = abscissaErrors_[iEvt]; this->calcLikelihoodInfo(abscissa,abscissaErr); this->calcNorm(); - - switch( effMethod_ ) /* < If you're going to add an effMethod, extend this switch*/ - { - case EfficiencyMethod::Spline : - if ( effiFun_ ) { - this->updateEffiSpline(effiPars_); - effiTerm_ = effiFun_->evaluate(abscissa); //EDITED XXX - if (effiTerm_>1.0){effiTerm_=1.0;} - if (effiTerm_<0.0){effiTerm_=0.0;} - } else { - effiTerm_ = 1.0; - } - break; - - default : - effiTerm_ = effiTerms_[iEvt]; - break; - } } -void LauDecayTimePdf::calcLikelihoodInfo(Double_t abscissa) +void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa) { // Check whether any of the gaussians should be scaled - if any of them should we need the per-event error - Bool_t scale(kFALSE); - for (std::vector::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) { - scale |= (*iter); - } - for (std::vector::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) { - scale |= (*iter); - } - if (scale) { - std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on Delta t not provided, cannot calculate anything."<calcLikelihoodInfo(abscissa, 0.0); } + + this->calcLikelihoodInfo(abscissa, 0.0); } -// void LauDecayTimePdf::calcLikelihoodInfo(Double_t abscissa, Double_t abscissaErr) -// { -// if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { -// std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<minAbscissa()<<","<maxAbscissa()<<"]."<Exit(EXIT_FAILURE); -// } -// -// if (abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError()) { -// std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<minAbscissaError()<<","<maxAbscissaError()<<"]."<Exit(EXIT_FAILURE); -// } -// -// switch( effMethod_ ) -// { -// case EfficiencyMethod::Spline : effiTerm_ = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break; -// case EfficiencyMethod::Binned : effiTerm_ = effiHist_ ? 
effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break; -// case EfficiencyMethod::Flat : effiTerm_ = 1.0 ; break; -// // default : std::cerr << "Warning: EFFICIENCY INPUT METHOD NOT SET" << std::endl; effiTerms_.push_back( 1.0 ); -// } -// -// // Initialise the various terms to zero -// if (type_ == Hist){ -// if ( pdfHist_ ) { -// pdfHist_->calcLikelihoodInfo(abscissa); -// pdfTerm_ = pdfHist_->getLikelihood(); -// } else { -// pdfTerm_ = 1.0; -// } -// }else{ -// -// // Reset the state to Good -// this->state(Good); -// -// // If we're not using the resolution function calculate the simple terms and return -// if (!this->doSmearing()) { -// this->calcNonSmearedTerms(abscissa); -// return; -// } -// -// //TODO how much to be added below for SimFitNormBd/SimFitNormBs/SimFitSigBd/SimFitSigBs -// -// // Get all the up to date parameter values -// std::vector frac(nGauss_); -// std::vector mean(nGauss_); -// std::vector sigma(nGauss_); -// Double_t tau(0.0); -// Double_t deltaM(0.0); -// Double_t fracPrompt(0.0); -// Double_t Delta_gamma(0.0); -// frac[0] = 1.0; -// for (UInt_t i(0); iunblindValue(); -// sigma[i] = sigma_[i]->unblindValue(); -// if (i != 0) { -// frac[i] = frac_[i-1]->unblindValue(); -// frac[0] -= frac[i]; -// } -// } -// if (type_ != Delta) { -// tau = tau_->unblindValue(); -// if (type_ == ExpTrig) { -// deltaM = deltaM_->unblindValue(); -// } -// if (type_ == DeltaExp) { -// fracPrompt = fracPrompt_->unblindValue(); -// } -// if (type_ == ExpHypTrig){ -// deltaM = deltaM_->unblindValue(); -// Delta_gamma = deltaGamma_->unblindValue(); -// } -// } -// -// // Scale the gaussian parameters by the per-event error on Delta t (if appropriate) -// for (UInt_t i(0); i x(nGauss_); -// const Double_t xMax = this->maxAbscissa(); -// const Double_t xMin = this->minAbscissa(); -// for (UInt_t i(0); i 1e-10) { -// Double_t exponent(0.0); -// Double_t norm(0.0); -// Double_t scale = LauConstants::root2*sigma[i]; -// Double_t scale2 = LauConstants::rootPiBy2*sigma[i]; -// exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]); -// norm = scale2*(TMath::Erf((xMax - mean[i])/scale) -// - TMath::Erf((xMin - mean[i])/scale)); -// value += frac[i]*TMath::Exp(exponent)/norm; -// } -// } -// } -// -// if (type_ != Delta) { -// -// std::vector expTerms(nGauss_); -// std::vector cosTerms(nGauss_); -// std::vector sinTerms(nGauss_); -// std::vector coshTerms(nGauss_); -// std::vector sinhTerms(nGauss_); -// -// std::vector expTermsNorm(nGauss_); -// // TODO - TEL changed this name to make it compile - please check! -// std::vector SinhTermsNorm(nGauss_); -// -// // Calculate values of the PDF convoluated with each Gaussian for a given value of the abscsissa -// for (UInt_t i(0); icalcTrigExponent(deltaM, tau, x[i], sigma[i], exponentTermRe, exponentTermIm); -// -// // Elements related to the trigonometric function, i.e. 
convolution of Exp*Sin or Cos with Gauss -// Double_t sinTrigTermRe, sinTrigTermIm, cosTrigTermRe, cosTrigTermIm; -// this->calcTrigConv(deltaM, tau, x[i], sigma[i], sinTrigTermRe, sinTrigTermIm, kFALSE); -// this->calcTrigConv(deltaM, tau, x[i], sigma[i], cosTrigTermRe, cosTrigTermIm, kTRUE); -// -// // Combining elements of the full pdf -// LauComplex zExp(exponentTermRe, exponentTermIm); -// LauComplex zTrigSin(sinTrigTermRe, sinTrigTermIm); -// LauComplex zTrigCos(cosTrigTermRe, cosTrigTermIm); -// -// LauComplex sinConv = zExp * zTrigSin; -// LauComplex cosConv = zExp * zTrigCos; -// sinConv.scale(1.0/4.0); -// cosConv.scale(1.0/4.0); -// -// // Cosine*Exp and Sine*Exp terms -// cosTerms[i] = cosConv.re(); -// sinTerms[i] = sinConv.im(); -// -// // Normalisation -// Double_t umax = xMax - mean[i]; -// Double_t umin = xMin - mean[i]; -// -// expTermsNorm[i] = (1.0/2.0) * tau * (-1.0 + TMath::Erf(umax/(LauConstants::root2 * sigma[i])) + TMath::Erfc(umin/(LauConstants::root2 * sigma[i])) + -// TMath::Exp((pow(sigma[i], 2) - 2.0 * tau * (xMax + xMin - mean[i]))/(2.0 * pow(tau, 2))) * -// (TMath::Exp(xMax/tau) * TMath::Erfc((pow(sigma[i], 2) - xMin)/(LauConstants::root2 * tau))) + -// (TMath::Exp(xMin/tau) * TMath::Erfc((pow(sigma[i], 2) - xMax)/(LauConstants::root2 * tau)))); -// } else { -// -// -// } -// } -// // Typical case (2): B0s/B0sbar -// if (type_ == ExpHypTrig) { -// // LHCb convention -// if (method_ == DecayTime) { -// // Convolution of Exp*cosh (Exp*sinh) with a gaussian -// //Double_t OverallExpFactor = 0.25*TMath::Exp(-(x[i]-mean[i])*(x[i]-mean[i])/(2*sigma[i]*sigma[i])); -// //Double_t ExpFirstTerm = TMath::Exp((2*(x[i]-mean[i])*tau+sigma[i]*sigma[i]*(-2+Delta_gamma*tau))*(2*(x[i]-mean[i])*tau+sigma[i]*sigma[i]*(-2+Delta_gamma*tau))/(8*sigma[i]*sigma[i]*tau*tau)); -// //Double_t ExpSecondTerm = TMath::Exp((2*(-x[i]+mean[i])*tau+sigma[i]*sigma[i]*(2+Delta_gamma*tau))*(2*(-x[i]+mean[i])*tau+sigma[i]*sigma[i]*(2+Delta_gamma*tau))/(8*sigma[i]*sigma[i]*tau*tau)); -// //Double_t ErfFirstTerm = TMath::Erf((2*(x[i]-mean[i])*tau+sigma[i]*sigma[i]*(-2+Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); -// //Double_t ErfSecondTerm = TMath::Erf((2*(-x[i]+mean[i])*tau+sigma[i]*sigma[i]*(2+Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); -// -// //Double_t sinhConv = OverallExpFactor*(ExpFirstTerm*(1+ErfFirstTerm) + ExpSecondTerm*(-1+ErfSecondTerm)); -// //Double_t coshConv = OverallExpFactor*(ExpFirstTerm*(1+ErfFirstTerm) - ExpSecondTerm*(-1+ErfSecondTerm)); -// -// //cosTerms[i] = sinhConv; -// // sinTerms[i] = coshConv; -// -// //TODO: check this formula and try to simplify it! 
-// double OverallExpTerm_max = (1/(2*(-4 + Delta_gamma*Delta_gamma*tau*tau)))*tau*TMath::Exp(-0.5*Delta_gamma*(xMax + mean[i]) - xMax/tau); -// double ErfTerm_max = -2*Delta_gamma*tau*TMath::Exp(0.5*Delta_gamma*(xMax+mean[i])+xMax/tau)*TMath::Erf((xMax-mean[i])/(TMath::Sqrt(2)*sigma[i])); -// double ExpFirstTerm_max = TMath::Exp(xMax*Delta_gamma+(sigma[i]*sigma[i]*(-2 + Delta_gamma*tau)*(-2 + Delta_gamma*tau))/(8*tau*tau)); -// double ErfcFirstTerm_max = TMath::Erfc((2*(-xMax + mean[i])*tau + sigma[i]*sigma[i]*(2 - Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); -// double ExpSecondTerm_max = TMath::Exp(Delta_gamma*mean[i] + (sigma[i]*sigma[i]*(2 + Delta_gamma*tau)*(2 + Delta_gamma*tau))/(8*tau*tau)); -// double ErfcSecondTerm_max = TMath::Erfc((2*(-xMax + mean[i])*tau + sigma[i]*sigma[i]*(2 + Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); -// double MaxVal= OverallExpTerm_max*(ErfTerm_max + TMath::Exp(mean[i]/tau)*(ExpFirstTerm_max*(2+Delta_gamma*tau)* ErfcFirstTerm_max + ExpSecondTerm_max*(-2+Delta_gamma*tau)* ErfcSecondTerm_max)); -// -// double OverallExpTerm_min = (1/(2*(-4 + Delta_gamma*Delta_gamma*tau*tau)))*tau*TMath::Exp(-0.5*Delta_gamma*(xMin + mean[i]) - xMin/tau); -// double ErfTerm_min = -2*Delta_gamma*tau*TMath::Exp(0.5*Delta_gamma*(xMin+mean[i])+xMin/tau)*TMath::Erf((xMin-mean[i])/(TMath::Sqrt(2)*sigma[i])); -// double ExpFirstTerm_min = TMath::Exp(xMin*Delta_gamma+(sigma[i]*sigma[i]*(-2 + Delta_gamma*tau)*(-2 + Delta_gamma*tau))/(8*tau*tau)); -// double ErfcFirstTerm_min = TMath::Erfc((2*(-xMin + mean[i])*tau + sigma[i]*sigma[i]*(2 - Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); -// // TODO - TEL added this (currently identical to ExpSecondTerm_max) to get this to compile - please check!! -// double ExpSecondTerm_min = TMath::Exp(Delta_gamma*mean[i] + (sigma[i]*sigma[i]*(2 + Delta_gamma*tau)*(2 + Delta_gamma*tau))/(8*tau*tau)); -// double ErfcSecondTerm_min = TMath::Erfc((2*(-xMin + mean[i])*tau + sigma[i]*sigma[i]*(2 + Delta_gamma*tau))/(2*TMath::Sqrt(2)*sigma[i]*tau)); -// double minVal= OverallExpTerm_min*(ErfTerm_min + TMath::Exp(mean[i]/tau)*(ExpFirstTerm_min*(2+Delta_gamma*tau)* ErfcFirstTerm_min + ExpSecondTerm_min*(-2+Delta_gamma*tau)* ErfcSecondTerm_min)); -// SinhTermsNorm[i] = MaxVal - minVal; -// -// } else { -// -// } -// } -// -// } -// -// for (UInt_t i(0); icalcLikelihoodInfo(abscissaErr); -// errTerm_ = errHist_->getLikelihood(); -// } else { -// errTerm_ = 1.0; -// } -// } - -void LauDecayTimePdf::calcLikelihoodInfo(Double_t abscissa, Double_t abscissaErr) +void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr) { + // Check that the decay time and the decay time error are in valid ranges if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<minAbscissa()<<","<maxAbscissa()<<"]."<Exit(EXIT_FAILURE); } - - if (abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError()) { + if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) { std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<minAbscissaError()<<","<maxAbscissaError()<<"]."<Exit(EXIT_FAILURE); } + // Determine the decay time efficiency switch( effMethod_ ) { - case EfficiencyMethod::Spline : effiTerm_ = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break; - case EfficiencyMethod::Binned : effiTerm_ = effiHist_ ? 
effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break; - case EfficiencyMethod::Flat : effiTerm_ = 1.0 ; break; + case EfficiencyMethod::Spline : effiTerm_ = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break; + case EfficiencyMethod::Binned : effiTerm_ = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break; + case EfficiencyMethod::Flat : effiTerm_ = 1.0 ; break; } + if ( effiTerm_ > 1.0 ) { effiTerm_ = 1.0; } + else if ( effiTerm_ < 0.0 ) { effiTerm_ = 0.0; } - // Initialise the various terms to zero + // For the histogram PDF just calculate that term and return if (type_ == Hist){ if ( pdfHist_ ) { pdfHist_->calcLikelihoodInfo(abscissa); pdfTerm_ = pdfHist_->getLikelihood(); } else { pdfTerm_ = 1.0; } - // TODO - should return here? + return; } - // Reset the state to Good - //this->state(Good); - - // If we're not using the resolution function calculate the simple terms and return + // If we're not using the resolution function, calculate the simple terms and return if (!this->doSmearing()) { this->calcNonSmearedTerms(abscissa); return; } - // Get all the up to date parameter values + // Get all the up to date parameter values for the resolution function std::vector frac(nGauss_); std::vector mean(nGauss_); std::vector sigma(nGauss_); Double_t fracPrompt(0.0); // TODO - why do we do the fractions this way around? frac[0] = 1.0; for (UInt_t i(0); iunblindValue(); sigma[i] = sigma_[i]->unblindValue(); if (i != 0) { frac[i] = frac_[i-1]->unblindValue(); frac[0] -= frac[i]; } } if (type_ == DeltaExp) { fracPrompt = fracPrompt_->unblindValue(); } // Scale the gaussian parameters by the per-event error on Delta t (if appropriate) for (UInt_t i(0); i x(nGauss_); const Double_t xMax = this->maxAbscissa(); const Double_t xMin = this->minAbscissa(); for (UInt_t i(0); i 1e-10) { Double_t exponent(0.0); Double_t norm(0.0); Double_t scale = LauConstants::root2*sigma[i]; Double_t scale2 = LauConstants::rootPiBy2*sigma[i]; exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]); norm = scale2*(TMath::Erf((xMax - mean[i])/scale) - TMath::Erf((xMin - mean[i])/scale)); value += frac[i]*TMath::Exp(exponent)/norm; } } } if (type_ != Delta) { // Reset values of terms expTerm_ = 0.0; cosTerm_ = 0.0; sinTerm_ = 0.0; coshTerm_ = 0.0; sinhTerm_ = 0.0; // Calculate values of the PDF convoluted with each Gaussian for a given value of the abscsissa for (UInt_t i(0); icalcLikelihoodInfo(abscissaErr); errTerm_ = errHist_->getLikelihood(); } else { errTerm_ = 1.0; } } -//void LauDecayTimePdf::calcTrigExponent(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reTerm, Double_t& imTerm) -//{ -// -// Double_t exponentTerm = TMath::Exp(-(2.0 * tau * x + pow(sigma, 2) * (pow(deltaM, 2) * pow(tau, 2) - 1.0))/(2.0 * pow(tau,2))); -// reTerm = exponentTerm * TMath::Cos(deltaM * (x - pow(sigma,2)/tau)); -// imTerm = - exponentTerm * TMath::Sin(deltaM * (x - pow(sigma,2)/tau)); -// -//} - -// void LauDecayTimePdf::calcTrigConv(Double_t deltaM, Double_t tau, Double_t x, Double_t sigma, Double_t& reOutTerm, Double_t& imOutTerm, Bool_t trig) -// { -// -// Double_t reExpTerm, imExpTerm; -// LauComplex zExp; -// LauComplex zTrig1; -// LauComplex zTrig2; -// -// // Calculation for the sine or cosine term -// if (!trig) { -// reExpTerm = TMath::Sin(2.0 * deltaM * (x + pow(sigma,2)/tau)); -// imExpTerm = 2.0 * TMath::Sin(pow(deltaM * (x + pow(sigma,2)/tau), 2)); -// } else { -// reExpTerm = TMath::Cos(2.0 * deltaM * (x + pow(sigma,2)/tau)); -// imExpTerm = 
TMath::Sin(2.0 * deltaM * (x + pow(sigma,2)/tau)); -// } -// -// // Exponential term in front of Erfc/Erfi terms -// zExp.setRealPart(reExpTerm); -// zExp.setImagPart(imExpTerm); -// -// // Nominal Erfc term (common to both sine and cosine expressions -// zTrig1.setRealPart(-(tau * x - pow(sigma,2))/(LauConstants::root2 * tau * sigma)); -// zTrig1.setImagPart(-(deltaM * sigma)/ LauConstants::root2); -// -// // Second term for sine (Erfi) or cosine (Erfc) - notice the re-im swap and sign change -// zTrig2.setRealPart(-zTrig1.im()); -// zTrig2.setImagPart(-zTrig1.re()); -// -// // Calculation of Erfc and Erfi (if necessary) -// LauComplex term1 = ComplexErfc(zTrig1.re(), zTrig1.im()); -// LauComplex term2; -// if (!trig) { -// term2 = Erfi(zTrig2.re(), zTrig2.im()); -// } else { -// term2 = ComplexErfc(zTrig2.re(), zTrig2.im()); -// } -// -// // Multiplying all elemnets of the convolution -// LauComplex output = zExp * term1 + term2; -// reOutTerm = output.re(); -// imOutTerm = output.im(); -// -// } - -// LauComplex LauDecayTimePdf::ComplexErf(Double_t x, Double_t y) -// { -// // Evaluate Erf(x + iy) using an infinite series approximation -// // From Abramowitz & Stegun (http://people.math.sfu.ca/~cbm/aands/page_299.htm) -// if (x==0){ -// // std::cout << "WARNING: Set x value to 1e-100 to avoid division by 0." << std::endl; -// x = 1e-100; -// } -// int n = 20; // this cotrols the number of iterations of the sum -// LauComplex ErfTerm(TMath::Erf(x),0.); -// LauComplex CosSineTerm(1-cos(2*x*y), sin(2*x*y)); -// CosSineTerm.rescale(TMath::Exp(-x*x)/(2*TMath::Pi()*x)); -// LauComplex firstPart = ErfTerm + CosSineTerm; -// LauComplex SumTerm(0,0); -// -// for (int k = 1; k<=n; k++){ -// Double_t f_k = 2*x*(1 - cos(2*x*y)*cosh(k*y)) + k*sin(2*x*y)*sinh(k*y); -// Double_t g_k = 2*x*sin(2*x*y)*cosh(k*y) + k*cos(2*x*y)*sinh(k*y); -// LauComplex fgTerm(f_k, g_k); -// fgTerm.rescale(TMath::Exp(-0.25*k*k)/(k*k + 4*x*x)); -// SumTerm += fgTerm; -// } -// SumTerm.rescale((2/TMath::Pi())*TMath::Exp(-x*x)); -// LauComplex result = firstPart + SumTerm; -// return result; -// } - -// LauComplex LauDecayTimePdf::Erfi(Double_t x, Double_t y) -// { -// // Erfi(z) = -I*Erf(I*z) where z = x + iy -// double x_prime = -y; -// double y_prime = x; -// LauComplex a = ComplexErf(x_prime, y_prime); -// LauComplex result(a.im(), -a.re()); -// return result; -// } - -// LauComplex LauDecayTimePdf::ComplexErfc(Double_t x, Double_t y) -// { -// // Erfc(z) = 1 - Erf(z) (z = x + iy) -// LauComplex one(1., 0.); -// LauComplex result = one - ComplexErf(x,y); -// return result; -// } - -//Double_t LauDecayTimePdf::normExpHypTerm(Double_t Abs) -//{ -// Double_t tau = tau_->unblindValue(); -// Double_t deltaGamma = deltaGamma_->unblindValue(); -// -// Double_t y = tau*deltaGamma/2; -// Double_t nonTrigTerm = -(TMath::Exp(-Abs/tau))/(1 - y*y); -// -// Double_t cosHTerm = TMath::CosH(deltaGamma*Abs/2); -// Double_t sinHTerm = TMath::SinH(deltaGamma*Abs/2); -// -// Double_t normTerm = nonTrigTerm*(cosHTerm + y*sinHTerm); -// return normTerm; -//} - -//Double_t LauDecayTimePdf::normExpHypTermDep(Double_t Abs) -//{ -// Double_t tau = tau_->unblindValue(); -// Double_t deltaGamma = deltaGamma_->unblindValue(); -// -// Double_t y = tau*deltaGamma/2; -// Double_t nonTrigTerm = -(TMath::Exp(-Abs/tau))/(1 - y*y); -// -// Double_t cosHTerm = TMath::CosH(deltaGamma*Abs/2); -// Double_t sinHTerm = TMath::SinH(deltaGamma*Abs/2); -// -// Double_t normTerm = nonTrigTerm*(sinHTerm + y*cosHTerm); -// return normTerm; -//} - void 
LauDecayTimePdf::calcNonSmearedTerms(Double_t abscissa) { + // Reset values of terms + errTerm_ = 1.0; + expTerm_ = 0.0; + cosTerm_ = 0.0; + sinTerm_ = 0.0; + coshTerm_ = 0.0; + sinhTerm_ = 0.0; + if ( type_ == Hist || type_ == Delta ){ return; } const Double_t tau { tau_->unblindValue() }; const Double_t gamma { 1.0 / tau }; if (method_ == DecayTime) { expTerm_ = TMath::Exp(-abscissa*gamma); } else if (method_ == DecayTimeDiff) { expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gamma); } // Calculate also the terms related to cosine and sine if (type_ == ExpTrig) { const Double_t deltaM = deltaM_->unblindValue(); coshTerm_ = expTerm_; sinhTerm_ = 0.0; cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_; sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_; } // Calculate also the terms related to cosh, sinh, cosine, and sine else if (type_ == ExpHypTrig) { const Double_t deltaM = deltaM_->unblindValue(); const Double_t deltaGamma = deltaGamma_->unblindValue(); coshTerm_ = TMath::CosH(0.5*deltaGamma*abscissa)*expTerm_; sinhTerm_ = TMath::SinH(0.5*deltaGamma*abscissa)*expTerm_; cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_; sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_; } } std::pair LauDecayTimePdf::smearedCosSinTerm(Double_t t, Double_t sigma, Double_t mu) { using namespace std::complex_literals; const Double_t gamma = 1. / this->tau_->unblindValue(); const Double_t x = (t - mu) / (LauConstants::root2 * sigma); const std::complex z = std::complex(gamma * sigma / LauConstants::root2, -this->deltaM_->unblindValue() * sigma / LauConstants::root2); const std::complex arg1 = std::complex(0., 1.) * (z - x); const std::complex arg2 { -(x*x) - (arg1 * arg1) }; -// const std::complex conv = 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ); const std::complex conv = arg1.imag() < -5.? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * TMath::Exp(-(x * x)) * RooMath::faddeeva(arg1) ; const Double_t cos_conv = conv.real(); const Double_t sin_conv = conv.imag(); return {cos_conv, sin_conv}; } std::pair LauDecayTimePdf::smearedCoshSinhTerm(Double_t t, Double_t sigma, Double_t mu) { using namespace std::complex_literals; Double_t gamma = 1. / this->tau_->unblindValue(); std::complex x((t - mu) / (LauConstants::root2 * sigma),0.); Double_t xRe = x.real(); Double_t z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2; Double_t z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2; //Doing H std::complex arg_H1(0., z_H - x.real()); std::complex arg_H2 = -(x*x) - (arg_H1 * arg_H1); std::complex conv_H = arg_H1.imag() < -5. ? (0.5 * std::exp(arg_H2)) * RooMath::erfc(-1i * arg_H1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_H1); //Doing L std::complex arg_L1(0., z_L - x.real()); std::complex arg_L2 = -(x*x) - (arg_L1 * arg_L1); std::complex conv_L = arg_L1.imag() < -5. ? (0.5 * std::exp(arg_L2)) * RooMath::erfc(-1i * arg_L1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_L1); std::complex cosh_conv = 0.5 * (conv_H + conv_L); std::complex sinh_conv = 0.5 * (conv_H - conv_L); return {cosh_conv.real(), sinh_conv.real()}; } Double_t LauDecayTimePdf::smearedExpTerm(Double_t t, Double_t sigma, Double_t mu) { using namespace std::complex_literals; const Double_t gamma = 1. 
/ this->tau_->unblindValue(); const std::complex x((t - mu) / (LauConstants::root2 * sigma),0.); const Double_t xRe = x.real(); const Double_t z = (gamma * sigma) / LauConstants::root2; const std::complex arg1(0., z - x.real()); const std::complex arg2 = -(x * x) - (arg1 * arg1); const std::complex conv = arg1.imag() < -5. ? 0.5 * (std::exp(arg2)) * RooMath::erfc(-1i * arg1) : 0.5 * TMath::Exp(-(xRe * xRe)) * RooMath::faddeeva(arg1) ; -// const std::complex conv = 0.5 * (std::exp(arg2)) * RooMath::erfc(-1i * arg1); return conv.real(); } std::pair LauDecayTimePdf::nonSmearedCosSinIntegral(Double_t minAbs, Double_t maxAbs) { // From 1407.0748, not clear whether complex is faster in this case Double_t gamma = 1. / this->tau_->unblindValue(); LauComplex denom = LauComplex(gamma, -this->deltaM_->unblindValue()); LauComplex exponent = LauComplex(-gamma, this->deltaM_->unblindValue()); LauComplex num0 = -exponent.scale(minAbs).exp(); LauComplex num1 = -exponent.scale(maxAbs).exp(); LauComplex integral = (num1 - num0) / denom; return {integral.re(), integral.im()}; } std::pair LauDecayTimePdf::smearedCosSinIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu) { using namespace std::complex_literals; Double_t gamma = 1. / this->tau_->unblindValue(); Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma); Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma); std::complex z = std::complex(gamma * sigma / LauConstants::root2, -this->deltaM_->unblindValue() * sigma / LauConstants::root2); std::complex arg1 = std::complex(0., 1.) * (z - x1); std::complex arg0 = std::complex(0., 1.) * (z - x0); std::complex integral = 0. + 0i; if(arg1.imag() < -5.) {integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);} else {integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);} if(arg0.imag() < -5.) {integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);} else {integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);} integral *= (sigma / (2. * LauConstants::root2 * z)); Double_t cos_integral = integral.real(); Double_t sin_integral = integral.imag(); return {cos_integral, sin_integral}; } Double_t LauDecayTimePdf::nonSmearedExpIntegral(Double_t minAbs, Double_t maxAbs) { const Double_t tau = tau_->unblindValue(); const Double_t Gamma = 1.0 / tau; return tau * ( TMath::Exp(-minAbs*Gamma) - TMath::Exp(-maxAbs*Gamma) ); } Double_t LauDecayTimePdf::smearedExpIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu) { using namespace std::complex_literals; const Double_t gamma = 1. / this->tau_->unblindValue(); const Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma); const Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma); const Double_t z = (gamma * sigma) / LauConstants::root2; std::complex arg1(0., z - x1); std::complex arg0(0., z - x0); std::complex integral = 0. + 0i; if(arg1.imag() < -5.) {integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);} else {integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);} if(arg0.imag() < -5.) {integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);} else {integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);} integral *= (sigma / (2. 
* LauConstants::root2 * z)); return integral.real(); } std::pair LauDecayTimePdf::nonSmearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs) { // Use exponential formualtion rather than cosh, sinh. // Fewer terms (reused for each), but not guaranteed to be faster. Double_t gamma = 1. / this->tau_->unblindValue(); Double_t gammaH = gamma - 0.5 * deltaGamma_->unblindValue(); Double_t gammaL = gamma - 0.5 * deltaGamma_->unblindValue(); Double_t nL1 = -TMath::Exp(-gammaL * maxAbs) / gammaL; Double_t nH1 = -TMath::Exp(-gammaH * maxAbs) / gammaH; Double_t nL0 = -TMath::Exp(-gammaL * minAbs) / gammaL; Double_t nH0 = -TMath::Exp(-gammaH * minAbs) / gammaH; Double_t cosh_integral = 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ); Double_t sinh_integral = 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ); return {cosh_integral, sinh_integral}; } std::pair LauDecayTimePdf::smearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu) { using namespace std::complex_literals; Double_t gamma = 1. / this->tau_->unblindValue(); Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma); Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma); Double_t z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2; std::complex arg1_H(0., z_H - x1); std::complex arg0_H(0., z_H - x0); std::complex integral_H = 0. + 0i; if(arg1_H.imag() < -5.) {integral_H = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_H * arg1_H)) * RooMath::erfc(-1i * arg1_H);} else {integral_H = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_H);} if(arg0_H.imag() < -5.) {integral_H -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_H * arg0_H)) * RooMath::erfc(-1i * arg0_H);} else {integral_H -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_H);} integral_H *= (sigma / (2. * LauConstants::root2 * z_H)); // Same for light (L) Double_t z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2; std::complex arg1_L(0., z_L - x1); std::complex arg0_L(0., z_L - x0); std::complex integral_L = 0. + 0i; if(arg1_L.imag() < -5.) {integral_L = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_L * arg1_L)) * RooMath::erfc(-1i * arg1_L);} else {integral_L = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_L);} if(arg0_L.imag() < -5.) {integral_L -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_L * arg0_L)) * RooMath::erfc(-1i * arg0_L);} else {integral_L -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_L);} integral_L *= (sigma / (2. * LauConstants::root2 * z_L)); std::complex cosh_integral = 0.5 * (integral_H + integral_L); std::complex sinh_integral = 0.5 * (integral_H - integral_L); return {cosh_integral.real(), sinh_integral.real()}; } -void LauDecayTimePdf::calcNorm() +void LauDecayTimePdf::calcNorm(const Double_t abscissaErr) { // first reset integrals to zero normTermExp_ = 0.0; normTermCos_ = 0.0; normTermSin_ = 0.0; normTermCosh_ = 0.0; normTermSinh_ = 0.0; // Get all the up to date parameter values std::vector frac(nGauss_); std::vector mean(nGauss_); std::vector sigma(nGauss_); // TODO - why do we do the fractions this way around? 
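// For reference, the smearedCosSinTerm / smearedExpTerm / smearedCoshSinhTerm functions above
// (and their *Integral counterparts) all rely on the standard closed form for an exponential
// times an oscillation convolved with a Gaussian resolution of mean mu and width sigma.
// A sketch of the identity, assuming Gamma > 0 and a normalised Gaussian:
//
//   Integral_0^inf dt' exp(-Gamma t') exp(i DeltaM t') Gauss(t - t' - mu; sigma)
//     = 0.5 * exp(-x^2) * w( i*(z - x) )
//
// with x = (t - mu) / (sqrt(2) sigma), z = (Gamma - i DeltaM) sigma / sqrt(2), and w the
// Faddeeva function, w(zeta) = exp(-zeta^2) * erfc(-i zeta). The real part gives the smeared
// cosine term and the imaginary part the smeared sine term; setting DeltaM = 0 gives the
// smeared exponential, while replacing Gamma by Gamma_H = Gamma - DeltaGamma/2 and
// Gamma_L = Gamma + DeltaGamma/2 and taking half the sum/difference gives the smeared
// cosh/sinh terms. The two numerical branches used above are the same quantity written two
// ways: the erfc form, 0.5*exp(-x^2 - arg^2)*erfc(-i*arg), is taken when Im(arg) < -5 because
// a direct evaluation of w(arg) can overflow in that regime, and the Faddeeva form is used
// otherwise.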
frac[0] = 1.0; for (UInt_t i(0); iunblindValue(); sigma[i] = sigma_[i]->unblindValue(); if (i != 0) { frac[i] = frac_[i-1]->unblindValue(); frac[0] -= frac[i]; } } - // Scale the gaussian parameters by the per-event error on Delta t (if appropriate) + // Scale the gaussian parameters by the per-event error on decay time (if appropriate) for (UInt_t i(0); i doSmearing() ) - {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1., mean, sigma, frac);} + {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1.0, mean, sigma, frac);} else - {this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ );} + {this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, 1.0 );} break; case EfficiencyMethod::Binned : // Efficiency varies as piecewise constant // Total integral is sum of integrals in each bin, each weighted by efficiency in that bin for ( Int_t bin{1}; bin <= effiHist_->GetNbinsX(); ++bin ) { const Double_t loEdge {effiHist_->GetBinLowEdge(bin)}; const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)}; const Double_t effVal {effiHist_->GetBinContent(bin)}; if ( this -> doSmearing() ) {this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, mean, sigma, frac );} else {this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );} } break; case EfficiencyMethod::Spline : // Efficiency varies as piecewise polynomial // TODO - to be worked out what to do here std::cerr << "WARNING in LauDecayTimePdf::calcNorm : normalisation integrals for spline acceptance not yet implemented - effect of acceptance will be neglected!" << std::endl; if ( this -> doSmearing() ) - {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1., mean, sigma, frac);} + {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1.0, mean, sigma, frac);} else - {this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ );} + {this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, 1.0 );} break; } // TODO - should we check here that all terms we expect to use are now non-zero? } // TODO - Mildly concerned this is void rather than returning the integrals // (but this would require refactoring for different return values). // As long as it doesn't get called outside of calcNorm() it should be fine - DPO void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight) { - // TODO - this is all neglecting resolution at the moment - Double_t normTermExp {0.0}; if (method_ == DecayTime) { normTermExp = weight * this -> nonSmearedExpIntegral(minAbs, maxAbs); } else if (method_ == DecayTimeDiff) { const Double_t tau = tau_->unblindValue(); const Double_t Gamma = 1.0 / tau; // TODO - there should be some TMath::Abs here surely? 
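// For the DecayTimeDiff case the un-smeared PDF is exp(-Gamma*|t|), so, assuming the
// integration range straddles zero (minAbs <= 0 <= maxAbs), the normalisation integral is
//
//   Integral_minAbs^maxAbs exp(-Gamma*|t|) dt
//     = tau * ( 2 - exp(-Gamma*maxAbs) - exp(-Gamma*|minAbs|) )
//
// i.e. the expression below is only correct if minAbs is supplied as a magnitude; with a
// genuinely negative lower limit the second exponential needs TMath::Abs(minAbs), as the
// TODO above suggests.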
normTermExp = weight * tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma)); } normTermExp_ += normTermExp; // Normalisation factor for B0 decays if ( type_ == ExpTrig ) { normTermCosh_ += normTermExp; auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs); normTermCos_ += weight * cosIntegral; normTermSin_ += weight * sinIntegral; } // Normalisation factor for Bs decays else if ( type_ == ExpHypTrig ) { auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs); normTermCosh_ += weight * coshIntegral; normTermSinh_ += weight * sinhIntegral; auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs); normTermCos_ += weight * cosIntegral; normTermSin_ += weight * sinIntegral; } } void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector& means, const std::vector& sigmas, const std::vector& fractions) { - // TODO - this is all neglecting resolution at the moment for (UInt_t i(0); i smearedExpIntegral(minAbs, maxAbs, sigmas[i], means[i]); } else if (method_ == DecayTimeDiff) { const Double_t tau = tau_->unblindValue(); const Double_t Gamma = 1.0 / tau; + // TODO - this is neglecting resolution at the moment // TODO - there should be some TMath::Abs here surely? normTermExp = weight * tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma)); } normTermExp_ += fractions[i] * normTermExp; // Normalisation factor for B0 decays if ( type_ == ExpTrig ) { normTermCosh_ += fractions[i] * normTermExp; auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]); normTermCos_ += fractions[i] * weight * cosIntegral; normTermSin_ += fractions[i] * weight * sinIntegral; } // Normalisation factor for Bs decays else if ( type_ == ExpHypTrig ) { auto [coshIntegral, sinhIntegral] = this->smearedCoshSinhIntegral(minAbs, maxAbs, sigmas[i], means[i]); normTermCosh_ += fractions[i] * weight * coshIntegral; normTermSinh_ += fractions[i] * weight * sinhIntegral; auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]); normTermCos_ += fractions[i] * weight * cosIntegral; normTermSin_ += fractions[i] * weight * sinIntegral; } } } Double_t LauDecayTimePdf::generateError(Bool_t forceNew) { if (errHist_ && (forceNew || !abscissaErrorGenerated_)) { - LauFitData errData = errHist_->generate(0); - abscissaError_ = errData.find(this->varErrName())->second; + LauFitData errData = errHist_->generate(nullptr); + abscissaError_ = errData.at(this->varErrName()); abscissaErrorGenerated_ = kTRUE; } else { while (forceNew || !abscissaErrorGenerated_) { abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_); if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) { abscissaErrorGenerated_ = kTRUE; forceNew = kFALSE; } } } return abscissaError_; } /* LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics) { // generateError SHOULD have been called before this // function but will call it here just to make sure // (has ns effect if has already been called) abscissaError_ = this->generateError(); // If the PDF is scaled by the per-event error then need to update the PDF height for each event Bool_t scale(kFALSE); for (std::vector::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) { scale |= (*iter); } for (std::vector::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); 
++iter) { scale |= (*iter); } if (scale || (!this->heightUpToDate() && !this->cachePDF())) { this->calcPDFHeight(kinematics); this->heightUpToDate(kTRUE); } // Generate the value of the abscissa. Bool_t gotAbscissa(kFALSE); Double_t genVal(0.0); Double_t genPDFVal(0.0); LauFitData genAbscissa; const Double_t xMin = this->minAbscissa(); const Double_t xMax = this->maxAbscissa(); const Double_t xRange = xMax - xMin; while (!gotAbscissa) { genVal = LauRandom::randomFun()->Rndm()*xRange + xMin; this->calcLikelihoodInfo(genVal, abscissaError_); genPDFVal = this->getUnNormLikelihood(); if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;} if (genPDFVal > this->getMaxHeight()) { std::cerr<<"Warning in LauDecayTimePdf::generate()." <<" genPDFVal = "<getMaxHeight()<<" for the abscissa = "<varName()] = genVal; // mark that we need a new error to be generated next time abscissaErrorGenerated_ = kFALSE; return genAbscissa; } */ void LauDecayTimePdf::setErrorHisto(const TH1* hist) { - if ( errHist_ != 0 ) { + if ( errHist_ != nullptr ) { std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError()); } void LauDecayTimePdf::setHistoPdf(const TH1* hist) { - if ( pdfHist_ != 0 ) { + if ( pdfHist_ != nullptr ) { std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<varName(), hist, this->minAbscissa(), this->maxAbscissa()); } void LauDecayTimePdf::setEffiHist(const TH1* hist) { if ( effiHist_ != nullptr ) { std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl; return; } if ( hist == nullptr ) { std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl; return; } // Check boundaries of histogram align with our abscissa's range const Double_t axisMin {hist->GetXaxis()->GetXmin()}; const Double_t axisMax {hist->GetXaxis()->GetXmax()}; if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) { std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n" << " : histogram range: " << axisMin << " - " << axisMax << "\n" << " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n" << " : Disregarding this histogram." << std::endl; return; } effiHist_ = dynamic_cast( hist->Clone() ); - - //Since we didn't do it in the constructor - this -> calcNorm(); } void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline) { if ( effiFun_ != 0 ) { std::cerr<<"WARNING in LauDecayTimePdf::setEffiPdf : efficiency function already set, not doing it again."< effis = effiFun_->getYValues(); effiPars_.resize( effis.size() ); size_t index = 0; for( Double_t& effi : effis ) { effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data() ,index ), effi, 0.0, 1.0, kTRUE ); ++index; } } LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) { for ( std::vector::iterator iter = param_.begin(); iter != param_.end(); ++iter ) { if ((*iter)->name().Contains(parName)) { return (*iter); } } std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." 
<< std::endl; return 0; } const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const { for ( std::vector::const_iterator iter = param_.begin(); iter != param_.end(); ++iter ) { if ((*iter)->name().Contains(parName)) { return (*iter); } } std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl; return 0; } void LauDecayTimePdf::updatePulls() { for ( std::vector::iterator iter = param_.begin(); iter != param_.end(); ++iter ) { std::vector params = (*iter)->getPars(); for (std::vector::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) { if (!(*iter)->fixed()) { (*params_iter)->updatePull(); } } } } void LauDecayTimePdf::updateEffiSpline(std::vector effiPars) { if (effiPars.size() != effiFun_->getnKnots()){ std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<Exit(EXIT_FAILURE); } effiFun_->updateYValues(effiPars); } diff --git a/src/LauTimeDepFitModel.cc b/src/LauTimeDepFitModel.cc index 548ee18..2920247 100644 --- a/src/LauTimeDepFitModel.cc +++ b/src/LauTimeDepFitModel.cc @@ -1,2898 +1,2895 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauTimeDepFitModel.cc \brief File containing implementation of LauTimeDepFitModel class. */ #include #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepFitModel.hh" #include "LauFlavTag.hh" ClassImp(LauTimeDepFitModel) LauTimeDepFitModel::LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag) : LauAbsFitModel(), sigModelB0bar_(modelB0bar), sigModelB0_(modelB0), kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0), kinematicsB0_(modelB0 ? 
modelB0->getKinematics() : 0), usingBkgnd_(kFALSE), flavTag_(flavTag), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), nCalibPar_(0), nTagEffPar_(0), nEffiPar_(0), nAsymPar_(0), coeffsB0bar_(0), coeffsB0_(0), coeffPars_(0), fitFracB0bar_(0), fitFracB0_(0), fitFracAsymm_(0), acp_(0), meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0), meanEffB0_("meanEffB0",0.0,0.0,1.0), DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0), DPRateB0_("DPRateB0",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), cpevVarName_(""), cpEigenValue_(CPEven), evtCPEigenVals_(0), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdf_(), backgroundDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), sigExtraPdf_(), sigFlavTagPdf_(), bkgdFlavTagPdf_(), AProd_("AProd",0.0,-1.0,1.0,kTRUE), iterationsMax_(100000000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigFlavTagLike_(0.0), bkgdFlavTagLike_(0.0), sigTotalLike_(0.0) { // Set up ftag here? // Make sure that the integration scheme will be symmetrised sigModelB0bar_->forceSymmetriseIntegration(kTRUE); sigModelB0_->forceSymmetriseIntegration(kTRUE); } LauTimeDepFitModel::~LauTimeDepFitModel() { for (LauPdfList::iterator pdf_iter = sigExtraPdf_->begin(); pdf_iter != sigExtraPdf_->end(); ++pdf_iter) { delete *(pdf_iter); } for (std::vector::iterator iter = bkgndTree_.begin(); iter != bkgndTree_.end(); ++iter){ delete *(iter); } } void LauTimeDepFitModel::setupBkgndVectors() { UInt_t nBkgnds = this->nBkgndClasses(); BkgndDPModels_.resize( nBkgnds ); BkgndPdfs_.resize( nBkgnds ); bkgndEvents_.resize( nBkgnds ); bkgndAsym_.resize( nBkgnds ); bkgndTree_.resize( nBkgnds ); reuseBkgnd_.resize( nBkgnds ); bkgndDPLike_.resize( nBkgnds ); bkgndExtraLike_.resize( nBkgnds ); bkgndTotalLike_.resize( nBkgnds ); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The asym LauParameter pointer is null." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBgkndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } nBkgndEvents->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID] = new LauParameter(nBkgndEvents->name()+"Asym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( bkgndAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." 
<< std::endl; return; } bkgndEvents_[bkgndID]->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID]->name( nBkgndEvents->name()+"Asym" ); if ( bkgndAsym->isLValue() ) { LauParameter* asym = dynamic_cast( bkgndAsym ); asym->range(-1.0, 1.0); } bkgndAsym_[bkgndID] = bkgndAsym; } void LauTimeDepFitModel::setSignalDtPdf(LauDecayTimePdf* pdf) { if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalDtPdf : The PDF pointer is null, not adding it."<validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModel : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDPModels_[bkgndID] = model; usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setSignalPdfs(LauAbsPdf* pdf) { // These "extra variables" are assumed to be purely kinematical, like mES and DeltaE //or making use of Rest of Event information, and therefore independent of whether //the parent is a B0 or a B0bar. If this assupmtion doesn't hold, do modify this part! if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalPdfs : The PDF pointer is null."<push_back(pdf); } void LauTimeDepFitModel::setBkgndPdf(const TString& bkgndClass, LauAbsPdf* pdf) { if (pdf==0) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : PDF pointer is null." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
<< std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndPdfs_[bkgndID].push_back(pdf); usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos) { phiMix_.value(phiMix); phiMix_.initValue(phiMix); phiMix_.genValue(phiMix); phiMix_.fixed(fixPhiMix); const Double_t sinPhiMix = TMath::Sin(phiMix); sinPhiMix_.value(sinPhiMix); sinPhiMix_.initValue(sinPhiMix); sinPhiMix_.genValue(sinPhiMix); sinPhiMix_.fixed(fixPhiMix); const Double_t cosPhiMix = TMath::Cos(phiMix); cosPhiMix_.value(cosPhiMix); cosPhiMix_.initValue(cosPhiMix); cosPhiMix_.genValue(cosPhiMix); cosPhiMix_.fixed(fixPhiMix); useSinCos_ = useSinCos; phiMixComplex_.setRealPart(cosPhiMix); phiMixComplex_.setImagPart(-1.0*sinPhiMix); } void LauTimeDepFitModel::initialise() { // From the initial parameter values calculate the coefficients // so they can be passed to the signal model this->updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } //Flavour tagging //flavTag_->initialise(); if (!this->useDP() && sigExtraPdf_->empty()) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Next check that, if a given component is being used we've got the // right number of PDFs for all the variables involved // TODO - should probably check variable names and so on as well //UInt_t nsigpdfvars(0); //for ( LauPdfList::const_iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nsigpdfvars; // } // } //} //if (usingBkgnd_) { // for (LauBkgndPdfsList::const_iterator bgclass_iter = BkgndPdfsB0_.begin(); bgclass_iter != BkgndPdfsB0_.end(); ++bgclass_iter) { // UInt_t nbkgndpdfvars(0); // const LauPdfList& pdfList = (*bgclass_iter); // for ( LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nbkgndpdfvars; // } // } // } // if (nbkgndpdfvars != nsigpdfvars) { // std::cerr << "ERROR in LauTimeDepFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." 
<< std::endl; // gSystem->Exit(EXIT_FAILURE); // } // } //} // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Handle flavour-tagging calibration parameters this->setCalibParams(); // Add tagging efficiency parameters this->setTagEffParams(); // Add the efficiency parameters this->setEffiParams(); //Asymmetry terms AProd and in setAsymmetries()? //this->setAsymParams(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_ + nCalibPar_ + nTagEffPar_ + nEffiPar_)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } if (sigModelB0_ == 0) { std::cerr<<"ERROR in LauTimeDepFitModel::initialiseDPModels : B0 signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp(); const UInt_t nAmpB0 = sigModelB0_->getnTotAmp(); if ( nAmpB0bar != nAmpB0 ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." << std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepFitModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); fifjEffSum_.clear(); fifjEffSum_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); // Add backgrounds if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = BkgndDPModels_.begin(); iter != BkgndDPModels_.end(); ++iter) { (*iter)->initialise(); } } } void LauTimeDepFitModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos(); const std::vector& integralInfoListB0 = sigModelB0_->getIntegralInfos(); // TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc. 
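// For reference: the nested loops that follow accumulate the efficiency-weighted overlap of
// the B0 and B0bar amplitudes for each pair of components,
//
//   fifjEffSum_[i][j] ~ Sum_k  w_k * eff_k * Abar_j(k) * conj( A_i(k) ) ,
//
// where k runs over the integration grid points of each region. calcInterTermNorm() then
// contracts these sums with the isobar coefficients, the amplitude normalisation factors and
// the mixing phase to obtain the interference-term normalisations interTermReNorm_ and
// interTermImNorm_.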
LauComplex A, Abar, fifjEffSumTerm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions = integralInfoListB0bar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion]; const UInt_t nm13Points = integralInfoB0bar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight = integralInfoB0bar->getWeight(m13,m23); const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23); const Double_t effWeight = eff*weight; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A = integralInfoB0->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm = Abar*A.conj(); fifjEffSumTerm.rescale(effWeight); fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm; } } } } } } void LauTimeDepFitModel::calcInterTermNorm() { const std::vector& fNormB0bar = sigModelB0bar_->getFNorm(); const std::vector& fNormB0 = sigModelB0_->getFNorm(); LauComplex norm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj(); coeffTerm *= fifjEffSum_[iAmp][jAmp]; coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]); norm += coeffTerm; } } norm *= phiMixComplex_; interTermReNorm_ = 2.0*norm.re(); interTermImNorm_ = 2.0*norm.im(); } void LauTimeDepFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Is there a component called compName in the signal models? TString compName = coeffSet->name(); TString conjName = sigModelB0bar_->getConjResName(compName); const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters(); const LauDaughters* daughtersB0 = sigModelB0_->getDaughters(); const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 ); if ( ! sigModelB0bar_->hasResonance(compName) ) { if ( ! sigModelB0bar_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<name( compName ); } if ( conjugate ) { if ( ! 
sigModelB0_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<hasResonance(compName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<::const_iterator iter=coeffPars_.begin(); iter!=coeffPars_.end(); ++iter) { if ((*iter)->name() == compName) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffPars_.push_back(coeffSet); TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0)); acp_.push_back(coeffSet->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepFitModel::setAmpCoeffSet : Added coefficients for component \""<acp(); LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauTimeDepFitModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauTimeDepFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; ++i) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (LauParameterPList::iterator iter = pars.begin(); iter != pars.end(); ++iter) { if ( !(*iter)->clone() ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters(); LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters(); for ( LauParameterPList::iterator iter = sigDPParsB0bar.begin(); iter != sigDPParsB0bar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0.begin(); iter != sigDPParsB0.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } UInt_t LauTimeDepFitModel::addParametersToFitList(std::vector theVector) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (std::vector::iterator iter = theVector.begin(); iter != theVector.end(); ++iter) { // grab the pdf and then its parameters LauDecayTimePdf* thePdf = *iter; // The first one is the tagging category LauAbsRValuePList& rvalues = thePdf->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++counter; } } } } return counter; } UInt_t 
LauTimeDepFitModel::addParametersToFitList(LauPdfList* theList) { UInt_t counter(0); counter += this->addFitParameters(*(theList)); return counter; } void LauTimeDepFitModel::setDecayTimeParameters() { nDecayTimePar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setDecayTimeParameters : Setting the initial fit parameters of the DecayTime Pdfs." << std::endl; LauParameterPList& fitVars = this->fitPars(); // Loop over the Dt PDFs LauAbsRValuePList& rvalues = signalDecayTimePdf_->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++nDecayTimePar_; } } } if (usingBkgnd_){ nDecayTimePar_ += this->addParametersToFitList(backgroundDecayTimePdfs_); } if (useSinCos_) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } else { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } void LauTimeDepFitModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setExtraPdfParameters : Setting the initial fit parameters of the extra Pdfs." << std::endl; if (sigExtraPdf_){ nExtraPdfPar_ += this->addFitParameters((*sigExtraPdf_)); } if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { nExtraPdfPar_ += this->addFitParameters(*iter); } } } void LauTimeDepFitModel::setFitNEvents() { nNormPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setFitNEvents : Setting the initial fit parameters of the signal and ackground yields." 
<< std::endl; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for signal and background components..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // TODO arguably should delegate this //LauTagCatParamMap& signalTagCatFrac = flavTag_->getSignalTagCatFrac(); // tagging-category fractions for signal events //for (LauTagCatParamMap::iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // if (iter == signalTagCatFrac.begin()) { // continue; // } // LauParameter* par = &((*iter).second); // fitVars.push_back(par); // ++nNormPar_; //} // Backgrounds if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } } } void LauTimeDepFitModel::setAsymParams() { nAsymPar_ = 0; LauParameterPList& fitVars = this->fitPars(); if (!AProd_.fixed()){ fitVars.push_back(&AProd_); nAsymPar_+=1; } } void LauTimeDepFitModel::setTagEffParams() { nTagEffPar_ = 0; Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setTagEffParams : Setting the initial fit parameters for flavour tagging efficiencies." << std::endl; if (useAltPars){ std::vector tageff_ave = flavTag_->getTagEffAve(); std::vector tageff_delta = flavTag_->getTagEffDelta(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = tageff_ave.begin(); iter != tageff_ave.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(std::vector::iterator iter = tageff_delta.begin(); iter != tageff_delta.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } else { std::vector tageff_b0 = flavTag_->getTagEffB0(); std::vector tageff_b0bar = flavTag_->getTagEffB0bar(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = tageff_b0.begin(); iter != tageff_b0.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(std::vector::iterator iter = tageff_b0bar.begin(); iter != tageff_b0bar.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } } void LauTimeDepFitModel::setCalibParams() { Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setCalibParams : Setting the initial fit parameters of the flavour tagging calibration parameters." 
<< std::endl; if (useAltPars){ std::vector p0pars_ave = flavTag_->getCalibP0Ave(); std::vector p0pars_delta = flavTag_->getCalibP0Delta(); std::vector p1pars_ave = flavTag_->getCalibP1Ave(); std::vector p1pars_delta = flavTag_->getCalibP1Delta(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = p0pars_ave.begin(); iter != p0pars_ave.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p0pars_delta.begin(); iter != p0pars_delta.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_ave.begin(); iter != p1pars_ave.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_delta.begin(); iter != p1pars_delta.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } else { std::vector p0pars_b0 = flavTag_->getCalibP0B0(); std::vector p0pars_b0bar = flavTag_->getCalibP0B0bar(); std::vector p1pars_b0 = flavTag_->getCalibP1B0(); std::vector p1pars_b0bar = flavTag_->getCalibP1B0bar(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = p0pars_b0.begin(); iter != p0pars_b0.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p0pars_b0bar.begin(); iter != p0pars_b0bar.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_b0.begin(); iter != p1pars_b0.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_b0bar.begin(); iter != p1pars_b0bar.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } } void LauTimeDepFitModel::setEffiParams() { nEffiPar_ = 0; LauParameterPList& fitVars = this->fitPars(); std::vector& effiPars = signalDecayTimePdf_->getEffiPars(); for(std::vector::iterator iter = effiPars.begin(); iter != effiPars.end(); ++iter){ LauParameter* par = *iter; if (par->fixed()){continue;} fitVars.push_back(par); ++nEffiPar_; } } void LauTimeDepFitModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. 
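// The calibration parameters gathered in setCalibParams() above (p0, p1, either per true
// flavour or as average/delta pairs) are only registered with the fit here; how they enter
// the per-event mistag is implemented in LauFlavTag. A minimal sketch, assuming the common
// linear calibration convention (this is an illustration of the usual form, not the
// LauFlavTag implementation, and etaAvg denotes the mean of the predicted-mistag
// distribution):
//
//   omega(eta)    = p0    + p1    * (eta - etaAvg)   // mistag for a true B0
//   omegaBar(eta) = p0bar + p1bar * (eta - etaAvg)   // mistag for a true B0bar
//   // average/delta parameterisation (sign convention assumed):
//   //   p0 = p0ave + 0.5*p0delta ,  p0bar = p0ave - 0.5*p0delta  (and similarly for p1)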
if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 and B0bar fit fractions for each signal component fitFracB0bar_ = sigModelB0bar_->getFitFractions(); if (fitFracB0bar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue(); meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar); extraVars.push_back(meanEffB0bar_); Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue(); meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0); extraVars.push_back(meanEffB0_); // Also add in the DP rates Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue(); DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar); extraVars.push_back(DPRateB0bar_); Double_t initDPRateB0 = sigModelB0_->getDPRate().initValue(); DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0); extraVars.push_back(DPRateB0_); } void LauTimeDepFitModel::setAsymmetries(const Double_t AProd, const Bool_t AProdFix){ AProd_.value(AProd); AProd_.fixed(AProdFix); } void LauTimeDepFitModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. 
// Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters signalDecayTimePdf_->updatePulls(); // and for backgrounds if required if (usingBkgnd_){ for (std::vector::iterator iter = backgroundDecayTimePdfs_.begin(); iter != backgroundDecayTimePdfs_.end(); ++iter) { LauDecayTimePdf* pdf = *iter; pdf->updatePulls(); } } if (useSinCos_) { cosPhiMix_.updatePull(); sinPhiMix_.updatePull(); } else { this->checkMixingPhase(); } if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } } // Update the pulls on all the extra PDFs' parameters if (sigExtraPdf_){ this->updateFitParameters(*(sigExtraPdf_)); } if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { this->updateFitParameters(*iter); } } // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo(); sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo(); LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions(); if (fitFracB0bar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. 
fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepFitModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B0bar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output<<"B0bar FitFraction for component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout<<"\\hline"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_.value()); fout << "$ & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_->empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(*(sigExtraPdf_), fout); if (usingBkgnd_ == kTRUE && !BkgndPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl; for (LauBkgndPdfsList::const_iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { this->printFitParameters(*iter, fout); } } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepFitModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! 
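// The randomisation that follows calls randomiseInitValues() on each isobar coefficient set
// and draws phiMix uniformly in [-pi, pi]. When fitting sin(phiMix) and cos(phiMix) as
// separate parameters (useSinCos_), both are seeded from the same randomised phase, so the
// starting point still satisfies sin^2 + cos^2 = 1 even though the two parameters float
// independently in the fit.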
std::cout<<"INFO in LauTimeDepFitModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepFitModel::LauGenInfo LauTimeDepFitModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); //TODO fill in here if we care } else { Double_t rateB0bar = sigModelB0bar_->getDPRate().value(); Double_t rateB0 = sigModelB0_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } //for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // const LauParameter& par = iter->second; // Double_t eventsbyTagCat = par.value() * nEvts; // if (this->doPoissonSmearing()) { // eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); // } // eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); //} //nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. nEvtsGen["signal"] = std::make_pair( nEvts, evtWeight ); } std::cout<<"INFO in LauTimeDepFitModel::eventsToGenerate : Generating toy MC with:"<first); // Type const TString& type(iter->first); // Number of events Int_t nEvtsGen( iter->second.first ); // get the event weight for this category const Double_t evtWeight( iter->second.second ); for (Int_t iEvt(0); iEvtsetGenNtupleDoubleBranchValue( "evtWeight", evtWeight ); if (evtCategory == "signal") { this->setGenNtupleIntegerBranchValue("genSig",1); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 ); } // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { this->setGenNtupleIntegerBranchValue("genSig",0); UInt_t bkgndID(0); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { Int_t gen(0); if ( bkgndClassNames[iBkgnd] == type ) { gen = 1; bkgndID = iBkgnd; } this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen ); } genOK = this->generateBkgndEvent(bkgndID); } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. 
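				// (genOK is propagated back via the return value of genExpt();
				//  when !genOK the used-event lists are cleared further below,
				//  so the whole experiment can be regenerated cleanly with the
				//  adjusted accept/reject envelope - see generateSignalEvent().)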
break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); this->setGenNtupleDoubleBranchValue(flavTag_->getTrueTagVarName(),curEvtTrueTagFlv_); std::vector tagVarName = flavTag_->getTagVarNames(); std::vector mistagVarName = flavTag_->getMistagVarNames(); // Loop over the taggers - values set via generateSignalEvent const ULong_t nTaggers {flavTag_->getNTaggers()}; for (ULong_t i=0; isetGenNtupleIntegerBranchValue(tagVarName[i],curEvtTagFlv_[i]); this->setGenNtupleDoubleBranchValue(mistagVarName[i],curEvtMistag_[i]); } // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFitModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_->checkToyMC(kTRUE); sigModelB0_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events if (reuseSignal_ || !genOK) { if (signalTree_) { signalTree_->clearUsedList(); } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = bkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } return genOK; } Bool_t LauTimeDepFitModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... 
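	// Accept/reject outline (descriptive sketch of the loop below, not extra code):
	//
	//   1) generate the per-event decay-time error and the tagger decisions /
	//      per-event mistag probabilities, choosing the true flavour according
	//      to the production asymmetry;
	//   2) draw a flat DP point (m13Sq, m23Sq) and a flat decay time in [tMin, tMax];
	//   3) evaluate |A|^2, |Abar|^2, the interference term and the decay-time
	//      cosh/sinh/cos/sin terms at that point;
	//   4) combine them (cos and sin terms weighted by the true flavour), multiply
	//      by the DP and decay-time efficiencies to obtain ASq;
	//   5) accept the candidate if a uniform random number is below ASq/aSqMaxSet_,
	//      otherwise try again (up to iterationsMax_ attempts).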
Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_->squareDP(); doSquareDP &= kinematicsB0_->squareDP(); LauKinematics* kinematics(kinematicsB0bar_); if (this->useDP()) { if (signalTree_) { signalTree_->getEmbeddedEvent(kinematics); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); if (signalTree_->haveBranch("mcMatch")) { Int_t match = TMath::Nint(signalTree_->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = signalDecayTimePdf_->generateError(kTRUE); // clear vectors curEvtTagFlv_.clear(); std::vector tageffB0 = flavTag_->getTagEffB0(); std::vector tageffB0bar = flavTag_->getTagEffB0bar(); std::vector tageffave = flavTag_->getTagEffAve(); std::vector tageffdelta = flavTag_->getTagEffDelta(); Double_t tagEffB0(0.), tagEffB0bar(0.); curEvtMistag_.clear(); curEvtTrueTagFlv_ = 0; // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { curEvtTrueTagFlv_ = 1; // B0 tag } else { curEvtTrueTagFlv_ = -1; // B0bar tag } // Next generate the tag decisions and per-event mistag probabilities Double_t randNo(0); const ULong_t nTaggers { flavTag_->getNTaggers() }; for(ULong_t position{0}; positiongetEtaGen(position)); if(flavTag_->getUseAveDelta()){ tagEffB0 = tageffave[position]->unblindValue() + 0.5*tageffdelta[position]->unblindValue(); tagEffB0bar = tageffave[position]->unblindValue() - 0.5*tageffdelta[position]->unblindValue(); } else { tagEffB0 = tageffB0[position]->unblindValue(); tagEffB0bar = tageffB0bar[position]->unblindValue(); } if (curEvtTrueTagFlv_ == 1){ randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0) { randNo = LauRandom::randomFun()->Rndm(); // Account for mistag if (randNo > curEvtMistag_[position]){ curEvtTagFlv_.push_back(1); // B0 tag } else { curEvtTagFlv_.push_back(-1); // B0bar tag } } else { curEvtTagFlv_.push_back(0); // Untagged } } else { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0bar) { randNo = LauRandom::randomFun()->Rndm(); // Account for mistag if (randNo > curEvtMistag_[position]){ curEvtTagFlv_.push_back(-1); // B0bar tag } else { curEvtTagFlv_.push_back(1); // B0 tag } } else { curEvtTagFlv_.push_back(0); // Untagged } } } // Now generate from the combined DP / decay-time PDF while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { // Generate the DP position Double_t m13Sq{0.0}, m23Sq{0.0}; kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A and Abar for the given DP position sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar { sigModelB0bar_->getEvtDPAmp() }; const LauComplex& A { sigModelB0_->getEvtDPAmp() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // Next calculate the DP terms const Double_t aSqSum { 
A.abs2() + Abar.abs2() }; const Double_t aSqDif { A.abs2() - Abar.abs2() }; Double_t interTermRe { 0.0 }; Double_t interTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; if ( cpEigenValue_ == CPEven ) { interTermIm = 2.0 * inter.im(); interTermRe = 2.0 * inter.re(); } else { interTermIm = -2.0 * inter.im(); interTermRe = -2.0 * inter.re(); } } // Generate decay time const Double_t tMin = signalDecayTimePdf_->minAbscissa(); const Double_t tMax = signalDecayTimePdf_->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin; // Calculate all the decay time info signalDecayTimePdf_->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); - // ...and check that the calculation went ok, otherwise loop again - if (signalDecayTimePdf_->state() != LauDecayTimePdf::Good) { - std::cout<<"signalDecayTimePdf_ state is bad"<getEffiTerm() }; // First get all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // Combine DP and decay-time info for all terms // Multiplying the cos and sin terms by the true flavour at production const Double_t coshTerm { dtCosh * aSqSum }; const Double_t sinhTerm { dtSinh * interTermRe }; const Double_t cosTerm { dtCos * aSqDif * curEvtTrueTagFlv_ }; const Double_t sinTerm { dtSin * interTermIm * curEvtTrueTagFlv_ }; // Sum to obtain the total and multiply by the efficiency const Double_t ASq { ( coshTerm + sinhTerm + cosTerm - sinTerm ) * dpEff * dtEff }; //std::cout << "Total Amplitude Eff: " << ASq << std::endl; //Finally we throw the dice to see whether this event should be generated //We make a distinction between the likelihood of TM and SCF to tag the SCF events as such Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ASq/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASq > aSqMaxVar_) {aSqMaxVar_ = ASq;} } else { nGenLoop_++; } } // end of while !generatedEvent loop } // end of if (signalTree_) else control } else { if ( signalTree_ ) { signalTree_->getEmbeddedEvent(0); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); } } // Check whether we have generated the toy MC OK. 
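	// Envelope bookkeeping below: aSqMaxVar_ tracks the largest ASq actually
	// seen, while aSqMaxSet_ is the assumed maximum used in the accept/reject
	// ratio. If the loop hits iterationsMax_, or an ASq value exceeds the
	// assumed maximum (which would have biased the already-accepted sample),
	// the envelope is inflated to 1.01 * aSqMaxVar_ and the generation is
	// flagged as failed so that the experiment is redone.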
if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() ); this->generateExtraPdfValues(sigExtraPdf_, signalTree_); } // Check for problems with the embedding if (signalTree_ && (signalTree_->nEvents() == signalTree_->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } Bool_t LauTimeDepFitModel::generateBkgndEvent([[maybe_unused]] UInt_t bkgndID) { // Generate Bkgnd event Bool_t genOK(kTRUE); //LauAbsBkgndDPModel* model(0); //LauEmbeddedData* embeddedData(0); //LauPdfList* extraPdfs(0); //LauKinematics* kinematics(0); //model = BkgndDPModels_[bkgndID]; //if (this->enableEmbedding()) { // // find the right embedded data for the current tagging category // LauTagCatEmbDataMap::const_iterator emb_iter = bkgndTree_[bkgndID].find(curEvtTagCat_); // embeddedData = (emb_iter != bkgndTree_[bkgndID].end()) ? emb_iter->second : 0; //} //extraPdfs = &BkgndPdfs_[bkgndID]; //kinematics = kinematicsB0bar_; //if (this->useDP()) { // if (embeddedData) { // embeddedData->getEmbeddedEvent(kinematics); // } else { // if (model == 0) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "ERROR in LauCPFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << bkgndClass << "\"." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // genOK = model->generate(); // } //} else { // if (embeddedData) { // embeddedData->getEmbeddedEvent(0); // } //} //if (genOK) { // this->generateExtraPdfValues(extraPdfs, embeddedData); //} //// Check for problems with the embedding //if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "WARNING in LauCPFitModel::generateBkgndEvent : Source of embedded " << bkgndClass << " events used up, clearing the list of used events." << std::endl; // embeddedData->clearUsedList(); //} return genOK; } void LauTimeDepFitModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleIntegerBranch("cpEigenvalue"); std::vector tagVarName = flavTag_->getTagVarNames(); const ULong_t nTaggers {flavTag_->getNTaggers()}; for (ULong_t position{0}; positionaddGenNtupleIntegerBranch(tagVarName[position]); } if (this->useDP() == kTRUE) { // Let's add the decay time variables. 
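		// (The decay-time branch names are taken from the PDF itself, via
		//  varName() and varErrName(), so the generated ntuple uses the same
		//  variable names that are later expected when caching fit input.)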
this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varName()); this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varErrName()); this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { this->addGenNtupleDoubleBranch("reB0Amp"); this->addGenNtupleDoubleBranch("imB0Amp"); this->addGenNtupleDoubleBranch("reB0barAmp"); this->addGenNtupleDoubleBranch("imB0barAmp"); } } // Let's look at the extra variables for signal in one of the tagging categories if ( sigExtraPdf_ ) { for (LauPdfList::const_iterator pdf_iter = sigExtraPdf_->begin(); pdf_iter != sigExtraPdf_->end(); ++pdf_iter) { for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { this->addGenNtupleDoubleBranch( (*var_iter) ); } } } } } void LauTimeDepFitModel::setDPDtBranchValues() { // Store the decay time variables. this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varName(),curEvtDecayTime_); this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varErrName(),curEvtDecayTimeErr_); // CONVENTION WARNING // TODO check - for now use B0 for any tags //LauKinematics* kinematics(0); //if (curEvtTagFlv_[position]<0) { LauKinematics* kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar = sigModelB0bar_->getEvtDPAmp(); LauComplex A = sigModelB0_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0Amp", A.re()); this->setGenNtupleDoubleBranchValue("imB0Amp", A.im()); this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re()); this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0); 
this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0); } } } void LauTimeDepFitModel::generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData) { // CONVENTION WARNING LauKinematics* kinematics = kinematicsB0_; //LauKinematics* kinematics(0); //if (curEvtTagFlv_<0) { // kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Generate from the extra PDFs if (extraPdfs) { for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( (*pdf_iter)->varNames() ); } else { genValues = (*pdf_iter)->generate(kinematics); } for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) { TString varName = var_iter->first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var_iter->second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } } void LauTimeDepFitModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0_->updateCoeffs(coeffsB0_); this->calcInterTermNorm(); } // Update the decay time normalisation if ( signalDecayTimePdf_ ) { // TODO - should make this intelligent (only update if certain parameters are floating and have changed in the last iteration) - this could go here or inside LauDecayTimePdf::calcNorm // - will maybe also need to add an update of the background PDFs here signalDecayTimePdf_->calcNorm(); } // Update the signal events from the background numbers if not doing an extended fit // And update the tagging category fractions this->updateSigEvents(); } void LauTimeDepFitModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. if (!this->doEMLFit()) { Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { LauAbsRValue* nBkgndEvents = (*iter); if ( nBkgndEvents->isLValue() ) { LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*nTotEvts,2.0*nTotEvts); } } // Subtract background events (if any) from signal. if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { signalEvents -= (*iter)->value(); } } if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } } void LauTimeDepFitModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. 
LauFitDataTree* inputFitData = this->fitData(); evtCPEigenVals_.clear(); const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else if ( cpEV == 0 ) { cpEigenValue_ = QFS; } else { std::cerr<<"WARNING in LauTimeDepFitModel::cacheInputFitVars : Unknown value: "<cacheInputFitVars(inputFitData); if (this->useDP() == kTRUE) { // DecayTime and SigmaDecayTime signalDecayTimePdf_->cacheInfo(*inputFitData); } // ...and then the extra PDFs if (sigExtraPdf_){ this->cacheInfo((*sigExtraPdf_), *inputFitData); } if(usingBkgnd_ == kTRUE){ for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { this->cacheInfo((*iter), *inputFitData); } } if (this->useDP() == kTRUE) { sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = BkgndDPModels_.begin(); iter != BkgndDPModels_.end(); ++iter) { (*iter)->fillDataTree(*inputFitData); } } } } Double_t LauTimeDepFitModel::getTotEvtLikelihood(const UInt_t iEvt) { // Find out whether the tag-side B was a B0 or a B0bar. curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds) this->getEvtDPDtLikelihood(iEvt); // Get the flavour tagging likelihood from eta PDFs (per tagging category - TODO backgrounds to come later) sigFlavTagLike_ = 1.0; //this->getEvtFlavTagLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds) this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigFlavTagLike_ * sigExtraLike_; //std::cout << "DP like = " << sigDPLike_ << std::endl; //std::cout << "flav tag like = " << sigFlavTagLike_ << std::endl; //std::cout << "extra like = " << sigExtraLike_ << std::endl; // TODO Double_t signalEvents = signalEvents_->unblindValue(); if (this->useDP() == kFALSE) { //signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); } if ( ! signalEvents_->fixed() ) { sigLike *= signalEvents; } return sigLike; } Double_t LauTimeDepFitModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); return eventSum; } void LauTimeDepFitModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. if ( ! this->useDP() ) { // There's always going to be a term in the likelihood for the // signal, so we'd better not zero it. 
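		// (For the DP case handled further below, the signal term follows the
		//  standard time-dependent rate; schematically, at the true-flavour
		//  level used in the toy generation,
		//
		//    ASq = (|A|^2 + |Abar|^2) * coshTerm
		//        + 2 Re( Abar A* e^{-i phiMix} ) * sinhTerm
		//        + q_true * (|A|^2 - |Abar|^2) * cosTerm
		//        - q_true * 2 Im( Abar A* e^{-i phiMix} ) * sinTerm
		//
		//  where the cosh/sinh/cos/sin terms come from LauDecayTimePdf (and so
		//  already carry the exponential, resolution and Delta m / Delta Gamma
		//  dependence), the interference pieces flip sign for CP-odd final
		//  states, and in the likelihood below the true flavour q_true is
		//  replaced by the tagging-dilution and production-asymmetry factors
		//  built from omega / omegabar.)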
sigDPLike_ = 1.0; const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = 1.0; } else { bkgndDPLike_[bkgndID] = 0.0; } } return; } // Calculate event quantities // Get the dynamics to calculate everything required for the likelihood calculation sigModelB0bar_->calcLikelihoodInfo(iEvt); sigModelB0_->calcLikelihoodInfo(iEvt); // Background part // TODO add them into the actual Likelihood calculations const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = BkgndDPModels_[bkgndID]->getLikelihood(iEvt); } else { bkgndDPLike_[bkgndID] = 0.0; } } // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar { sigModelB0bar_->getEvtDPAmp() }; const LauComplex& A { sigModelB0_->getEvtDPAmp() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // Next calculate the DP terms const Double_t aSqSum { A.abs2() + Abar.abs2() }; const Double_t aSqDif { A.abs2() - Abar.abs2() }; Double_t interTermRe { 0.0 }; Double_t interTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; if ( cpEigenValue_ == CPEven ) { interTermIm = 2.0 * inter.im(); interTermRe = 2.0 * inter.re(); } else { interTermIm = -2.0 * inter.im(); interTermRe = -2.0 * inter.re(); } } // First get all the decay time terms signalDecayTimePdf_->calcLikelihoodInfo(iEvt); // TODO Backgrounds // Get the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; - // First get all the decay time terms + // Get all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; + // Get the decay time error term + const Double_t dtErrLike { signalDecayTimePdf_->getErrTerm() }; + // Get flavour tagging terms flavTag_->updateEventInfo(iEvt); Double_t omega{1.0}; Double_t omegabar{1.0}; const ULong_t nTaggers { flavTag_->getNTaggers() }; for (ULong_t position{0}; positiongetCapitalOmega(position,+1); omegabar *= flavTag_->getCapitalOmega(position,-1); } const Double_t prodAsym { AProd_.unblindValue() }; const Double_t ftOmegaHyp { ((1.0 - prodAsym)*omega + (1.0 + prodAsym)*omegabar) }; const Double_t ftOmegaTrig { ((1.0 - prodAsym)*omega - (1.0 + prodAsym)*omegabar) }; Double_t coshTerm { dtCosh * ftOmegaHyp * aSqSum }; Double_t sinhTerm { dtSinh * ftOmegaHyp * interTermRe }; Double_t cosTerm { dtCos * ftOmegaTrig * aSqDif }; Double_t sinTerm { dtSin * ftOmegaTrig * interTermIm }; curEvtTrueTagFlv_ = flavTag_->getCurEvtTrueTagFlv(); if (curEvtTrueTagFlv_ != 0 && cpEigenValue_ == QFS){ cosTerm *= curEvtTrueTagFlv_; sinTerm *= curEvtTrueTagFlv_; } // Combine all terms to get the total amplitude squared const Double_t ASq { coshTerm + sinhTerm + cosTerm - sinTerm }; // Calculate the DP and time normalisation const Double_t normASqSum { sigModelB0_->getDPNorm() + sigModelB0bar_->getDPNorm() }; const Double_t normASqDiff { sigModelB0_->getDPNorm() - sigModelB0bar_->getDPNorm() }; Double_t normInterTermRe { 0.0 }; Double_t normInterTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { // TODO - double check this sign flipping here (it's presumably right but...) normInterTermRe = ( cpEigenValue_ == CPOdd ) ? 
-1.0 * interTermReNorm_ : interTermReNorm_; normInterTermIm = ( cpEigenValue_ == CPOdd ) ? -1.0 * interTermImNorm_ : interTermImNorm_; } //const Double_t normExpTerm { signalDecayTimePdf_->getNormTermExp() }; const Double_t normCoshTerm { signalDecayTimePdf_->getNormTermCosh() }; const Double_t normSinhTerm { signalDecayTimePdf_->getNormTermSinh() }; const Double_t normCosTerm { signalDecayTimePdf_->getNormTermCos() }; const Double_t normSinTerm { signalDecayTimePdf_->getNormTermSin() }; Double_t asymPart { - 2.0 * prodAsym * ( normASqDiff * normCosTerm + normInterTermIm * normSinTerm ) }; // TODO - double check what to do about the true flavour here if (curEvtTrueTagFlv_ != 0 && cpEigenValue_ == QFS){ asymPart *= curEvtTrueTagFlv_; } // Combine all terms to get the total normalisation const Double_t norm { normASqSum * normCoshTerm + normInterTermRe * normSinhTerm + asymPart }; - // Multiply the squared-amplitude by the efficiency (DP and decay time) + // Multiply the squared-amplitude by the efficiency (DP and decay time) and decay-time error likelihood // and normalise to obtain the signal likelihood - sigDPLike_ = ( ASq * dpEff * dtEff ) / norm; + sigDPLike_ = ( ASq * dpEff * dtEff * dtErrLike ) / norm; } void LauTimeDepFitModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal if (sigExtraPdf_) { sigExtraLike_ = this->prodPdfValue( (*sigExtraPdf_), iEvt ); } // Background const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_) { bkgndExtraLike_[bkgndID] = this->prodPdfValue( BkgndPdfs_[bkgndID], iEvt ); } else { bkgndExtraLike_[bkgndID] = 0.0; } } } void LauTimeDepFitModel::getEvtFlavTagLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigFlavTagLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // Loop over taggers const ULong_t nTaggers { flavTag_->getNTaggers() }; for (ULong_t position{0}; positioncalcLikelihoodInfo(iEvt); sigFlavTagLike_ = sigFlavTagPdf_[position]->getLikelihood(); } } if (sigFlavTagLike_<=0){ std::cout<<"INFO in LauTimeDepFitModel::getEvtFlavTagLikelihood : Event with 0 FlavTag Liklihood"<antiparticleCoeff()); coeffsB0_.push_back(coeffPars_[i]->particleCoeff()); } } void LauTimeDepFitModel::checkMixingPhase() { Double_t phase = phiMix_.value(); Double_t genPhase = phiMix_.genValue(); // Check now whether the phase lies in the right range (-pi to pi). Bool_t withinRange(kFALSE); while (withinRange == kFALSE) { if (phase > -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. 
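	// Worked example: if the generated value is +3.10 and the fit converges to
	// -3.12 (the same physical phase, just wrapped), the naive difference is
	// -6.22 and the pull is artificially huge; adding 2*pi maps the fitted
	// value to +3.16, recovering the genuine difference of ~0.06.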
Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepFitModel::embedSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_) { std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Already embedding signal from file."<findBranches(); if (!dataOK) { delete signalTree_; signalTree_ = 0; std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Problem creating data tree for embedding."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); LauEmbeddedData* bkgTree = bkgndTree_[bkgndID]; if (bkgTree) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Already embedding background from a file." << std::endl; return; } bkgTree = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = bkgTree->findBranches(); if (!dataOK) { delete bkgTree; bkgTree = 0; std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Problem creating data tree for embedding." << std::endl; return; } reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble; if (this->enableEmbedding() == kFALSE) { this->enableEmbedding(kTRUE); } } void LauTimeDepFitModel::setupSPlotNtupleBranches() { // add branches for storing the experiment number and the number of // the event within the current experiment this->addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species. 
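	// Branch-naming convention used below (illustrated with a hypothetical
	// background class called "comb"): per-species totals are stored as
	// "sigTotalLike" and "combTotalLike", the DP terms as "sigDPLike" and
	// "combDPLike", and each extra-PDF variable <var> as "sig<var>Like" /
	// "comb<var>Like", with 2D PDFs additionally getting a combined
	// "<prefix><var1><var2>Like" branch.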
this->addSPlotNtupleDoubleBranch("sigTotalLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "TotalLike"; this->addSPlotNtupleDoubleBranch(name); } } // Store the DP likelihoods if (this->useDP()) { this->addSPlotNtupleDoubleBranch("sigDPLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "DPLike"; this->addSPlotNtupleDoubleBranch(name); } } } // Store the likelihoods for each extra PDF const LauPdfList* pdfList( sigExtraPdf_ ); this->addSPlotNtupleBranches(pdfList, "sig"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauPdfList* pdfList2 = &(BkgndPdfs_[iBkgnd]); this->addSPlotNtupleBranches(pdfList2, bkgndClass); } } } void LauTimeDepFitModel::addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix) { if (!extraPdfs) { return; } // Loop through each of the PDFs for (LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } TString name(prefix); name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr<<"WARNING in LauTimeDepFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // calculate the likelihood for this event (*pdf_iter)->calcLikelihoodInfo(iEvt); extraLike = (*pdf_iter)->getLikelihood(); totalLike *= extraLike; // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; Double_t indivLike = (*pdf_iter)->getLikelihood( (*var_iter) ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } TString name(prefix); name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<useDP()) { nameSet.insert("DP"); } for (LauPdfList::const_iterator pdf_iter = sigExtraPdf_->begin(); pdf_iter != sigExtraPdf_->end(); ++pdf_iter) { // Loop over the variables involved in each PDF for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { // If they are not DP coordinates then add them if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { nameSet.insert( (*var_iter) ); } } } return nameSet; } LauSPlot::NumbMap LauTimeDepFitModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (!par->fixed()) { numbMap[bkgndClass] = par->genValue(); if ( ! 
par->isLValue() ) { std::cerr << "WARNING in LauTimeDepFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl; } } } } return numbMap; } LauSPlot::NumbMap LauTimeDepFitModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (par->fixed()) { numbMap[bkgndClass] = par->genValue(); } } } return numbMap; } LauSPlot::TwoDMap LauTimeDepFitModel::twodimPDFs() const { LauSPlot::TwoDMap twodimMap; const LauPdfList* pdfList = sigExtraPdf_; for (LauPdfList::const_iterator pdf_iter = pdfList->begin(); pdf_iter != pdfList->end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sig", std::make_pair( (*pdf_iter)->varNames()[0], (*pdf_iter)->varNames()[1] ) ) ); } } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauPdfList& pdfList2 = BkgndPdfs_[iBkgnd]; for (LauPdfList::const_iterator pdf_iter = pdfList2.begin(); pdf_iter != pdfList2.end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) ); } } } } return twodimMap; } void LauTimeDepFitModel::storePerEvtLlhds() { std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Storing per-event likelihood values..."<fitData(); // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it if (!this->useDP() && this->storeDPEff()) { sigModelB0bar_->initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(sigModelB0bar_); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { // Find out whether we have B0bar or B0 flavTag_->updateEventInfo(iEvt); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); // the DP information this->getEvtDPDtLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { sigModel->calcLikelihoodInfo(iEvt); } this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); } if (this->useDP()) { sigTotalLike_ = sigDPLike_; this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "DPLike"; 
this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]); } } } else { sigTotalLike_ = 1.0; } // the signal PDF values sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigExtraPdf_, "sig", iEvt); // the background PDF values LauBkgndPdfsList* bkgndPdfs(0); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); LauPdfList& pdfs = (*bkgndPdfs)[iBkgnd]; bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(&(pdfs), bkgndClass, iEvt); } } // the total likelihoods this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "TotalLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]); } } // fill the tree this->fillSPlotNtupleBranches(); } std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Finished storing per-event likelihood values."< #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepFlavModel.hh" ClassImp(LauTimeDepFlavModel) LauTimeDepFlavModel::LauTimeDepFlavModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, const Bool_t useUntaggedEvents, const TString& tagVarName, const TString& tagCatVarName) : LauAbsFitModel(), sigModelB0bar_(modelB0bar), sigModelB0_(modelB0), kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0), kinematicsB0_(modelB0 ? 
modelB0->getKinematics() : 0), useUntaggedEvents_(useUntaggedEvents), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), coeffsB0bar_(0), coeffsB0_(0), coeffPars_(0), fitFracB0bar_(0), fitFracB0_(0), fitFracAsymm_(0), acp_(0), meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0), meanEffB0_("meanEffB0",0.0,0.0,1.0), DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0), DPRateB0_("DPRateB0",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), signalTagCatFrac_(), tagVarName_(tagVarName), tagCatVarName_(tagCatVarName), cpevVarName_(""), validTagCats_(), curEvtTagFlv_(0), curEvtTagCat_(0), cpEigenValue_(CPEven), evtTagFlvVals_(0), evtTagCatVals_(0), evtCPEigenVals_(0), dilution_(), deltaDilution_(), //deltaM_("deltaM",LauConstants::deltaMd), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), sigExtraPdf_(), iterationsMax_(500000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigTotalLike_(0.0) { // Add the untagged category as a valid category this->addValidTagCat(0); // Set the fraction, average dilution and dilution difference for the untagged category this->setSignalTagCatPars(0, 1.0, 0.0, 0.0, kTRUE); } LauTimeDepFlavModel::~LauTimeDepFlavModel() { // TODO - need to delete the various embedded data structures here } void LauTimeDepFlavModel::setupBkgndVectors() { } void LauTimeDepFlavModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFlavModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." 
<< std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepFlavModel::setNBkgndEvents(LauAbsRValue* /*nBkgndEvents*/) { std::cerr << "WARNING in LauTimeDepFlavModel::setNBkgndEvents : This model does not yet support backgrounds" << std::endl; } void LauTimeDepFlavModel::addValidTagCats(const std::vector& tagCats) { for (std::vector::const_iterator iter = tagCats.begin(); iter != tagCats.end(); ++iter) { this->addValidTagCat(*iter); } } void LauTimeDepFlavModel::addValidTagCat(Int_t tagCat) { validTagCats_.insert(tagCat); } void LauTimeDepFlavModel::setSignalTagCatPars(const Int_t tagCat, const Double_t tagCatFrac, const Double_t dilution, const Double_t deltaDilution, const Bool_t fixTCFrac) { if (!this->validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepFlavModel::setSignalTagCatPars : Tagging category \""<checkSignalTagCatFractions(); only when the user has //set them all up, in this->initialise(); } void LauTimeDepFlavModel::checkSignalTagCatFractions() { Double_t totalTaggedFrac(0.0); for (LauTagCatParamMap::const_iterator iter=signalTagCatFrac_.begin(); iter!=signalTagCatFrac_.end(); ++iter) { if (iter->first != 0) { const LauParameter& par = iter->second; totalTaggedFrac += par.value(); } } if ( ((totalTaggedFrac < (1.0-1.0e-8))&&!useUntaggedEvents_) || (totalTaggedFrac > (1.0+1.0e-8)) ) { std::cerr<<"WARNING in LauTimeDepFlavModel::checkSignalTagCatFractions : Tagging category fractions add up to "<second; Double_t newVal = par.value() / totalTaggedFrac; par.value(newVal); par.initValue(newVal); par.genValue(newVal); } } else if (useUntaggedEvents_) { Double_t tagCatFrac = 1.0 - totalTaggedFrac; TString tagCatFracName("signalTagCatFrac0"); signalTagCatFrac_[0].name(tagCatFracName); signalTagCatFrac_[0].range(0.0,1.0); signalTagCatFrac_[0].value(tagCatFrac); signalTagCatFrac_[0].initValue(tagCatFrac); signalTagCatFrac_[0].genValue(tagCatFrac); signalTagCatFrac_[0].fixed(kTRUE); TString dilutionName("dilution0"); dilution_[0].name(dilutionName); dilution_[0].range(0.0,1.0); dilution_[0].value(0.0); dilution_[0].initValue(0.0); dilution_[0].genValue(0.0); TString deltaDilutionName("deltaDilution0"); deltaDilution_[0].name(deltaDilutionName); deltaDilution_[0].range(-2.0,2.0); deltaDilution_[0].value(0.0); deltaDilution_[0].initValue(0.0); deltaDilution_[0].genValue(0.0); } for (LauTagCatParamMap::const_iterator iter=dilution_.begin(); iter!=dilution_.end(); ++iter) { std::cout<<"INFO in LauTimeDepFlavModel::checkSignalTagCatFractions : Setting dilution for tagging category "<<(*iter).first<<" to "<<(*iter).second<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepFlavModel::setSignalDtPdf : Tagging category \""<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepFlavModel::setSignalPdfs : Tagging category \""<updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is 
null."<Exit(EXIT_FAILURE); } } // Check here that the tagging category fractions add up to 1, otherwise "normalise". Also set up the untagged cat. // NB this has to be done early in the initialization as other methods access the tagCats map. this->checkSignalTagCatFractions(); // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_)) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } this->setExtraNtupleVars(); } void LauTimeDepFlavModel::recalculateNormalisation() { sigModelB0bar_->recalculateNormalisation(); sigModelB0_->recalculateNormalisation(); sigModelB0bar_->modifyDataTree(); sigModelB0_->modifyDataTree(); this->calcInterferenceTermIntegrals(); } void LauTimeDepFlavModel::initialiseDPModels() { if (sigModelB0bar_ == 0) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialiseDPModels : B0bar signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0_ == 0) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialiseDPModels : B0 signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up //const UInt_t nAmpB0bar = sigModelB0bar_->getnAmp(); //const UInt_t nAmpB0 = sigModelB0_->getnAmp(); const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp(); const UInt_t nAmpB0 = sigModelB0_->getnTotAmp(); if ( nAmpB0bar != nAmpB0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepFlavModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." << std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepFlavModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); fifjEffSum_.clear(); fifjEffSum_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); } void LauTimeDepFlavModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos(); const std::vector& integralInfoListB0 = sigModelB0_->getIntegralInfos(); // TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc. 
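	// A minimal sketch of the cross-check mentioned in the TODO above (kept as
	// a comment; the accessor names follow those used in the loop below):
	//
	//   if ( integralInfoListB0bar.size() != integralInfoListB0.size() ) {
	//       std::cerr << "ERROR in LauTimeDepFlavModel::calcInterferenceTermIntegrals : "
	//                 << "Mismatch in the number of integration regions" << std::endl;
	//       gSystem->Exit(EXIT_FAILURE);
	//   }
	//   for ( UInt_t iRegion(0); iRegion < integralInfoListB0bar.size(); ++iRegion ) {
	//       if ( integralInfoListB0bar[iRegion]->getnm13Points() != integralInfoListB0[iRegion]->getnm13Points() ||
	//            integralInfoListB0bar[iRegion]->getnm23Points() != integralInfoListB0[iRegion]->getnm23Points() ) {
	//           std::cerr << "ERROR in LauTimeDepFlavModel::calcInterferenceTermIntegrals : "
	//                     << "Mismatch in the integration grid for region " << iRegion << std::endl;
	//           gSystem->Exit(EXIT_FAILURE);
	//       }
	//   }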
LauComplex A, Abar, fifjEffSumTerm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions = integralInfoListB0bar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion]; const UInt_t nm13Points = integralInfoB0bar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight = integralInfoB0bar->getWeight(m13,m23); const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23); const Double_t effWeight = eff*weight; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A = integralInfoB0->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm = Abar*A.conj(); fifjEffSumTerm.rescale(effWeight); fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm; } } } } } } void LauTimeDepFlavModel::calcInterTermNorm() { const std::vector fNormB0bar = sigModelB0bar_->getFNorm(); const std::vector fNormB0 = sigModelB0_->getFNorm(); LauComplex norm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj(); coeffTerm *= fifjEffSum_[iAmp][jAmp]; coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]); norm += coeffTerm; } } norm *= phiMixComplex_; interTermReNorm_ = 2.0*norm.re(); interTermImNorm_ = 2.0*norm.im(); } void LauTimeDepFlavModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Is there a component called compName in the signal models? TString compName = coeffSet->name(); TString conjName = sigModelB0bar_->getConjResName(compName); //TODO this part needs work - it doesn't work for e.g. pi+ pi- K_S0, where you want the daughters to be in the same order but it is still conjugate! const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters(); const LauDaughters* daughtersB0 = sigModelB0_->getDaughters(); const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 ); if ( ! sigModelB0bar_->hasResonance(compName) ) { if ( ! sigModelB0bar_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<name( compName ); } if ( conjugate ) { if ( ! 
sigModelB0_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<hasResonance(compName) ) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<::const_iterator iter=coeffPars_.begin(); iter!=coeffPars_.end(); ++iter) { if ((*iter)->name() == compName) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffPars_.push_back(coeffSet); TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0)); acp_.push_back(coeffSet->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepFlavModel::setAmpCoeffSet : Added coefficients for component \""<acp(); LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauTimeDepFlavModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauTimeDepFlavModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; i++) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (LauParameterPList::iterator iter = pars.begin(); iter != pars.end(); ++iter) { if ( !(*iter)->clone() ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters(); LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters(); for ( LauParameterPList::iterator iter = sigDPParsB0bar.begin(); iter != sigDPParsB0bar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0.begin(); iter != sigDPParsB0.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } UInt_t LauTimeDepFlavModel::addParametersToFitList(LauTagCatDtPdfMap& theMap) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (LauTagCatDtPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { // grab the pdf and then its parameters LauDecayTimePdf* thePdf = (*iter).second; // The first one is the tagging category LauAbsRValuePList& rvalues = thePdf->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++counter; } } } } return counter; } UInt_t 
LauTimeDepFlavModel::addParametersToFitList(LauTagCatPdfMap& theMap) { UInt_t counter(0); // loop through the map for (LauTagCatPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { counter += this->addFitParameters(iter->second); // first is the tagging category } return counter; } void LauTimeDepFlavModel::setDecayTimeParameters() { nDecayTimePar_ = 0; // Loop over the Dt PDFs nDecayTimePar_ += this->addParametersToFitList(signalDecayTimePdfs_); LauParameterPList& fitVars = this->fitPars(); if (useSinCos_) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } else { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } void LauTimeDepFlavModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; nExtraPdfPar_ += this->addParametersToFitList(sigExtraPdf_); } void LauTimeDepFlavModel::setFitNEvents() { nNormPar_ = 0; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepFlavModel::setFitNEvents : Initialising number of events for signal and background components..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // tagging-category fractions for signal events for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { continue; } LauParameter* par = &((*iter).second); fitVars.push_back(par); ++nNormPar_; } } void LauTimeDepFlavModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. 
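// Schematic expansion of the comment above (an assumption about intent, kept
// as a comment so the function body is unchanged): the "derived" quantities
// registered as extra ntuple variables are, per signal component, the B0bar
// and B0 fit fractions, their asymmetry and the ACP taken from the
// coefficient set, plus the DP-averaged efficiencies and total DP rates of
// the two flavour models, i.e. roughly
//
//   extraVars.push_back( fitFracB0bar_[i][j] );   // and fitFracB0_[i][j]
//   extraVars.push_back( fitFracAsymm_[i] );
//   extraVars.push_back( acp_[i] );
//   extraVars.push_back( meanEffB0bar_ );         // and meanEffB0_
//   extraVars.push_back( DPRateB0bar_ );          // and DPRateB0_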
if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 and B0bar fit fractions for each signal component fitFracB0bar_ = sigModelB0bar_->getFitFractions(); if (fitFracB0bar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue(); meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar); extraVars.push_back(meanEffB0bar_); Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue(); meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0); extraVars.push_back(meanEffB0_); // Also add in the DP rates Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue(); DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar); extraVars.push_back(DPRateB0bar_); Double_t initDPRateB0 = sigModelB0_->getDPRate().initValue(); DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0); extraVars.push_back(DPRateB0_); } void LauTimeDepFlavModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. 
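// Schematic aside (kept as a comment; the exact sign convention is defined by
// LauAsymmCalc): the per-component fit-fraction asymmetries filled by
// calcAsymmetries() are built from the diagonal fit-fraction entries of the
// two flavour models,
//
//   LauAsymmCalc asymmCalc( fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value() );
//   fitFracAsymm_[i].value( asymmCalc.getAsymmetry() );
//
// while acp_[i] is taken directly from the corresponding coefficient set via
// coeffPars_[i]->acp().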
// Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters for (LauTagCatDtPdfMap::iterator iter = signalDecayTimePdfs_.begin(); iter != signalDecayTimePdfs_.end(); ++iter) { LauDecayTimePdf* pdf = (*iter).second; pdf->updatePulls(); } if (useSinCos_) { cosPhiMix_.updatePull(); sinPhiMix_.updatePull(); } else { this->checkMixingPhase(); } // Update the pulls on all the extra PDFs' parameters for (LauTagCatPdfMap::iterator iter = sigExtraPdf_.begin(); iter != sigExtraPdf_.end(); ++iter) { this->updateFitParameters(iter->second); } // Tagging-category fractions for signal and background events Double_t firstCatFrac(1.0); Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { firstCat = iter->first; continue; } LauParameter& par = (*iter).second; firstCatFrac -= par.value(); // update the parameter pull par.updatePull(); } signalTagCatFrac_[firstCat].value(firstCatFrac); signalTagCatFrac_[firstCat].updatePull(); // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo(); sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo(); LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions(); if (fitFracB0bar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. 
fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepFlavModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B0bar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output<<"B0bar FitFraction for component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout<<"\\hline"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_.value()); fout << "$ & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(iter->second, fout); } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepFlavModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! 
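// Schematic sketch of the randomisation performed below (kept as a comment so
// the function body is unchanged): each floating coefficient set randomises
// its own initial values, and the mixing phase is drawn uniformly in
// [-pi, pi]; when the (sinPhiMix, cosPhiMix) parameterisation is in use the
// two derived initial values are kept consistent with the randomised phase:
//
//   phiMix_.randomiseValue( -LauConstants::pi, LauConstants::pi );
//   if ( useSinCos_ ) {
//       sinPhiMix_.initValue( TMath::Sin( phiMix_.initValue() ) );
//       cosPhiMix_.initValue( TMath::Cos( phiMix_.initValue() ) );
//   }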
std::cout<<"INFO in LauTimeDepFlavModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepFlavModel::LauGenInfo LauTimeDepFlavModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; LauTagCatGenInfo eventsB0, eventsB0bar; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; Double_t eventsB0byTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 - sigAsym)); Double_t eventsB0barbyTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 + sigAsym)); if (this->doPoissonSmearing()) { eventsB0byTagCat = LauRandom::randomFun()->Poisson(eventsB0byTagCat); eventsB0barbyTagCat = LauRandom::randomFun()->Poisson(eventsB0barbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsB0byTagCat), evtWeight ); eventsB0bar[iter->first] = std::make_pair( TMath::Nint(eventsB0barbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",-1)] = eventsB0; nEvtsGen[std::make_pair("signal",+1)] = eventsB0bar; } else { Double_t rateB0bar = sigModelB0bar_->getDPRate().value(); Double_t rateB0 = sigModelB0_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; if (this->doPoissonSmearing()) { eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. } std::cout<<"INFO in LauTimeDepFlavModel::eventsToGenerate : Generating toy MC with:"<setGenNtupleIntegerBranchValue("genSig",1); // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { genOK = kFALSE; } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. 
break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); this->setGenNtupleIntegerBranchValue("tagCat",curEvtTagCat_); this->setGenNtupleIntegerBranchValue("tagFlv",curEvtTagFlv_); // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFlavModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_->checkToyMC(kTRUE); sigModelB0_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events //if (!signalTree_.empty() && (reuseSignal_ || !genOK)) { if (reuseSignal_ || !genOK) { for(LauTagCatEmbDataMap::const_iterator iter = signalTree_.begin(); iter != signalTree_.end(); ++iter) { (iter->second)->clearUsedList(); } } return genOK; } Bool_t LauTimeDepFlavModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_->squareDP(); doSquareDP &= kinematicsB0_->squareDP(); LauKinematics* kinematics(kinematicsB0bar_); // find the right decay time PDF for the current tagging category LauTagCatDtPdfMap::const_iterator dt_iter = signalDecayTimePdfs_.find(curEvtTagCat_); LauDecayTimePdf* decayTimePdf = (dt_iter != signalDecayTimePdfs_.end()) ? dt_iter->second : 0; // find the right embedded data for the current tagging category LauTagCatEmbDataMap::const_iterator emb_iter = signalTree_.find(curEvtTagCat_); LauEmbeddedData* embeddedData = (emb_iter != signalTree_.end()) ? emb_iter->second : 0; // find the right extra PDFs for the current tagging category LauTagCatPdfMap::iterator extra_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* extraPdfs = (extra_iter != sigExtraPdf_.end()) ? 
&(extra_iter->second) : 0; if (this->useDP()) { if (embeddedData) { embeddedData->getEmbeddedEvent(kinematics); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); if (embeddedData->haveBranch("mcMatch")) { Int_t match = TMath::Nint(embeddedData->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = decayTimePdf->generateError(kTRUE); while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { // Calculate the unnormalised truth-matched signal likelihood // First let define the tag flavour Double_t randNo = LauRandom::randomFun()->Rndm(); if (randNo < 0.5) { curEvtTagFlv_ = +1; // B0 tag } else { curEvtTagFlv_ = -1; // B0bar tag } // Calculate event quantities that depend only on the tagCat and tagFlv Double_t qD = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); Double_t qDDo2 = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); // Generate the DP position Double_t m13Sq(0.0), m23Sq(0.0); kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A and Abar for the given DP position sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar = sigModelB0bar_->getEvtDPAmp(); const LauComplex& A = sigModelB0_->getEvtDPAmp(); Double_t eff = sigModelB0bar_->getEvtEff(); // Next calculate the DP terms Double_t aSqSum = A.abs2() + Abar.abs2(); Double_t aSqDif = A.abs2() - Abar.abs2(); LauComplex inter = Abar * A.conj() * phiMixComplex_; Double_t interTermIm = 2.0 * inter.im(); Double_t interTermRe = 2.0 * inter.re(); // Generate decay time const Double_t tMin = decayTimePdf->minAbscissa(); const Double_t tMax = decayTimePdf->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin; // Calculate all the decay time info decayTimePdf->calcLikelihoodInfo(curEvtDecayTime_, curEvtDecayTimeErr_); - // ...and check that the calculation went ok, otherwise loop again - if (decayTimePdf->state() != LauDecayTimePdf::Good) { - ++nGenLoop_; - continue; - } // First get all the decay time terms //Double_t dtExp = decayTimePdf->getExpTerm(); Double_t dtCos = decayTimePdf->getCosTerm(); Double_t dtSin = decayTimePdf->getSinTerm(); Double_t dtCosh = decayTimePdf->getCoshTerm(); Double_t dtSinh = decayTimePdf->getSinhTerm(); // Combine all terms Double_t cosTerm = dtCos * qD * aSqDif; Double_t sinTerm = dtSin * qD * interTermIm; Double_t coshTerm = dtCosh * (1.0 + qDDo2) * aSqSum; Double_t sinhTerm = dtSinh * (1.0 + qDDo2) * interTermRe; if ( cpEigenValue_ == CPOdd ) { sinTerm *= -1.0; sinhTerm *= -1.0; } // ... 
to get the total and multiply by the efficiency Double_t ASq = coshTerm + cosTerm - sinTerm + sinhTerm; //ASq /= decayTimePdf->getNormTerm(); ASq *= eff; //Finally we throw the dice to see whether this event should be generated //We make a distinction between the likelihood of TM and SCF to tag the SCF events as such randNo = LauRandom::randomFun()->Rndm(); if (randNo <= ASq/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASq > aSqMaxVar_) {aSqMaxVar_ = ASq;} } else { nGenLoop_++; } } // end of while !generatedEvent loop } // end of if (embeddedData) else control } else { if ( embeddedData ) { embeddedData->getEmbeddedEvent(0); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); } } // Check whether we have generated the toy MC OK. if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFlavModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFlavModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() ); this->generateExtraPdfValues(extraPdfs, embeddedData); } // Check for problems with the embedding if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepFlavModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } void LauTimeDepFlavModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleIntegerBranch("cpEigenvalue"); this->addGenNtupleIntegerBranch("tagFlv"); this->addGenNtupleIntegerBranch("tagCat"); if (this->useDP() == kTRUE) { // Let's add the decay time variables. if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) { LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second; this->addGenNtupleDoubleBranch(pdf->varName()); this->addGenNtupleDoubleBranch(pdf->varErrName()); } this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { this->addGenNtupleDoubleBranch("reB0Amp"); this->addGenNtupleDoubleBranch("imB0Amp"); this->addGenNtupleDoubleBranch("reB0barAmp"); this->addGenNtupleDoubleBranch("imB0barAmp"); } } // Let's look at the extra variables for signal in one of the tagging categories if ( ! 
sigExtraPdf_.empty() ) { LauPdfList oneTagCatPdfList = sigExtraPdf_.begin()->second; for (LauPdfList::const_iterator pdf_iter = oneTagCatPdfList.begin(); pdf_iter != oneTagCatPdfList.end(); ++pdf_iter) { for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { this->addGenNtupleDoubleBranch( (*var_iter) ); } } } } } void LauTimeDepFlavModel::setDPDtBranchValues() { // Store the decay time variables. if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) { LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second; this->setGenNtupleDoubleBranchValue(pdf->varName(),curEvtDecayTime_); this->setGenNtupleDoubleBranchValue(pdf->varErrName(),curEvtDecayTimeErr_); } LauKinematics* kinematics(0); if (curEvtTagFlv_<0) { kinematics = kinematicsB0_; } else { kinematics = kinematicsB0bar_; } // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar = sigModelB0bar_->getEvtDPAmp(); LauComplex A = sigModelB0_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0Amp", A.re()); this->setGenNtupleDoubleBranchValue("imB0Amp", A.im()); this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re()); this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0); } } } void LauTimeDepFlavModel::generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData) { LauKinematics* kinematics(0); if (curEvtTagFlv_<0) { kinematics = kinematicsB0_; } else { kinematics = kinematicsB0bar_; } // Generate from the extra PDFs if (extraPdfs) { for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( (*pdf_iter)->varNames() ); } else { genValues = (*pdf_iter)->generate(kinematics); } for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) { TString varName = var_iter->first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var_iter->second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } } void LauTimeDepFlavModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { 
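// Schematic note (kept as a comment): both branches of this if/else set the
// complex mixing parameter to phiMixComplex_ = exp( -i*phiMix ), i.e.
//
//   phiMixComplex_.setRealPart(  TMath::Cos( phiMix ) );
//   phiMixComplex_.setImagPart( -TMath::Sin( phiMix ) );
//
// using the unblinded value of whichever parameterisation (phiMix itself, or
// sinPhiMix and cosPhiMix) is being floated in the fit.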
phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0_->updateCoeffs(coeffsB0_); this->calcInterTermNorm(); } // Update the signal events from the background numbers if not doing an extended fit if (!this->doEMLFit()) { this->updateSigEvents(); } } void LauTimeDepFlavModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; // tagging-category fractions for signal events this->setFirstTagCatFrac(signalTagCatFrac_); signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } void LauTimeDepFlavModel::setFirstTagCatFrac(LauTagCatParamMap& theMap) { Double_t firstCatFrac = 1.0; Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { if (iter == theMap.begin()) { firstCat = iter->first; continue; } LauParameter& par = iter->second; firstCatFrac -= par.unblindValue(); } theMap[firstCat].value(firstCatFrac); } void LauTimeDepFlavModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); // Start by caching the tagging and CP-eigenstate information evtTagCatVals_.clear(); evtTagFlvVals_.clear(); evtCPEigenVals_.clear(); if ( ! inputFitData->haveBranch( tagCatVarName_ ) ) { std::cerr << "ERROR in LauTimeDepFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagCatVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! inputFitData->haveBranch( tagVarName_ ) ) { std::cerr << "ERROR in LauTimeDepFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtTagCatVals_.reserve( nEvents ); evtTagFlvVals_.reserve( nEvents ); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); fitdata_iter = dataValues.find( tagCatVarName_ ); curEvtTagCat_ = static_cast( fitdata_iter->second ); if ( ! 
this->validTagCat( curEvtTagCat_ ) ) { std::cerr << "WARNING in LauTimeDepFlavModel::cacheInputFitVars : Invalid tagging category " << curEvtTagCat_ << " for event " << iEvt << ", setting it to untagged" << std::endl; curEvtTagCat_ = 0; } evtTagCatVals_.push_back( curEvtTagCat_ ); fitdata_iter = dataValues.find( tagVarName_ ); curEvtTagFlv_ = static_cast( fitdata_iter->second ); if ( TMath::Abs( curEvtTagFlv_ ) != 1 ) { if ( curEvtTagFlv_ > 0 ) { std::cerr << "WARNING in LauTimeDepFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTagFlv_ = +1; } else { std::cerr << "WARNING in LauTimeDepFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTagFlv_ = -1; } } evtTagFlvVals_.push_back( curEvtTagFlv_ ); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else { std::cerr<<"WARNING in LauTimeDepFlavModel::cacheInputFitVars : Unknown value: "<useDP() == kTRUE) { // DecayTime and SigmaDecayTime for (LauTagCatDtPdfMap::iterator dt_iter = signalDecayTimePdfs_.begin(); dt_iter != signalDecayTimePdfs_.end(); ++dt_iter) { (*dt_iter).second->cacheInfo(*inputFitData); } } // ...and then the extra PDFs for (LauTagCatPdfMap::iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter) { this->cacheInfo(pdf_iter->second, *inputFitData); } if (this->useDP() == kTRUE) { sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } } Double_t LauTimeDepFlavModel::getTotEvtLikelihood(const UInt_t iEvt) { // Find out whether the tag-side B was a B0 or a B0bar. curEvtTagFlv_ = evtTagFlvVals_[iEvt]; // Also get the tagging category. curEvtTagCat_ = evtTagCatVals_[iEvt]; // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds) this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds) this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); if (this->useDP() == kFALSE) { signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); } // Construct the total event likelihood Double_t likelihood(sigLike*signalTagCatFrac_[curEvtTagCat_].unblindValue()); if ( ! signalEvents_->fixed() ) { likelihood *= signalEvents; } return likelihood; } Double_t LauTimeDepFlavModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); return eventSum; } void LauTimeDepFlavModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. sigDPLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. if ( this->useDP() == kFALSE ) { return; } // Mistag probabilities. Defined as: omega = prob of the tagging B0 being reported as B0bar // Whether we want omega or omegaBar depends on q_tag, hence curEvtTagFlv_*... 
in the previous lines //Double_t misTagFrac = 0.5 * (1.0 - dilution_[curEvtTagCat_] - qDDo2); //Double_t misTagFracBar = 0.5 * (1.0 - dilution_[curEvtTagCat_] + qDDo2); // Calculate event quantities Double_t qD = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); Double_t qDDo2 = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); // Get the dynamics to calculate everything required for the likelihood calculation sigModelB0bar_->calcLikelihoodInfo(iEvt); sigModelB0_->calcLikelihoodInfo(iEvt); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar = sigModelB0bar_->getEvtDPAmp(); const LauComplex& A = sigModelB0_->getEvtDPAmp(); Double_t eff = sigModelB0bar_->getEvtEff(); // Next calculate the DP terms Double_t aSqSum = A.abs2() + Abar.abs2(); Double_t aSqDif = A.abs2() - Abar.abs2(); LauComplex inter = Abar * A.conj() * phiMixComplex_; Double_t interTermIm = 2.0 * inter.im(); Double_t interTermRe = 2.0 * inter.re(); // First get all the decay time terms //LauDecayTimePdf* signalDtPdf = signalDecayTimePdfs_[curEvtTagCat_]; LauDecayTimePdf* decayTimePdf = signalDecayTimePdfs_[curEvtTagCat_]; decayTimePdf->calcLikelihoodInfo(iEvt); // First get all the decay time terms Double_t dtCos = decayTimePdf->getCosTerm(); Double_t dtSin = decayTimePdf->getSinTerm(); Double_t dtCosh = decayTimePdf->getCoshTerm(); Double_t dtSinh = decayTimePdf->getSinhTerm(); Double_t cosTerm = dtCos * qD * aSqDif; Double_t sinTerm = dtSin * qD * interTermIm; Double_t coshTerm = dtCosh * (1.0 + qDDo2) * aSqSum; Double_t sinhTerm = dtSinh * (1.0 + qDDo2) * interTermRe; if ( cpEigenValue_ == CPOdd ) { sinTerm *= -1.0; sinhTerm *= -1.0; } // ... to get the total and multiply by the efficiency Double_t ASq = coshTerm + cosTerm - sinTerm + sinhTerm; ASq *= eff; // Calculate the DP and time normalisation Double_t normTermIndep = sigModelB0bar_->getDPNorm() + sigModelB0_->getDPNorm(); Double_t normTermCosh = decayTimePdf->getNormTermCosh(); Double_t normTermDep = interTermReNorm_; Double_t normTermSinh = decayTimePdf->getNormTermSinh(); Double_t norm = normTermIndep*normTermCosh + normTermDep*normTermSinh; // Calculate the normalised signal likelihood sigDPLike_ = ASq / norm; } void LauTimeDepFlavModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal LauTagCatPdfMap::iterator sig_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* pdfList = (sig_iter != sigExtraPdf_.end())? &(sig_iter->second) : 0; if (pdfList) { sigExtraLike_ = this->prodPdfValue( *pdfList, iEvt ); } } void LauTimeDepFlavModel::updateCoeffs() { coeffsB0bar_.clear(); coeffsB0_.clear(); coeffsB0bar_.reserve(nSigComp_); coeffsB0_.reserve(nSigComp_); for (UInt_t i = 0; i < nSigComp_; ++i) { coeffsB0bar_.push_back(coeffPars_[i]->antiparticleCoeff()); coeffsB0_.push_back(coeffPars_[i]->particleCoeff()); } } Bool_t LauTimeDepFlavModel::validTagCat(Int_t tagCat) const { return (validTagCats_.find(tagCat) != validTagCats_.end()); } Bool_t LauTimeDepFlavModel::checkTagCatFracMap(const LauTagCatParamMap& theMap) const { // First check that there is an entry for each tagging category. // NB an entry won't have been added if it isn't a valid category // so don't need to check for that here. 
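// Schematic summary (kept as a comment) of the signal DP x decay-time
// likelihood assembled in getEvtDPDtLikelihood() above.  With
// qD = q_tag*D and qDDo2 = q_tag*DeltaD/2 for the event's tagging category,
// the unnormalised density is
//
//   ASq = dtCosh * (1+qDDo2) * ( |A|^2 + |Abar|^2 )
//       + dtCos  *  qD       * ( |A|^2 - |Abar|^2 )
//       - dtSin  *  qD       * 2*Im( Abar*conj(A)*phiMixComplex_ )
//       + dtSinh * (1+qDDo2) * 2*Re( Abar*conj(A)*phiMixComplex_ )
//
// (the sin and sinh terms change sign for a CP-odd final state), multiplied
// by the DP efficiency and divided by the normalisation
//
//   norm = ( DPnormB0bar + DPnormB0 ) * normTermCosh
//        + interTermReNorm_           * normTermSinh
//
// where interTermReNorm_ is the efficiency-weighted interference integral
// accumulated by calcInterferenceTermIntegrals() and calcInterTermNorm().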
if (theMap.size() != signalTagCatFrac_.size()) { std::cerr<<"ERROR in LauTimeDepFlavModel::checkTagCatFracMap : Not all tagging categories present."< 1E-10) { std::cerr<<"ERROR in LauTimeDepFlavModel::checkTagCatFracMap : Tagging category event fractions do not sum to unity."< -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepFlavModel::embedSignal(Int_t tagCat, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_[tagCat]) { std::cerr<<"ERROR in LauTimeDepFlavModel::embedSignal : Already embedding signal from file for tagging category "<findBranches(); if (!dataOK) { delete signalTree_[tagCat]; signalTree_[tagCat] = 0; std::cerr<<"ERROR in LauTimeDepFlavModel::embedSignal : Problem creating data tree for embedding."<addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species. this->addSPlotNtupleDoubleBranch("sigTotalLike"); // Store the DP likelihoods if (this->useDP()) { this->addSPlotNtupleDoubleBranch("sigDPLike"); } // Store the likelihoods for each extra PDF const LauPdfList* pdfList( &(sigExtraPdf_.begin()->second) ); this->addSPlotNtupleBranches(pdfList, "sig"); } void LauTimeDepFlavModel::addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix) { if (!extraPdfs) { return; } // Loop through each of the PDFs for (LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } TString name(prefix); name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr<<"WARNING in LauTimeDepFlavModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // calculate the likelihood for this event (*pdf_iter)->calcLikelihoodInfo(iEvt); extraLike = (*pdf_iter)->getLikelihood(); totalLike *= extraLike; // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; Double_t indivLike = (*pdf_iter)->getLikelihood( (*var_iter) ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } TString name(prefix); name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<useDP()) { nameSet.insert("DP"); } LauPdfList pdfList( (sigExtraPdf_.begin()->second) ); for (LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { // Loop over the variables involved in each PDF for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { // If they are not DP coordinates then add them if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { nameSet.insert( (*var_iter) ); } } } return nameSet; } LauSPlot::NumbMap LauTimeDepFlavModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } return numbMap; } LauSPlot::NumbMap LauTimeDepFlavModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } 
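// Schematic note (kept as a comment): together with the free/fixed species
// yield maps built here, the sPlot machinery uses the per-event likelihoods
// stored by setSPlotNtupleBranchValues(), which are simply the product of the
// individual extra-PDF likelihoods for each species:
//
//   Double_t totalLike(1.0);
//   for ( LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter ) {
//       (*pdf_iter)->calcLikelihoodInfo(iEvt);
//       totalLike *= (*pdf_iter)->getLikelihood();
//   }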
return numbMap; } LauSPlot::TwoDMap LauTimeDepFlavModel::twodimPDFs() const { LauSPlot::TwoDMap twodimMap; const LauPdfList* pdfList = &(sigExtraPdf_.begin()->second); for (LauPdfList::const_iterator pdf_iter = pdfList->begin(); pdf_iter != pdfList->end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sig", std::make_pair( (*pdf_iter)->varNames()[0], (*pdf_iter)->varNames()[1] ) ) ); } } return twodimMap; } void LauTimeDepFlavModel::storePerEvtLlhds() { std::cout<<"INFO in LauTimeDepFlavModel::storePerEvtLlhds : Storing per-event likelihood values..."<fitData(); // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it if (!this->useDP() && this->storeDPEff()) { sigModelB0bar_->initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(sigModelB0bar_); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { // Find out whether we have B0bar or B0 curEvtTagFlv_ = evtTagFlvVals_[iEvt]; curEvtTagCat_ = evtTagCatVals_[iEvt]; LauTagCatPdfMap::iterator sig_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* sigPdfs = (sig_iter != sigExtraPdf_.end())? &(sig_iter->second) : 0; // the DP information this->getEvtDPDtLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { sigModel->calcLikelihoodInfo(iEvt); } this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); } if (this->useDP()) { sigTotalLike_ = sigDPLike_; this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); } else { sigTotalLike_ = 1.0; } // the signal PDF values sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigPdfs, "sig", iEvt); // the total likelihoods this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); // fill the tree this->fillSPlotNtupleBranches(); } std::cout<<"INFO in LauTimeDepFlavModel::storePerEvtLlhds : Finished storing per-event likelihood values."< #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepNonFlavModel.hh" ClassImp(LauTimeDepNonFlavModel) LauTimeDepNonFlavModel::LauTimeDepNonFlavModel(LauIsobarDynamics* modelB0bar_f, LauIsobarDynamics* modelB0_f, LauIsobarDynamics* modelB0bar_fbar, LauIsobarDynamics* modelB0_fbar, const Bool_t useUntaggedEvents, const TString& tagVarName, const TString& tagCatVarName) : LauAbsFitModel(), sigModelB0bar_f_(modelB0bar_f), sigModelB0_f_(modelB0_f), sigModelB0bar_fbar_(modelB0bar_fbar), sigModelB0_fbar_(modelB0_fbar), kinematicsB0bar_f_(modelB0bar_f ? modelB0bar_f->getKinematics() : 0), kinematicsB0_f_(modelB0_f ? 
modelB0_f->getKinematics() : 0), kinematicsB0bar_fbar_(modelB0bar_fbar ? modelB0bar_fbar->getKinematics() : 0), kinematicsB0_fbar_(modelB0_fbar ? modelB0_fbar->getKinematics() : 0), useUntaggedEvents_(useUntaggedEvents), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), coeffsB0bar_f_(0), coeffsB0_f_(0), coeffsB0bar_fbar_(0), coeffsB0_fbar_(0), coeffPars_B0f_B0barfbar_(0), coeffPars_B0fbar_B0barf_(0), interTermReNorm_f_(0), interTermReNorm_fbar_(0), interTermImNorm_f_(0), interTermImNorm_fbar_(0), fitFracB0bar_f_(0), fitFracB0_f_(0), fitFracB0bar_fbar_(0), fitFracB0_fbar_(0), fitFracAsymm_B0f_B0barfbar_(0), fitFracAsymm_B0fbar_B0barf_(0), acp_B0f_B0barfbar_(0), acp_B0fbar_B0barf_(0), meanEffB0bar_f_("meanEffB0bar_f",0.0,0.0,1.0), meanEffB0_f_("meanEffB0_f",0.0,0.0,1.0), meanEffB0bar_fbar_("meanEffB0bar_fbar",0.0,0.0,1.0), meanEffB0_fbar_("meanEffB0_fbar",0.0,0.0,1.0), DPRateB0bar_f_("DPRateB0bar_f",0.0,0.0,100.0), DPRateB0_f_("DPRateB0_f",0.0,0.0,100.0), DPRateB0bar_fbar_("DPRateB0bar_fbar",0.0,0.0,100.0), DPRateB0_fbar_("DPRateB0_fbar",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), signalTagCatFrac_(), tagVarName_(tagVarName), tagCatVarName_(tagCatVarName), cpevVarName_(""), validTagCats_(), curEvtTagFlv_(0), curEvtTagCat_(0), cpEigenValue_(CPEven), evtTagFlvVals_(0), evtTagCatVals_(0), evtCPEigenVals_(0), dilution_(), deltaDilution_(), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), qD_(0.0), qDDo2_(0.0), sigExtraPdf_(), finalState_(0.0), iterationsMax_(500000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), normTimeDP_f_(0.0), normTimeDP_fbar_(0.0), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigTotalLike_(0.0) { // Add the untagged category as a valid category this->addValidTagCat(0); // Set the fraction, average dilution and dilution difference for the untagged category this->setSignalTagCatPars(0, 1.0, 0.0, 0.0, kTRUE); } LauTimeDepNonFlavModel::~LauTimeDepNonFlavModel() { // TODO - need to delete the various embedded data structures here } void LauTimeDepNonFlavModel::setupBkgndVectors() { } void LauTimeDepNonFlavModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." 
<< std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepNonFlavModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepNonFlavModel::setNBkgndEvents(LauAbsRValue* /*nBkgndEvents*/) { std::cerr << "WARNING in LauTimeDepNonFlavModel::setNBkgndEvents : This model does not yet support backgrounds" << std::endl; } void LauTimeDepNonFlavModel::addValidTagCats(const std::vector& tagCats) { for (std::vector::const_iterator iter = tagCats.begin(); iter != tagCats.end(); ++iter) { this->addValidTagCat(*iter); } } void LauTimeDepNonFlavModel::addValidTagCat(Int_t tagCat) { validTagCats_.insert(tagCat); } void LauTimeDepNonFlavModel::setSignalTagCatPars(const Int_t tagCat, const Double_t tagCatFrac, const Double_t dilution, const Double_t deltaDilution, const Bool_t fixTCFrac) { if (!this->validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setSignalTagCatPars : Tagging category \""<checkSignalTagCatFractions(); only when the user has //set them all up, in this->initialise(); } void LauTimeDepNonFlavModel::checkSignalTagCatFractions() { Double_t totalTaggedFrac(0.0); for (LauTagCatParamMap::const_iterator iter=signalTagCatFrac_.begin(); iter!=signalTagCatFrac_.end(); ++iter) { if (iter->first != 0) { const LauParameter& par = iter->second; totalTaggedFrac += par.value(); } } if ( ((totalTaggedFrac < (1.0-1.0e-8))&&!useUntaggedEvents_) || (totalTaggedFrac > (1.0+1.0e-8)) ) { std::cerr<<"WARNING in LauTimeDepNonFlavModel::checkSignalTagCatFractions : Tagging category fractions add up to "<second; Double_t newVal = par.value() / totalTaggedFrac; par.value(newVal); par.initValue(newVal); par.genValue(newVal); } } else if (useUntaggedEvents_) { Double_t tagCatFrac = 1.0 - totalTaggedFrac; TString tagCatFracName("signalTagCatFrac0"); signalTagCatFrac_[0].name(tagCatFracName); signalTagCatFrac_[0].range(0.0,1.0); signalTagCatFrac_[0].value(tagCatFrac); signalTagCatFrac_[0].initValue(tagCatFrac); signalTagCatFrac_[0].genValue(tagCatFrac); signalTagCatFrac_[0].fixed(kTRUE); TString dilutionName("dilution0"); dilution_[0].name(dilutionName); dilution_[0].range(0.0,1.0); dilution_[0].value(0.0); dilution_[0].initValue(0.0); dilution_[0].genValue(0.0); TString deltaDilutionName("deltaDilution0"); deltaDilution_[0].name(deltaDilutionName); deltaDilution_[0].range(-2.0,2.0); deltaDilution_[0].value(0.0); 
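// Schematic note (kept as a comment): when untagged events are in use the
// untagged category (tagCat 0) is not configured by the user; its fraction is
// derived so that all category fractions sum to unity and it carries zero
// dilution, since an untagged event provides no flavour information:
//
//   signalTagCatFrac_[0].value( 1.0 - totalTaggedFrac );
//   dilution_[0].value( 0.0 );
//   deltaDilution_[0].value( 0.0 );
//
// Otherwise (or if the tagged fractions exceed unity) each tagged fraction is
// rescaled by their total, as in the first branch above.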
deltaDilution_[0].initValue(0.0); deltaDilution_[0].genValue(0.0); } for (LauTagCatParamMap::const_iterator iter=dilution_.begin(); iter!=dilution_.end(); ++iter) { std::cout<<"INFO in LauTimeDepNonFlavModel::checkSignalTagCatFractions : Setting dilution for tagging category "<<(*iter).first<<" to "<<(*iter).second<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setSignalDtPdf : Tagging category \""<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setSignalPdfs : Tagging category \""<updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_f_ == 0) || (sigModelB0_f_ == 0) || (sigModelB0bar_fbar_ == 0) || (sigModelB0bar_fbar_ == 0)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Check here that the tagging category fractions add up to 1, otherwise "normalise". Also set up the untagged cat. // NB this has to be done early in the initialization as other methods access the tagCats map. this->checkSignalTagCatFractions(); // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } this->setExtraNtupleVars(); } void LauTimeDepNonFlavModel::recalculateNormalisation() { sigModelB0bar_f_->recalculateNormalisation(); sigModelB0_f_->recalculateNormalisation(); sigModelB0bar_fbar_->recalculateNormalisation(); sigModelB0_fbar_->recalculateNormalisation(); sigModelB0bar_f_->modifyDataTree(); sigModelB0_f_->modifyDataTree(); sigModelB0bar_fbar_->modifyDataTree(); sigModelB0_fbar_->modifyDataTree(); this->calcInterferenceTermIntegrals(); } void LauTimeDepNonFlavModel::initialiseDPModels() { if (sigModelB0bar_f_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0bar -> f signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0_f_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0 -> f signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0bar_fbar_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0bar -> fbar signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0_fbar_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0 -> fbar signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up //const UInt_t nAmpB0bar_f = sigModelB0bar_f_->getnAmp(); //const UInt_t nAmpB0_f = sigModelB0_f_->getnAmp(); //const UInt_t nAmpB0bar_fbar = sigModelB0bar_fbar_->getnAmp(); //const 
UInt_t nAmpB0_fbar = sigModelB0_fbar_->getnAmp(); const UInt_t nAmpB0bar_f = sigModelB0bar_f_->getnTotAmp(); const UInt_t nAmpB0_f = sigModelB0_f_->getnTotAmp(); const UInt_t nAmpB0bar_fbar = sigModelB0bar_fbar_->getnTotAmp(); const UInt_t nAmpB0_fbar = sigModelB0_fbar_->getnTotAmp(); if ( nAmpB0bar_f != nAmpB0_f ){ std::cerr << "ERROR in LauTimeDepNonFlavModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar_f << " != " << nAmpB0_f << std::endl; gSystem->Exit(EXIT_FAILURE); } else if ( nAmpB0bar_fbar != nAmpB0_fbar ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar_fbar << " != " << nAmpB0_fbar << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar_f != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar_f << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." << std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepNonFlavModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_f_); sigModelB0_f_->initialise(coeffsB0_f_); sigModelB0bar_fbar_->initialise(coeffsB0bar_fbar_); sigModelB0_fbar_->initialise(coeffsB0_fbar_); fifjEffSum_f_.clear(); fifjEffSum_fbar_.clear(); fifjEffSum_f_.resize(nSigComp_); fifjEffSum_fbar_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_f_[iAmp].resize(nSigComp_); fifjEffSum_fbar_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); } void LauTimeDepNonFlavModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar_f = sigModelB0bar_f_->getIntegralInfos(); const std::vector& integralInfoListB0_f = sigModelB0_f_->getIntegralInfos(); const std::vector& integralInfoListB0bar_fbar = sigModelB0bar_fbar_->getIntegralInfos(); const std::vector& integralInfoListB0_fbar = sigModelB0_fbar_->getIntegralInfos(); LauComplex A_f, Abar_f, A_fbar, Abar_fbar, fifjEffSumTerm_f, fifjEffSumTerm_fbar; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_f_[iAmp][jAmp].zero(); fifjEffSum_fbar_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions_f = integralInfoListB0bar_f.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions_f; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar_f = integralInfoListB0bar_f[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0_f = integralInfoListB0_f[iRegion]; const UInt_t nm13Points = integralInfoB0bar_f->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar_f->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight_f = integralInfoB0bar_f->getWeight(m13,m23); const Double_t eff_f = integralInfoB0bar_f->getEfficiency(m13,m23); const Double_t effWeight_f = eff_f*weight_f; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A_f = integralInfoB0_f->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar_f = integralInfoB0bar_f->getAmplitude(m13, m23, jAmp); fifjEffSumTerm_f = Abar_f*A_f.conj(); fifjEffSumTerm_f.rescale(effWeight_f); fifjEffSum_f_[iAmp][jAmp] += fifjEffSumTerm_f; } } } } } const UInt_t nIntegralRegions_fbar = integralInfoListB0bar_fbar.size(); for ( UInt_t iRegion(0); 
iRegion < nIntegralRegions_fbar; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar_fbar = integralInfoListB0bar_fbar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0_fbar = integralInfoListB0_fbar[iRegion]; const UInt_t nm13Points = integralInfoB0bar_fbar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar_fbar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight_fbar = integralInfoB0bar_fbar->getWeight(m13,m23); const Double_t eff_fbar = integralInfoB0bar_fbar->getEfficiency(m13,m23); const Double_t effWeight_fbar = eff_fbar*weight_fbar; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A_fbar = integralInfoB0_fbar->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar_fbar = integralInfoB0bar_fbar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm_fbar = Abar_fbar*A_fbar.conj(); fifjEffSumTerm_fbar.rescale(effWeight_fbar); fifjEffSum_fbar_[iAmp][jAmp] += fifjEffSumTerm_fbar; } } } } } } void LauTimeDepNonFlavModel::calcInterTermNorm() { const std::vector fNormB0bar_f = sigModelB0bar_f_->getFNorm(); const std::vector fNormB0_f = sigModelB0_f_->getFNorm(); const std::vector fNormB0bar_fbar = sigModelB0bar_fbar_->getFNorm(); const std::vector fNormB0_fbar = sigModelB0_fbar_->getFNorm(); LauComplex norm_f; LauComplex norm_fbar; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm_f = coeffsB0bar_f_[jAmp]*coeffsB0_f_[iAmp].conj(); LauComplex coeffTerm_fbar = coeffsB0bar_fbar_[jAmp]*coeffsB0_fbar_[iAmp].conj(); coeffTerm_f *= fifjEffSum_f_[iAmp][jAmp]; coeffTerm_fbar *= fifjEffSum_fbar_[iAmp][jAmp]; coeffTerm_f.rescale(fNormB0bar_f[jAmp] * fNormB0_f[iAmp]); coeffTerm_fbar.rescale(fNormB0bar_fbar[jAmp] * fNormB0_fbar[iAmp]); norm_f += coeffTerm_f; norm_fbar += coeffTerm_fbar; } } norm_f *= phiMixComplex_; norm_fbar *= phiMixComplex_; interTermReNorm_f_ = 2.0*norm_f.re(); interTermImNorm_f_ = 2.0*norm_f.im(); interTermReNorm_fbar_ = 2.0*norm_fbar.re(); interTermImNorm_fbar_ = 2.0*norm_fbar.im(); } void LauTimeDepNonFlavModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : Set of coefficients for B0/B0bar -> f,fbar contains only component for f final state \""<name()<<"\"."<name(); TString compName_B0fbar_B0barf = coeffSet_B0fbar_B0barf->name(); TString conjName_B0f_B0barfbar = sigModelB0bar_fbar_->getConjResName(compName_B0f_B0barfbar); TString conjName_B0fbar_B0barf = sigModelB0bar_f_->getConjResName(compName_B0fbar_B0barf); std::cout << "Values are: " << std::endl; std::cout << "CompName: " << compName_B0f_B0barfbar << " " << compName_B0fbar_B0barf << std::endl; std::cout << "ComjName: " << conjName_B0f_B0barfbar << " " << conjName_B0fbar_B0barf << std::endl; // Define each daughter configuration const LauDaughters* daughtersB0bar_f = sigModelB0bar_f_->getDaughters(); const LauDaughters* daughtersB0_f = sigModelB0_f_->getDaughters(); const LauDaughters* daughtersB0bar_fbar = sigModelB0bar_fbar_->getDaughters(); const LauDaughters* daughtersB0_fbar = sigModelB0_fbar_->getDaughters(); const Bool_t conjugateB0_f = daughtersB0_f->isConjugate( daughtersB0bar_fbar ); const Bool_t conjugateB0_fbar = daughtersB0_fbar->isConjugate( daughtersB0bar_f ); std::cout << "I am here 1 " << std::endl; if ( ! sigModelB0_f_->hasResonance(compName_B0f_B0barfbar) ) { if ( ! 
sigModelB0_f_->hasResonance(conjName_B0f_B0barfbar) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : B0 -> f signal DP model doesn't contain component \""<< compName_B0f_B0barfbar <<"\"."<hasResonance(compName_B0fbar_B0barf) ) { std::cout << "Checked: " << compName_B0fbar_B0barf << std::endl; if ( ! sigModelB0_fbar_->hasResonance(conjName_B0fbar_B0barf) ) { std::cout << "Checked: " << conjName_B0fbar_B0barf << std::endl; std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : B0 -> fbar signal DP model doesn't contain component \""<< compName_B0fbar_B0barf<<"\"."<hasResonance(conjName_B0f_B0barfbar) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<hasResonance(conjName_B0fbar_B0barf) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<hasResonance(compName_B0f_B0barfbar) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<hasResonance(compName_B0fbar_B0barf) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<::const_iterator iter_B0f_B0barfbar=coeffPars_B0f_B0barfbar_.begin(); iter_B0f_B0barfbar!=coeffPars_B0f_B0barfbar_.end(); ++iter_B0f_B0barfbar) { if ((*iter_B0f_B0barfbar)->name() == compName_B0f_B0barfbar) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : Have already set coefficients for \""<::const_iterator iter_B0fbar_B0barf=coeffPars_B0fbar_B0barf_.begin(); iter_B0fbar_B0barf!=coeffPars_B0fbar_B0barf_.end(); ++iter_B0fbar_B0barf) { if ((*iter_B0fbar_B0barf)->name() == compName_B0fbar_B0barf) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffSet_B0fbar_B0barf->index(nSigComp_); coeffPars_B0f_B0barfbar_.push_back(coeffSet_B0f_B0barfbar); coeffPars_B0fbar_B0barf_.push_back(coeffSet_B0fbar_B0barf); TString parName_B0f_B0barfbar = coeffSet_B0f_B0barfbar->baseName(); parName_B0f_B0barfbar += "FitFracAsym"; TString parName_B0fbar_B0barf = coeffSet_B0fbar_B0barf->baseName(); parName_B0fbar_B0barf += "FitFracAsym"; fitFracAsymm_B0f_B0barfbar_.push_back(LauParameter(parName_B0f_B0barfbar, 0.0, -1.0, 1.0)); fitFracAsymm_B0fbar_B0barf_.push_back(LauParameter(parName_B0fbar_B0barf, 0.0, -1.0, 1.0)); acp_B0f_B0barfbar_.push_back(coeffSet_B0f_B0barfbar->acp()); acp_B0fbar_B0barf_.push_back(coeffSet_B0fbar_B0barf->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepNonFlavModel::setAmpCoeffSet : Added coefficients for components \""<f, B0bar->fbar) and \""<fbar, B0bar->f)"<acp(); acp_B0fbar_B0barf_[i] = coeffPars_B0fbar_B0barf_[i]->acp(); LauAsymmCalc asymmCalc_B0f_B0barfbar(fitFracB0bar_fbar_[i][i].value(), fitFracB0_f_[i][i].value()); LauAsymmCalc asymmCalc_B0fbar_B0barf(fitFracB0bar_f_[i][i].value(), fitFracB0_fbar_[i][i].value()); Double_t asym_B0f_B0barfbar = asymmCalc_B0f_B0barfbar.getAsymmetry(); Double_t asym_B0fbar_B0barf = asymmCalc_B0fbar_B0barf.getAsymmetry(); fitFracAsymm_B0f_B0barfbar_[i].value(asym_B0f_B0barfbar); fitFracAsymm_B0fbar_B0barf_[i].value(asym_B0fbar_B0barf); if (initValues) { fitFracAsymm_B0f_B0barfbar_[i].genValue(asym_B0f_B0barfbar); fitFracAsymm_B0fbar_B0barf_[i].genValue(asym_B0fbar_B0barf); fitFracAsymm_B0f_B0barfbar_[i].initValue(asym_B0f_B0barfbar); fitFracAsymm_B0fbar_B0barf_[i].initValue(asym_B0fbar_B0barf); } } } void LauTimeDepNonFlavModel::setSignalDPParameters() { // Set the fit parameters for the signal 
model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauTimeDepNonFlavModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; i++) { LauParameterPList pars_B0f_B0barfbar = coeffPars_B0f_B0barfbar_[i]->getParameters(); LauParameterPList pars_B0fbar_B0barf = coeffPars_B0fbar_B0barf_[i]->getParameters(); for (LauParameterPList::iterator iter_B0f_B0barfbar = pars_B0f_B0barfbar.begin(); iter_B0f_B0barfbar != pars_B0f_B0barfbar.end(); ++iter_B0f_B0barfbar) { if ( !(*iter_B0f_B0barfbar)->clone() ) { fitVars.push_back(*iter_B0f_B0barfbar); ++nSigDPPar_; } } for (LauParameterPList::iterator iter_B0fbar_B0barf = pars_B0fbar_B0barf.begin(); iter_B0fbar_B0barf != pars_B0fbar_B0barf.end(); ++iter_B0fbar_B0barf) { if ( !(*iter_B0fbar_B0barf)->clone() ) { fitVars.push_back(*iter_B0fbar_B0barf); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar_f = sigModelB0bar_f_->getFloatingParameters(); LauParameterPList& sigDPParsB0_f = sigModelB0_f_->getFloatingParameters(); LauParameterPList& sigDPParsB0bar_fbar = sigModelB0bar_fbar_->getFloatingParameters(); LauParameterPList& sigDPParsB0_fbar = sigModelB0_fbar_->getFloatingParameters(); for ( LauParameterPList::iterator iter = sigDPParsB0bar_f.begin(); iter != sigDPParsB0bar_f.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0_f.begin(); iter != sigDPParsB0_f.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0bar_fbar.begin(); iter != sigDPParsB0bar_fbar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0_fbar.begin(); iter != sigDPParsB0_fbar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } UInt_t LauTimeDepNonFlavModel::addParametersToFitList(LauTagCatDtPdfMap& theMap) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (LauTagCatDtPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { // grab the pdf and then its parameters LauDecayTimePdf* thePdf = (*iter).second; // The first one is the tagging category LauAbsRValuePList& rvalues = thePdf->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++counter; } } } } return counter; } UInt_t LauTimeDepNonFlavModel::addParametersToFitList(LauTagCatPdfMap& theMap) { UInt_t counter(0); // loop through the map for 
(LauTagCatPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { counter += this->addFitParameters(iter->second); // first is the tagging category } return counter; } void LauTimeDepNonFlavModel::setDecayTimeParameters() { nDecayTimePar_ = 0; // Loop over the Dt PDFs nDecayTimePar_ += this->addParametersToFitList(signalDecayTimePdfs_); LauParameterPList& fitVars = this->fitPars(); if (useSinCos_) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } else { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } void LauTimeDepNonFlavModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; nExtraPdfPar_ += this->addParametersToFitList(sigExtraPdf_); } void LauTimeDepNonFlavModel::setFitNEvents() { nNormPar_ = 0; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepNonFlavModel::setFitNEvents : Initialising number of events for signal and background components..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // tagging-category fractions for signal events for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { continue; } LauParameter* par = &((*iter).second); fitVars.push_back(par); ++nNormPar_; } } void LauTimeDepNonFlavModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. 
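// ---------------------------------------------------------------------------
// A minimal standalone sketch (illustrative types and names, not Laura++ API)
// of two pieces of bookkeeping used just above.  First, setSignalDPParameters
// and the addParametersToFitList overloads hand only "original" (non-clone)
// parameters to the fitter, and use a set so that a parameter shared between
// several DP models is registered only once.  Second, setFitNEvents floats all
// but the first tagging-category fraction; the first one is recovered (see
// setFirstTagCatFrac further down) as one minus the sum of the others.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <map>
#include <set>
#include <vector>

struct Par { bool isClone; };   // stand-in for a fit parameter

// Keep each original parameter exactly once in the list passed to the fitter
std::size_t collectUniqueOriginals(const std::vector<std::vector<Par*>>& perModelPars,
                                   std::set<Par*>& alreadySeen,
                                   std::vector<Par*>& fitPars)
{
    std::size_t nAdded(0);
    for (const auto& pars : perModelPars) {
        for (Par* p : pars) {
            if (!p->isClone && alreadySeen.insert(p).second) {
                fitPars.push_back(p);
                ++nAdded;
            }
        }
    }
    return nAdded;
}

// The non-floated first category takes up whatever fraction is left over
double firstTagCatFraction(const std::map<int,double>& floatedFractions)
{
    double first(1.0);
    for (const auto& catFrac : floatedFractions) {
        first -= catFrac.second;
    }
    return first;
}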
if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 (f/fbar) and B0bar (f/fbar) fit fractions for each signal component fitFracB0bar_f_ = sigModelB0bar_f_->getFitFractions(); if (fitFracB0bar_f_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_f_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0bar_fbar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_fbar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_B0f_B0barfbar_[i]); extraVars.push_back(fitFracAsymm_B0fbar_B0barf_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_B0f_B0barfbar_[i]); extraVars.push_back(acp_B0fbar_B0barf_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar_f = sigModelB0bar_f_->getMeanEff().initValue(); meanEffB0bar_f_.value(initMeanEffB0bar_f); meanEffB0bar_f_.initValue(initMeanEffB0bar_f); meanEffB0bar_f_.genValue(initMeanEffB0bar_f); extraVars.push_back(meanEffB0bar_f_); Double_t initMeanEffB0_f = sigModelB0_f_->getMeanEff().initValue(); meanEffB0_f_.value(initMeanEffB0_f); meanEffB0_f_.initValue(initMeanEffB0_f); meanEffB0_f_.genValue(initMeanEffB0_f); extraVars.push_back(meanEffB0_f_); Double_t initMeanEffB0bar_fbar = sigModelB0bar_fbar_->getMeanEff().initValue(); meanEffB0bar_fbar_.value(initMeanEffB0bar_fbar); meanEffB0bar_fbar_.initValue(initMeanEffB0bar_fbar); meanEffB0bar_fbar_.genValue(initMeanEffB0bar_fbar); extraVars.push_back(meanEffB0bar_fbar_); Double_t initMeanEffB0_fbar = sigModelB0_fbar_->getMeanEff().initValue(); meanEffB0_fbar_.value(initMeanEffB0_fbar); meanEffB0_fbar_.initValue(initMeanEffB0_fbar); meanEffB0_fbar_.genValue(initMeanEffB0_fbar); extraVars.push_back(meanEffB0_fbar_); // Also add in the DP rates Double_t initDPRateB0bar_f = sigModelB0bar_f_->getDPRate().initValue(); DPRateB0bar_f_.value(initDPRateB0bar_f); DPRateB0bar_f_.initValue(initDPRateB0bar_f); DPRateB0bar_f_.genValue(initDPRateB0bar_f); extraVars.push_back(DPRateB0bar_f_); Double_t initDPRateB0_f = sigModelB0_f_->getDPRate().initValue(); DPRateB0_f_.value(initDPRateB0_f); DPRateB0_f_.initValue(initDPRateB0_f); DPRateB0_f_.genValue(initDPRateB0_f); extraVars.push_back(DPRateB0_f_); Double_t initDPRateB0bar_fbar = sigModelB0bar_fbar_->getDPRate().initValue(); DPRateB0bar_fbar_.value(initDPRateB0bar_fbar); DPRateB0bar_fbar_.initValue(initDPRateB0bar_fbar); DPRateB0bar_fbar_.genValue(initDPRateB0bar_fbar); extraVars.push_back(DPRateB0bar_fbar_); 
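// ---------------------------------------------------------------------------
// The fit-fraction asymmetries stored in the extra ntuple variables here are
// built (in calcAsymmetries above) from the diagonal fit fractions of the two
// conjugate samples.  A minimal sketch of that combination, assuming the same
// argument order as the LauAsymmCalc calls above (B0bar fraction first, then
// the B0 one); the real class also guards against a vanishing denominator.
// ---------------------------------------------------------------------------
double fitFracAsymmetry(const double fracB0bar, const double fracB0)
{
    const double sum = fracB0bar + fracB0;
    return ( sum != 0.0 ) ? ( fracB0bar - fracB0 ) / sum : 0.0;
}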
Double_t initDPRateB0_fbar = sigModelB0_fbar_->getDPRate().initValue(); DPRateB0_fbar_.value(initDPRateB0_fbar); DPRateB0_fbar_.initValue(initDPRateB0_fbar); DPRateB0_fbar_.genValue(initDPRateB0_fbar); extraVars.push_back(DPRateB0_fbar_); } void LauTimeDepNonFlavModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. // Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_B0f_B0barfbar_[i]->finaliseValues(); coeffPars_B0fbar_B0barf_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters for (LauTagCatDtPdfMap::iterator iter = signalDecayTimePdfs_.begin(); iter != signalDecayTimePdfs_.end(); ++iter) { LauDecayTimePdf* pdf = (*iter).second; pdf->updatePulls(); } if (useSinCos_) { cosPhiMix_.updatePull(); sinPhiMix_.updatePull(); } else { this->checkMixingPhase(); } // Update the pulls on all the extra PDFs' parameters for (LauTagCatPdfMap::iterator iter = sigExtraPdf_.begin(); iter != sigExtraPdf_.end(); ++iter) { this->updateFitParameters(iter->second); } // Tagging-category fractions for signal and background events Double_t firstCatFrac(1.0); Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { firstCat = iter->first; continue; } LauParameter& par = (*iter).second; firstCatFrac -= par.value(); // update the parameter pull par.updatePull(); } signalTagCatFrac_[firstCat].value(firstCatFrac); signalTagCatFrac_[firstCat].updatePull(); // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_f_->updateCoeffs(coeffsB0bar_f_); sigModelB0bar_f_->calcExtraInfo(); sigModelB0_f_->updateCoeffs(coeffsB0_f_); sigModelB0_f_->calcExtraInfo(); sigModelB0bar_fbar_->updateCoeffs(coeffsB0bar_fbar_); sigModelB0bar_fbar_->calcExtraInfo(); sigModelB0_fbar_->updateCoeffs(coeffsB0_fbar_); sigModelB0_fbar_->calcExtraInfo(); LauParArray fitFracB0bar_f = sigModelB0bar_f_->getFitFractions(); if (fitFracB0bar_f.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_f = sigModelB0_f_->getFitFractions(); if (fitFracB0_f.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0bar_fbar = sigModelB0bar_fbar_->getFitFractions(); if (fitFracB0bar_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t 
i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_fbar = sigModelB0_fbar_->getFitFractions(); if (fitFracB0_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_f_.value(sigModelB0_f_->getMeanEff().value()); meanEffB0bar_fbar_.value(sigModelB0bar_fbar_->getMeanEff().value()); meanEffB0_fbar_.value(sigModelB0_fbar_->getMeanEff().value()); DPRateB0bar_f_.value(sigModelB0bar_f_->getDPRate().value()); DPRateB0_f_.value(sigModelB0_f_->getDPRate().value()); DPRateB0bar_fbar_.value(sigModelB0bar_fbar_->getDPRate().value()); DPRateB0_fbar_.value(sigModelB0_fbar_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepNonFlavModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // B0 -> f events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0f_B0barfbar_[i]->name()); output<<"B0bar FitFraction for component "< f overall DP rate (integral of matrix element squared) = "< f average efficiency weighted by whole DP dynamics = "< fbar sample for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0f_B0barfbar_[i]->name()); const TString conjName(sigModelB0_f_->getConjResName(compName)); output<<"B0 FitFraction for component "< fbar overall DP rate (integral of matrix element squared) = "< fbar average efficiency weighted by whole DP dynamics = "< fbar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0fbar_B0barf_[i]->name()); output<<"B0bar FitFraction for component "< fbar overall DP rate (integral of matrix element squared) = "< fbar average efficiency weighted by whole DP dynamics = "< f sample for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0fbar_B0barf_[i]->name()); const TString conjName(sigModelB0_fbar_->getConjResName(compName)); output<<"B0 FitFraction for component "< f overall DP rate (integral of matrix element squared) = "< f average efficiency weighted by whole DP dynamics = "<name()); output<<"Fit Fraction for B0(B0bar) -> f(fbar) asymmetry for component "< fbar(f) asymmetry for component "< f(fbar) component "<name()); output<<"ACP for B0(B0bar) -> fbar(f) component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_B0f_B0barfbar_.front()->printTableHeading(fout); for 
(UInt_t i = 0; i < nSigComp_; i++) { coeffPars_B0f_B0barfbar_[i]->printTableRow(fout); } fout<<"\\hline"<printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_B0fbar_B0barf_[i]->printTableRow(fout); } fout<<"\\hline"< f(fbar) fout<<"\\begin{tabular}{|l|c|c|c|c|}"< fbar \\ Fit Fraction & \\Bz ->f \\ Fit Fraction & Fit Fraction Asymmetry & $A_{\\CP}$ \\\\"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_fbar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_f_.value()); fout << "$ & & \\\\" << std::endl; fout << "$ & & & & & & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; // Another combination for B0(B0bar) -> fbar(f) fout<<"\\begin{tabular}{|l|c|c|c|c|}"< f \\ Fit Fraction & \\Bz ->fbar \\ Fit Fraction & Fit Fraction Asymmetry & $A_{\\CP}$ \\\\"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_f_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_fbar_.value()); fout << "$ & & \\\\" << std::endl; fout << "$ & & & & & & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(iter->second, fout); } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepNonFlavModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! 
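// ---------------------------------------------------------------------------
// Two short standalone sketches of the toy-setup steps that follow below; all
// names and types here are illustrative, not the class's own methods.  The
// randomisation below draws phiMix flat in (-pi, pi) and, when sin(phiMix) and
// cos(phiMix) are floated instead, seeds them from that draw.  eventsToGenerate
// then shares the signal yield among tagging categories and tags, with optional
// Poisson smearing of each sub-yield.
// ---------------------------------------------------------------------------
#include <cmath>
#include <random>
#include <utility>

// Seed {sinPhiMix, cosPhiMix} from a flat draw of the mixing phase
std::pair<double,double> randomPhiMixSinCos(std::mt19937& rng)
{
    const double pi = 3.14159265358979323846;
    std::uniform_real_distribution<double> flat(-pi, pi);
    const double phi = flat(rng);
    return { std::sin(phi), std::cos(phi) };
}

// Split one tagging category's share of the yield between the two tags,
// optionally Poisson-smearing each piece
struct TagYields { long nB0; long nB0bar; };

TagYields splitCategoryYield(const double nTotal, const double catFraction,
                             const double asym, const bool poissonSmear,
                             std::mt19937& rng)
{
    const double nCat = nTotal * catFraction;
    double nB0    = 0.5 * nCat * (1.0 - asym);
    double nB0bar = 0.5 * nCat * (1.0 + asym);
    if (poissonSmear) {
        nB0    = std::poisson_distribution<long>(nB0)(rng);
        nB0bar = std::poisson_distribution<long>(nB0bar)(rng);
    }
    return { std::lround(nB0), std::lround(nB0bar) };
}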
std::cout<<"INFO in LauTimeDepNonFlavModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); coeffPars_B0fbar_B0barf_[i]->randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepNonFlavModel::LauGenInfo LauTimeDepNonFlavModel::eventsToGenerate() { // TODO : Check whether in this bit we keep the same procedure or not // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; LauTagCatGenInfo eventsB0, eventsB0bar; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; Double_t eventsB0byTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 - sigAsym)); Double_t eventsB0barbyTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 + sigAsym)); if (this->doPoissonSmearing()) { eventsB0byTagCat = LauRandom::randomFun()->Poisson(eventsB0byTagCat); eventsB0barbyTagCat = LauRandom::randomFun()->Poisson(eventsB0barbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsB0byTagCat), evtWeight ); eventsB0bar[iter->first] = std::make_pair( TMath::Nint(eventsB0barbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",-1)] = eventsB0; nEvtsGen[std::make_pair("signal",+1)] = eventsB0bar; } else { Double_t rateB0bar = sigModelB0bar_f_->getDPRate().value(); Double_t rateB0 = sigModelB0_f_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; if (this->doPoissonSmearing()) { eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. } std::cout<<"INFO in LauTimeDepNonFlavModel::eventsToGenerate : Generating toy MC with:"<setGenNtupleIntegerBranchValue("genSig",1); // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { genOK = kFALSE; } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. 
break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); this->setGenNtupleIntegerBranchValue("tagCat",curEvtTagCat_); this->setGenNtupleIntegerBranchValue("tagFlv",curEvtTagFlv_); // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepNonFlavModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_f_->checkToyMC(kTRUE); sigModelB0_f_->checkToyMC(kTRUE); sigModelB0bar_fbar_->checkToyMC(kTRUE); sigModelB0_fbar_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_f = sigModelB0_f_->getFitFractions(); if (fitFracB0_f.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0bar_fbar = sigModelB0bar_fbar_->getFitFractions(); if (fitFracB0bar_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_fbar = sigModelB0_fbar_->getFitFractions(); if (fitFracB0_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_f_.value(sigModelB0_f_->getMeanEff().value()); meanEffB0bar_fbar_.value(sigModelB0bar_fbar_->getMeanEff().value()); meanEffB0_fbar_.value(sigModelB0_fbar_->getMeanEff().value()); DPRateB0bar_f_.value(sigModelB0bar_f_->getDPRate().value()); DPRateB0_f_.value(sigModelB0_f_->getDPRate().value()); DPRateB0bar_fbar_.value(sigModelB0bar_fbar_->getDPRate().value()); DPRateB0_fbar_.value(sigModelB0_fbar_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events //if (!signalTree_.empty() && (reuseSignal_ || !genOK)) { if (reuseSignal_ || !genOK) { for(LauTagCatEmbDataMap::const_iterator iter = signalTree_.begin(); iter != signalTree_.end(); ++iter) { (iter->second)->clearUsedList(); } } return genOK; } Bool_t LauTimeDepNonFlavModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_f_->squareDP(); doSquareDP &= kinematicsB0_f_->squareDP(); doSquareDP &= kinematicsB0bar_fbar_->squareDP(); doSquareDP &= kinematicsB0_fbar_->squareDP(); LauKinematics* kinematics = 0; //(kinematicsB0bar_); // find the right decay time PDF for the current tagging category LauTagCatDtPdfMap::const_iterator dt_iter = signalDecayTimePdfs_.find(curEvtTagCat_); LauDecayTimePdf* decayTimePdf = (dt_iter != signalDecayTimePdfs_.end()) ? 
dt_iter->second : 0; // find the right embedded data for the current tagging category LauTagCatEmbDataMap::const_iterator emb_iter = signalTree_.find(curEvtTagCat_); LauEmbeddedData* embeddedData = (emb_iter != signalTree_.end()) ? emb_iter->second : 0; // find the right extra PDFs for the current tagging category LauTagCatPdfMap::iterator extra_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* extraPdfs = (extra_iter != sigExtraPdf_.end()) ? &(extra_iter->second) : 0; if (this->useDP()) { if (embeddedData) { // TODO : correct the kinematic term to the two possible final state // This option is not allowed in the moment kinematics = kinematicsB0bar_f_; embeddedData->getEmbeddedEvent(kinematics); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); if (embeddedData->haveBranch("mcMatch")) { Int_t match = TMath::Nint(embeddedData->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = decayTimePdf->generateError(kTRUE); while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { // Calculate the unnormalised truth-matched signal likelihood // First let define the tag flavour Double_t randNo = LauRandom::randomFun()->Rndm(); if (randNo < 0.5) { curEvtTagFlv_ = +1; // B0 tag } else { curEvtTagFlv_ = -1; // B0bar tag } // Calculate event quantities that depend only on the tagCat and tagFlv qD_ = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); qDDo2_ = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); // Generate decay time const Double_t tMin = decayTimePdf->minAbscissa(); const Double_t tMax = decayTimePdf->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin; // Calculate all the decay time info decayTimePdf->calcLikelihoodInfo(curEvtDecayTime_, curEvtDecayTimeErr_); - // ...and check that the calculation went ok, otherwise loop again - if (decayTimePdf->state() != LauDecayTimePdf::Good) { - ++nGenLoop_; - continue; - } // Calculate the relevant amplitude normalisation for the two DP's this->calculateAmplitudeNorm(decayTimePdf); // DP variables Double_t m13Sq(0.0), m23Sq(0.0); Double_t randNo_finalState = LauRandom::randomFun()->Rndm(); if (randNo_finalState < normTimeDP_f_/(normTimeDP_f_+normTimeDP_fbar_)) { finalState_ = +1; // A(Abar) -> f // Generate DP position kinematicsB0bar_f_->genFlatPhaseSpace(m13Sq, m23Sq); kinematicsB0_f_->updateKinematics(kinematicsB0bar_f_->getm13Sq(), kinematicsB0bar_f_->getm23Sq() ); // Calculate the total A and Abar for the given DP position sigModelB0_f_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_f_->calcLikelihoodInfo(m13Sq, m23Sq); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_f_, sigModelB0_f_); } else { finalState_ = -1; // A(Abar) -> fbar // Generate DP position kinematicsB0bar_fbar_->genFlatPhaseSpace(m13Sq, m23Sq); kinematicsB0_fbar_->updateKinematics(kinematicsB0bar_fbar_->getm13Sq(), kinematicsB0bar_fbar_->getm23Sq() ); // Calculate the total A and Abar for the given DP position sigModelB0_fbar_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_fbar_->calcLikelihoodInfo(m13Sq, 
m23Sq); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_fbar_, sigModelB0_fbar_); } //Finally we throw the dice to see whether this event should be generated //We make a distinction between the likelihood of TM and SCF to tag the SCF events as such randNo = LauRandom::randomFun()->Rndm(); if (randNo <= ASq_/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASq_ > aSqMaxVar_) {aSqMaxVar_ = ASq_;} } else { nGenLoop_++; } } // end of while !generatedEvent loop } // end of if (embeddedData) else control } else { if ( embeddedData ) { embeddedData->getEmbeddedEvent(0); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); } } // Check whether we have generated the toy MC OK. if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepNonFlavModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepNonFlavModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<generateExtraPdfValues(extraPdfs, embeddedData); } // Check for problems with the embedding if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepNonFlavModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } void LauTimeDepNonFlavModel::calculateDPterms(LauDecayTimePdf* decayTimePdf, LauIsobarDynamics* sigModelB0bar, LauIsobarDynamics* sigModelB0) { // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar = sigModelB0bar->getEvtDPAmp(); const LauComplex& A = sigModelB0->getEvtDPAmp(); Double_t eff = sigModelB0bar->getEvtEff(); // Calculate the DP terms Double_t aSqSum = A.abs2() + Abar.abs2(); Double_t aSqDif = A.abs2() - Abar.abs2(); LauComplex inter = Abar * A.conj() * phiMixComplex_; Double_t interTermIm = 2.0 * inter.im(); Double_t interTermRe = 2.0 * inter.re(); // Decay time pdf terms Double_t dtCos = decayTimePdf->getCosTerm(); Double_t dtSin = decayTimePdf->getSinTerm(); Double_t dtCosh = decayTimePdf->getCoshTerm(); Double_t dtSinh = decayTimePdf->getSinhTerm(); // Combine all terms Double_t cosTerm = dtCos * qD_ * aSqDif; Double_t sinTerm = dtSin * qD_ * interTermIm; Double_t coshTerm = dtCosh * (1.0 + qDDo2_) * aSqSum; Double_t sinhTerm = dtSinh * (1.0 + qDDo2_) * interTermRe; if ( cpEigenValue_ == CPOdd ) { sinTerm *= -1.0; sinhTerm *= -1.0; } // Total amplitude and multiply by the efficiency ASq_ = coshTerm + cosTerm - sinTerm + sinhTerm; ASq_ *= eff; } void LauTimeDepNonFlavModel::calculateAmplitudeNorm(LauDecayTimePdf* decayTimePdf) { // Integrals of the sum of the ampltudes to the f(fbar) integral( |A|^2 + |Abar|^2 ) dP Double_t normTermNonDep_f = sigModelB0bar_f_->getDPNorm() + sigModelB0_f_->getDPNorm(); Double_t normTermNonDep_fbar = sigModelB0bar_fbar_->getDPNorm() + sigModelB0_fbar_->getDPNorm(); // Integrals of cross terms |Abar|*|Aconj| Double_t normTermDep_f = interTermReNorm_f_; Double_t normTermDep_fbar = interTermReNorm_fbar_; // Decay time constant integrals Double_t normTermCosh = decayTimePdf->getNormTermCosh(); Double_t normTermSinh = decayTimePdf->getNormTermSinh(); // Time-dependent DP normalisation terms normTimeDP_f_ = normTermNonDep_f*normTermCosh + 
normTermDep_f*normTermSinh;
	normTimeDP_fbar_ = normTermNonDep_fbar*normTermCosh + normTermDep_fbar*normTermSinh;
}

void LauTimeDepNonFlavModel::setupGenNtupleBranches()
{
	// Setup the required ntuple branches
	this->addGenNtupleDoubleBranch("evtWeight");
	this->addGenNtupleIntegerBranch("genSig");
	this->addGenNtupleIntegerBranch("cpEigenvalue");
	this->addGenNtupleIntegerBranch("tagFlv");
	this->addGenNtupleIntegerBranch("tagCat");

	if (this->useDP() == kTRUE) {

		// Let's add the decay time variables.
		if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) {
			LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second;
			this->addGenNtupleDoubleBranch(pdf->varName());
			this->addGenNtupleDoubleBranch(pdf->varErrName());
		}

		this->addGenNtupleDoubleBranch("m12_f");
		this->addGenNtupleDoubleBranch("m23_f");
		this->addGenNtupleDoubleBranch("m13_f");
		this->addGenNtupleDoubleBranch("m12Sq_f");
		this->addGenNtupleDoubleBranch("m23Sq_f");
		this->addGenNtupleDoubleBranch("m13Sq_f");
		this->addGenNtupleDoubleBranch("cosHel12_f");
		this->addGenNtupleDoubleBranch("cosHel23_f");
		this->addGenNtupleDoubleBranch("cosHel13_f");
		if (kinematicsB0bar_f_->squareDP() && kinematicsB0_f_->squareDP()) {
			this->addGenNtupleDoubleBranch("mPrime_f");
			this->addGenNtupleDoubleBranch("thPrime_f");
		}

		this->addGenNtupleDoubleBranch("m12_fbar");
		this->addGenNtupleDoubleBranch("m23_fbar");
		this->addGenNtupleDoubleBranch("m13_fbar");
		this->addGenNtupleDoubleBranch("m12Sq_fbar");
		this->addGenNtupleDoubleBranch("m23Sq_fbar");
		this->addGenNtupleDoubleBranch("m13Sq_fbar");
		this->addGenNtupleDoubleBranch("cosHel12_fbar");
		this->addGenNtupleDoubleBranch("cosHel23_fbar");
		this->addGenNtupleDoubleBranch("cosHel13_fbar");
		if (kinematicsB0bar_fbar_->squareDP() && kinematicsB0_fbar_->squareDP()) {
			this->addGenNtupleDoubleBranch("mPrime_fbar");
			this->addGenNtupleDoubleBranch("thPrime_fbar");
		}

		// Can add the real and imaginary parts of the B0 and B0bar total
		// amplitudes seen in the generation (restrict this with a flag
		// that defaults to false)
		if ( storeGenAmpInfo_ ) {
			this->addGenNtupleDoubleBranch("reB0fAmp");
			this->addGenNtupleDoubleBranch("imB0fAmp");
			this->addGenNtupleDoubleBranch("reB0barfAmp");
			this->addGenNtupleDoubleBranch("imB0barfAmp");
			this->addGenNtupleDoubleBranch("reB0fbarAmp");
			this->addGenNtupleDoubleBranch("imB0fbarAmp");
			this->addGenNtupleDoubleBranch("reB0barfbarAmp");
			this->addGenNtupleDoubleBranch("imB0barfbarAmp");
		}
	}

	// Let's look at the extra variables for signal in one of the tagging categories
	if ( ! sigExtraPdf_.empty() ) {
		LauPdfList oneTagCatPdfList = sigExtraPdf_.begin()->second;
		for (LauPdfList::const_iterator pdf_iter = oneTagCatPdfList.begin(); pdf_iter != oneTagCatPdfList.end(); ++pdf_iter) {
			for ( std::vector<TString>::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) {
				if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
					this->addGenNtupleDoubleBranch( (*var_iter) );
				}
			}
		}
	}
}

void LauTimeDepNonFlavModel::setDPDtBranchValues()
{
	// Store the decay time variables.
if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) { LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second; this->setGenNtupleDoubleBranchValue(pdf->varName(),curEvtDecayTime_); this->setGenNtupleDoubleBranchValue(pdf->varErrName(),curEvtDecayTimeErr_); } LauKinematics* kinematics_f(0); LauKinematics* kinematics_fbar(0); if (curEvtTagFlv_<0) { kinematics_f = kinematicsB0_f_; kinematics_fbar = kinematicsB0_fbar_; } else { kinematics_f = kinematicsB0bar_f_; kinematics_fbar = kinematicsB0bar_fbar_; } // Store all the DP information this->setGenNtupleDoubleBranchValue("m12_f", kinematics_f->getm12()); this->setGenNtupleDoubleBranchValue("m23_f", kinematics_f->getm23()); this->setGenNtupleDoubleBranchValue("m13_f", kinematics_f->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq_f", kinematics_f->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq_f", kinematics_f->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq_f", kinematics_f->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12_f", kinematics_f->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23_f", kinematics_f->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13_f", kinematics_f->getc13()); if (kinematics_f->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime_f", kinematics_f->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime_f", kinematics_f->getThetaPrime()); } this->setGenNtupleDoubleBranchValue("m12_fbar", kinematics_fbar->getm12()); this->setGenNtupleDoubleBranchValue("m23_fbar", kinematics_fbar->getm23()); this->setGenNtupleDoubleBranchValue("m13_fbar", kinematics_fbar->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq_fbar", kinematics_fbar->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq_fbar", kinematics_fbar->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq_fbar", kinematics_fbar->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12_fbar", kinematics_fbar->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23_fbar", kinematics_fbar->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13_fbar", kinematics_fbar->getc13()); if (kinematics_fbar->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime_fbar", kinematics_fbar->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime_fbar", kinematics_fbar->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar_f = sigModelB0bar_f_->getEvtDPAmp(); LauComplex A_f = sigModelB0_f_->getEvtDPAmp(); LauComplex Abar_fbar = sigModelB0bar_fbar_->getEvtDPAmp(); LauComplex A_fbar = sigModelB0_fbar_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0fAmp", A_f.re()); this->setGenNtupleDoubleBranchValue("imB0fAmp", A_f.im()); this->setGenNtupleDoubleBranchValue("reB0barfAmp", Abar_f.re()); this->setGenNtupleDoubleBranchValue("imB0barfAmp", Abar_f.im()); this->setGenNtupleDoubleBranchValue("reB0fbarAmp", A_fbar.re()); this->setGenNtupleDoubleBranchValue("imB0fbarAmp", A_fbar.im()); this->setGenNtupleDoubleBranchValue("reB0barfbarAmp", Abar_fbar.re()); this->setGenNtupleDoubleBranchValue("imB0barfbarAmp", Abar_fbar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0fAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0fAmp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barfAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0barfAmp", 0.0); 
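// ---------------------------------------------------------------------------
// A standalone restatement of the density used in the accept/reject generation
// above: a candidate is kept with probability ASq/aSqMaxSet_, where ASq is
// assembled in calculateDPterms from the four decay-time kernels, the DP
// amplitudes and the tagging dilution.  The sketch below mirrors that
// combination with std::complex, together with the exp(-i*phiMix) factor that
// propagateParUpdates (below) rebuilds from either the phase itself or its
// floated sine and cosine.  Illustrative, self-contained code, not the class's
// own methods.
// ---------------------------------------------------------------------------
#include <complex>

// exp(-i*phiMix): identical whichever parametrisation is floated
std::complex<double> mixPhaseFactor(const double cosPhi, const double sinPhi)
{
    return { cosPhi, -sinPhi };
}

// Unnormalised time-dependent density for one DP point, decay time and tag
double timeDepDensity(const std::complex<double>& A, const std::complex<double>& Abar,
                      const std::complex<double>& phiMix,          // exp(-i*phiMix)
                      const double dtCosh, const double dtSinh,    // decay-time kernels
                      const double dtCos, const double dtSin,
                      const double qD, const double qDeltaDov2,    // q*D and q*DeltaD/2
                      const bool cpOdd, const double eff)
{
    const double aSqSum = std::norm(A) + std::norm(Abar);
    const double aSqDif = std::norm(A) - std::norm(Abar);
    const std::complex<double> inter = Abar * std::conj(A) * phiMix;

    const double coshTerm = dtCosh * (1.0 + qDeltaDov2) * aSqSum;
    double       sinhTerm = dtSinh * (1.0 + qDeltaDov2) * 2.0 * inter.real();
    const double cosTerm  = dtCos  * qD * aSqDif;
    double       sinTerm  = dtSin  * qD * 2.0 * inter.imag();

    // CP-odd final states flip the sign of the sin and sinh pieces, as above
    if (cpOdd) { sinTerm = -sinTerm; sinhTerm = -sinhTerm; }

    return eff * (coshTerm + cosTerm - sinTerm + sinhTerm);
}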
this->setGenNtupleDoubleBranchValue("reB0fbarAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0fbarAmp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barfbarAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0barfbarAmp", 0.0); } } } void LauTimeDepNonFlavModel::generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData) { // TODO : need to add the additional DP LauKinematics* kinematics_f(0); //LauKinematics* kinematics_fbar(0); if (curEvtTagFlv_<0) { kinematics_f = kinematicsB0_f_; //kinematics_fbar = kinematicsB0_fbar_; } else { kinematics_f = kinematicsB0bar_f_; //kinematics_fbar = kinematicsB0bar_fbar_; } // Generate from the extra PDFs if (extraPdfs) { for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( (*pdf_iter)->varNames() ); } else { genValues = (*pdf_iter)->generate(kinematics_f); } for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) { TString varName = var_iter->first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var_iter->second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } } void LauTimeDepNonFlavModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_f_->updateCoeffs(coeffsB0bar_f_); sigModelB0_f_->updateCoeffs(coeffsB0_f_); sigModelB0bar_fbar_->updateCoeffs(coeffsB0bar_fbar_); sigModelB0_fbar_->updateCoeffs(coeffsB0_fbar_); this->calcInterTermNorm(); } // Update the signal events from the background numbers if not doing an extended fit if (!this->doEMLFit()) { this->updateSigEvents(); } } void LauTimeDepNonFlavModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; // tagging-category fractions for signal events this->setFirstTagCatFrac(signalTagCatFrac_); signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } void LauTimeDepNonFlavModel::setFirstTagCatFrac(LauTagCatParamMap& theMap) { Double_t firstCatFrac = 1.0; Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { if (iter == theMap.begin()) { firstCat = iter->first; continue; } LauParameter& par = iter->second; firstCatFrac -= par.unblindValue(); } theMap[firstCat].value(firstCatFrac); } void LauTimeDepNonFlavModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); // Start by caching the tagging and CP-eigenstate information evtTagCatVals_.clear(); evtTagFlvVals_.clear(); evtCPEigenVals_.clear(); if ( ! 
inputFitData->haveBranch( tagCatVarName_ ) ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagCatVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! inputFitData->haveBranch( tagVarName_ ) ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtTagCatVals_.reserve( nEvents ); evtTagFlvVals_.reserve( nEvents ); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); fitdata_iter = dataValues.find( tagCatVarName_ ); curEvtTagCat_ = static_cast( fitdata_iter->second ); if ( ! this->validTagCat( curEvtTagCat_ ) ) { std::cerr << "WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Invalid tagging category " << curEvtTagCat_ << " for event " << iEvt << ", setting it to untagged" << std::endl; curEvtTagCat_ = 0; } evtTagCatVals_.push_back( curEvtTagCat_ ); fitdata_iter = dataValues.find( tagVarName_ ); curEvtTagFlv_ = static_cast( fitdata_iter->second ); if ( TMath::Abs( curEvtTagFlv_ ) != 1 ) { if ( curEvtTagFlv_ > 0 ) { std::cerr << "WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTagFlv_ = +1; } else { std::cerr << "WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTagFlv_ = -1; } } evtTagFlvVals_.push_back( curEvtTagFlv_ ); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else { std::cerr<<"WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Unknown value: "<useDP() == kTRUE) { // DecayTime and SigmaDecayTime for (LauTagCatDtPdfMap::iterator dt_iter = signalDecayTimePdfs_.begin(); dt_iter != signalDecayTimePdfs_.end(); ++dt_iter) { (*dt_iter).second->cacheInfo(*inputFitData); } } // ...and then the extra PDFs for (LauTagCatPdfMap::iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter) { this->cacheInfo(pdf_iter->second, *inputFitData); } if (this->useDP() == kTRUE) { sigModelB0bar_f_->fillDataTree(*inputFitData); sigModelB0_f_->fillDataTree(*inputFitData); sigModelB0bar_fbar_->fillDataTree(*inputFitData); sigModelB0_fbar_->fillDataTree(*inputFitData); } } Double_t LauTimeDepNonFlavModel::getTotEvtLikelihood(const UInt_t iEvt) { // Find out whether the tag-side B was a B0 or a B0bar. curEvtTagFlv_ = evtTagFlvVals_[iEvt]; // Also get the tagging category. 
curEvtTagCat_ = evtTagCatVals_[iEvt]; // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); if (this->useDP() == kFALSE) { signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); } // Construct the total event likelihood Double_t likelihood(sigLike*signalTagCatFrac_[curEvtTagCat_].unblindValue()); if ( ! signalEvents_->fixed() ) { likelihood *= signalEvents; } return likelihood; } Double_t LauTimeDepNonFlavModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); return eventSum; } void LauTimeDepNonFlavModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. sigDPLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. if ( this->useDP() == kFALSE ) { return; } // Mistag probabilities. Defined as: omega = prob of the tagging B0 being reported as B0bar // Whether we want omega or omegaBar depends on q_tag, hence curEvtTagFlv_*... in the previous lines //Double_t misTagFrac = 0.5 * (1.0 - dilution_[curEvtTagCat_] - qDDo2); //Double_t misTagFracBar = 0.5 * (1.0 - dilution_[curEvtTagCat_] + qDDo2); // Calculate event quantities qD_ = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); qDDo2_ = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); //LauDecayTimePdf* signalDtPdf = signalDecayTimePdfs_[curEvtTagCat_]; LauDecayTimePdf* decayTimePdf = signalDecayTimePdfs_[curEvtTagCat_]; decayTimePdf->calcLikelihoodInfo(iEvt); // Calculate the relevant amplitude normalisation for the two DP's this->calculateAmplitudeNorm(decayTimePdf); Double_t randNo = LauRandom::randomFun()->Rndm(); if (randNo < normTimeDP_f_/(normTimeDP_f_+normTimeDP_fbar_)) { finalState_ = +1; // A(Abar) -> f // Calculate the likelihood for the f final state sigModelB0bar_f_->calcLikelihoodInfo(iEvt); sigModelB0_f_->calcLikelihoodInfo(iEvt); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_f_, sigModelB0_f_); } else { finalState_ = -1; // A(Abar) -> fbar // Calculate the likelihood for the fbar final state sigModelB0bar_fbar_->calcLikelihoodInfo(iEvt); sigModelB0_fbar_->calcLikelihoodInfo(iEvt); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_fbar_, sigModelB0_fbar_); } // Calculate the normalised signal likelihood sigDPLike_ = ASq_ / (normTimeDP_f_+normTimeDP_fbar_); } void LauTimeDepNonFlavModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal LauTagCatPdfMap::iterator sig_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* pdfList = (sig_iter != sigExtraPdf_.end())? 
&(sig_iter->second) : 0; if (pdfList) { sigExtraLike_ = this->prodPdfValue( *pdfList, iEvt ); } } void LauTimeDepNonFlavModel::updateCoeffs() { coeffsB0bar_f_.clear(); coeffsB0_f_.clear(); coeffsB0bar_fbar_.clear(); coeffsB0_fbar_.clear(); coeffsB0bar_f_.reserve(nSigComp_); coeffsB0_f_.reserve(nSigComp_); coeffsB0bar_fbar_.reserve(nSigComp_); coeffsB0_fbar_.reserve(nSigComp_); for (UInt_t i = 0; i < nSigComp_; ++i) { coeffsB0bar_f_.push_back(coeffPars_B0fbar_B0barf_[i]->antiparticleCoeff()); coeffsB0_f_.push_back(coeffPars_B0f_B0barfbar_[i]->particleCoeff()); coeffsB0bar_fbar_.push_back(coeffPars_B0f_B0barfbar_[i]->antiparticleCoeff()); coeffsB0_fbar_.push_back(coeffPars_B0fbar_B0barf_[i]->particleCoeff()); } } Bool_t LauTimeDepNonFlavModel::validTagCat(Int_t tagCat) const { return (validTagCats_.find(tagCat) != validTagCats_.end()); } Bool_t LauTimeDepNonFlavModel::checkTagCatFracMap(const LauTagCatParamMap& theMap) const { // First check that there is an entry for each tagging category. // NB an entry won't have been added if it isn't a valid category // so don't need to check for that here. if (theMap.size() != signalTagCatFrac_.size()) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::checkTagCatFracMap : Not all tagging categories present."< 1E-10) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::checkTagCatFracMap : Tagging category event fractions do not sum to unity."< -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepNonFlavModel::embedSignal(Int_t tagCat, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_[tagCat]) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::embedSignal : Already embedding signal from file for tagging category "<findBranches(); if (!dataOK) { delete signalTree_[tagCat]; signalTree_[tagCat] = 0; std::cerr<<"ERROR in LauTimeDepNonFlavModel::embedSignal : Problem creating data tree for embedding."<addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species. 
void LauTimeDepNonFlavModel::setupSPlotNtupleBranches()
{
	// add branches for storing the experiment number and the number of
	// the event within the current experiment
	this->addSPlotNtupleIntegerBranch("iExpt");
	this->addSPlotNtupleIntegerBranch("iEvtWithinExpt");

	// Store the efficiency of the event (for inclusive BF calculations).
	if (this->storeDPEff()) {
		this->addSPlotNtupleDoubleBranch("efficiency");
	}

	// Store the total event likelihood for each species.
	this->addSPlotNtupleDoubleBranch("sigTotalLike");

	// Store the DP likelihoods
	if (this->useDP()) {
		this->addSPlotNtupleDoubleBranch("sigDPLike");
	}

	// Store the likelihoods for each extra PDF
	const LauPdfList* pdfList( &(sigExtraPdf_.begin()->second) );
	this->addSPlotNtupleBranches(pdfList, "sig");
}

void LauTimeDepNonFlavModel::addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix)
{
	if (!extraPdfs) {
		return;
	}

	// Loop through each of the PDFs
	for (LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) {

		// Count the number of input variables that are not
		// DP variables (used in the case where there is DP
		// dependence for e.g. MVA)
		UInt_t nVars(0);
		for ( std::vector<TString>::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) {
			if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
				++nVars;
			}
		}

		if ( nVars == 1 ) {
			// If the PDF only has one variable then
			// simply add one branch for that variable
			TString varName = (*pdf_iter)->varName();
			TString name(prefix);
			name += varName;
			name += "Like";
			this->addSPlotNtupleDoubleBranch(name);
		} else if ( nVars == 2 ) {
			// If the PDF has two variables then we
			// need a branch for them both together and
			// branches for each
			TString allVars("");
			for ( std::vector<TString>::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) {
				allVars += (*var_iter);
				TString name(prefix);
				name += (*var_iter);
				name += "Like";
				this->addSPlotNtupleDoubleBranch(name);
			}
			TString name(prefix);
			name += allVars;
			name += "Like";
			this->addSPlotNtupleDoubleBranch(name);
		} else {
			std::cerr<<"WARNING in LauTimeDepNonFlavModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<<std::endl;
		}
	}
}
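
// Branch naming convention used above, and mirrored when the values are filled in
// setSPlotNtupleBranchValues below: each non-DP variable of an extra PDF gets a branch
// named <prefix><variable>Like, and a 2D PDF additionally gets a combined
// <prefix><var1><var2>Like branch. For example (hypothetical variable name), a 1D
// signal PDF in a variable "mES" would be stored in a branch called "sigmESLike".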
Double_t LauTimeDepNonFlavModel::setSPlotNtupleBranchValues(LauPdfList* extraPdfs, const TString& prefix, UInt_t iEvt)
{
	// Store the PDF value for each variable in the list
	Double_t totalLike(1.0);
	Double_t extraLike(0.0);
	if (extraPdfs) {
		for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) {

			// calculate the likelihood for this event
			(*pdf_iter)->calcLikelihoodInfo(iEvt);
			extraLike = (*pdf_iter)->getLikelihood();
			totalLike *= extraLike;

			// Count the number of input variables that are not
			// DP variables (used in the case where there is DP
			// dependence for e.g. MVA)
			UInt_t nVars(0);
			for ( std::vector<TString>::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) {
				if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
					++nVars;
				}
			}

			if ( nVars == 1 ) {
				// If the PDF only has one variable then
				// simply store the value for that variable
				TString varName = (*pdf_iter)->varName();
				TString name(prefix);
				name += varName;
				name += "Like";
				this->setSPlotNtupleDoubleBranchValue(name, extraLike);
			} else if ( nVars == 2 ) {
				// If the PDF has two variables then we
				// store the value for them both together
				// and for each on their own
				TString allVars("");
				for ( std::vector<TString>::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) {
					allVars += (*var_iter);
					TString name(prefix);
					name += (*var_iter);
					name += "Like";
					Double_t indivLike = (*pdf_iter)->getLikelihood( (*var_iter) );
					this->setSPlotNtupleDoubleBranchValue(name, indivLike);
				}
				TString name(prefix);
				name += allVars;
				name += "Like";
				this->setSPlotNtupleDoubleBranchValue(name, extraLike);
			} else {
				std::cerr<<"WARNING in LauTimeDepNonFlavModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<<std::endl;
			}
		}
	}
	return totalLike;
}

LauSPlot::NameSet LauTimeDepNonFlavModel::variableNames() const
{
	LauSPlot::NameSet nameSet;

	if (this->useDP()) {
		nameSet.insert("DP");
	}

	LauPdfList pdfList( (sigExtraPdf_.begin()->second) );
	for (LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) {
		// Loop over the variables involved in each PDF
		for ( std::vector<TString>::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) {
			// If they are not DP coordinates then add them
			if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
				nameSet.insert( (*var_iter) );
			}
		}
	}

	return nameSet;
}

LauSPlot::NumbMap LauTimeDepNonFlavModel::freeSpeciesNames() const
{
	LauSPlot::NumbMap numbMap;

	if (!signalEvents_->fixed() && this->doEMLFit()) {
		numbMap["sig"] = signalEvents_->genValue();
	}

	return numbMap;
}

LauSPlot::NumbMap LauTimeDepNonFlavModel::fixdSpeciesNames() const
{
	LauSPlot::NumbMap numbMap;

	if (signalEvents_->fixed() && this->doEMLFit()) {
		numbMap["sig"] = signalEvents_->genValue();
	}

	return numbMap;
}

LauSPlot::TwoDMap LauTimeDepNonFlavModel::twodimPDFs() const
{
	LauSPlot::TwoDMap twodimMap;

	const LauPdfList* pdfList = &(sigExtraPdf_.begin()->second);
	for (LauPdfList::const_iterator pdf_iter = pdfList->begin(); pdf_iter != pdfList->end(); ++pdf_iter) {
		// Count the number of input variables that are not DP variables
		UInt_t nVars(0);
		for ( std::vector<TString>::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) {
			if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
				++nVars;
			}
		}
		if ( nVars == 2 ) {
			twodimMap.insert( std::make_pair( "sig", std::make_pair( (*pdf_iter)->varNames()[0], (*pdf_iter)->varNames()[1] ) ) );
		}
	}

	return twodimMap;
}
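
// storePerEvtLlhds below fills the sPlot ntuple event by event: it re-evaluates the
// DP + decay-time likelihood, optionally stores the DP efficiency, multiplies in the
// extra-PDF likelihoods via setSPlotNtupleBranchValues, and writes the sigDPLike and
// sigTotalLike branches for each event before filling the tree.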
void LauTimeDepNonFlavModel::storePerEvtLlhds()
{
	std::cout<<"INFO in LauTimeDepNonFlavModel::storePerEvtLlhds : Storing per-event likelihood values..."<<std::endl;

	LauFitDataTree* inputFitData = this->fitData();

	// if we've not been using the DP model then we need to cache all
	// the info here so that we can get the efficiency from it
	if (!this->useDP() && this->storeDPEff()) {
		sigModelB0bar_f_->initialise(coeffsB0bar_f_);
		sigModelB0_f_->initialise(coeffsB0_f_);
		sigModelB0bar_fbar_->initialise(coeffsB0bar_fbar_);
		sigModelB0_fbar_->initialise(coeffsB0_fbar_);
		sigModelB0bar_f_->fillDataTree(*inputFitData);
		sigModelB0_f_->fillDataTree(*inputFitData);
		sigModelB0bar_fbar_->fillDataTree(*inputFitData);
		sigModelB0_fbar_->fillDataTree(*inputFitData);
	}

	UInt_t evtsPerExpt(this->eventsPerExpt());

	LauIsobarDynamics* sigModel(sigModelB0bar_f_);

	for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) {

		// Find out whether we have B0bar or B0
		curEvtTagFlv_ = evtTagFlvVals_[iEvt];
		curEvtTagCat_ = evtTagCatVals_[iEvt];

		LauTagCatPdfMap::iterator sig_iter = sigExtraPdf_.find(curEvtTagCat_);
		LauPdfList* sigPdfs = (sig_iter != sigExtraPdf_.end()) ? &(sig_iter->second) : 0;

		// the DP information
		this->getEvtDPDtLikelihood(iEvt);
		if (this->storeDPEff()) {
			if (!this->useDP()) {
				sigModel->calcLikelihoodInfo(iEvt);
			}
			this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff());
		}
		if (this->useDP()) {
			sigTotalLike_ = sigDPLike_;
			this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_);
		} else {
			sigTotalLike_ = 1.0;
		}

		// the signal PDF values
		sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigPdfs, "sig", iEvt);

		// the total likelihoods
		this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_);

		// fill the tree
		this->fillSPlotNtupleBranches();
	}

	std::cout<<"INFO in LauTimeDepNonFlavModel::storePerEvtLlhds : Finished storing per-event likelihood values."<<std::endl;
}
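
// The branches filled above (sigTotalLike, sigDPLike and the per-PDF <prefix><var>Like
// values), together with the species maps returned by freeSpeciesNames(),
// fixdSpeciesNames() and twodimPDFs(), are the per-event likelihood inputs consumed by
// the LauSPlot machinery; this presumably follows the same pattern as the other
// Laura++ fit models.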