diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh index 5b4f6b0..c6c8dfd 100644 --- a/inc/LauDecayTimePdf.hh +++ b/inc/LauDecayTimePdf.hh @@ -1,747 +1,747 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauDecayTimePdf.hh \brief File containing declaration of LauDecayTimePdf class. */ /*! \class LauDecayTimePdf \brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time. LauDecayTimePdf is a class that provides the PDFs for describing the time-dependence of the various terms in a particle/antiparticle decay to a common final state. The various terms have the form of exponentially decaying trigonometric or hyperbolic functions convolved with a N-Gaussian resolution function. */ #ifndef LAU_DECAYTIME_PDF #define LAU_DECAYTIME_PDF #include #include #include #include "TString.h" #include "LauAbsRValue.hh" #include "LauFitDataTree.hh" #include "LauComplex.hh" class TH1; class Lau1DHistPdf; class Lau1DCubicSpline; class LauKinematics; // TODO - Should this have Pdf in the name? // - Audit function names and public/private access category // - Audit what should be given to constructor and what can be set later (maybe different constructors for different scenarios, e.g. smeared with per-event error/smeared with avg error/not smeared) class LauDecayTimePdf final { public: // TODO - can we think of better names? //! The functional form of the decay time PDF enum FuncType { Hist, //< Hist PDF for fixed background Delta, //< Delta function - for prompt background Exp, //< Exponential function - for non-prompt background or charged B's DeltaExp, //< Delta + Exponential function - for background with prompt and non-prompt parts ExpTrig, //< Exponential function with Delta m driven mixing - for neutral B_d's ExpHypTrig //< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's }; //! How is the decay time measured - absolute or difference? enum TimeMeasurementMethod { DecayTime, //< Absolute measurement of decay time, e.g. LHCb scenario DecayTimeDiff //< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario }; //! How is the TD efficiency information going to be given? enum EfficiencyMethod { Spline, //< As a cubic spline Binned, //< As a histogram (TH1D/TH1F) Flat //< As a flat distribution (constant) }; //! Constructor /*! 
\param [in] theVarName the name of the decay time variable in the input data \param [in] theVarErrName the name of the decay time error variable in the input data \param [in] params the parameters of the PDF \param [in] minAbscissaVal the minimum value of the abscissa \param [in] maxAbscissaVal the maximum value of the abscissa \param [in] minAbscissaErr the minimum value of the abscissa error \param [in] maxAbscissaErr the maximum value of the abscissa error \param [in] type the functional form of the PDF \param [in] nGauss the number of Gaussians in the resolution function \param [in] scale controls whether the Gaussian parameters are scaled by the per-event error \param [in] method set the type of the time measurement used in the given experiment */ LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const Double_t minAbscissaErr, const Double_t maxAbscissaErr, const FuncType type, const UInt_t nGauss, const std::vector& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline); //! Constructor /*! \param [in] theVarName the name of the decay time variable in the input data \param [in] theVarErrName the name of the decay time error variable in the input data \param [in] params the parameters of the PDF \param [in] minAbscissaVal the minimum value of the abscissa \param [in] maxAbscissaVal the maximum value of the abscissa \param [in] minAbscissaErr the minimum value of the abscissa error \param [in] maxAbscissaErr the maximum value of the abscissa error \param [in] type the functional form of the PDF \param [in] nGauss the number of Gaussians in the resolution function \param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error \param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error \param [in] method set the type of the time measurement used in the given experiment */ LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const Double_t minAbscissaErr, const Double_t maxAbscissaErr, const FuncType type, const UInt_t nGauss, const std::vector& scaleMeans, const std::vector& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline); //! Copy constructor (deleted) LauDecayTimePdf(const LauDecayTimePdf& other) = delete; //! Copy assignment operator (deleted) LauDecayTimePdf& operator=(const LauDecayTimePdf& other) = delete; //! Move constructor (deleted) LauDecayTimePdf(LauDecayTimePdf&& other) = delete; //! Move assignment operator (deleted) LauDecayTimePdf& operator=(LauDecayTimePdf&& other) = delete; //! Destructor ~LauDecayTimePdf(); // TODO - Do we need this? // - If so, should it be a hist or a LauAbsPdf? // - Or should there be a dedicated constructor for this scenario? //! Set the Histogram PDF in case of fixed background PDF void setHistoPdf(const TH1* hist); // TODO - should this be a LauAbsPdf instead? //! Set the histogram to be used for generation of per-event decay time errors /*! If not set will fall back to using Landau distribution \param [in] hist the histogram of the distribution */ void setErrorHisto(const TH1* hist); // TODO - do we still want this option? //! Set the parameters of the Landau distribution used to generate the per-event decay time errors /*! 
\param [in] mpv the MPV (most probable value) of the distribution \param [in] sigma the width of the distribution */ void setErrorDistTerms(const Double_t mpv, const Double_t sigma) { errorDistMPV_ = mpv; errorDistSigma_ = sigma; } // TODO - should we remove the EfficiencyMethod argument from the constructor, default to Flat and have these functions modify it? //! Set the efficiency function in the form of a histogram /*! \param [in] hist the histogram of efficiencies */ void setEffiHist(const TH1* hist); //! Set the efficiency function in the form of spline /*! \param [in] spline the efficiency spline function */ void setEffiSpline(Lau1DCubicSpline* spline); - //! Retrieve the name of the error variable + //! Retrieve the name of the variable const TString& varName() const {return varName_;} //! Retrieve the name of the error variable const TString& varErrName() const {return varErrName_;} // TODO - this should probably be set at construction time //! Turn on or off the resolution function void doSmearing(Bool_t smear) {smear_ = smear;} //! Determine if the resolution function is turned on or off Bool_t doSmearing() const {return smear_;} // TODO - we don't use this at the moment - remove it? //! Calculate single effective decay time resolution from multiple Gaussian resolution functions /*! \return effective resolution */ Double_t effectiveResolution() const; //! Cache information from data /*! \param [in] inputData the dataset to be used to calculate everything */ void cacheInfo(const LauFitDataTree& inputData); //! Calculate the likelihood (and all associated information) given value of the abscissa /*! \param [in] abscissa the value of the abscissa */ void calcLikelihoodInfo(const Double_t abscissa); //! Calculate the likelihood (and all associated information) given value of the abscissa and its error /*! \param [in] abscissa the value of the abscissa \param [in] abscissaErr the error on the abscissa */ void calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr); //! Retrieve the likelihood (and all associated information) given the event number /*! \param [in] iEvt the event number */ void calcLikelihoodInfo(const UInt_t iEvt); //! Determine the efficiency value for the given abscissa /*! \param [in] abscissa the value of the abscissa \return the corresponding efficiency value */ Double_t calcEffiTerm( const Double_t abscissa ) const; //! Get FuncType from model FuncType getFuncType() const {return type_;} // TODO - should maybe do away with exp term (and it's norm) since it's just the cosh term when DG=0 and it's confusing to have both // - counter argument is to keep it for backgrounds that have a lifetime-like behaviour //! Get the exponential term Double_t getExpTerm() const {return expTerm_;} //! Get the cos(Dm*t) term (multiplied by the exponential) Double_t getCosTerm() const {return cosTerm_;} //! Get the sin(Dm*t) term (multiplied by the exponential) Double_t getSinTerm() const {return sinTerm_;} //! Get the cosh(DG/2*t) term (multiplied by the exponential) Double_t getCoshTerm() const {return coshTerm_;} //! Get the sinh(DG/2*t) term (multiplied by the exponential) Double_t getSinhTerm() const {return sinhTerm_;} //! Get the hist term from a histogram Double_t getHistTerm() const {return pdfTerm_;} //! Get the normalisation related to the exponential term only Double_t getNormTermExp() const {return normTermExp_;} //! Get the normalisation related to the cos term only Double_t getNormTermCos() const {return normTermCos_;} //! 
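/* [Illustrative sketch, not part of the patch] The getters above return the physics
   terms exactly as documented: the bare exponential, and the trigonometric/hyperbolic
   factors already multiplied by that exponential. A minimal stand-alone illustration of
   those unsmeared forms, using hypothetical local names (the real calculation lives in
   calcNonSmearedTerms() and may differ in detail, e.g. in how Gamma is obtained from tau): */
#include <cmath>

struct DecayTimeTerms {
	double expTerm;   // exp(-Gamma*t)
	double cosTerm;   // exp(-Gamma*t) * cos(Dm*t)
	double sinTerm;   // exp(-Gamma*t) * sin(Dm*t)
	double coshTerm;  // exp(-Gamma*t) * cosh(DG/2*t)
	double sinhTerm;  // exp(-Gamma*t) * sinh(DG/2*t)
};

DecayTimeTerms unsmearedTerms( const double t, const double tau,
                               const double deltaM, const double deltaGamma )
{
	DecayTimeTerms terms;
	terms.expTerm  = std::exp( -t / tau );  // Gamma = 1/tau
	terms.cosTerm  = terms.expTerm * std::cos( deltaM * t );
	terms.sinTerm  = terms.expTerm * std::sin( deltaM * t );
	terms.coshTerm = terms.expTerm * std::cosh( 0.5 * deltaGamma * t );
	terms.sinhTerm = terms.expTerm * std::sinh( 0.5 * deltaGamma * t );
	return terms;
}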
Get the normalisation related to the sin term only Double_t getNormTermSin() const {return normTermSin_;} //! Get the first term in the normalisation (from integrating the cosh) Double_t getNormTermCosh() const {return normTermCosh_;} //! Get the second term in the normalisation (from integrating the sinh) Double_t getNormTermSinh() const {return normTermSinh_;} //! Get error probability density from Error distribution Double_t getErrTerm() const{return errTerm_;} //! Get efficiency probability density from efficiency distribution Double_t getEffiTerm() const{return effiTerm_;} //! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit /*! \return the parameters of the PDF */ const std::vector& getParameters() const { return params_; } //! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit /*! \return the parameters of the PDF */ std::vector& getParameters() { return params_; } //! Update the pulls for all parameters void updatePulls(); //! Calculate the normalisation of all terms void calcNorm(); //! Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency /*! This form to be used for case where decay time resolution is neglected \param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \param [in] weight the weight for this range, typically the efficiency value */ void calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight); //! Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency /*! This form to be used for case where decay time resolution is accounted for \param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \param [in] weight the weight for this range, typically the efficiency value \param [in] means the mean values of each Gaussian in the resolution function \param [in] sigmas the width values of each Gaussian in the resolution function \param [in] fractions the fractional weight of each Gaussian in the resolution function */ void calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector& means, const std::vector& sigmas, const std::vector& fractions); //! Calculate the normalisation integrals in the given range for the case of spline efficiency /*! This form to be used for case where decay time resolution is accounted for \param [in] iEvt the event number (for the case of using per-event decay-time error) \param [in] splineIndex the index of the spline segment being integrated \param [in] means the mean values of each Gaussian in the resolution function \param [in] sigmas the width values of each Gaussian in the resolution function \param [in] fractions the fractional weight of each Gaussian in the resolution function */ void calcSmearedSplinePartialIntegrals(const UInt_t iEvt, const UInt_t splineIndex, const std::vector& means, const std::vector& sigmas, const std::vector& fractions); //! Calculate the normalisation integrals in the given range for the case of spline efficiency /*! This form to be used for case where decay time resolution is neglected \param [in] splineIndex the index of the spline segment being integrated */ void calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex); //! Calculate normalisation for non-smeared cos and sin terms /*! 
\param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \return pair of {cosTermIntegral, sinTermIntegral} */ std::pair nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs); //! Calculate normalisation for non-smeared cosh and sinh terms /*! \param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \return pair of {coshTermIntegral, sinhTermIntegral} */ std::pair nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs); //! Calculate normalisation for non-smeared exponential term /*! \param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \return integral */ Double_t nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs); //! Calculate normalisation for decay-time resolution smeared terms /*! Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748 \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2) \param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \param [in] sigmaOverRoot2 width of the Gaussian resolution function, divided by sqrt(2) \param [in] mu mean of the Gaussian resolution function \return complex integral */ std::complex smearedGeneralIntegral(const std::complex& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu); //! Calculate decay-time resolution smeared terms /*! Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748 \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2) \param [in] x = ( t - mu ) / ( sqrt(2) * sigma ) \return complex smeared term */ std::complex smearedGeneralTerm(const std::complex& z, const Double_t x); //! Calculate and cache powers of means and sigmas for each Gaussian in the resolution function /* \param [in] iEvt the event number (for the case of using per-event decay-time error) \param [in] means mean of each Gaussian in the resolution function \param [in] sigmas width of each Gaussian in the resolution function */ void calcMeanAndSigmaPowers( const UInt_t iEvt, const std::vector& means, const std::vector& sigmas ); //! Calculate and cache K-vectors for each term and for each Gaussian in the resolution function /*! \param [in] iEvt the event number (for the case of using per-event decay-time error) */ void calcKVectors( const UInt_t iEvt ); //! Generate the K vector used in eqn 31 from arXiv:1407.0748 /* \param [in] sigma width of the Gaussian resolution function \param [in] z The z value, changing for exp, sin, sinh, etc \return size 4 array of vector values */ std::array,4> generateKvector(const std::complex& z); //! Generate the M vector used in eqn 31 from arXiv:1407.0748 /* Uses the using the Faddeeva function method from (https://arxiv.org/abs/1407.0748) \param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2) \param [in] sigma width of the Gaussian resolution function \param [in] mu mean of the Gaussian resolution function \return size 4 array of vector values */ std::array,4> generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex& z, const Double_t sigma, const Double_t mu = 0.); //! 
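/* [Illustrative sketch, not part of the patch] smearedGeneralIntegral() and
   smearedGeneralTerm() follow the Faddeeva-function method of arXiv:1407.0748, with
   z = (Gamma - i*Dm)*sigma/sqrt(2) and x = (t - mu)/(sqrt(2)*sigma). For the purely
   exponential case (Dm = 0, so z is real) the convolution of the decaying exponential
   with a single Gaussian has the closed form 0.5*exp(z^2 - 2*z*x)*erfc(z - x), which can
   be evaluated with standard-library calls. The hypothetical helper below only
   illustrates that relation; it is not the class's actual implementation: */
#include <cmath>

// Convolution of exp(-Gamma*t'), t' > 0, with a Gaussian of mean mu and width sigma,
// evaluated at decay time t
double smearedExpTerm( const double t, const double gamma,
                       const double mu, const double sigma )
{
	const double z = gamma * sigma / std::sqrt( 2.0 );
	const double x = ( t - mu ) / ( std::sqrt( 2.0 ) * sigma );
	// Equivalent to 0.5 * exp(-x*x) * w( i*(z - x) ) with w the Faddeeva function;
	// the erfc form can overflow for large z, which is why the Faddeeva form is
	// preferred in the reference (and is needed anyway once z is complex)
	return 0.5 * std::exp( z*z - 2.0*z*x ) * std::erfc( z - x );
}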
Calculate the normalisation of a given term in a particular spline segment /* \param [in] coeffs spline coefficients in this segment \param [in] K K-vector for this term \param [in] M M-vector for this term \param [in] sigmaPowers powers of the width of the Gaussian resolution function \param [in] meanPowers powers of the mean of the Gaussian resolution function \return the complex normalisation */ std::complex smearedSplineNormalise(const std::array& coeffs, const std::array,4>& K, const std::array,4>& M, const std::array& sigmaPowers, const std::array& meanPowers) const; //! Calculate integrals of each power of t within a given spline segment /* \param [in] k the power of t \param [in] minAbs lower bound for the integral domain \param [in] maxAbs upper bound for the integral domain \param [in] u the complex expression with general form: (Gamma - i Delta_m) \return the complex integral */ std::complex calcIk(const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex& u); //! Calculate the normalisation of a given term in a particular spline segment /* \param [in] splineIndex the index of the spline segment being integrated \param [in] u the complex expression with general form: (Gamma - i Delta_m) \param [in] cache cached results of calcIk, to be used and/or updated as appropriate \return the complex normalisation */ std::complex nonSmearedSplineNormalise(const UInt_t splineIndex, const std::complex& u, std::vector,4>>& cache); //! Generate the value of the error /*! If scaling by the error should call this before calling generate \param [in] forceNew forces generation of a new value */ Double_t generateError(const Bool_t forceNew = kFALSE); //TODO not clear that this is really needed, perhaps for background? commented out for now //! Generate an event from the PDF /*! \param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence */ //LauFitData generate(const LauKinematics* kinematics); //! Generate an event from the PDF /*! \param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence */ Double_t generate(const LauKinematics* kinematics); //! Retrieve the decay time minimum value Double_t minAbscissa() const {return minAbscissa_;} //! Retrieve the decay time maximum value Double_t maxAbscissa() const {return maxAbscissa_;} //! Retrieve the decay time error minimum value Double_t minAbscissaError() const {return minAbscissaError_;} //! Retrieve the decay time error maximum value Double_t maxAbscissaError() const {return maxAbscissaError_;} // TODO - can we delete this? // NB calcPDFHeight only calculates the gaussian information for the (type_ == Delta) case //! Calculate the maximum height of the PDF //void calcPDFHeight( const LauKinematics* kinematics ); //! Get efficiency parameters to float in the fit std::vector& getEffiPars() {return effiPars_;} //! Propagate any updates necessary to the decay time Efficiency and recalculate normalisation if necessary void propagateParUpdates(); // TODO - can we delete this? //! Update spline Y values when floating the decay time acceptance /*! \param [in] params the vector of LauParameters describing the Y values */ //void updateEffiSpline(const std::vector& params); //! Set up the initial state correctly - called by the fit model's initialise function void initialise(); protected: //! Calculate the pure physics terms with no resolution function applied void calcNonSmearedTerms(const Double_t abscissa); //! Retrieve the number of PDF parameters /*! 
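/* [Illustrative sketch, not part of the patch] calcIk() integrates t^k * exp(-u*t) over a
   spline segment [minAbs, maxAbs] with complex u = Gamma - i*Dm. Such integrals obey a
   simple integration-by-parts recursion, shown here for the four powers needed by a cubic
   spline; this is one straightforward way to evaluate them and is not necessarily how the
   class does it internally: */
#include <array>
#include <cmath>
#include <complex>

// I_0 = (exp(-u*a) - exp(-u*b)) / u
// I_k = ( a^k*exp(-u*a) - b^k*exp(-u*b) + k*I_{k-1} ) / u   for k = 1, 2, 3
std::array<std::complex<double>,4> integralsTkExp( const double a, const double b,
                                                   const std::complex<double>& u )
{
	const std::complex<double> expA { std::exp( -u * a ) };
	const std::complex<double> expB { std::exp( -u * b ) };

	std::array<std::complex<double>,4> ik;
	ik[0] = ( expA - expB ) / u;
	for ( unsigned int k{1}; k < 4; ++k ) {
		ik[k] = ( std::pow( a, k ) * expA - std::pow( b, k ) * expB
		          + static_cast<double>( k ) * ik[k-1] ) / u;
	}
	return ik;
}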
\return the number of PDF parameters */ UInt_t nParameters() const {return params_.size();} //! Retrieve the specified parameter /*! \param [in] parName the parameter to retrieve */ LauAbsRValue* findParameter(const TString& parName); //! Retrieve the specified parameter /*! \param [in] parName the parameter to retrieve */ const LauAbsRValue* findParameter(const TString& parName) const; //! Update the cache values for all events void updateCache(); private: //! Name of the variable const TString varName_; //! Name of the error variable const TString varErrName_; //! The parameters of the PDF std::vector params_; // TODO - should probably set this at construction time (can then be const) //! Smear with the resolution model or not Bool_t smear_; //! The minimum value of the decay time const Double_t minAbscissa_; //! The maximum value of the decay time const Double_t maxAbscissa_; //! The minimum value of the decay time error const Double_t minAbscissaError_; //! The maximum value of the decay time error const Double_t maxAbscissaError_; //! The current value of the decay time error Double_t abscissaError_; //! Flag whether a value for the decay time error has been generated Bool_t abscissaErrorGenerated_; //! Value of the MPV of the Landau dist used to generate the Delta t error Double_t errorDistMPV_; //! Value of the width of the Landau dist used to generate the Delta t error Double_t errorDistSigma_; //! The number of gaussians in the resolution model const UInt_t nGauss_; // Parameters of the gaussian(s) that accounts for the resolution: //! mean (offset) of each Gaussian in the resolution function std::vector mean_; //! spread (sigma) of each Gaussian in the resolution function std::vector sigma_; //! fraction of each Gaussian in the resolution function std::vector frac_; // Parameters of the physics decay time distribution //! Lifetime parameter LauAbsRValue* tau_; //! Mass difference parameter LauAbsRValue* deltaM_; //! Width difference parameter LauAbsRValue* deltaGamma_; //! Parameter for the fraction of prompt events in DeltaExp LauAbsRValue* fracPrompt_; //! Which type of decay time function is this? const FuncType type_; //! Are we using absolute decay time or decay time difference? const TimeMeasurementMethod method_; //! Which method for eff(decaytime) input are we using? const EfficiencyMethod effMethod_; //! Scale the mean of each Gaussian by the per-event decay time error? const std::vector scaleMeans_; //! Scale the sigma of each Gaussian by the per-event decay time error? const std::vector scaleWidths_; //! Is anything being scaled by the per-event decay time error? const Bool_t scaleWithPerEventError_; //! The exp(-G*t) term Double_t expTerm_; //! The cos(Dm*t) term (multiplied by the exponential) Double_t cosTerm_; //! The sin(Dm*t) term (multiplied by the exponential) Double_t sinTerm_; //! The cosh(DG/2*t) term (multiplied by the exponential) Double_t coshTerm_; //! The sinh(DG/2*t) term (multiplied by the exponential) Double_t sinhTerm_; //! Normalisation of the exponential term Double_t normTermExp_; //! Normalisation of the cos term Double_t normTermCos_; //! Normalisation of the sin term Double_t normTermSin_; //! Normalisation of the cosh term Double_t normTermCosh_; //! Normalisation of the sinh term Double_t normTermSinh_; //! Error PDF (NB there is no equivalent cache since the PDF errHist_ keeps a cache) Double_t errTerm_; //! Efficiency Double_t effiTerm_; //TODO : to be deleted? or needed for backgrounds? //! 
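/* [Illustrative sketch, not part of the patch] The resolution model described by the members
   below is a sum of nGauss_ Gaussians with means mean_, widths sigma_ and fractions frac_,
   where each mean and/or width may be scaled by the per-event decay-time error according to
   scaleMeans_ / scaleWidths_. A hypothetical stand-alone evaluation of such a model (details
   such as the handling of the final fraction are assumptions here, not taken from the class): */
#include <cmath>
#include <cstddef>
#include <vector>

// dt is the difference between the measured and true decay time
double resolutionValue( const double dt, const double perEventError,
                        const std::vector<double>& means,
                        const std::vector<double>& sigmas,
                        const std::vector<double>& fracs,
                        const std::vector<bool>& scaleMeans,
                        const std::vector<bool>& scaleWidths )
{
	constexpr double rootTwoPi { 2.5066282746310002 }; // sqrt(2*pi)
	double value { 0.0 };
	for ( std::size_t i { 0 }; i < means.size(); ++i ) {
		// Scale the mean and/or width by the per-event error if requested
		const double mu    = scaleMeans[i]  ? means[i]  * perEventError : means[i];
		const double sigma = scaleWidths[i] ? sigmas[i] * perEventError : sigmas[i];
		const double arg   = ( dt - mu ) / sigma;
		value += fracs[i] * std::exp( -0.5 * arg * arg ) / ( sigma * rootTwoPi );
	}
	return value;
}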
Hist PDF term (NB there is no equivalent cache since the PDF pdfHist_ keeps a cache) Double_t pdfTerm_; //! The cache of the decay times std::vector abscissas_; //! The cache of the per-event errors on the decay time std::vector abscissaErrors_; //! The cache of the exponential terms std::vector expTerms_; //! The cache of the exponential * cosh(DG/2*t) terms std::vector coshTerms_; //! The cache of the exponential * sinh(DG/2*t) terms std::vector sinhTerms_; //! The cache of the exponential * cos(Dm*t) terms std::vector cosTerms_; //! The cache of the exponential * sin(Dm*t) terms std::vector sinTerms_; //! The cache of the exponential normalisation std::vector normTermsExp_; //! The cache of the cosh term normalisation std::vector normTermsCosh_; //! The cache of the sinh term normalisation std::vector normTermsSinh_; //! The cache of the cos term normalisation std::vector normTermsCos_; //! The cache of the sin term normalisation std::vector normTermsSin_; //! The cache of the efficiency std::vector effiTerms_; //! Histogram PDF for abscissa error distribution Lau1DHistPdf* errHist_; //! Histogram PDF for abscissa distribution Lau1DHistPdf* pdfHist_; //! efficiency PDF in spline Lau1DCubicSpline* effiFun_; //! efficiency PDF as Histogram TH1* effiHist_; //! Vector of parameters to float acceptance std::vector effiPars_; // Caching / bookkeeping //! Binomial coefficients // TODO - would prefer this to use std::array but cling doesn't like it static constexpr Double_t binom_[4][4] = { {1., 0., 0., 0.}, {1., 1., 0., 0.}, {1., 2., 1., 0.}, {1., 3., 3., 1.} }; Bool_t nothingFloating_{kFALSE}; Bool_t anyKnotFloating_{kTRUE}; Bool_t nonKnotFloating_{kTRUE}; Bool_t physicsParFloating_{kTRUE}; Bool_t tauFloating_{kTRUE}; Bool_t deltaMFloating_{kTRUE}; Bool_t deltaGammaFloating_{kTRUE}; Bool_t resoParFloating_{kTRUE}; //std::vector meansFloating_; //std::vector sigmasFloating_; //std::vector fracsFloating_; Bool_t nothingChanged_{kFALSE}; Bool_t anyKnotChanged_{kTRUE}; Bool_t nonKnotChanged_{kTRUE}; Bool_t physicsParChanged_{kTRUE}; Bool_t tauChanged_{kTRUE}; Bool_t deltaMChanged_{kTRUE}; Bool_t deltaGammaChanged_{kTRUE}; Bool_t resoParChanged_{kTRUE}; //std::vector meansChanged_; //std::vector sigmasChanged_; //std::vector fracsChanged_; Double_t tauVal_{0.0}; Double_t gammaVal_{0.0}; Double_t deltaMVal_{0.0}; Double_t deltaGammaVal_{0.0}; std::vector meanVals_; std::vector sigmaVals_; std::vector fracVals_; std::vector effiParVals_; // vector has size nSplineSegments, array has 0th - 3rd powers std::vector,4>> expTermIkVals_; std::vector,4>> trigTermIkVals_; std::vector,4>> hypHTermIkVals_; std::vector,4>> hypLTermIkVals_; // outer vector has nEvents entries, inner vector has nGauss_ entries, array has 0th - 3rd or 1st - 4th powers, respectively std::vector>> meanPowerVals_; std::vector>> sigmaPowerVals_; // outer vector has nEvents entries, inner vector has nGauss_ entries, array has 0th - 4th entries of the K-vector std::vector,4>>> expTermKvecVals_; std::vector,4>>> trigTermKvecVals_; std::vector,4>>> hypHTermKvecVals_; std::vector,4>>> hypLTermKvecVals_; // outer vector has nEvents entries, middle vector has nSplineSegments entries, inner vector has nGauss_ entries, array has 0th - 4th entries of the M-vector std::vector,4>>>> expTermMvecVals_; std::vector,4>>>> trigTermMvecVals_; std::vector,4>>>> hypHTermMvecVals_; std::vector,4>>>> hypLTermMvecVals_; ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF }; #endif diff --git a/inc/LauFlavTag.hh b/inc/LauFlavTag.hh index aeca828..cda7313 
100644 --- a/inc/LauFlavTag.hh +++ b/inc/LauFlavTag.hh @@ -1,303 +1,309 @@ /* Copyright 2017 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauFlavTag.hh \brief File containing declaration of LauFlavTag class. */ /*! \class LauFlavTag \brief Class for defining the flavour tagging approach. Define the flavour tagging categories and all associated parameters to be passed to the relevant fit models. */ #ifndef LAU_FLAVTAG #define LAU_FLAVTAG #include #include #include "TString.h" #include "LauParameter.hh" class LauAbsPdf; class LauFitDataTree; class LauFlavTag final { public: //! Define sign convention for B and Bbar flavours enum Flavour { Bbar = -1, //< Bbar flavour Unknown = 0, //< Unknown flavour B = +1 //< B flavour }; //! Define different types of background to control the behaviour for each source // Might want to move this somewhere more general later enum BkgndType { SignalLike = 0, Combinatorial = 1 }; //! Constructor /*! \param [in] useAveDelta use average and delta variables for tagging calibration and efficiency \param [in] useEtaPrime use eta prime rather the eta as the mistag throughout \param [in] bkgndInfo map containing names and types of the background sources (if applicable) */ LauFlavTag(const Bool_t useAveDelta = kFALSE, const Bool_t useEtaPrime = kFALSE, const std::map bkgndInfo={}); //! Initialise // TODO is this needed? Commented for the moment (here and where called in LauTimeDepFitModel) //void initialise(); // TODO - need to decide which functions need to be public (interface) and which should be private (implementation details) //! Change the dilutions, delta dilutions and tagCatFrac for signal if needed /*! \param [in] name the name of the tagger \param [in] tagVarName the tagging variable name of the tagger in the ntuple \param [in] mistagVarName the associated mistag variable name of the same tagger in the ntuple \param [in] etapdf the mistag distribution for the tagger \param [in] tagEff tagging efficiency - (particle, antiparticle) or (average, delta) depending on useAveDelta_ flag \param [in] calib_p0 calibration parameter p0 - (particle, antiparticle) or (average, delta) depending on useAveDelta_ flag \param [in] calib_p1 calibration parameter p1 - (particle, antiparticle) or (average, delta) depending on useAveDelta_ flag */ // Need to set remember the position in the vector using a map for later reference //void addTagger(const TString& name, const TString& tagVarName, const TString& mistagVarName, LauAbsPdf* etapdf, // const Double_t tagEff_b0=1.0, const Double_t calib_p0_b0=1.0, const Double_t calib_p1_b0=1.0, // const Double_t tagEff_b0bar=-1.0, const Double_t calib_p0_b0bar=-1.0, const Double_t calib_p1_b0bar=-1.0); void addTagger(const TString& name, const TString& tagVarName, const TString& mistagVarName, LauAbsPdf* etapdf, const std::pair tagEff, const std::pair calib_p0, const std::pair calib_p1); //! Read in the input fit data variables, e.g. 
m13Sq and m23Sq - void cacheInputFitVars(LauFitDataTree* inputFitData); + void cacheInputFitVars(LauFitDataTree* inputFitData, const TString& decayTimeVarName=""); void generateEventInfo(const Flavour trueTagFlv); void generateBkgndEventInfo(const Flavour trueTagFlv, const ULong_t bkgndID); void updateEventInfo(const ULong_t iEvt); const std::vector& getTagVarNames() const {return tagVarNames_;}; const std::vector& getMistagVarNames() const {return mistagVarNames_;}; const TString& getTrueTagVarName() const {return trueTagVarName_;}; const TString& getDecayFlvVarName() const {return decayFlvVarName_;}; Flavour getCurEvtTrueTagFlv() const {return curEvtTrueTagFlv_;}; Flavour getCurEvtDecayFlv() const {return curEvtDecayFlv_;}; const std::vector& getCurEvtTagFlv() const {return curEvtTagFlv_;}; const std::vector& getCurEvtMistag() const {return curEvtMistag_;}; ULong_t getNTaggers() const {return tagVarNames_.size();} //! Get vector of calibration parameters for each tagging category std::vector getCalibP0B0(){return calib_p0_B0_;}; std::vector getCalibP0B0bar(){return calib_p0_B0bar_;}; std::vector getCalibP1B0(){return calib_p1_B0_;}; std::vector getCalibP1B0bar(){return calib_p1_B0bar_;}; //! Get vector of alternative calibration parameters for each tagging category std::vector getCalibP0Ave(){return calib_p0_ave_;}; std::vector getCalibP0Delta(){return calib_p0_delta_;}; std::vector getCalibP1Ave(){return calib_p1_ave_;}; std::vector getCalibP1Delta(){return calib_p1_delta_;}; //! Get vector of tagging efficiency parameters for each tagging category std::vector getTagEffAve(){return tagEff_ave_;}; std::vector getTagEffDelta(){return tagEff_delta_;}; std::vector getTagEffB0(){return tagEff_B0_;}; std::vector getTagEffB0bar(){return tagEff_B0bar_;}; //! Get 2D vector of background tagging efficiency parameters for each tagger (inner vec) and background source (outer vec) std::vector> getTagEffBkgndAve(){return tagEffBkgnd_ave_;}; std::vector> getTagEffBkgndDelta(){return tagEffBkgnd_delta_;}; std::vector> getTagEffBkgndB0(){return tagEffBkgnd_B0_;}; std::vector> getTagEffBkgndB0bar(){return tagEffBkgnd_B0bar_;}; //! Set some things for backgrounds //! Set background eta PDF for a given background and given tagger /*! \param [in] bkgndID background identifier number \param [in] taggerName name of the tagger \param [in] etaPdf the eta PDF itself \param [in] tagEff the tagging efficiency parameters */ void setBkgndParams(const TString& bkgndName, const TString& taggerName, LauAbsPdf* etaPdf, std::pair tagEff); const std::vector& getPerEvtAvgMistag() const {return perEvtAvgMistag_;}; //! Returns little omega (calibrated mistag) /*! \param [in] position index of the background source in the background vector(s) \param [in] flag choose to calculate omega or omegabar (corrsonding to B or Bbar) */ Double_t getLittleOmega(const ULong_t position, const Flavour flag) const; //! Capital Omega for signal decays /*! \param [in] position index of the background source in the background vector(s) \param [in] flag choose to calculate Omega or Omegabar (corrsonding to B or Bbar) */ Double_t getCapitalOmega(const ULong_t position, const Flavour flag) const; //! Capital Omega for backgrounds /*! 
\param [in] position index of the background source in the background vector(s) \param [in] flag choose to calculate Omega or Omegabar (corrsonding to B or Bbar) \param [in] type the background type */ Double_t getCapitalOmegaBkgnd(const ULong_t position, const Flavour flag, const UInt_t classID) const; Double_t getEtaGen(const ULong_t position); Double_t getEtaGenBkgnd(const ULong_t position, const ULong_t bkgndID); //! Return the Boolean controlling if we use the alternative tagging calibration parameters Bool_t getUseAveDelta() const {return useAveDelta_;}; void setTrueTagVarName(TString trueTagVarName); void setDecayFlvVarName(TString decayFlvVarName); //! Gaussian constraints for P0 parameters for a given tagger /*! \param [in] name name of the tagger \param [in] constraint1 the (mean, sigma) for the particle or average parameter \param [in] constraint2 the (mean, sigma) for the antiparticle or delta parameter */ void addP0GaussianConstraints(const TString name, const std::pair constraint1, const std::pair constraint2); //! Gaussian constraints for P1 parameters for a given tagger /*! \param [in] name name of the tagger \param [in] constraint1 the (mean, sigma) for the particle or average parameter \param [in] constraint2 the (mean, sigma) for the antiparticle or delta parameter */ void addP1GaussianConstraints(const TString name, const std::pair constraint1, const std::pair constraint2); //! Gaussian constraints for tagging efficiency parameters for a given tagger /*! \param [in] name name of the tagger \param [in] constraint1 the (mean, sigma) for the particle or average parameter \param [in] constraint2 the (mean, sigma) for the antiparticle or delta parameter */ void addTagEffGaussianConstraints(const TString name, const std::pair constraint1, const std::pair constraint2); const std::vector getBkgndNames(){return bkgndNames_;}; const std::vector getBkgndTypes(){return bkgndTypes_;}; private: //! Flag to use alternative calibration parameters const Bool_t useAveDelta_; //! Flag to use eta prime not eta for the mistag const Bool_t useEtaPrime_; //! Map to link taggers to their vector position std::map taggerPosition_; //! Flavour tagging variable name std::vector tagVarNames_; //! Per event mistag variable name std::vector mistagVarNames_; //! True tag variable name for normalisation decays TString trueTagVarName_; //! Decay flavour tag variable name for normalisation decays TString decayFlvVarName_; //! Vector of background names std::vector bkgndNames_; //! Vector of background types std::vector bkgndTypes_; //! Vector of flavour tags for each event std::vector> evtTagFlv_; //! Flavour tag for current event std::vector curEvtTagFlv_; //! Vector of mistags for each event std::vector> evtMistag_; //! Per event mistag for current event std::vector curEvtMistag_; //! Vector of true tags for each event std::vector evtTrueTagFlv_; //! Vector of decay tags for each event std::vector evtDecayFlv_; //! True tag from normalisation mode for current event Flavour curEvtTrueTagFlv_{Unknown}; //! True tag from normalisation mode for current event Flavour curEvtDecayFlv_{Unknown}; //! Per-event average mistag value (eta hat) std::vector perEvtAvgMistag_; + //! Decay time values for each event + std::vector evtDecayTime_; + + //! Decay time value of the current event + Double_t curEvtDecayTime_; + //! Calibration parameters std::vector calib_p0_B0_; std::vector calib_p0_B0bar_; std::vector calib_p1_B0_; std::vector calib_p1_B0bar_; //! 
Alternative calibration parameters std::vector calib_p0_ave_; std::vector calib_p0_delta_; std::vector calib_p1_ave_; std::vector calib_p1_delta_; //! Tagging efficiency parameters std::vector tagEff_B0_; std::vector tagEff_B0bar_; std::vector tagEff_ave_; std::vector tagEff_delta_; //! Tagging efficiency parameters for backgrounds std::vector> tagEffBkgnd_B0_; std::vector> tagEffBkgnd_B0bar_; std::vector> tagEffBkgnd_ave_; std::vector> tagEffBkgnd_delta_; //! Eta PDFs std::vector etaPdfs_; //! Eta PDFs for backgrounds per tagger (inner vec) and per background source (outer vec) std::vector> etaBkgndPdfs_; ClassDef(LauFlavTag,0) // Flavour tagging set up }; #endif diff --git a/src/LauFlavTag.cc b/src/LauFlavTag.cc index 8855327..b739dc0 100644 --- a/src/LauFlavTag.cc +++ b/src/LauFlavTag.cc @@ -1,725 +1,735 @@ /* Copyright 2017 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauFlavTag.cc \brief File containing implementation of LauFlavTag class. */ #include #include #include #include "TMath.h" #include "TString.h" #include "TSystem.h" #include "Lau1DHistPdf.hh" #include "LauAbsPdf.hh" #include "LauFlavTag.hh" #include "LauRandom.hh" ClassImp(LauFlavTag) LauFlavTag::LauFlavTag(const Bool_t useAveDelta, const Bool_t useEtaPrime, const std::map bkgndInfo) : useAveDelta_(useAveDelta), useEtaPrime_(useEtaPrime) { // Put map values into vectors for (const auto &bkgnd : bkgndInfo){ bkgndNames_.push_back(bkgnd.first); bkgndTypes_.push_back(bkgnd.second); std::cout<< "INFO in LauFlavTag::LauFlavTag : adding background " << bkgnd.first << " of type " << bkgnd.second < 0){ if (!useAveDelta_){ tagEffBkgnd_B0_.clear(); tagEffBkgnd_B0_.resize(nBkgnds); tagEffBkgnd_B0bar_.clear(); tagEffBkgnd_B0bar_.resize(nBkgnds); } else { tagEffBkgnd_ave_.clear(); tagEffBkgnd_ave_.resize(nBkgnds); tagEffBkgnd_delta_.clear(); tagEffBkgnd_delta_.resize(nBkgnds); } etaBkgndPdfs_.clear(); etaBkgndPdfs_.resize(nBkgnds); } } void LauFlavTag::addTagger(const TString& name, const TString& tagVarName, const TString& mistagVarName, LauAbsPdf* etapdf, const std::pair tagEff, const std::pair calib_p0, const std::pair calib_p1) { // Check that we don't already have a tagger with the same name if ( taggerPosition_.find(name) != taggerPosition_.end() ) { std::cerr << "ERROR in LauFlavTag::addTagger : tagger called " << name << " already added" << std::endl; gSystem->Exit(EXIT_FAILURE); } // Check that the PDF pointer is valid if ( not etapdf ) { std::cerr << "ERROR in LauFlavTag::addTagger : Eta PDF pointer is NULL" << std::endl; gSystem->Exit(EXIT_FAILURE); } // Find how many taggers have already been added const ULong_t position { tagVarNames_.size() }; // Update map to relate tagger name and position in the vectors taggerPosition_[name] = position; // Fill vectors tagVarNames_.push_back(tagVarName); mistagVarNames_.push_back(mistagVarName); curEvtTagFlv_.push_back(Flavour::Unknown); curEvtMistag_.push_back(Flavour::Unknown); // Extend background vectors if 
(bkgndNames_.size()>0){ if (!useAveDelta_){ for (std::vector>::iterator iter = tagEffBkgnd_B0_.begin(); iter != tagEffBkgnd_B0_.end(); ++iter){ iter->push_back(nullptr); } for (std::vector>::iterator iter = tagEffBkgnd_B0bar_.begin(); iter != tagEffBkgnd_B0bar_.end(); ++iter){ iter->push_back(nullptr); } } else { for (std::vector>::iterator iter = tagEffBkgnd_ave_.begin(); iter != tagEffBkgnd_ave_.end(); ++iter){ iter->push_back(nullptr); } for (std::vector>::iterator iter = tagEffBkgnd_delta_.begin(); iter != tagEffBkgnd_delta_.end(); ++iter){ iter->push_back(nullptr); } } for (std::vector>::iterator iter = etaBkgndPdfs_.begin(); iter != etaBkgndPdfs_.end(); ++iter){ iter->push_back(nullptr); } } etaPdfs_.push_back(etapdf); Lau1DHistPdf* etahistpdf = dynamic_cast(etapdf); if (etahistpdf){ perEvtAvgMistag_.push_back(etahistpdf->getMean()); } else { std::cerr << "WARNING in LauFlavTag::addTagger : Couldn't determine average eta value from PDF. Setting it to 0.4." << std::endl; perEvtAvgMistag_.push_back(0.4); } //Use particle/antiparticle variables if (!useAveDelta_){ TString tagEff_b0Name("tagEff_b0_"+name); TString tagEff_b0barName("tagEff_b0bar_"+name); TString calib_p0_b0Name("calib_p0_b0_"+name); TString calib_p0_b0barName("calib_p0_b0bar_"+name); TString calib_p1_b0Name("calib_p1_b0_"+name); TString calib_p1_b0barName("calib_p1_b0bar_"+name); LauParameter* tageffb0 = new LauParameter(tagEff_b0Name,tagEff.first,0.0,1.0,kTRUE); tagEff_B0_.push_back(tageffb0); tagEff_B0_[position]->initValue(tagEff.first); tagEff_B0_[position]->genValue(tagEff.first); tagEff_B0_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp0b0 = new LauParameter(calib_p0_b0Name,calib_p0.first,-10.0,10.0,kTRUE); calib_p0_B0_.push_back(calibp0b0); calib_p0_B0_[position]->initValue(calib_p0.first); calib_p0_B0_[position]->genValue(calib_p0.first); calib_p0_B0_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp1b0 = new LauParameter(calib_p1_b0Name,calib_p1.first,0.0,1.5,kTRUE); calib_p1_B0_.push_back(calibp1b0); calib_p1_B0_[position]->initValue(calib_p1.first); calib_p1_B0_[position]->genValue(calib_p1.first); calib_p1_B0_[position]->fixed(kTRUE); //Update once full code in place if (tagEff.second==-1.0 && calib_p0.second==-1.0 && calib_p1.second==-1.0){ tagEff_B0bar_.push_back(tagEff_B0_[position]->createClone(tagEff_b0barName)); calib_p0_B0bar_.push_back(calib_p0_B0_[position]->createClone(calib_p0_b0barName)); calib_p1_B0bar_.push_back(calib_p1_B0_[position]->createClone(calib_p1_b0barName)); } else { LauParameter* tageffb0bar = new LauParameter(tagEff_b0barName,tagEff.second,0.0,1.0,kTRUE); tagEff_B0bar_.push_back(tageffb0bar); tagEff_B0bar_[position]->initValue(tagEff.second); tagEff_B0bar_[position]->genValue(tagEff.second); tagEff_B0bar_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp0b0bar = new LauParameter(calib_p0_b0barName,calib_p0.second,-10.0,10.0,kTRUE); calib_p0_B0bar_.push_back(calibp0b0bar); calib_p0_B0bar_[position]->initValue(calib_p0.second); calib_p0_B0bar_[position]->genValue(calib_p0.second); calib_p0_B0bar_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp1b0bar = new LauParameter(calib_p1_b0barName,calib_p1.second,0.0,1.5,kTRUE); calib_p1_B0bar_.push_back(calibp1b0bar); calib_p1_B0bar_[position]->initValue(calib_p1.second); calib_p1_B0bar_[position]->genValue(calib_p1.second); calib_p1_B0bar_[position]->fixed(kTRUE); //Update once full code in place } } else { //Use average and 
delta variables TString tagEff_aveName("tagEff_ave_"+name); TString tagEff_deltaName("tagEff_delta_"+name); TString calib_p0_aveName("calib_p0_ave_"+name); TString calib_p0_deltaName("calib_p0_delta_"+name); TString calib_p1_aveName("calib_p1_ave_"+name); TString calib_p1_deltaName("calib_p1_delta_"+name); LauParameter* tageffave = new LauParameter(tagEff_aveName,tagEff.first,0.0,1.0,kTRUE); tagEff_ave_.push_back(tageffave); tagEff_ave_[position]->initValue(tagEff.first); tagEff_ave_[position]->genValue(tagEff.first); tagEff_ave_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp0ave = new LauParameter(calib_p0_aveName,calib_p0.first,-10.0,10.0,kTRUE); calib_p0_ave_.push_back(calibp0ave); calib_p0_ave_[position]->initValue(calib_p0.first); calib_p0_ave_[position]->genValue(calib_p0.first); calib_p0_ave_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp1ave = new LauParameter(calib_p1_aveName,calib_p1.first,0.0,1.5,kTRUE); calib_p1_ave_.push_back(calibp1ave); calib_p1_ave_[position]->initValue(calib_p1.first); calib_p1_ave_[position]->genValue(calib_p1.first); calib_p1_ave_[position]->fixed(kTRUE); //Update once full code in place LauParameter* tageffdelta = new LauParameter(tagEff_deltaName,tagEff.second,-1.0,1.0,kTRUE); tagEff_delta_.push_back(tageffdelta); tagEff_delta_[position]->initValue(tagEff.second); tagEff_delta_[position]->genValue(tagEff.second); tagEff_delta_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp0delta = new LauParameter(calib_p0_deltaName,calib_p0.second,-10.0,10.0,kTRUE); calib_p0_delta_.push_back(calibp0delta); calib_p0_delta_[position]->initValue(calib_p0.second); calib_p0_delta_[position]->genValue(calib_p0.second); calib_p0_delta_[position]->fixed(kTRUE); //Update once full code in place LauParameter* calibp1delta = new LauParameter(calib_p1_deltaName,calib_p1.second,-10.0,10.0,kTRUE); calib_p1_delta_.push_back(calibp1delta); calib_p1_delta_[position]->initValue(calib_p1.second); calib_p1_delta_[position]->genValue(calib_p1.second); calib_p1_delta_[position]->fixed(kTRUE); //Update once full code in place } std::cout<<"INFO in LauFlavTag::addTagger : Added tagger with name "<< name << std::endl; } -void LauFlavTag::cacheInputFitVars(LauFitDataTree* inputFitData) +void LauFlavTag::cacheInputFitVars(LauFitDataTree* inputFitData, const TString& decayTimeVarName) { evtTagFlv_.clear(); evtMistag_.clear(); evtTrueTagFlv_.clear(); evtDecayFlv_.clear(); + evtDecayTime_.clear(); // Loop over the taggers to check the branches for (ULong_t i=0; i < tagVarNames_.size(); ++i){ if ( not inputFitData->haveBranch( tagVarNames_[i] ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << tagVarNames_[i] << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( not inputFitData->haveBranch( mistagVarNames_[i] ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << mistagVarNames_[i] << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } } if ( trueTagVarName_ != "" and not inputFitData->haveBranch( trueTagVarName_ ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << trueTagVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( decayFlvVarName_ != "" and not inputFitData->haveBranch( decayFlvVarName_ ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << decayFlvVarName_ << "\"." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } + if ( decayTimeVarName != "" and not inputFitData->haveBranch( decayTimeVarName ) ) { + std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << decayTimeVarName << "\"." << std::endl; + gSystem->Exit(EXIT_FAILURE); + } const ULong_t nEvents { inputFitData->nEvents() }; evtTagFlv_.reserve( nEvents ); evtMistag_.reserve( nEvents ); evtTrueTagFlv_.reserve( nEvents ); evtDecayFlv_.reserve( nEvents ); + evtDecayTime_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (ULong_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); // For untagged events see if we have a truth tag for normalisation modes Int_t curEvtTrueTagFlv { ( trueTagVarName_ != "" ) ? static_cast( dataValues.at( trueTagVarName_ ) ) : 0 }; if ( curEvtTrueTagFlv > 1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid true tag value " << curEvtTrueTagFlv << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTrueTagFlv = +1; } else if ( curEvtTrueTagFlv < -1 ){ std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid true tag value " << curEvtTrueTagFlv << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTrueTagFlv = -1; } curEvtTrueTagFlv_ = static_cast(curEvtTrueTagFlv); evtTrueTagFlv_.push_back(curEvtTrueTagFlv_); // Flavour at decay Int_t curEvtDecayFlv { ( decayFlvVarName_ != "" ) ? static_cast( dataValues.at( decayFlvVarName_ ) ) : 0 }; if ( curEvtDecayFlv > 1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid decay flavour value " << curEvtDecayFlv << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtDecayFlv = +1; } else if ( curEvtDecayFlv < -1 ){ std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid decay flavour value " << curEvtDecayFlv << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtDecayFlv = -1; } curEvtDecayFlv_ = static_cast(curEvtDecayFlv); evtDecayFlv_.push_back(curEvtDecayFlv_); for (ULong_t i=0; i < tagVarNames_.size(); ++i){ Int_t curEvtTagFlv { static_cast( dataValues.at( tagVarNames_[i] ) ) }; if ( curEvtTagFlv > 1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTagFlv = +1; } else if ( curEvtTagFlv < -1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTagFlv = -1; } curEvtTagFlv_[i] = static_cast( curEvtTagFlv ); curEvtMistag_[i] = static_cast( dataValues.at( mistagVarNames_[i] ) ); // Calibrated mistag > 0.5 is just a tag flip - handled automatically in getCapitalOmega function if (curEvtMistag_[i] > 0.5){ std::cerr<<"WARNING in LauFlavTag::cacheInputFitVars : Mistag value "<( dataValues.at( decayTimeVarName ) ); + evtDecayTime_.push_back(curEvtDecayTime_); } } void LauFlavTag::updateEventInfo(const ULong_t iEvt) { //Assign current event variables curEvtTagFlv_ = evtTagFlv_[iEvt]; curEvtMistag_ = evtMistag_[iEvt]; curEvtTrueTagFlv_ = evtTrueTagFlv_[iEvt]; curEvtDecayFlv_ = evtDecayFlv_[iEvt]; + curEvtDecayTime_ = evtDecayTime_[iEvt]; } void LauFlavTag::generateEventInfo(const Flavour trueTagFlv) { curEvtTrueTagFlv_ = trueTagFlv; curEvtDecayFlv_ = Flavour::Unknown; Double_t randNo{0.0}; Double_t tagEffB0{0.0}; Double_t tagEffB0bar{0.0}; const ULong_t nTaggers { this->getNTaggers() }; for ( ULong_t 
position{0}; positiongetEtaGen(position); if (this->getUseAveDelta()) { tagEffB0 = tagEff_ave_[position]->unblindValue() + 0.5 * tagEff_delta_[position]->unblindValue(); tagEffB0bar = tagEff_ave_[position]->unblindValue() - 0.5 * tagEff_delta_[position]->unblindValue(); } else { tagEffB0 = tagEff_B0_[position]->unblindValue(); tagEffB0bar = tagEff_B0bar_[position]->unblindValue(); } if (curEvtTrueTagFlv_ == Flavour::B) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0) { randNo = LauRandom::randomFun()->Rndm(); // Account for (calibrated) mistag if (randNo > this->getLittleOmega(position, Flavour::B)){ curEvtTagFlv_[position] = Flavour::B; } else { curEvtTagFlv_[position] = Flavour::Bbar; } } else { curEvtTagFlv_[position] = Flavour::Unknown; } } else if (curEvtTrueTagFlv_ == Flavour::Bbar) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0bar) { randNo = LauRandom::randomFun()->Rndm(); // Account for (calibrated) mistag if (randNo > this->getLittleOmega(position, Flavour::Bbar)){ curEvtTagFlv_[position] = Flavour::Bbar; } else { curEvtTagFlv_[position] = Flavour::B; } } else { curEvtTagFlv_[position] = Flavour::Unknown; } } else { std::cerr << "ERROR in LauFlavTag::generateEventInfo : Invalid true tag flavour, should be either B (+1) or Bbar (-1)" << std::endl; gSystem->Exit(EXIT_FAILURE); } } } void LauFlavTag::generateBkgndEventInfo(const Flavour trueTagFlv, const ULong_t bkgndID) { if (bkgndID < 0 || bkgndID > bkgndNames_.size()){ std::cerr << "ERROR in LauFlavTag::generateBkgndEventInfo : Invalid backgrond class identifier" << std::endl; gSystem->Exit(EXIT_FAILURE); } curEvtTrueTagFlv_ = trueTagFlv; curEvtDecayFlv_ = Flavour::Unknown; Double_t randNo{0.0}; Double_t tagEffB0{0.0}; Double_t tagEffB0bar{0.0}; const ULong_t nTaggers { this->getNTaggers() }; for ( ULong_t position{0}; positiongetEtaGenBkgnd(position,bkgndID); //TODO If bkgnd is signal like should these parameters be clones of signal TagEff etc? //TODO Or call generateEventInfo() instead? if (this->getUseAveDelta()) { tagEffB0 = tagEffBkgnd_ave_[bkgndID][position]->unblindValue() + 0.5 * tagEffBkgnd_delta_[bkgndID][position]->unblindValue(); tagEffB0bar = tagEffBkgnd_ave_[bkgndID][position]->unblindValue() - 0.5 * tagEffBkgnd_delta_[bkgndID][position]->unblindValue(); } else { tagEffB0 = tagEffBkgnd_B0_[bkgndID][position]->unblindValue(); tagEffB0bar = tagEffBkgnd_B0bar_[bkgndID][position]->unblindValue(); } if (curEvtTrueTagFlv_ == Flavour::B) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0) { randNo = LauRandom::randomFun()->Rndm(); // Account for mistag - use eta not littleOmega for now (littleOmega only for SignalLike bkgnd?) 
//if (randNo > this->getLittleOmega(position, Flavour::B)){ if (randNo > curEvtMistag_[position]){ curEvtTagFlv_[position] = Flavour::B; } else { curEvtTagFlv_[position] = Flavour::Bbar; } } else { curEvtTagFlv_[position] = Flavour::Unknown; } } else if (curEvtTrueTagFlv_ == Flavour::Bbar) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0bar) { randNo = LauRandom::randomFun()->Rndm(); // Account for (calibrated) mistag //if (randNo > this->getLittleOmega(position, Flavour::Bbar)){ if (randNo > curEvtMistag_[position]){ curEvtTagFlv_[position] = Flavour::Bbar; } else { curEvtTagFlv_[position] = Flavour::B; } } else { curEvtTagFlv_[position] = Flavour::Unknown; } } else { std::cerr << "ERROR in LauFlavTag::generateBkgndEventInfo : Invalid true tag flavour, should be either B (+1) or Bbar (-1)" << std::endl; gSystem->Exit(EXIT_FAILURE); } } } Double_t LauFlavTag::getLittleOmega(const ULong_t position, const Flavour flag) const { if ( flag == Flavour::Unknown ){ std::cerr << "ERROR in LauFlavTag::getLittleOmega : Invalid flag, you must request either omega (+1) or omega bar (-1) to be returned" << std::endl; return 0.0; } Double_t calibp0(0.), calibp1(0.), calibp0bar(0.), calibp1bar(0.); //If we are floating average omega and delta omega we need to use those parameters instead if (useAveDelta_){ calibp0 = calib_p0_ave_[position]->unblindValue() + 0.5*calib_p0_delta_[position]->unblindValue(); calibp0bar = calib_p0_ave_[position]->unblindValue() - 0.5*calib_p0_delta_[position]->unblindValue(); calibp1 = calib_p1_ave_[position]->unblindValue() + 0.5*calib_p1_delta_[position]->unblindValue(); calibp1bar = calib_p1_ave_[position]->unblindValue() - 0.5*calib_p1_delta_[position]->unblindValue(); } else { calibp0 = calib_p0_B0_[position]->unblindValue(); calibp0bar = calib_p0_B0bar_[position]->unblindValue(); calibp1 = calib_p1_B0_[position]->unblindValue(); calibp1bar = calib_p1_B0bar_[position]->unblindValue(); } if ( flag == Flavour::B ){ return calibp0 + calibp1 * (curEvtMistag_[position] - perEvtAvgMistag_[position]); } else{ return calibp0bar + calibp1bar * (curEvtMistag_[position] - perEvtAvgMistag_[position]); } return 0.0; } Double_t LauFlavTag::getCapitalOmega(const ULong_t position, const Flavour flag) const { if ( flag == Flavour::Unknown ){ std::cerr << "ERROR in LauFlavTag::getCapitalOmega : Invalid flag, you must request either Omega (+1) or Omega bar (-1) to be returned" << std::endl; return 0.0; } //Delta functions to control which terms contribute Int_t deltap1(0), deltam1(0), delta0(0); if (curEvtTagFlv_[position] == Flavour::Bbar){ deltam1 = 1; } else if(curEvtTagFlv_[position] == Flavour::B){ deltap1 = 1; } else{ delta0 = 1; } //Efficiency Double_t eff(0.0); if (useAveDelta_){ if(flag==Flavour::B){ eff = tagEff_ave_[position]->unblindValue() + 0.5*tagEff_delta_[position]->unblindValue(); } else { eff = tagEff_ave_[position]->unblindValue() - 0.5*tagEff_delta_[position]->unblindValue(); } }else{ if(flag==Flavour::B){ eff = tagEff_B0_[position]->unblindValue(); }else{ eff = tagEff_B0bar_[position]->unblindValue(); } } //Little omega Double_t omega = this->getLittleOmega(position, flag); Double_t omegaPrime(0.); //Transform to omega prime - TODO isn't this the inverse, getLittleOmega is actually giving us omegaPrime and on the next line we convert back to omega? if (useEtaPrime_){ omegaPrime = (1/(1+TMath::Exp(-1.0*omega))); }else{ omegaPrime = omega; } //little omega must be between 0 and 1. 
Force this for now, if the fits keep getting stuck can look more closely at it. if (omegaPrime < 0.0){ std::cerr << "WARNING in LauFlavTag::getCapitalOmega : The value of little omega is less than 0, shifting to 0" << std::endl; omegaPrime = 0.0; } if (omegaPrime > 1.0){ std::cerr << "WARNING in LauFlavTag::getCapitalOmega : The value of little omega is greater than 1, shifting to 1" << std::endl; omegaPrime = 1.0; } //eta PDF value std::vector abs; abs.push_back(curEvtMistag_[position]); etaPdfs_[position]->calcLikelihoodInfo(abs); Double_t h { etaPdfs_[position]->getLikelihood() }; const Double_t u { 2.0 }; // the PDF value for a uniform PDF between 0.0 and 0.5 //If h returns 0 for a tagged event, the event likelihood will be zero if (h==0 && delta0==0){ std::cerr << "WARNING in LauFlavTag::getCapitalOmega : The value of the eta PDF is zero at eta = " << curEvtMistag_[position] << ", shifting to 0.1" << std::endl; h=0.1; } //Put it together if (flag == Flavour::B){ return (deltap1*eff*(1-omegaPrime) + deltam1*eff*omegaPrime)*h + delta0*(1-eff)*u; } else { return (deltam1*eff*(1-omegaPrime) + deltap1*eff*omegaPrime)*h + delta0*(1-eff)*u; } } Double_t LauFlavTag::getCapitalOmegaBkgnd(const ULong_t position, const Flavour flag, const UInt_t classID) const { //Fill in with the various options of flag = +-1, type = signal-like, combinatorial etc if ( flag == Flavour::Unknown ){ std::cerr << "ERROR in LauFlavTag::getCapitalOmegaBkgnd : Invalid flag, you must request either Omega (+1) or Omega bar (-1) to be returned" << std::endl; return 0.0; } //Delta functions to control which terms contribute Int_t deltap1(0), deltam1(0), delta0(0); if (curEvtTagFlv_[position] == Flavour::Bbar){ deltam1 = 1; } else if(curEvtTagFlv_[position] == Flavour::B){ deltap1 = 1; } else{ delta0 = 1; } //Efficiency Double_t effB0(0.0), effB0bar(0.0); if (useAveDelta_){ if(flag==Flavour::B){ effB0 = tagEffBkgnd_ave_[classID][position]->unblindValue() + 0.5*tagEffBkgnd_delta_[classID][position]->unblindValue(); } else { effB0bar = tagEffBkgnd_ave_[classID][position]->unblindValue() - 0.5*tagEffBkgnd_delta_[classID][position]->unblindValue(); } }else{ if(flag==Flavour::B){ effB0 = tagEffBkgnd_B0_[classID][position]->unblindValue(); }else{ effB0bar = tagEffBkgnd_B0bar_[classID][position]->unblindValue(); } } //Need to know which background eta PDF to use - classID std::vector abs; abs.push_back(curEvtMistag_[position]); etaBkgndPdfs_[classID][position]->calcLikelihoodInfo(abs); Double_t h { etaBkgndPdfs_[classID][position]->getLikelihood() }; const Double_t u { 2.0 }; // the PDF value for a uniform PDF between 0.0 and 0.5 if (bkgndTypes_[classID] == BkgndType::Combinatorial){ //Combinatorial is the same for flag = +1 and -1 if (flag == Flavour::B){ return (deltap1*effB0 + deltam1*effB0bar)*h + delta0*(1-0.5*(effB0+effB0bar))*u; } else { return (deltap1*effB0 + deltam1*effB0bar)*h + delta0*(1-0.5*(effB0+effB0bar))*u; } } else if (bkgndTypes_[classID] == BkgndType::SignalLike) { return this->getCapitalOmega(position,flag); } else { return 1.0; } } void LauFlavTag::setBkgndParams(const TString& bkgndName, const TString& taggerName, LauAbsPdf* etaPdf, std::pair tagEff) { if (taggerPosition_.count(taggerName)==0){ std::cerr << "ERROR in LauFlavTag::setBkgndParams : Tagger name not recognised please check your options" << std::endl; return; } Int_t bkgndID(-1); for (ULong_t i=0; iname("tagEff_ave_"+taggerName+"_bkgnd_"+bkgndName); tagEffBkgnd_ave_[bkgndID][position] = tagEffB0; 
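// --- Illustrative sketch, not part of the original patch ---------------------------------------
// The per-tagger likelihood term assembled in getLittleOmega/getCapitalOmega above reduces to the
// two functions below (shown for the B hypothesis; the Bbar hypothesis swaps the tagged cases and
// uses the barred calibration parameters). All names and types are assumptions for illustration.
#include <algorithm>

double littleOmegaSketch( const double eta, const double avgEta, const double p0, const double p1 )
{
	// linear mistag calibration about the average per-event mistag
	return p0 + p1 * ( eta - avgEta );
}

double capitalOmegaSketch( const int tagDecision, const double tagEff, const double omega,
                           const double etaPdfValue )
{
	const double omegaP = std::clamp( omega, 0.0, 1.0 ); // same guard as the WARNING messages above
	const double u = 2.0;                                // flat PDF value on [0.0, 0.5]
	if ( tagDecision == 0 ) { return ( 1.0 - tagEff ) * u; }                 // untagged event
	const double pTag = ( tagDecision == +1 ) ? ( 1.0 - omegaP ) : omegaP;   // right/wrong tag
	return tagEff * pTag * etaPdfValue;
}
// ------------------------------------------------------------------------------------------------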
tagEffB0bar->name("tagEff_delta_"+taggerName+"_bkgnd_"+bkgndName); tagEffB0bar->range(-1.0,1.0); tagEffBkgnd_delta_[bkgndID][position] = tagEffB0bar; } else { tagEffBkgnd_B0_[bkgndID][position] = tagEffB0; tagEffBkgnd_B0bar_[bkgndID][position] = tagEffB0bar; } std::cout << "INFO in LauFlavTag::setBkgndParams : Added efficiency parameters and eta PDF for background " << bkgndName << " for tagger " << taggerName << std::endl; } Double_t LauFlavTag::getEtaGen(const ULong_t position) { LauFitData data { etaPdfs_[position]->generate(nullptr) }; //TODO Add DP dependence? Double_t etagen { data.at(etaPdfs_[position]->varName()) }; if (etagen > 0.5){etagen = 0.5;} if (etagen < 0.0){etagen = 0.0;} curEvtMistag_[position] = etagen; return etagen; } Double_t LauFlavTag::getEtaGenBkgnd(const ULong_t position, const ULong_t bkgndID) { LauFitData data { etaBkgndPdfs_[bkgndID][position]->generate(nullptr) }; //TODO Add DP dependence? Double_t etagen { data.at(etaBkgndPdfs_[bkgndID][position]->varName()) }; if (etagen > 0.5){etagen = 0.5;} if (etagen < 0.0){etagen = 0.0;} curEvtMistag_[position] = etagen; return etagen; } void LauFlavTag::setTrueTagVarName(TString trueTagVarName){ trueTagVarName_ = std::move(trueTagVarName); } void LauFlavTag::setDecayFlvVarName(TString decayFlvVarName){ decayFlvVarName_ = std::move(decayFlvVarName); } void LauFlavTag::addP0GaussianConstraints(TString name, std::pair constraint1, std::pair constraint2){ //Does key exist? if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::addP0GaussianConstraints : Tagger name not recognised please check your options" << std::endl; std::cerr << "ERROR in LauFlavTag::addP0GaussianConstraints : Constraints have not been applied" << std::endl; return; } //Find position in the vector from the tagger name Double_t pos = taggerPosition_.at(name); if (!useAveDelta_){ calib_p0_B0_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p0_B0bar_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); }else{ calib_p0_ave_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p0_delta_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); } std::cout << "INFO in LauFlavTag::addP0GaussianConstraints : Added Gaussian constraints for the P0 calibration parameters of tagger " << name << std::endl; } void LauFlavTag::addP1GaussianConstraints(TString name, std::pair constraint1, std::pair constraint2){ //Does key exist? if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::addP1GaussianConstraints : Tagger name not recognised please check your options" << std::endl; std::cerr << "ERROR in LauFlavTag::addP1GaussianConstraints : Constraints have not been applied" << std::endl; return; } //Find position in the vector from the tagger name Double_t pos = taggerPosition_.at(name); if (!useAveDelta_){ calib_p1_B0_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p1_B0bar_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); }else{ calib_p1_ave_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p1_delta_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); } std::cout << "INFO in LauFlavTag::addP1GaussianConstraints : Added Gaussian constraints for the P1 calibration parameters of tagger " << name << std::endl; } void LauFlavTag::addTagEffGaussianConstraints(TString name, std::pair constraint1, std::pair constraint2){ //Does key exist? 
if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::addTagEffGaussianConstraints : Tagger name not recognised please check your options" << std::endl; std::cerr << "ERROR in LauFlavTag::addTagEffGaussianConstraints : Constraints have not been applied" << std::endl; return; } //Find position in the vector from the tagger name Double_t pos = taggerPosition_.at(name); if (!useAveDelta_){ tagEff_B0_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); tagEff_B0bar_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); }else{ tagEff_ave_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); tagEff_delta_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); } std::cout << "INFO in LauFlavTag::addTagEffGaussianConstraints : Added Gaussian constraints for the tagging efficiency parameters of tagger " << name << std::endl; } diff --git a/src/LauTimeDepFitModel.cc b/src/LauTimeDepFitModel.cc index 811b085..81161f6 100644 --- a/src/LauTimeDepFitModel.cc +++ b/src/LauTimeDepFitModel.cc @@ -1,3059 +1,3059 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauTimeDepFitModel.cc \brief File containing implementation of LauTimeDepFitModel class. */ #include #include #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauParamFixed.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepFitModel.hh" #include "LauFlavTag.hh" ClassImp(LauTimeDepFitModel) LauTimeDepFitModel::LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag) : LauAbsFitModel(), sigModelB0bar_(modelB0bar), sigModelB0_(modelB0), kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0), kinematicsB0_(modelB0 ? 
modelB0->getKinematics() : 0), usingBkgnd_(kFALSE), flavTag_(flavTag), curEvtTrueTagFlv_(LauFlavTag::Unknown), curEvtDecayFlv_(LauFlavTag::Unknown), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), nCalibPar_(0), nTagEffPar_(0), nEffiPar_(0), nAsymPar_(0), coeffsB0bar_(0), coeffsB0_(0), coeffPars_(0), fitFracB0bar_(0), fitFracB0_(0), fitFracAsymm_(0), acp_(0), meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0), meanEffB0_("meanEffB0",0.0,0.0,1.0), DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0), DPRateB0_("DPRateB0",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), cpevVarName_(""), cpEigenValue_(CPEven), evtCPEigenVals_(0), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdf_(), BkgndTypes_(), BkgndDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), sigExtraPdf_(), sigFlavTagPdf_(), bkgdFlavTagPdf_(), AProd_("AProd",0.0,-1.0,1.0,kTRUE), iterationsMax_(100000000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigFlavTagLike_(0.0), bkgdFlavTagLike_(0.0), sigTotalLike_(0.0) { // Set up ftag here? this->setBkgndClassNames(flavTag_->getBkgndNames()); BkgndTypes_ = flavTag_->getBkgndTypes(); if ( BkgndTypes_.size() > 0 ){usingBkgnd_ = kTRUE;} // Make sure that the integration scheme will be symmetrised sigModelB0bar_->forceSymmetriseIntegration(kTRUE); sigModelB0_->forceSymmetriseIntegration(kTRUE); } LauTimeDepFitModel::~LauTimeDepFitModel() { for ( LauAbsPdf* pdf : sigExtraPdf_ ) { delete pdf; } for (std::vector::iterator iter = bkgndTree_.begin(); iter != bkgndTree_.end(); ++iter){ delete *(iter); } } void LauTimeDepFitModel::setupBkgndVectors() { UInt_t nBkgnds = this->nBkgndClasses(); BkgndDPModelsB_.resize( nBkgnds ); BkgndDPModelsBbar_.resize( nBkgnds ); BkgndDecayTimePdfs_.resize( nBkgnds ); BkgndPdfs_.resize( nBkgnds ); bkgndEvents_.resize( nBkgnds ); bkgndAsym_.resize( nBkgnds ); bkgndTree_.resize( nBkgnds ); reuseBkgnd_.resize( nBkgnds ); bkgndDPLike_.resize( nBkgnds ); bkgndExtraLike_.resize( nBkgnds ); bkgndTotalLike_.resize( nBkgnds ); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." 
<< std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBgkndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } nBkgndEvents->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID] = new LauParameter(nBkgndEvents->name()+"Asym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( bkgndAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } bkgndEvents_[bkgndID]->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID]->name( nBkgndEvents->name()+"Asym" ); if ( bkgndAsym->isLValue() ) { LauParameter* asym = dynamic_cast( bkgndAsym ); asym->range(-1.0, 1.0); } bkgndAsym_[bkgndID] = bkgndAsym; } void LauTimeDepFitModel::setSignalDtPdf(LauDecayTimePdf* pdf) { if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalDtPdf : The PDF pointer is null, not adding it."<validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDtPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDecayTimePdfs_[bkgndID] = pdf; usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* BModel, LauAbsBkgndDPModel* BbarModel) { if (BModel==nullptr) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the particle model." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDPModelsB_[bkgndID] = BModel; if (BbarModel==nullptr) { std::cout << "INFO in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the anti-particle model. Using only the particle model." << std::endl; BkgndDPModelsBbar_[bkgndID] = nullptr; } else { BkgndDPModelsBbar_[bkgndID] = BbarModel; } usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setSignalPdfs(LauAbsPdf* pdf) { // These "extra variables" are assumed to be purely kinematical, like mES and DeltaE //or making use of Rest of Event information, and therefore independent of whether //the parent is a B0 or a B0bar. If this assupmtion doesn't hold, do modify this part! if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalPdfs : The PDF pointer is null."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
<< std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndPdfs_[bkgndID].push_back(pdf); usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos) { phiMix_.value(phiMix); phiMix_.initValue(phiMix); phiMix_.genValue(phiMix); phiMix_.fixed(fixPhiMix); const Double_t sinPhiMix = TMath::Sin(phiMix); sinPhiMix_.value(sinPhiMix); sinPhiMix_.initValue(sinPhiMix); sinPhiMix_.genValue(sinPhiMix); sinPhiMix_.fixed(fixPhiMix); const Double_t cosPhiMix = TMath::Cos(phiMix); cosPhiMix_.value(cosPhiMix); cosPhiMix_.initValue(cosPhiMix); cosPhiMix_.genValue(cosPhiMix); cosPhiMix_.fixed(fixPhiMix); useSinCos_ = useSinCos; phiMixComplex_.setRealPart(cosPhiMix); phiMixComplex_.setImagPart(-1.0*sinPhiMix); } void LauTimeDepFitModel::initialise() { // From the initial parameter values calculate the coefficients // so they can be passed to the signal model this->updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } // Flavour tagging //flavTag_->initialise(); // Decay-time PDFs signalDecayTimePdf_->initialise(); if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Next check that, if a given component is being used we've got the // right number of PDFs for all the variables involved // TODO - should probably check variable names and so on as well //UInt_t nsigpdfvars(0); //for ( LauPdfList::const_iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nsigpdfvars; // } // } //} //if (usingBkgnd_) { // for (LauBkgndPdfsList::const_iterator bgclass_iter = BkgndPdfsB0_.begin(); bgclass_iter != BkgndPdfsB0_.end(); ++bgclass_iter) { // UInt_t nbkgndpdfvars(0); // const LauPdfList& pdfList = (*bgclass_iter); // for ( LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nbkgndpdfvars; // } // } // } // if (nbkgndpdfvars != nsigpdfvars) { // std::cerr << "ERROR in LauTimeDepFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." 
<< std::endl; // gSystem->Exit(EXIT_FAILURE); // } // } //} // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Handle flavour-tagging calibration parameters this->setCalibParams(); // Add tagging efficiency parameters this->setTagEffParams(); // Add the efficiency parameters this->setEffiParams(); //Asymmetry terms AProd and in setAsymmetries()? //this->setAsymParams(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_ + nCalibPar_ + nTagEffPar_ + nEffiPar_)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } if (sigModelB0_ == 0) { std::cerr<<"ERROR in LauTimeDepFitModel::initialiseDPModels : B0 signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp(); const UInt_t nAmpB0 = sigModelB0_->getnTotAmp(); if ( nAmpB0bar != nAmpB0 ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." << std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepFitModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); fifjEffSum_.clear(); fifjEffSum_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); // Add backgrounds if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = BkgndDPModelsB_.begin(); iter != BkgndDPModelsB_.end(); ++iter) { (*iter)->initialise(); } for (LauBkgndDPModelList::iterator iter = BkgndDPModelsBbar_.begin(); iter != BkgndDPModelsBbar_.end(); ++iter) { if ((*iter) != nullptr) { (*iter)->initialise(); } } } } void LauTimeDepFitModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos(); const std::vector& integralInfoListB0 = sigModelB0_->getIntegralInfos(); // TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc. 
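// --- Illustrative sketch, not part of the original patch ---------------------------------------
// calcInterferenceTermIntegrals below accumulates, for every pair of components (i,j), the
// efficiency-weighted overlap  sum over grid points of  eff * weight * Abar_j * conj(A_i).
// A minimal standalone version, with assumed types and container layout, looks like this:
#include <complex>
#include <vector>

using AmpGrid = std::vector<std::vector<std::complex<double>>>; // [grid point][component]

std::vector<std::vector<std::complex<double>>>
interferenceIntegralsSketch( const AmpGrid& ampA, const AmpGrid& ampAbar,
                             const std::vector<double>& effWeight, const std::size_t nComp )
{
	std::vector<std::vector<std::complex<double>>> sums( nComp, std::vector<std::complex<double>>( nComp ) );
	for ( std::size_t p = 0; p < effWeight.size(); ++p ) {
		for ( std::size_t i = 0; i < nComp; ++i ) {
			for ( std::size_t j = 0; j < nComp; ++j ) {
				// Abar_j * conj(A_i), weighted by efficiency times integration weight
				sums[i][j] += effWeight[p] * ampAbar[p][j] * std::conj( ampA[p][i] );
			}
		}
	}
	return sums;
}
// ------------------------------------------------------------------------------------------------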
LauComplex A, Abar, fifjEffSumTerm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions = integralInfoListB0bar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion]; const UInt_t nm13Points = integralInfoB0bar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight = integralInfoB0bar->getWeight(m13,m23); const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23); const Double_t effWeight = eff*weight; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A = integralInfoB0->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm = Abar*A.conj(); fifjEffSumTerm.rescale(effWeight); fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm; } } } } } } void LauTimeDepFitModel::calcInterTermNorm() { const std::vector& fNormB0bar = sigModelB0bar_->getFNorm(); const std::vector& fNormB0 = sigModelB0_->getFNorm(); LauComplex norm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj(); coeffTerm *= fifjEffSum_[iAmp][jAmp]; coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]); norm += coeffTerm; } } norm *= phiMixComplex_; interTermReNorm_ = 2.0*norm.re(); interTermImNorm_ = 2.0*norm.im(); } void LauTimeDepFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Is there a component called compName in the signal models? TString compName = coeffSet->name(); TString conjName = sigModelB0bar_->getConjResName(compName); const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters(); const LauDaughters* daughtersB0 = sigModelB0_->getDaughters(); const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 ); if ( ! sigModelB0bar_->hasResonance(compName) ) { if ( ! sigModelB0bar_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<name( compName ); } if ( conjugate ) { if ( ! 
sigModelB0_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<hasResonance(compName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<::const_iterator iter=coeffPars_.begin(); iter!=coeffPars_.end(); ++iter) { if ((*iter)->name() == compName) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffPars_.push_back(coeffSet); TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0)); acp_.push_back(coeffSet->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepFitModel::setAmpCoeffSet : Added coefficients for component \""<acp(); LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauTimeDepFitModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauTimeDepFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; ++i) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (LauParameterPList::iterator iter = pars.begin(); iter != pars.end(); ++iter) { if ( !(*iter)->clone() ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters(); LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters(); for ( LauParameterPList::iterator iter = sigDPParsB0bar.begin(); iter != sigDPParsB0bar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0.begin(); iter != sigDPParsB0.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } UInt_t LauTimeDepFitModel::addParametersToFitList(std::vector theVector) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (std::vector::iterator iter = theVector.begin(); iter != theVector.end(); ++iter) { // grab the pdf and then its parameters LauDecayTimePdf* thePdf = *iter; // The first one is the tagging category LauAbsRValuePList& rvalues = thePdf->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++counter; } } } } return counter; } UInt_t 
LauTimeDepFitModel::addParametersToFitList(LauPdfList* theList) { UInt_t counter(0); counter += this->addFitParameters(*(theList)); return counter; } void LauTimeDepFitModel::setDecayTimeParameters() { nDecayTimePar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setDecayTimeParameters : Setting the initial fit parameters of the DecayTime Pdfs." << std::endl; LauParameterPList& fitVars = this->fitPars(); // Loop over the Dt PDFs LauAbsRValuePList& rvalues = signalDecayTimePdf_->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++nDecayTimePar_; } } } if (usingBkgnd_){ nDecayTimePar_ += this->addParametersToFitList(BkgndDecayTimePdfs_); } if (useSinCos_) { if ( not sinPhiMix_.fixed() ) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } } else { if ( not phiMix_.fixed() ) { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } } void LauTimeDepFitModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setExtraPdfParameters : Setting the initial fit parameters of the extra Pdfs." << std::endl; nExtraPdfPar_ += this->addFitParameters(sigExtraPdf_); if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { nExtraPdfPar_ += this->addFitParameters(*iter); } } } void LauTimeDepFitModel::setFitNEvents() { nNormPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setFitNEvents : Setting the initial fit parameters of the signal and background yields." 
<< std::endl; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for signal and background components..."<fixed() ) { fitVars.push_back(signalEvents_); ++nNormPar_; } } else { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for background components (and hence signal)..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // TODO arguably should delegate this //LauTagCatParamMap& signalTagCatFrac = flavTag_->getSignalTagCatFrac(); // tagging-category fractions for signal events //for (LauTagCatParamMap::iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // if (iter == signalTagCatFrac.begin()) { // continue; // } // LauParameter* par = &((*iter).second); // fitVars.push_back(par); // ++nNormPar_; //} // Backgrounds if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { if (parameter->fixed()){continue;} if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { if (parameter->fixed()){continue;} if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } } } void LauTimeDepFitModel::setAsymParams() { nAsymPar_ = 0; LauParameterPList& fitVars = this->fitPars(); if (!AProd_.fixed()){ fitVars.push_back(&AProd_); nAsymPar_+=1; } } void LauTimeDepFitModel::setTagEffParams() { nTagEffPar_ = 0; Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setTagEffParams : Setting the initial fit parameters for flavour tagging efficiencies." 
<< std::endl; if (useAltPars){ std::vector tageff_ave = flavTag_->getTagEffAve(); std::vector tageff_delta = flavTag_->getTagEffDelta(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = tageff_ave.begin(); iter != tageff_ave.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(std::vector::iterator iter = tageff_delta.begin(); iter != tageff_delta.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } else { std::vector tageff_b0 = flavTag_->getTagEffB0(); std::vector tageff_b0bar = flavTag_->getTagEffB0bar(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = tageff_b0.begin(); iter != tageff_b0.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(std::vector::iterator iter = tageff_b0bar.begin(); iter != tageff_b0bar.end(); ++iter){ LauParameter* eff = *iter; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } if (usingBkgnd_){ if (useAltPars){ std::vector> tageff_ave = flavTag_->getTagEffBkgndAve(); std::vector> tageff_delta = flavTag_->getTagEffBkgndDelta(); LauParameterPList& fitVars = this->fitPars(); for(std::vector>::iterator row = tageff_ave.begin(); row != tageff_ave.end(); ++row){ for(std::vector::iterator col = row->begin(); col != row->end(); ++col){ if (*col == nullptr){continue;} LauParameter* eff = *col; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } for(std::vector>::iterator row = tageff_delta.begin(); row != tageff_delta.end(); ++row){ for(std::vector::iterator col = row->begin(); col != row->end(); ++col){ if (*col == nullptr){continue;} LauParameter* eff = *col; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } } else { std::vector> tageff_b0 = flavTag_->getTagEffBkgndB0(); std::vector> tageff_b0bar = flavTag_->getTagEffBkgndB0bar(); LauParameterPList& fitVars = this->fitPars(); for(std::vector>::iterator row = tageff_b0.begin(); row != tageff_b0.end(); ++row){ for(std::vector::iterator col = row->begin(); col != row->end(); ++col){ if (*col == nullptr){continue;} LauParameter* eff = *col; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } for(std::vector>::iterator row = tageff_b0bar.begin(); row != tageff_b0bar.end(); ++row){ for(std::vector::iterator col = row->begin(); col != row->end(); ++col){ if (*col == nullptr){continue;} LauParameter* eff = *col; if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } } } } void LauTimeDepFitModel::setCalibParams() { Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setCalibParams : Setting the initial fit parameters of the flavour tagging calibration parameters." 
<< std::endl; if (useAltPars){ std::vector p0pars_ave = flavTag_->getCalibP0Ave(); std::vector p0pars_delta = flavTag_->getCalibP0Delta(); std::vector p1pars_ave = flavTag_->getCalibP1Ave(); std::vector p1pars_delta = flavTag_->getCalibP1Delta(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = p0pars_ave.begin(); iter != p0pars_ave.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p0pars_delta.begin(); iter != p0pars_delta.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_ave.begin(); iter != p1pars_ave.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_delta.begin(); iter != p1pars_delta.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } else { std::vector p0pars_b0 = flavTag_->getCalibP0B0(); std::vector p0pars_b0bar = flavTag_->getCalibP0B0bar(); std::vector p1pars_b0 = flavTag_->getCalibP1B0(); std::vector p1pars_b0bar = flavTag_->getCalibP1B0bar(); LauParameterPList& fitVars = this->fitPars(); for(std::vector::iterator iter = p0pars_b0.begin(); iter != p0pars_b0.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p0pars_b0bar.begin(); iter != p0pars_b0bar.end(); ++iter){ LauParameter* p0 = *iter; if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_b0.begin(); iter != p1pars_b0.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(std::vector::iterator iter = p1pars_b0bar.begin(); iter != p1pars_b0bar.end(); ++iter){ LauParameter* p1 = *iter; if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } } void LauTimeDepFitModel::setEffiParams() { nEffiPar_ = 0; LauParameterPList& fitVars = this->fitPars(); LauParameterPList& effiPars = signalDecayTimePdf_->getEffiPars(); // If all of the knots are fixed we have nothing to do LauParamFixed isFixed; if ( std::all_of( effiPars.begin(), effiPars.end(), isFixed ) ) { return; } // If any knots are floating, add all knots (fixed or floating) for(LauParameterPList::iterator iter = effiPars.begin(); iter != effiPars.end(); ++iter){ LauParameter* par = *iter; fitVars.push_back(par); ++nEffiPar_; } } void LauTimeDepFitModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. 
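// --- Illustrative sketch, not part of the original patch ---------------------------------------
// setEffiParams above follows an all-or-nothing rule for the decay-time efficiency spline: if
// every knot is fixed nothing is registered, otherwise all knots (fixed or floating) are handed
// to the minimiser so the spline stays internally consistent. A minimal stand-in, with an
// assumed Knot type, is:
#include <algorithm>
#include <vector>

struct KnotSketch { bool fixed; };

std::size_t collectKnotsSketch( const std::vector<KnotSketch*>& knots, std::vector<KnotSketch*>& fitVars )
{
	const bool allFixed = std::all_of( knots.begin(), knots.end(),
	                                   []( const KnotSketch* k ){ return k->fixed; } );
	if ( allFixed ) { return 0; }                            // nothing floats, nothing to add
	fitVars.insert( fitVars.end(), knots.begin(), knots.end() );
	return knots.size();                                     // number of efficiency parameters added
}
// ------------------------------------------------------------------------------------------------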
if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 and B0bar fit fractions for each signal component fitFracB0bar_ = sigModelB0bar_->getFitFractions(); if (fitFracB0bar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue(); meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar); extraVars.push_back(meanEffB0bar_); Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue(); meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0); extraVars.push_back(meanEffB0_); // Also add in the DP rates Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue(); DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar); extraVars.push_back(DPRateB0bar_); Double_t initDPRateB0 = sigModelB0_->getDPRate().initValue(); DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0); extraVars.push_back(DPRateB0_); } void LauTimeDepFitModel::setAsymmetries(const Double_t AProd, const Bool_t AProdFix){ AProd_.value(AProd); AProd_.fixed(AProdFix); } void LauTimeDepFitModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. 
// Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters signalDecayTimePdf_->updatePulls(); // and for backgrounds if required if (usingBkgnd_){ for (std::vector::iterator iter = BkgndDecayTimePdfs_.begin(); iter != BkgndDecayTimePdfs_.end(); ++iter) { LauDecayTimePdf* pdf = *iter; pdf->updatePulls(); } } if (useSinCos_) { if ( not sinPhiMix_.fixed() ) { sinPhiMix_.updatePull(); cosPhiMix_.updatePull(); } } else { this->checkMixingPhase(); } if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } } // Update the pulls on all the extra PDFs' parameters this->updateFitParameters(sigExtraPdf_); if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { this->updateFitParameters(*iter); } } // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo(); sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo(); LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions(); if (fitFracB0bar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. 
fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepFitModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B0bar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output<<"B0bar FitFraction for component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout<<"\\hline"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_.value()); fout << "$ & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(sigExtraPdf_, fout); if (usingBkgnd_ == kTRUE && !BkgndPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl; for (LauBkgndPdfsList::const_iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { this->printFitParameters(*iter, fout); } } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepFitModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! 
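// --- Illustrative sketch, not part of the original patch ---------------------------------------
// The randomisation below draws phiMix uniformly in [-pi, pi] and, when the (sin, cos)
// parametrisation is in use, resets both from the same randomised angle so they stay consistent.
// Names and the use of std::mt19937 are assumptions for illustration.
#include <cmath>
#include <random>

void randomisePhiMixSketch( double& phiMix, double& sinPhiMix, double& cosPhiMix, std::mt19937& rng )
{
	const double pi = std::acos( -1.0 );
	std::uniform_real_distribution<double> flat{ -pi, pi };
	phiMix    = flat( rng );
	sinPhiMix = std::sin( phiMix );
	cosPhiMix = std::cos( phiMix );
}
// ------------------------------------------------------------------------------------------------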
std::cout<<"INFO in LauTimeDepFitModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepFitModel::LauGenInfo LauTimeDepFitModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } //TOD sigAysm doesn't do anything here? Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); //TODO fill in here if we care } else { Double_t rateB0bar = sigModelB0bar_->getDPRate().value(); Double_t rateB0 = sigModelB0_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } //for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // const LauParameter& par = iter->second; // Double_t eventsbyTagCat = par.value() * nEvts; // if (this->doPoissonSmearing()) { // eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); // } // eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); //} //nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. nEvtsGen["signal"] = std::make_pair( nEvts, evtWeight ); } std::cout<<"INFO in LauTimeDepFitModel::eventsToGenerate : Generating toy MC with:"<bkgndClassName(bkgndID)<<" background events = "<genValue()<eventsToGenerate(); Bool_t genOK(kTRUE); Int_t evtNum(0); const UInt_t nBkgnds = this->nBkgndClasses(); std::vector bkgndClassNames(nBkgnds); std::vector bkgndClassNamesGen(nBkgnds); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); bkgndClassNames[iBkgnd] = name; bkgndClassNamesGen[iBkgnd] = "gen"+name; } // Loop over the hypotheses and generate the appropriate number of // events for each one for (LauGenInfo::const_iterator iter = nEvts.begin(); iter != nEvts.end(); ++iter) { // find the category of events (e.g. 
signal) const TString& evtCategory(iter->first); // Type const TString& type(iter->first); // Number of events Int_t nEvtsGen( iter->second.first ); // get the event weight for this category const Double_t evtWeight( iter->second.second ); for (Int_t iEvt(0); iEvtsetGenNtupleDoubleBranchValue( "evtWeight", evtWeight ); if (evtCategory == "signal") { this->setGenNtupleIntegerBranchValue("genSig",1); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 ); } // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { this->setGenNtupleIntegerBranchValue("genSig",0); UInt_t bkgndID(0); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { Int_t gen(0); if ( bkgndClassNames[iBkgnd] == type ) { gen = 1; bkgndID = iBkgnd; } this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen ); } genOK = this->generateBkgndEvent(bkgndID); } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); const TString& trueTagVarName { flavTag_->getTrueTagVarName() }; if ( trueTagVarName != "" ) { this->setGenNtupleIntegerBranchValue(trueTagVarName, curEvtTrueTagFlv_); } if ( cpEigenValue_ == QFS ) { const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() }; if ( decayFlvVarName != "" ) { this->setGenNtupleIntegerBranchValue(decayFlvVarName, curEvtDecayFlv_); } } const std::vector& tagVarNames { flavTag_->getTagVarNames() }; const std::vector& mistagVarNames { flavTag_->getMistagVarNames() }; // Loop over the taggers - values set via generateSignalEvent const ULong_t nTaggers {flavTag_->getNTaggers()}; for (ULong_t i=0; isetGenNtupleIntegerBranchValue(tagVarNames[i], curEvtTagFlv_[i]); this->setGenNtupleDoubleBranchValue(mistagVarNames[i], curEvtMistag_[i]); } // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFitModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_->checkToyMC(kTRUE); sigModelB0_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events if (reuseSignal_ || !genOK) { if (signalTree_) { signalTree_->clearUsedList(); } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = 
bkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } return genOK; } Bool_t LauTimeDepFitModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_->squareDP(); doSquareDP &= kinematicsB0_->squareDP(); LauKinematics* kinematics(kinematicsB0bar_); if (this->useDP()) { if (signalTree_) { signalTree_->getEmbeddedEvent(kinematics); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); if (signalTree_->haveBranch("mcMatch")) { Int_t match = TMath::Nint(signalTree_->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // Now generate from the combined DP / decay-time PDF while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown; curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the DP position Double_t m13Sq{0.0}, m23Sq{0.0}; kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A and Abar for the given DP position sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq); // Generate decay time const Double_t tMin = signalDecayTimePdf_->minAbscissa(); const Double_t tMax = signalDecayTimePdf_->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin; // Generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = signalDecayTimePdf_->generateError(kTRUE); // Calculate all the decay time info signalDecayTimePdf_->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar { sigModelB0bar_->getEvtDPAmp() }; const LauComplex& A { sigModelB0_->getEvtDPAmp() }; const Double_t ASq { A.abs2() }; const Double_t AbarSq { Abar.abs2() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // Also retrieve all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // and the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; if ( cpEigenValue_ == QFS) { // Calculate the total intensities for each flavour-specific final state const Double_t ATotSq { ( ASq * dtCosh + curEvtTrueTagFlv_ * ASq * dtCos ) * dpEff * dtEff }; const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dpEff * dtEff }; const Double_t ASumSq { ATotSq + AbarTotSq }; // Finally we throw the dice to see whether this event should be 
generated (and, if so, which final state) const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ASumSq / aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASumSq > aSqMaxVar_) {aSqMaxVar_ = ASumSq;} if ( randNum <= ATotSq / aSqMaxSet_ ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } else { // Calculate the DP terms const Double_t aSqSum { ASq + AbarSq }; const Double_t aSqDif { ASq - AbarSq }; const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; const Double_t interTermIm { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.im() : -2.0 * inter.im() }; const Double_t interTermRe { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.re() : -2.0 * inter.re() }; // Combine DP and decay-time info for all terms const Double_t coshTerm { aSqSum * dtCosh }; const Double_t sinhTerm { interTermRe * dtSinh }; const Double_t cosTerm { aSqDif * dtCos }; const Double_t sinTerm { interTermIm * dtSin }; // Sum to obtain the total and multiply by the efficiency // Multiplying the cos and sin terms by the true flavour at production const Double_t ATotSq { ( coshTerm + sinhTerm + curEvtTrueTagFlv_ * ( cosTerm - sinTerm ) ) * dpEff * dtEff }; //Finally we throw the dice to see whether this event should be generated const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ATotSq/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ATotSq > aSqMaxVar_) {aSqMaxVar_ = ATotSq;} // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } } // end of while !generatedEvent loop } // end of if (signalTree_) else control } else { if ( signalTree_ ) { signalTree_->getEmbeddedEvent(0); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); } } // Check whether we have generated the toy MC OK. if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() ); this->generateExtraPdfValues(sigExtraPdf_, signalTree_); } // Check for problems with the embedding if (signalTree_ && (signalTree_->nEvents() == signalTree_->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } Bool_t LauTimeDepFitModel::generateBkgndEvent([[maybe_unused]] UInt_t bkgndID) { // Generate Bkgnd event Bool_t genOK(kTRUE); //TODO DecayTime part? //TODO Flavour tagging part? 
LauFlavTag::generateBkgndEventInfo() //LauAbsBkgndDPModel* model(0); //LauEmbeddedData* embeddedData(0); //LauPdfList* extraPdfs(0); //LauKinematics* kinematics(0); //model = BkgndDPModels_[bkgndID]; //if (this->enableEmbedding()) { // // find the right embedded data for the current tagging category // LauTagCatEmbDataMap::const_iterator emb_iter = bkgndTree_[bkgndID].find(curEvtTagCat_); // embeddedData = (emb_iter != bkgndTree_[bkgndID].end()) ? emb_iter->second : 0; //} //extraPdfs = &BkgndPdfs_[bkgndID]; //kinematics = kinematicsB0bar_; //if (this->useDP()) { // if (embeddedData) { // embeddedData->getEmbeddedEvent(kinematics); // } else { // if (model == 0) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "ERROR in LauCPFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << bkgndClass << "\"." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // genOK = model->generate(); // } //} else { // if (embeddedData) { // embeddedData->getEmbeddedEvent(0); // } //} //if (genOK) { // this->generateExtraPdfValues(extraPdfs, embeddedData); //} //// Check for problems with the embedding //if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "WARNING in LauCPFitModel::generateBkgndEvent : Source of embedded " << bkgndClass << " events used up, clearing the list of used events." << std::endl; // embeddedData->clearUsedList(); //} // //TODO Placeholder to allow gen to run for examples if( BkgndTypes_[bkgndID]!=LauFlavTag::Combinatorial ) { std::cout << "WARNING in LauTimeDepFitModel::generateBkgndEvent : Non-combinatorial cases aren't being dealt with right now, returning without generation" << std::endl; return kFALSE; } Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } if (BkgndTypes_[bkgndID]==LauFlavTag::Combinatorial){ flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_ ,bkgndID); } else if (BkgndTypes_[bkgndID]==LauFlavTag::SignalLike){ flavTag_->generateEventInfo( curEvtTrueTagFlv_ ); } curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); //TODO make all this work depending on B flavour later //generate a DP point BkgndDPModelsB_[bkgndID]->generate(); //generate decay time and dt error curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematicsB0_ ); //TODO this only works for comb. 
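	// NB generateError(kTRUE) forces a fresh per-event decay-time error to be drawn
	// for this event, and the B0 kinematics object is passed to generate() so that the
	// background decay-time PDF can (presumably) be evaluated at the DP position
	// generated just above.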
	if ( cpEigenValue_ == CPEigenvalue::QFS ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; }

	return genOK;
}

void LauTimeDepFitModel::setupGenNtupleBranches()
{
	// Setup the required ntuple branches
	this->addGenNtupleDoubleBranch("evtWeight");
	this->addGenNtupleIntegerBranch("genSig");
	this->addGenNtupleIntegerBranch("cpEigenvalue");

	const TString& trueTagVarName { flavTag_->getTrueTagVarName() };
	if ( trueTagVarName != "" ) {
		this->addGenNtupleIntegerBranch(trueTagVarName);
	}

	if ( cpEigenValue_ == QFS ) {
		const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() };
		if ( decayFlvVarName != "" ) {
			this->addGenNtupleIntegerBranch(decayFlvVarName);
		}
	}

	const std::vector<TString>& tagVarNames { flavTag_->getTagVarNames() };
	const std::vector<TString>& mistagVarNames { flavTag_->getMistagVarNames() };
	const ULong_t nTaggers { flavTag_->getNTaggers() };
	for (ULong_t position{0}; position<nTaggers; ++position) {
		this->addGenNtupleIntegerBranch(tagVarNames[position]);
		this->addGenNtupleDoubleBranch(mistagVarNames[position]);
	}

	if (this->useDP() == kTRUE) {
		// Let's add the decay time variables.
		this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varName());
		this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varErrName());
		this->addGenNtupleDoubleBranch("m12");
		this->addGenNtupleDoubleBranch("m23");
		this->addGenNtupleDoubleBranch("m13");
		this->addGenNtupleDoubleBranch("m12Sq");
		this->addGenNtupleDoubleBranch("m23Sq");
		this->addGenNtupleDoubleBranch("m13Sq");
		this->addGenNtupleDoubleBranch("cosHel12");
		this->addGenNtupleDoubleBranch("cosHel23");
		this->addGenNtupleDoubleBranch("cosHel13");
		if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) {
			this->addGenNtupleDoubleBranch("mPrime");
			this->addGenNtupleDoubleBranch("thPrime");
		}

		// Can add the real and imaginary parts of the B0 and B0bar total
		// amplitudes seen in the generation (restrict this with a flag
		// that defaults to false)
		if ( storeGenAmpInfo_ ) {
			this->addGenNtupleDoubleBranch("reB0Amp");
			this->addGenNtupleDoubleBranch("imB0Amp");
			this->addGenNtupleDoubleBranch("reB0barAmp");
			this->addGenNtupleDoubleBranch("imB0barAmp");
		}
	}

	// Let's look at the extra variables for signal in one of the tagging categories
	for ( const LauAbsPdf* pdf : sigExtraPdf_ ) {
		const std::vector<TString> varNames { pdf->varNames() };
		for ( const TString& varName : varNames ) {
			if ( varName != "m13Sq" && varName != "m23Sq" ) {
				this->addGenNtupleDoubleBranch( varName );
			}
		}
	}
}

void LauTimeDepFitModel::setDPDtBranchValues()
{
	// Store the decay time variables.
this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varName(),curEvtDecayTime_); this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varErrName(),curEvtDecayTimeErr_); // CONVENTION WARNING // TODO check - for now use B0 for any tags //LauKinematics* kinematics(0); //if (curEvtTagFlv_[position]<0) { LauKinematics* kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar = sigModelB0bar_->getEvtDPAmp(); LauComplex A = sigModelB0_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0Amp", A.re()); this->setGenNtupleDoubleBranchValue("imB0Amp", A.im()); this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re()); this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0); } } } void LauTimeDepFitModel::generateExtraPdfValues(LauPdfList& extraPdfs, LauEmbeddedData* embeddedData) { // CONVENTION WARNING LauKinematics* kinematics = kinematicsB0_; //LauKinematics* kinematics(0); //if (curEvtTagFlv_<0) { // kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Generate from the extra PDFs for (LauPdfList::iterator pdf_iter = extraPdfs.begin(); pdf_iter != extraPdfs.end(); ++pdf_iter) { LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( (*pdf_iter)->varNames() ); } else { genValues = (*pdf_iter)->generate(kinematics); } for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) { TString varName = var_iter->first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var_iter->second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } void LauTimeDepFitModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0_->updateCoeffs(coeffsB0_); this->calcInterTermNorm(); } // Update the decay time 
normalisation if ( signalDecayTimePdf_ ) { signalDecayTimePdf_->propagateParUpdates(); } // TODO // - maybe also need to add an update of the background decay time PDFs here // Update the signal events from the background numbers if not doing an extended fit // And update the tagging category fractions this->updateSigEvents(); } void LauTimeDepFitModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. if (!this->doEMLFit()) { Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { LauAbsRValue* nBkgndEvents = (*iter); if ( nBkgndEvents->isLValue() ) { LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*nTotEvts,2.0*nTotEvts); } } // Subtract background events (if any) from signal. if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { signalEvents -= (*iter)->value(); } } if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } } void LauTimeDepFitModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); evtCPEigenVals_.clear(); const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else if ( cpEV == 0 ) { cpEigenValue_ = QFS; } else { std::cerr<<"WARNING in LauTimeDepFitModel::cacheInputFitVars : Unknown value: "<cacheInputFitVars(inputFitData); - if (this->useDP() == kTRUE) { // DecayTime and SigmaDecayTime signalDecayTimePdf_->cacheInfo(*inputFitData); // cache all the backgrounds too for(auto& bg : BkgndDecayTimePdfs_) {bg->cacheInfo(*inputFitData);} } + // Flavour tagging information + flavTag_->cacheInputFitVars(inputFitData,signalDecayTimePdf_->varName()); + // ...and then the extra PDFs if (not sigExtraPdf_.empty()){ this->cacheInfo(sigExtraPdf_, *inputFitData); } if(usingBkgnd_ == kTRUE){ for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) { this->cacheInfo((*iter), *inputFitData); } } if (this->useDP() == kTRUE) { sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = BkgndDPModelsB_.begin(); iter != BkgndDPModelsB_.end(); ++iter) { (*iter)->fillDataTree(*inputFitData); } for (LauBkgndDPModelList::iterator iter = BkgndDPModelsBbar_.begin(); iter != BkgndDPModelsBbar_.end(); ++iter) { if ((*iter) != nullptr) { (*iter)->fillDataTree(*inputFitData); } } } } } Double_t LauTimeDepFitModel::getTotEvtLikelihood(const UInt_t iEvt) { // Get the CP eigenvalue of 
the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds) this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds) this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); // TODO - consider what to do here - do we even want the option not to use the DP in this model? //if ( not this->useDP() ) { //signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); //} // Construct the total event likelihood Double_t likelihood { sigLike * signalEvents }; if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { // TODO // for combinatorial background (and perhaps others) this factor 0.5 needs to be here // to balance the factor 2 in the signal normalisation that arises from the sum over // tag decisions and integral over eta // for other (more signal-like) backgrounds where we need to think about things depending // on the tag decision and where there may be asymmetries as well this will (probably) arise naturally const Double_t bkgndEvents { 0.5 * bkgndEvents_[bkgndID]->unblindValue() }; likelihood += bkgndEvents*bkgndDPLike_[bkgndID]*bkgndExtraLike_[bkgndID]; } } return likelihood; } Double_t LauTimeDepFitModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); if (usingBkgnd_) { for ( const auto& yieldPar : bkgndEvents_ ) { eventSum += yieldPar->unblindValue(); } } return eventSum; } void LauTimeDepFitModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. if ( ! this->useDP() ) { // There's always going to be a term in the likelihood for the // signal, so we'd better not zero it. 
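		// (The signal term multiplies the signal yield in getTotEvtLikelihood, so it
		// must default to unity here; the background terms are only added in when
		// usingBkgnd_ is true, so they can safely default to zero otherwise.)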
		sigDPLike_ = 1.0;

		const UInt_t nBkgnds = this->nBkgndClasses();
		for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
			if (usingBkgnd_ == kTRUE) {
				bkgndDPLike_[bkgndID] = 1.0;
			} else {
				bkgndDPLike_[bkgndID] = 0.0;
			}
		}

		return;
	}

	// Calculate event quantities
	// Get the DP dynamics, decay time, and flavour tagging to calculate
	// everything required for the likelihood calculation
	sigModelB0bar_->calcLikelihoodInfo(iEvt);
	sigModelB0_->calcLikelihoodInfo(iEvt);
	signalDecayTimePdf_->calcLikelihoodInfo(iEvt);
	flavTag_->updateEventInfo(iEvt);

	// Retrieve the amplitudes and efficiency from the dynamics
	LauComplex Abar { sigModelB0bar_->getEvtDPAmp() };
	LauComplex A { sigModelB0_->getEvtDPAmp() };
	const Double_t dpEff { sigModelB0bar_->getEvtEff() };

	// If this is a QFS decay, one of the DP amplitudes needs to be zeroed
	if (cpEigenValue_ == QFS){
		curEvtDecayFlv_ = flavTag_->getCurEvtDecayFlv();
		if ( curEvtDecayFlv_ == +1 ) {
			Abar.zero();
		} else if ( curEvtDecayFlv_ == -1 ) {
			A.zero();
		}
	}

	// Next calculate the DP terms
	const Double_t aSqSum { A.abs2() + Abar.abs2() };
	const Double_t aSqDif { A.abs2() - Abar.abs2() };

	Double_t interTermRe { 0.0 };
	Double_t interTermIm { 0.0 };

	if ( cpEigenValue_ != QFS ) {
		const LauComplex inter { Abar * A.conj() * phiMixComplex_ };
		if ( cpEigenValue_ == CPEven ) {
			interTermIm = 2.0 * inter.im();
			interTermRe = 2.0 * inter.re();
		} else {
			interTermIm = -2.0 * inter.im();
			interTermRe = -2.0 * inter.re();
		}
	}

	// First get all the decay time terms
	// TODO Backgrounds

	// Get the decay time acceptance
	const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() };

	// Get all the decay time terms
	const Double_t dtCos { signalDecayTimePdf_->getCosTerm() };
	const Double_t dtSin { signalDecayTimePdf_->getSinTerm() };
	const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() };
	const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() };

	// Get the decay time error term
	const Double_t dtErrLike { signalDecayTimePdf_->getErrTerm() };

	// Get flavour tagging terms
	Double_t omega{1.0};
	Double_t omegabar{1.0};
	const ULong_t nTaggers { flavTag_->getNTaggers() };
	for (ULong_t position{0}; position<nTaggers; ++position) {
		omega *= flavTag_->getCapitalOmega(position, LauFlavTag::Flavour::B);
		omegabar *= flavTag_->getCapitalOmega(position, LauFlavTag::Flavour::Bbar);
	}

	const Double_t prodAsym { AProd_.unblindValue() };
	const Double_t ftOmegaHyp { ((1.0 - prodAsym)*omega + (1.0 + prodAsym)*omegabar) };
	const Double_t ftOmegaTrig { ((1.0 - prodAsym)*omega - (1.0 + prodAsym)*omegabar) };

	const Double_t coshTerm { ftOmegaHyp * dtCosh * aSqSum };
	const Double_t sinhTerm { ftOmegaHyp * dtSinh * interTermRe };
	const Double_t cosTerm { ftOmegaTrig * dtCos * aSqDif };
	const Double_t sinTerm { ftOmegaTrig * dtSin * interTermIm };

	// Combine all terms to get the total amplitude squared
	const Double_t ASq { coshTerm + sinhTerm + cosTerm - sinTerm };

	// Calculate the DP and time normalisation
	const Double_t normASqSum { sigModelB0_->getDPNorm() + sigModelB0bar_->getDPNorm() };
	const Double_t normASqDiff { sigModelB0_->getDPNorm() - sigModelB0bar_->getDPNorm() };

	Double_t normInterTermRe { 0.0 };
	Double_t normInterTermIm { 0.0 };

	if ( cpEigenValue_ != QFS ) {
		// TODO - double check this sign flipping here (it's presumably right but...)
		normInterTermRe = ( cpEigenValue_ == CPOdd ) ? -1.0 * interTermReNorm_ : interTermReNorm_;
		normInterTermIm = ( cpEigenValue_ == CPOdd ) ?
-1.0 * interTermImNorm_ : interTermImNorm_; } const Double_t normCoshTerm { signalDecayTimePdf_->getNormTermCosh() }; const Double_t normSinhTerm { signalDecayTimePdf_->getNormTermSinh() }; const Double_t normCosTerm { signalDecayTimePdf_->getNormTermCos() }; const Double_t normSinTerm { signalDecayTimePdf_->getNormTermSin() }; const Double_t normHyp { normASqSum * normCoshTerm + normInterTermRe * normSinhTerm }; const Double_t normTrig { - prodAsym * ( normASqDiff * normCosTerm + normInterTermIm * normSinTerm ) }; // Combine all terms to get the total normalisation const Double_t norm { 2.0 * ( normHyp + normTrig ) }; // Multiply the squared-amplitude by the efficiency (DP and decay time) and decay-time error likelihood // and normalise to obtain the signal likelihood sigDPLike_ = ( ASq * dpEff * dtEff * dtErrLike ) / norm; // Background part // TODO add them into the actual Likelihood calculatiions // TODO sort out B and Bbar backgrounds for the DP here // TODO need to include the flavour tagging parts here as well (per tagger and per background source) and will vary by Bkgnd type as well // TODO add new function as getEvtBkgndLikelihoods? const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(iEvt); // If Bbar DP Model is a nullptr then only consider B DP Model if (BkgndDPModelsBbar_[bkgndID]==nullptr){ bkgndDPLike_[bkgndID] = BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt); } else { bkgndDPLike_[bkgndID] = 0.5*(BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt) + BkgndDPModelsBbar_[bkgndID]->getLikelihood(iEvt)); } bkgndDPLike_[bkgndID]*= BkgndDecayTimePdfs_[bkgndID]->getHistTerm(); //TODO FT part } else { bkgndDPLike_[bkgndID] = 0.0; } } } void LauTimeDepFitModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal if ( not sigExtraPdf_.empty() ) { sigExtraLike_ = this->prodPdfValue( sigExtraPdf_, iEvt ); } // Background const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_) { bkgndExtraLike_[bkgndID] = this->prodPdfValue( BkgndPdfs_[bkgndID], iEvt ); } else { bkgndExtraLike_[bkgndID] = 0.0; } } } //TODO obsolete? void LauTimeDepFitModel::getEvtFlavTagLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigFlavTagLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // Loop over taggers const ULong_t nTaggers { flavTag_->getNTaggers() }; for (ULong_t position{0}; positioncalcLikelihoodInfo(iEvt); sigFlavTagLike_ = sigFlavTagPdf_[position]->getLikelihood(); } } if (sigFlavTagLike_<=0){ std::cout<<"INFO in LauTimeDepFitModel::getEvtFlavTagLikelihood : Event with 0 FlavTag Liklihood"<antiparticleCoeff()); coeffsB0_.push_back(coeffPars_[i]->particleCoeff()); } } void LauTimeDepFitModel::checkMixingPhase() { Double_t phase = phiMix_.value(); Double_t genPhase = phiMix_.genValue(); // Check now whether the phase lies in the right range (-pi to pi). 
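	// For example, a value of +3.5 rad is wrapped to 3.5 - 2*pi ~ -2.78 rad and
	// -3.5 rad to -3.5 + 2*pi ~ +2.78 rad; 2*pi is added or subtracted repeatedly
	// until the value falls inside (-pi, pi).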
Bool_t withinRange(kFALSE); while (withinRange == kFALSE) { if (phase > -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepFitModel::embedSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_) { std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Already embedding signal from file."<findBranches(); if (!dataOK) { delete signalTree_; signalTree_ = 0; std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Problem creating data tree for embedding."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); LauEmbeddedData* bkgTree = bkgndTree_[bkgndID]; if (bkgTree) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Already embedding background from a file." << std::endl; return; } bkgTree = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = bkgTree->findBranches(); if (!dataOK) { delete bkgTree; bkgTree = 0; std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Problem creating data tree for embedding." << std::endl; return; } reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble; if (this->enableEmbedding() == kFALSE) { this->enableEmbedding(kTRUE); } } void LauTimeDepFitModel::setupSPlotNtupleBranches() { // add branches for storing the experiment number and the number of // the event within the current experiment this->addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species. 
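	// A "TotalLike" branch is booked for the signal and one per background class
	// (named "<bkgndClass>TotalLike"); these per-species likelihood branches are the
	// inputs later used for the sWeight calculation.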
this->addSPlotNtupleDoubleBranch("sigTotalLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "TotalLike"; this->addSPlotNtupleDoubleBranch(name); } } // Store the DP likelihoods if (this->useDP()) { this->addSPlotNtupleDoubleBranch("sigDPLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "DPLike"; this->addSPlotNtupleDoubleBranch(name); } } } // Store the likelihoods for each extra PDF this->addSPlotNtupleBranches(sigExtraPdf_, "sig"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); this->addSPlotNtupleBranches(BkgndPdfs_[iBkgnd], bkgndClass); } } } void LauTimeDepFitModel::addSPlotNtupleBranches(const LauPdfList& extraPdfs, const TString& prefix) { // Loop through each of the PDFs for ( const LauAbsPdf* pdf : extraPdfs ) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString name{prefix}; name += pdf->varName(); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars{""}; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { allVars += varName; TString name{prefix}; name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } } TString name{prefix}; name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr<<"WARNING in LauTimeDepFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<calcLikelihoodInfo(iEvt); extraLike = pdf->getLikelihood(); totalLike *= extraLike; // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString name{prefix}; name += pdf->varName(); name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars{""}; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { allVars += varName; TString name{prefix}; name += varName; name += "Like"; const Double_t indivLike = pdf->getLikelihood( varName ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } } TString name{prefix}; name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<useDP()) { nameSet.insert("DP"); } for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { // Loop over the variables involved in each PDF const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { // If they are not DP coordinates then add them if ( varName != "m13Sq" && varName != "m23Sq" ) { nameSet.insert( varName ); } } } return nameSet; } LauSPlot::NumbMap LauTimeDepFitModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (!par->fixed()) { numbMap[bkgndClass] = par->genValue(); if ( ! 
par->isLValue() ) { std::cerr << "WARNING in LauTimeDepFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl; } } } } return numbMap; } LauSPlot::NumbMap LauTimeDepFitModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (par->fixed()) { numbMap[bkgndClass] = par->genValue(); } } } return numbMap; } LauSPlot::TwoDMap LauTimeDepFitModel::twodimPDFs() const { LauSPlot::TwoDMap twodimMap; for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { // Count the number of input variables that are not DP variables UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sig", std::make_pair( varNames[0], varNames[1] ) ) ); } } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); for ( const LauAbsPdf* pdf : BkgndPdfs_[iBkgnd] ) { // Count the number of input variables that are not DP variables UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) ); } } } } return twodimMap; } void LauTimeDepFitModel::storePerEvtLlhds() { std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Storing per-event likelihood values..."<fitData(); // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it if (!this->useDP() && this->storeDPEff()) { sigModelB0bar_->initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(sigModelB0bar_); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { // Find out whether we have B0bar or B0 flavTag_->updateEventInfo(iEvt); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); // the DP information this->getEvtDPDtLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { sigModel->calcLikelihoodInfo(iEvt); } this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); } if (this->useDP()) { sigTotalLike_ = sigDPLike_; this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "DPLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]); } } } else { sigTotalLike_ = 1.0; } // the signal PDF values sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigExtraPdf_, "sig", iEvt); // the background PDF values if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& 
bkgndClass = this->bkgndClassName(iBkgnd); LauPdfList& pdfs = BkgndPdfs_[iBkgnd]; bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(pdfs, bkgndClass, iEvt); } } // the total likelihoods this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "TotalLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]); } } // fill the tree this->fillSPlotNtupleBranches(); } std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Finished storing per-event likelihood values."<