diff --git a/inc/LauAbsBkgndDPModel.hh b/inc/LauAbsBkgndDPModel.hh index 9688409..d2cd6e8 100644 --- a/inc/LauAbsBkgndDPModel.hh +++ b/inc/LauAbsBkgndDPModel.hh @@ -1,155 +1,168 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauAbsBkgndDPModel.hh \brief File containing declaration of LauAbsBkgndDPModel class. */ /*! \class LauAbsBkgndDPModel \brief The abstract interface for a background Dalitz plot model Class which defines the abstract interface for a background Dalitz plot model */ #ifndef LAU_ABS_BKGND_DP_MODEL #define LAU_ABS_BKGND_DP_MODEL #include "Rtypes.h" class LauDaughters; class LauKinematics; class LauFitDataTree; class LauVetoes; class LauAbsBkgndDPModel { public: //! Destructor virtual ~LauAbsBkgndDPModel() {} //! Initialise the model virtual void initialise() = 0; //! Generate a toy MC event from the model /*! \return success/failure flag */ virtual Bool_t generate() = 0; - //! Get likelihood for a given event + //! Get likelihood (unnormalised likelihood value / PDF normalisation) for a given event /*! \param [in] iEvt the event number \return the likelihood value */ virtual Double_t getLikelihood(UInt_t iEvt) = 0; //! Get unnormalised likelihood for a given event /*! \param [in] iEvt the event number \return the unnormalised likelihood value */ virtual Double_t getUnNormValue(UInt_t iEvt) = 0; + //! 
Get the unnormalised likelihood value for the current event + /*! + \return the unnormalised likelihood value + */ + virtual Double_t getUnNormValue() const = 0; + //! Get PDF normalisation constant /*! \return the PDF normalisation constant */ virtual Double_t getPdfNorm() const = 0; + //! Calculate the likelihood value for a given set of Dalitz plot coordinates + /*! + \param [in] m13Sq the invariant mass squared of daughters 1 and 3 + \param [in] m23Sq the invariant mass squared of daughters 2 and 3 + */ + virtual void calcLikelihoodInfo(const Double_t m13Sq, const Double_t m23Sq) = 0; + //! Cache the input data and (if appropriate) the per-event likelihood values /*! \param [in] fitDataTree the input data */ virtual void fillDataTree(const LauFitDataTree& fitDataTree) = 0; //! Get the daughter particles /*! \return the daughters */ const LauDaughters* getDaughters() const {return daughters_;} //! Get the daughter particles /*! \return the daughters */ LauDaughters* getDaughters() {return daughters_;} //! Get the Dalitz plot kinematics /*! \return the kinematics */ const LauKinematics* getKinematics() const {return kinematics_;} //! Get the Dalitz plot kinematics /*! \return the kinematics */ LauKinematics* getKinematics() {return kinematics_;} //! Get vetoes in the Dalitz plot /*! \return the vetoes */ const LauVetoes* getVetoes() const {return vetoes_;} //! Get vetoes in the Dalitz plot /*! \return the vetoes */ LauVetoes* getVetoes() {return vetoes_;} protected: //! Constructor /*! \param [in] daughters the daughter particles \param [in] vetoes the vetoes within the Dalitz plot */ LauAbsBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes); //! Set data event number /*! \param [in] iEvt the event number */ virtual void setDataEventNo(UInt_t iEvt) = 0; private: //! Copy constructor (not implemented) LauAbsBkgndDPModel(const LauAbsBkgndDPModel& rhs); //! 
Copy assignment operator (not implemented) LauAbsBkgndDPModel& operator=(const LauAbsBkgndDPModel& rhs); //! The daughter particles LauDaughters* daughters_; //! Dalitz plot kinematics LauKinematics* kinematics_; //! Vetoes within the Dalitz plot LauVetoes* vetoes_; ClassDef(LauAbsBkgndDPModel,0) // Abstract DP background model }; #endif diff --git a/inc/LauBkgndDPModel.hh b/inc/LauBkgndDPModel.hh index 7ea3d3f..d34922a 100644 --- a/inc/LauBkgndDPModel.hh +++ b/inc/LauBkgndDPModel.hh @@ -1,172 +1,185 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauBkgndDPModel.hh \brief File containing declaration of LauBkgndDPModel class. */ /*! \class LauBkgndDPModel \brief Class for defining a histogram-based background Dalitz plot model Class for defining a histogram-based background Dalitz plot model */ #ifndef LAU_BKGND_DP_MODEL #define LAU_BKGND_DP_MODEL #include #include "LauAbsBkgndDPModel.hh" class TH2; class Lau2DAbsDPPdf; class LauDaughters; class LauFitDataTree; class LauVetoes; class LauBkgndDPModel : public LauAbsBkgndDPModel { public: //! Constructor /*! \param [in] daughters the daughters in the decay \param [in] vetoes the vetoes in the Datliz plot */ LauBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes); - + //! Destructor virtual ~LauBkgndDPModel(); //! Set background histogram /*! 
\param [in] histo the 2D histogram describing the DP distribution \param [in] useInterpolation boolean flag to determine whether linear interpolation between bins should be used or simply the raw bin values - \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. + \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. The seed for the random number generator used to fluctuate the bins should first be set using LauRandom::setSeed. \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates */ - void setBkgndHisto(const TH2* histo, Bool_t useInterpolation, + void setBkgndHisto(const TH2* histo, Bool_t useInterpolation, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP = kFALSE); //! Set the background histogram and generate a spline /*! \param [in] histo the 2D histogram describing the DP distribution - \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. + \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. The seed for the random number generator used to fluctuate the bins should first be set using LauRandom::setSeed. \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates */ void setBkgndSpline(const TH2* histo, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP); //! 
Initialise the model virtual void initialise(); - + //! Generate a toy MC event from the model /*! \return success/failure flag */ virtual Bool_t generate(); - + //! Get unnormalised likelihood for a given event /*! \param [in] iEvt the event number \return the unnormalised likelihood value */ virtual Double_t getUnNormValue(UInt_t iEvt); - + + //! Get likelihood (unnormalised likelihood value / PDF normalisation) for a given event + /*! + \param [in] iEvt the event number + \return the likelihood value + */ + virtual Double_t getLikelihood(UInt_t iEvt); + + //! Get the unnormalised likelihood value for the current event + /*! + \return the unnormalised likelihood value + */ + virtual Double_t getUnNormValue() const {return curEvtHistVal_;} + + //! Get PDF normalisation constant /*! \return the PDF normalisation constant */ virtual Double_t getPdfNorm() const {return pdfNorm_;} - - //! Get likelihood for a given event + + //! Calculate the likelihood value for a given set of Dalitz plot coordinates /*! - \param [in] iEvt the event number - \return the likelihood value + \param [in] m13Sq the invariant mass squared of daughters 1 and 3 + \param [in] m23Sq the invariant mass squared of daughters 2 and 3 */ - virtual Double_t getLikelihood(UInt_t iEvt); - + virtual void calcLikelihoodInfo(const Double_t m13Sq, const Double_t m23Sq); + //! Cache the input data and (if appropriate) the per-event likelihood values /*! \param [in] fitDataTree the input data */ virtual void fillDataTree(const LauFitDataTree& fitDataTree); - + protected: //! Calulate histogram value at a given point /*! \param [in] xVal the x-value \param [in] yVal the y-value \return the histogram value */ Double_t calcHistValue(Double_t xVal, Double_t yVal); //! Set data event number /*! \param [in] iEvt the event number */ virtual void setDataEventNo(UInt_t iEvt); private: //! Copy constructor (not implemented) LauBkgndDPModel(const LauBkgndDPModel& rhs); //! 
Copy assignment operator (not implemented) LauBkgndDPModel& operator=(const LauBkgndDPModel& rhs); //! Flags whether the DP is symmetrical or not Bool_t symmetricalDP_; //! Flags whether or not to work in square DP coordinates Bool_t squareDP_; //! PDF of Dalitz plot background, from a 2D histogram Lau2DAbsDPPdf* bgHistDPPdf_; //! Cached histogram values for each event std::vector bgData_; //! Histogram value for the current event - Double_t curEvtHistVal_; + Double_t curEvtHistVal_; //! Maximum height of PDF Double_t maxPdfHeight_; //! Normalisation of PDF Double_t pdfNorm_; //! Boolean to indicate if the warning that there is no histogram has already been issued Bool_t doneGenWarning_; //! Flag to track whether a warning has been issued for bin values less than zero mutable Bool_t lowBinWarningIssued_; ClassDef(LauBkgndDPModel,0) // DP background model }; #endif diff --git a/src/LauBkgndDPModel.cc b/src/LauBkgndDPModel.cc index 4730d4c..392f338 100644 --- a/src/LauBkgndDPModel.cc +++ b/src/LauBkgndDPModel.cc @@ -1,306 +1,320 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauBkgndDPModel.cc \brief File containing implementation of LauBkgndDPModel class. 
*/ #include #include #include "TRandom.h" #include "TSystem.h" #include "Lau2DHistDPPdf.hh" #include "Lau2DSplineDPPdf.hh" #include "LauBkgndDPModel.hh" #include "LauDaughters.hh" #include "LauFitDataTree.hh" #include "LauKinematics.hh" #include "LauRandom.hh" #include "LauVetoes.hh" ClassImp(LauBkgndDPModel) LauBkgndDPModel::LauBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes) : LauAbsBkgndDPModel(daughters, vetoes), symmetricalDP_(kFALSE), squareDP_(kFALSE), bgHistDPPdf_(0), curEvtHistVal_(0.0), maxPdfHeight_(1.0), pdfNorm_(1.0), doneGenWarning_(kFALSE), lowBinWarningIssued_(kFALSE) { if (daughters != 0) { symmetricalDP_ = daughters->gotSymmetricalDP(); } } LauBkgndDPModel::~LauBkgndDPModel() { if (bgHistDPPdf_ != 0) { delete bgHistDPPdf_; bgHistDPPdf_ = 0; } } void LauBkgndDPModel::setBkgndHisto(const TH2* histo, Bool_t useInterpolation, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP) { if ( ! histo ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndHisto : Supplied background histogram pointer is null, likelihood for this component will be flat in the Dalitz plot" << std::endl; } Bool_t upperHalf = kFALSE; if (symmetricalDP_ == kTRUE && useUpperHalfOnly == kTRUE) {upperHalf = kTRUE;} std::cout<<"INFO in LauBkgndDPModel::setBkgndHisto : Background histogram has upperHalf = "<(upperHalf)<getKinematics(); const LauVetoes* vetoes = this->getVetoes(); bgHistDPPdf_ = new Lau2DHistDPPdf(histo, kinematics, vetoes, useInterpolation, fluctuateBins, upperHalf, squareDP); maxPdfHeight_ = bgHistDPPdf_->getMaxHeight(); pdfNorm_ = bgHistDPPdf_->getHistNorm(); } void LauBkgndDPModel::setBkgndSpline(const TH2* histo, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP) { if ( ! 
histo ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndSpline : Supplied background histogram pointer is null, construction of spline will fail" << std::endl; } Bool_t upperHalf = kFALSE; if (symmetricalDP_ == kTRUE && useUpperHalfOnly == kTRUE) {upperHalf = kTRUE;} std::cout<<"INFO in LauBkgndDPModel::setBkgndSpline : Background histogram has upperHalf = "<(upperHalf)<getKinematics(); const LauVetoes* vetoes = this->getVetoes(); bgHistDPPdf_ = new Lau2DSplineDPPdf(histo, kinematics, vetoes, fluctuateBins, upperHalf, squareDP); maxPdfHeight_ = bgHistDPPdf_->getMaxHeight(); pdfNorm_ = bgHistDPPdf_->getHistNorm(); } Double_t LauBkgndDPModel::calcHistValue(Double_t xVal, Double_t yVal) { - // Get the likelihood value of the background in the Dalitz plot. + // Get the likelihood value of the background in the Dalitz plot. // Check that we have a valid histogram PDF if (bgHistDPPdf_ == 0) { std::cerr << "WARNING in LauBkgndDPModel::calcHistValue : We don't have a histogram so assuming the likelihood is flat in the Dalitz plot." << std::endl; this->setBkgndHisto( 0, kFALSE, kFALSE, kFALSE, kFALSE ); } // Find out the un-normalised PDF value Double_t value = bgHistDPPdf_->interpolateXY(xVal, yVal); // Check that the value is greater than zero // If we're using a spline then negative values can be caused by adjacent bins that all contain a value of zero. // The spline requires the value, its first derivatives and the mixed second derivative to be continuous and to match the input histogram // at the bin centres. Derivatives are calculated using a finite difference approximation taking the difference between the neighbouring bins. // If two bins are zero but the third is not then the second bin will have a positive first derivative causing the spline to dip below zero // between the two zero bins to remain smooth. Such dips are unavoidable but are correctly removed here. 
if ( value < 0.0 ) { if(!lowBinWarningIssued_) { std::cerr << "WARNING in LauBkgndDPModel::calcHistValue : Value " << value << " is less than 0 - setting to 0. You may want to check your histogram!" << std::endl << " : If you are using a spline then this could be caused by adjacent empty bins. Further warnings will be suppressed." << std::endl; lowBinWarningIssued_=kTRUE; } return 0.0; } LauKinematics* kinematics = this->getKinematics(); // For square DP co-ordinates, need to divide by Jacobian if (squareDP_ == kTRUE) { // Make sure that square DP kinematics are correct, then get Jacobian kinematics->updateSqDPKinematics(xVal, yVal); Double_t jacobian = kinematics->calcSqDPJacobian(); value /= jacobian; } return value; } -void LauBkgndDPModel::initialise() +void LauBkgndDPModel::initialise() { } Bool_t LauBkgndDPModel::generate() { // Routine to generate the background, using data provided by an // already defined histogram. LauKinematics* kinematics = this->getKinematics(); Bool_t gotBG(kFALSE); while (gotBG == kFALSE) { if (squareDP_ == kTRUE) { // Generate a point in m', theta' space. By construction, this point // is already within the DP region. Double_t mPrime(0.0), thetaPrime(0.0); kinematics->genFlatSqDP(mPrime, thetaPrime); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && thetaPrime > 0.5 ) { thetaPrime = 1.0 - thetaPrime; } // Calculate histogram height for DP point and // compare with the maximum height if ( bgHistDPPdf_ != 0 ) { Double_t bgContDP = bgHistDPPdf_->interpolateXY(mPrime, thetaPrime)/maxPdfHeight_; if (LauRandom::randomFun()->Rndm() < bgContDP) { kinematics->updateSqDPKinematics(mPrime, thetaPrime); gotBG = kTRUE; } } else { if ( !doneGenWarning_ ) { std::cerr << "WARNING in LauBkgndDPModel::generate : We don't have a histogram so generating flat in the square DP, which won't be flat in the conventional DP!" 
<< std::endl; std::cerr << "WARNING in LauBkgndDPModel::generate : This should never happen!! What have you done?!" << std::endl; doneGenWarning_ = kTRUE; } kinematics->updateSqDPKinematics(mPrime, thetaPrime); gotBG = kTRUE; } } else { // Generate a point in the Dalitz plot (phase-space). Double_t m13Sq(0.0), m23Sq(0.0); kinematics->genFlatPhaseSpace(m13Sq, m23Sq); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && m13Sq > m23Sq ) { Double_t tmpSq = m13Sq; m13Sq = m23Sq; m23Sq = tmpSq; } // Calculate histogram height for DP point and // compare with the maximum height if ( bgHistDPPdf_ != 0 ) { Double_t bgContDP = bgHistDPPdf_->interpolateXY(m13Sq, m23Sq)/maxPdfHeight_; if (LauRandom::randomFun()->Rndm() < bgContDP) { kinematics->updateKinematics(m13Sq, m23Sq); - gotBG = kTRUE; + gotBG = kTRUE; } } else { if ( !doneGenWarning_ ) { std::cerr << "WARNING in LauBkgndDPModel::generate : We don't have a histogram so generating flat in the DP." << std::endl; doneGenWarning_ = kTRUE; } kinematics->updateKinematics(m13Sq, m23Sq); gotBG = kTRUE; } } } // Implement veto Bool_t vetoOK(kTRUE); const LauVetoes* vetoes = this->getVetoes(); if (vetoes) { vetoOK = vetoes->passVeto(kinematics); } // Call this function recusively until we pass the veto. if (vetoOK == kFALSE) { this->generate(); } return kTRUE; } void LauBkgndDPModel::fillDataTree(const LauFitDataTree& inputFitTree) { // In LauFitDataTree, the first two variables should always be // m13^2 and m23^2. Other variables follow thus: charge. 
Int_t nBranches = inputFitTree.nBranches(); if (nBranches < 2) { std::cerr<<"ERROR in LauBkgndDPModel::fillDataTree : Expecting at least 2 variables "<<"in input data tree, but there are "<getKinematics(); // clear and resize the data vector bgData_.clear(); bgData_.resize(nEvents); for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitTree.getData(iEvt); LauFitData::const_iterator iter = dataValues.find("m13Sq"); m13Sq = iter->second; iter = dataValues.find("m23Sq"); m23Sq = iter->second; // Update the kinematics. This will also update m' and theta' if squareDP = true kinematics->updateKinematics(m13Sq, m23Sq); if (squareDP_ == kTRUE) { mPrime = kinematics->getmPrime(); thetaPrime = kinematics->getThetaPrime(); curEvtHistVal_ = this->calcHistValue(mPrime, thetaPrime); } else { curEvtHistVal_ = this->calcHistValue(m13Sq, m23Sq); } bgData_[iEvt] = curEvtHistVal_; } } +void LauBkgndDPModel::calcLikelihoodInfo(const Double_t m13Sq, const Double_t m23Sq) +{ + LauKinematics* kinematics { this->getKinematics() }; + kinematics->updateKinematics( m13Sq, m23Sq ); + + if (squareDP_ == kTRUE) { + const Double_t mPrime { kinematics->getmPrime() }; + const Double_t thetaPrime { kinematics->getThetaPrime() }; + curEvtHistVal_ = this->calcHistValue(mPrime, thetaPrime); + } else { + curEvtHistVal_ = this->calcHistValue(m13Sq, m23Sq); + } +} + Double_t LauBkgndDPModel::getUnNormValue(UInt_t iEvt) { // Retrieve the likelihood for the given event this->setDataEventNo(iEvt); return curEvtHistVal_; } Double_t LauBkgndDPModel::getLikelihood(UInt_t iEvt) { // Retrieve the likelihood for the given event this->setDataEventNo(iEvt); Double_t llhd = curEvtHistVal_ / this->getPdfNorm(); return llhd; } void LauBkgndDPModel::setDataEventNo(UInt_t iEvt) { // Retrieve the data for event iEvt if (bgData_.size() > iEvt) { curEvtHistVal_ = bgData_[iEvt]; } else { std::cerr<<"ERROR in LauBkgndDPModel::setDataEventNo : Event index too large: "<= "< #include #include 
#include #include #include #include "TMath.h" #include "TRandom.h" #include "TSystem.h" #include "TH1.h" #include "RooMath.h" #include "Lau1DCubicSpline.hh" #include "Lau1DHistPdf.hh" #include "LauConstants.hh" #include "LauComplex.hh" #include "LauDecayTimePdf.hh" #include "LauFitDataTree.hh" #include "LauParameter.hh" #include "LauParamFixed.hh" #include "LauRandom.hh" ClassImp(LauDecayTimePdf) LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, Double_t minAbscissaVal, Double_t maxAbscissaVal, Double_t minAbscissaErr, Double_t maxAbscissaErr, FuncType type, UInt_t nGauss, const std::vector& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : varName_(theVarName), varErrName_(theVarErrName), params_(params), smear_(kTRUE), minAbscissa_(minAbscissaVal), maxAbscissa_(maxAbscissaVal), minAbscissaError_(minAbscissaErr), maxAbscissaError_(maxAbscissaErr), abscissaError_(0.0), abscissaErrorGenerated_(kFALSE), errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 nGauss_(nGauss), tau_(nullptr), deltaM_(nullptr), deltaGamma_(nullptr), fracPrompt_(nullptr), type_(type), method_(method), effMethod_(effMethod), scaleMeans_(scale), scaleWidths_(scale), scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or() ) ), expTerm_(0.0), cosTerm_(0.0), sinTerm_(0.0), coshTerm_(0.0), sinhTerm_(0.0), normTermExp_(0.0), normTermCosh_(0.0), normTermSinh_(0.0), errTerm_(0.0), effiTerm_(0.0), pdfTerm_(0.0), errHist_(nullptr), pdfHist_(nullptr), effiFun_(nullptr), effiHist_(nullptr), effiPars_(0) { if (nGauss > 0) { frac_.assign(nGauss_-1,nullptr); mean_.assign(nGauss_,nullptr); sigma_.assign(nGauss_,nullptr); meanVals_.assign(nGauss_,0.0); sigmaVals_.assign(nGauss_,0.0); fracVals_.assign(nGauss_,0.0); } } LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, 
const std::vector& params, Double_t minAbscissaVal, Double_t maxAbscissaVal, Double_t minAbscissaErr, Double_t maxAbscissaErr, FuncType type, UInt_t nGauss, const std::vector& scaleMeans, const std::vector& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : varName_(theVarName), varErrName_(theVarErrName), params_(params), smear_(kTRUE), minAbscissa_(minAbscissaVal), maxAbscissa_(maxAbscissaVal), minAbscissaError_(minAbscissaErr), maxAbscissaError_(maxAbscissaErr), abscissaError_(0.0), abscissaErrorGenerated_(kFALSE), errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 nGauss_(nGauss), tau_(nullptr), deltaM_(nullptr), deltaGamma_(nullptr), fracPrompt_(nullptr), type_(type), method_(method), effMethod_(effMethod), scaleMeans_(scaleMeans), scaleWidths_(scaleWidths), scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or() ) ), expTerm_(0.0), cosTerm_(0.0), sinTerm_(0.0), coshTerm_(0.0), sinhTerm_(0.0), normTermExp_(0.0), normTermCosh_(0.0), normTermSinh_(0.0), errTerm_(0.0), effiTerm_(0.0), pdfTerm_(0.0), errHist_(nullptr), pdfHist_(nullptr), effiFun_(nullptr), effiHist_(nullptr), effiPars_(0) { if (nGauss > 0) { frac_.assign(nGauss_-1,nullptr); mean_.assign(nGauss_,nullptr); sigma_.assign(nGauss_,nullptr); meanVals_.assign(nGauss_,0.0); sigmaVals_.assign(nGauss_,0.0); fracVals_.assign(nGauss_,0.0); } } LauDecayTimePdf::~LauDecayTimePdf() { // Destructor delete errHist_; errHist_ = nullptr; delete pdfHist_; pdfHist_ = nullptr; delete effiFun_; effiFun_ = nullptr; delete effiHist_; effiHist_ = nullptr; for( auto& par : effiPars_ ){ delete par; par = nullptr; } effiPars_.clear(); } void LauDecayTimePdf::initialise() { // The parameters are: // - the mean and the sigma (bias and spread in resolution) of the gaussian(s) // - the mean 
lifetime, denoted tau, of the exponential decay // - the frequency of oscillation, denoted Delta m, of the cosine and sine terms // - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms // // The next two arguments specify the range in which the PDF is defined, // and the PDF will be normalised w.r.t. these limits. // // The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig ), the number of gaussians // and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t // First check whether the scale vector is nGauss in size if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<Exit(EXIT_FAILURE); } if (type_ == Hist) { if (this->nParameters() != 0){ std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<Exit(EXIT_FAILURE); } } else { TString meanName("mean_"); TString sigmaName("sigma_"); TString fracName("frac_"); Bool_t foundParams(kTRUE); for (UInt_t i(0); ifindParameter(tempName); foundParams &= (mean_[i] != nullptr); sigma_[i] = this->findParameter(tempName2); foundParams &= (sigma_[i] != nullptr); if (i!=0) { frac_[i-1] = this->findParameter(tempName3); foundParams &= (frac_[i-1] != nullptr); } } if (type_ == Delta) { if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == Exp) { tau_ = this->findParameter("tau"); foundParams &= (tau_ != nullptr); if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == DeltaExp) { tau_ = this->findParameter("tau"); fracPrompt_ = this->findParameter("frac_prompt"); foundParams &= (tau_ != nullptr); foundParams &= (fracPrompt_ != 
nullptr); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == ExpTrig) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); foundParams &= (tau_ != nullptr); foundParams &= (deltaM_ != nullptr); if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<Exit(EXIT_FAILURE); } } else if (type_ == ExpHypTrig) { tau_ = this->findParameter("tau"); deltaM_ = this->findParameter("deltaM"); deltaGamma_ = this->findParameter("deltaGamma"); foundParams &= (tau_ != nullptr); foundParams &= (deltaM_ != nullptr); foundParams &= (deltaGamma_ != nullptr); if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) { std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<Exit(EXIT_FAILURE); } } } // Setup the normalisation caches normTermsExp_.clear(); normTermsExp_.resize(1); normTermsCos_.clear(); normTermsCos_.resize(1); normTermsSin_.clear(); normTermsSin_.resize(1); normTermsCosh_.clear(); normTermsCosh_.resize(1); normTermsSinh_.clear(); normTermsSinh_.resize(1); if ( effMethod_ == EfficiencyMethod::Spline ) { const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 }; if ( not this->doSmearing() ) { expTermIkVals_.clear(); expTermIkVals_.resize(nSplineSegments); trigTermIkVals_.clear(); trigTermIkVals_.resize(nSplineSegments); hypHTermIkVals_.clear(); hypHTermIkVals_.resize(nSplineSegments); hypLTermIkVals_.clear(); hypLTermIkVals_.resize(nSplineSegments); } else { // Set outer vectors to size 1 (will be resized to nEvents in cacheInfo if necessary) meanPowerVals_.clear(); meanPowerVals_.resize(1); meanPowerVals_.front().resize(nGauss_); sigmaPowerVals_.clear(); sigmaPowerVals_.resize(1); sigmaPowerVals_.front().resize(nGauss_); expTermKvecVals_.clear(); expTermKvecVals_.resize(1); 
expTermKvecVals_.front().resize(nGauss_); trigTermKvecVals_.clear(); trigTermKvecVals_.resize(1); trigTermKvecVals_.front().resize(nGauss_); hypHTermKvecVals_.clear(); hypHTermKvecVals_.resize(1); hypHTermKvecVals_.front().resize(nGauss_); hypLTermKvecVals_.clear(); hypLTermKvecVals_.resize(1); hypLTermKvecVals_.front().resize(nGauss_); expTermMvecVals_.clear(); expTermMvecVals_.resize(1); expTermMvecVals_.front().resize(nSplineSegments); for ( auto& innerVec : expTermMvecVals_.front() ) { innerVec.resize(nGauss_); } trigTermMvecVals_.clear(); trigTermMvecVals_.resize(1); trigTermMvecVals_.front().resize(nSplineSegments); for ( auto& innerVec : trigTermMvecVals_.front() ) { innerVec.resize(nGauss_); } hypHTermMvecVals_.clear(); hypHTermMvecVals_.resize(1); hypHTermMvecVals_.front().resize(nSplineSegments); for ( auto& innerVec : hypHTermMvecVals_.front() ) { innerVec.resize(nGauss_); } hypLTermMvecVals_.clear(); hypLTermMvecVals_.resize(1); hypLTermMvecVals_.front().resize(nSplineSegments); for ( auto& innerVec : hypLTermMvecVals_.front() ) { innerVec.resize(nGauss_); } } } // Force calculation of all relevant info by faking that all parameter values have changed nothingFloating_ = nothingChanged_ = kFALSE; anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty(); nonKnotFloating_ = nonKnotChanged_ = kTRUE; physicsParFloating_ = physicsParChanged_ = kTRUE; tauFloating_ = tauChanged_ = ( tau_ != nullptr ); deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr ); deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr ); resoParFloating_ = resoParChanged_ = kTRUE; } Double_t LauDecayTimePdf::effectiveResolution() const { Double_t dilution = 0.; Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue(); // Might be cleaner to just append this to the vector in the init step, // the the consistency can also be checked Double_t fracSum = 0; for (auto f : frac_) fracSum += f->unblindValue(); Double_t lastFrac = 1. 
- fracSum; for (size_t i = 0; i < sigma_.size(); i++) { Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue(); Double_t thisFrac = lastFrac; if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue(); dilution += thisFrac * TMath::Exp(-dMSq * 0.5 * sigSq); } return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue(); } void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData) { // Check that the input data contains the decay time variable Bool_t hasBranch = inputData.haveBranch(this->varName()); if (!hasBranch) { std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<varName()<<"\"."<doSmearing() and scaleWithPerEventError_ }; if ( needPerEventNormTerms ) { hasBranch = inputData.haveBranch(this->varErrName()); if (!hasBranch) { std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<varErrName()<<"\"."<cacheInfo(inputData); } } if (type_ == Hist) { // Pass the data to the decay-time PDF for caching if ( pdfHist_ ) { pdfHist_->cacheInfo(inputData); } // Make cache of effiTerms const UInt_t nEvents = inputData.nEvents(); // Efficiency will always be 1 (assumedly) effiTerms_.assign(nEvents,1.); } else { // Clear the vectors and reserve enough space in the caches of the terms const UInt_t nEvents = inputData.nEvents(); abscissas_.clear(); abscissas_.resize(nEvents); abscissaErrors_.clear(); abscissaErrors_.resize(nEvents); expTerms_.clear(); expTerms_.resize(nEvents); cosTerms_.clear(); cosTerms_.resize(nEvents); sinTerms_.clear(); sinTerms_.resize(nEvents); coshTerms_.clear(); coshTerms_.resize(nEvents); sinhTerms_.clear(); sinhTerms_.resize(nEvents); effiTerms_.clear(); effiTerms_.resize(nEvents); // Also resize the normalisation cache elements if we're doing per-event resolution if ( needPerEventNormTerms ) { normTermsExp_.clear(); normTermsExp_.resize(nEvents); normTermsCos_.clear(); normTermsCos_.resize(nEvents); normTermsSin_.clear(); 
normTermsSin_.resize(nEvents); normTermsCosh_.clear(); normTermsCosh_.resize(nEvents); normTermsSinh_.clear(); normTermsSinh_.resize(nEvents); if ( effMethod_ == EfficiencyMethod::Spline ) { meanPowerVals_.resize(nEvents); for ( auto& innerVec : meanPowerVals_ ) { innerVec.resize(nGauss_); } sigmaPowerVals_.resize(nEvents); for ( auto& innerVec : sigmaPowerVals_ ) { innerVec.resize(nGauss_); } expTermKvecVals_.resize(nEvents); for ( auto& innerVec : expTermKvecVals_ ) { innerVec.resize(nGauss_); } trigTermKvecVals_.resize(nEvents); for ( auto& innerVec : trigTermKvecVals_ ) { innerVec.resize(nGauss_); } hypHTermKvecVals_.resize(nEvents); for ( auto& innerVec : hypHTermKvecVals_ ) { innerVec.resize(nGauss_); } hypLTermKvecVals_.resize(nEvents); for ( auto& innerVec : hypLTermKvecVals_ ) { innerVec.resize(nGauss_); } const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 }; expTermMvecVals_.resize(nEvents); for ( auto& middleVec : expTermMvecVals_) { middleVec.resize(nSplineSegments); for ( auto& innerVec : middleVec ) { innerVec.resize(nGauss_); } } trigTermMvecVals_.resize(nEvents); for ( auto& middleVec : trigTermMvecVals_) { middleVec.resize(nSplineSegments); for ( auto& innerVec : middleVec ) { innerVec.resize(nGauss_); } } hypHTermMvecVals_.resize(nEvents); for ( auto& middleVec : hypHTermMvecVals_) { middleVec.resize(nSplineSegments); for ( auto& innerVec : middleVec ) { innerVec.resize(nGauss_); } } hypLTermMvecVals_.resize(nEvents); for ( auto& middleVec : hypLTermMvecVals_) { middleVec.resize(nSplineSegments); for ( auto& innerVec : middleVec ) { innerVec.resize(nGauss_); } } } } // Determine the abscissa and abscissa error values for each event for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputData.getData(iEvt); const Double_t abscissa { dataValues.at(this->varName()) }; if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay 
time: "<minAbscissa()<<","<maxAbscissa()<<"]."<Exit(EXIT_FAILURE); } abscissas_[iEvt] = abscissa ; const Double_t abscissaErr { needPerEventNormTerms ? dataValues.at(this->varErrName()) : 0.0 }; if ( needPerEventNormTerms and ( abscissaErr > this->maxAbscissaError() or abscissaErr < this->minAbscissaError() ) ) { std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<minAbscissaError()<<","<maxAbscissaError()<<"]."<Exit(EXIT_FAILURE); } abscissaErrors_[iEvt] = abscissaErr; } // Force calculation of all info by faking that all parameter values have changed nothingFloating_ = nothingChanged_ = kFALSE; anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty(); nonKnotFloating_ = nonKnotChanged_ = kTRUE; physicsParFloating_ = physicsParChanged_ = kTRUE; tauFloating_ = tauChanged_ = ( tau_ != nullptr ); deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr ); deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr ); resoParFloating_ = resoParChanged_ = kTRUE; // Fill the rest of the cache this->updateCache(); // Set the various "parameter-is-floating" flags, used to bookkeep the cache in propagateParUpdates LauParamFixed isFixed; nonKnotFloating_ = not std::all_of(params_.begin(), params_.end(), isFixed); anyKnotFloating_ = not std::all_of(effiPars_.begin(), effiPars_.end(), isFixed); nothingFloating_ = not (nonKnotFloating_ or anyKnotFloating_); std::cout << "INFO in LauDecayTimePdf::cacheInfo : nothing floating set to: " << (nothingFloating_ ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : any knot floating set to: " << (anyKnotFloating_ ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : non-knot floating set to: " << (nonKnotFloating_ ? "True" : "False") << std::endl; tauFloating_ = tau_ ? not tau_->fixed() : kFALSE; deltaMFloating_ = deltaM_ ? not deltaM_->fixed() : kFALSE; deltaGammaFloating_ = deltaGamma_ ? 
not deltaGamma_->fixed() : kFALSE; physicsParFloating_ = ( tauFloating_ or deltaMFloating_ or deltaGammaFloating_ ); std::cout << "INFO in LauDecayTimePdf::cacheInfo : tau floating set to: " << (tauFloating_ ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaM floating set to: " << (deltaMFloating_ ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaGamma floating set to: " << (deltaGammaFloating_ ? "True" : "False") << std::endl; resoParFloating_ = kFALSE; for ( UInt_t i{0}; i < nGauss_; ++i ) { const Bool_t meanFloating { not mean_[i]->fixed() }; const Bool_t sigmaFloating { not sigma_[i]->fixed() }; resoParFloating_ |= (meanFloating or sigmaFloating); std::cout << "INFO in LauDecayTimePdf::cacheInfo : mean[" << i << "] floating set to: " << (meanFloating ? "True" : "False") << std::endl; std::cout << "INFO in LauDecayTimePdf::cacheInfo : sigma[" << i << "] floating set to: " << (sigmaFloating ? "True" : "False") << std::endl; if ( i < (nGauss_ - 1) ) { const Bool_t fracFloating { not frac_[i]->fixed() }; resoParFloating_ |= fracFloating; std::cout << "INFO in LauDecayTimePdf::cacheInfo : frac[" << i << "] floating set to: " << (fracFloating ? 
"True" : "False") << std::endl; } } } } void LauDecayTimePdf::updateCache() { // Get the updated values of all parameters static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();}; if ( anyKnotChanged_ ) { std::transform( effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), assignValue ); } if ( tauChanged_ ) { tauVal_ = tau_->unblindValue(); gammaVal_ = 1.0 / tauVal_; } if ( deltaMChanged_ ) { deltaMVal_ = deltaM_->unblindValue(); } if ( deltaGammaChanged_ ) { deltaGammaVal_ = deltaGamma_->unblindValue(); } if ( resoParChanged_ ) { std::transform( mean_.begin(), mean_.end(), meanVals_.begin(), assignValue ); std::transform( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), assignValue ); std::transform( frac_.begin(), frac_.end(), fracVals_.begin()+1, assignValue ); fracVals_[0] = std::accumulate( fracVals_.begin()+1, fracVals_.end(), 1.0, std::minus{} ); } // Calculate the values of all terms for each event // TODO - need to sort out UInt_t vs ULong_t everywhere!! 
const UInt_t nEvents { static_cast(abscissas_.size()) }; // If any of the physics or resolution parameters have changed we need // to update everything, otherwise we only need to recalculate the // efficiency if ( nonKnotChanged_ ) { for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) { const Double_t abscissa { abscissas_[iEvt] }; const Double_t abscissaErr { abscissaErrors_[iEvt] }; this->calcLikelihoodInfo(abscissa, abscissaErr); expTerms_[iEvt] = expTerm_; cosTerms_[iEvt] = cosTerm_; sinTerms_[iEvt] = sinTerm_; coshTerms_[iEvt] = coshTerm_; sinhTerms_[iEvt] = sinhTerm_; effiTerms_[iEvt] = effiTerm_; } } else { for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) { const Double_t abscissa { abscissas_[iEvt] }; effiTerm_ = this->calcEffiTerm( abscissa ); effiTerms_[iEvt] = effiTerm_; } } // Calculate the normalisation terms this->calcNorm(); // reset the "parameter-has-changed" flags anyKnotChanged_ = kFALSE; tauChanged_ = kFALSE; deltaMChanged_ = kFALSE; deltaGammaChanged_ = kFALSE; physicsParChanged_ = kFALSE; resoParChanged_ = kFALSE; nonKnotChanged_ = kFALSE; } void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt) { // Extract all the terms and their normalisations const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ }; const UInt_t normTermElement { needPerEventNormTerms * iEvt }; if (type_ == Hist) { if ( pdfHist_ ) { pdfHist_->calcLikelihoodInfo(iEvt); pdfTerm_ = pdfHist_->getLikelihood(); } else { pdfTerm_ = 1.0; } } else { expTerm_ = expTerms_[iEvt]; cosTerm_ = cosTerms_[iEvt]; sinTerm_ = sinTerms_[iEvt]; coshTerm_ = coshTerms_[iEvt]; sinhTerm_ = sinhTerms_[iEvt]; normTermExp_ = normTermsExp_[normTermElement]; normTermCos_ = normTermsCos_[normTermElement]; normTermSin_ = normTermsSin_[normTermElement]; normTermCosh_ = normTermsCosh_[normTermElement]; normTermSinh_ = normTermsSinh_[normTermElement]; } // Extract the decay time error PDF value if ( needPerEventNormTerms and errHist_ ) { errHist_->calcLikelihoodInfo(iEvt); errTerm_ = 
errHist_->getLikelihood(); } else { errTerm_ = 1.0; } // Extract the decay time efficiency effiTerm_ = effiTerms_[iEvt]; } void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa) { // Check whether any of the gaussians should be scaled - if any of them should we need the per-event error if (this->doSmearing() and scaleWithPerEventError_) { std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<calcLikelihoodInfo(abscissa, 0.0); } Double_t LauDecayTimePdf::calcEffiTerm( const Double_t abscissa ) const { Double_t effiTerm{1.0}; switch( effMethod_ ) { case EfficiencyMethod::Spline : effiTerm = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break; case EfficiencyMethod::Binned : effiTerm = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break; case EfficiencyMethod::Flat : effiTerm = 1.0 ; break; } // TODO print warning messages for these, but not every time if ( effiTerm > 1.0 ) { effiTerm = 1.0; } // else if ( effiTerm < 0.0 ) { effiTerm = 0.0; } else if ( effiTerm <= 0.0 ) { effiTerm = 1e-20; } return effiTerm; } void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr) { // Check that the decay time and the decay time error are in valid ranges if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<minAbscissa()<<","<maxAbscissa()<<"]."<Exit(EXIT_FAILURE); } if ( (this->doSmearing() and scaleWithPerEventError_) && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) { std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<minAbscissaError()<<","<maxAbscissaError()<<"]."<Exit(EXIT_FAILURE); } // Determine the decay time efficiency effiTerm_ = this->calcEffiTerm( abscissa ); // For the histogram PDF just calculate that term and return if 
(type_ == Hist){ if ( pdfHist_ ) { pdfHist_->calcLikelihoodInfo(abscissa); pdfTerm_ = pdfHist_->getLikelihood(); } else { pdfTerm_ = 1.0; } return; } // If we're not using the resolution function, calculate the simple terms and return if (!this->doSmearing()) { this->calcNonSmearedTerms(abscissa); return; } // Get all the up to date parameter values for the resolution function std::vector mean { meanVals_ }; std::vector sigma { sigmaVals_ }; std::vector frac { fracVals_ }; // Scale the gaussian parameters by the per-event error on Delta t (if appropriate) if ( scaleWithPerEventError_ ) { for (UInt_t i{0}; i x(nGauss_); for (UInt_t i{0}; iunblindValue(); } Double_t value(0.0); if (type_ == Delta || type_ == DeltaExp) { // Calculate the gaussian function(s) const Double_t xMax = this->maxAbscissa(); const Double_t xMin = this->minAbscissa(); for (UInt_t i(0); i 1e-10) { Double_t exponent(0.0); Double_t norm(0.0); Double_t scale = LauConstants::root2*sigma[i]; Double_t scale2 = LauConstants::rootPiBy2*sigma[i]; exponent = -x[i]*x[i]; norm = scale2*(TMath::Erf((xMax - mean[i])/scale) - TMath::Erf((xMin - mean[i])/scale)); value += frac[i]*TMath::Exp(exponent)/norm; } } } if (type_ != Delta) { // Reset values of terms expTerm_ = 0.0; cosTerm_ = 0.0; sinTerm_ = 0.0; coshTerm_ = 0.0; sinhTerm_ = 0.0; // Calculate values of the PDF convoluted with each Gaussian for a given value of the abscsissa for (UInt_t i(0); ismearedGeneralTerm( z, x[i] ).real() }; expTerm_ += frac[i] * expTerm; if ( type_ == ExpTrig or type_ == ExpHypTrig ) { const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 }; const std::complex trigTerm { this->smearedGeneralTerm( zTrig, x[i] ) }; const Double_t cosTerm { trigTerm.real() }; const Double_t sinTerm { trigTerm.imag() }; cosTerm_ += frac[i] * cosTerm; sinTerm_ += frac[i] * sinTerm; if ( type_ == ExpTrig ) { coshTerm_ += frac[i] * expTerm; } else { const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * 
sigmaOverRoot2 }; const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2 }; const Double_t termH { this->smearedGeneralTerm( zH, x[i] ).real() }; const Double_t termL { this->smearedGeneralTerm( zL, x[i] ).real() }; const Double_t coshTerm { 0.5 * (termH + termL) }; const Double_t sinhTerm { 0.5 * (termH - termL) }; coshTerm_ += frac[i] * coshTerm; sinhTerm_ += frac[i] * sinhTerm; } } } if (type_ == DeltaExp) { value *= fracPrompt; value += (1.0-fracPrompt)*expTerm_; } else { value = expTerm_; } } // Calculate the decay time error PDF value if ( scaleWithPerEventError_ and errHist_ ) { const std::vector absErrVec {abscissaErr}; errHist_->calcLikelihoodInfo(absErrVec); errTerm_ = errHist_->getLikelihood(); } else { errTerm_ = 1.0; } } void LauDecayTimePdf::calcNonSmearedTerms(const Double_t abscissa) { // Reset values of terms errTerm_ = 1.0; expTerm_ = 0.0; cosTerm_ = 0.0; sinTerm_ = 0.0; coshTerm_ = 0.0; sinhTerm_ = 0.0; if ( type_ == Hist || type_ == Delta ){ return; } if (method_ == DecayTime) { expTerm_ = TMath::Exp(-abscissa*gammaVal_); } else if (method_ == DecayTimeDiff) { expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gammaVal_); } // Calculate also the terms related to cosine and sine if (type_ == ExpTrig) { coshTerm_ = expTerm_; sinhTerm_ = 0.0; cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_; sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_; } // Calculate also the terms related to cosh, sinh, cosine, and sine else if (type_ == ExpHypTrig) { coshTerm_ = TMath::CosH(0.5*deltaGammaVal_*abscissa)*expTerm_; sinhTerm_ = TMath::SinH(0.5*deltaGammaVal_*abscissa)*expTerm_; cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_; sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_; } } std::complex LauDecayTimePdf::smearedGeneralTerm( const std::complex& z, const Double_t x ) { using namespace std::complex_literals; const std::complex arg1 { 1i * (z - x) }; const std::complex arg2 { -(x*x) - (arg1*arg1) }; const std::complex conv { ( 
arg1.imag() < -5.0 ) ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * std::exp(-(x*x)) * RooMath::faddeeva(arg1) }; return conv; } std::pair LauDecayTimePdf::nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs) { // From 1407.0748, not clear whether complex is faster in this case const LauComplex denom { gammaVal_, -deltaMVal_ }; const LauComplex exponent { -gammaVal_, deltaMVal_ }; const LauComplex num0 { -exponent.scale(minAbs).exp() }; const LauComplex num1 { -exponent.scale(maxAbs).exp() }; const LauComplex integral { (num1 - num0) / denom }; return {integral.re(), integral.im()}; } Double_t LauDecayTimePdf::nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs) { return tauVal_ * ( TMath::Exp(-minAbs*gammaVal_) - TMath::Exp(-maxAbs*gammaVal_) ); } std::pair LauDecayTimePdf::nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs) { // Use exponential formualtion rather than cosh, sinh. // Fewer terms (reused for each), but not guaranteed to be faster. 
const Double_t gammaH { gammaVal_ - 0.5 * deltaGammaVal_ }; const Double_t gammaL { gammaVal_ + 0.5 * deltaGammaVal_ }; const Double_t tauH { 1.0 / gammaH }; const Double_t tauL { 1.0 / gammaL }; const Double_t nL1 { -TMath::Exp(-gammaL * maxAbs) * tauL }; const Double_t nH1 { -TMath::Exp(-gammaH * maxAbs) * tauH }; const Double_t nL0 { -TMath::Exp(-gammaL * minAbs) * tauL }; const Double_t nH0 { -TMath::Exp(-gammaH * minAbs) * tauH }; const Double_t coshIntegral { 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ) }; const Double_t sinhIntegral { 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ) }; return {coshIntegral, sinhIntegral}; } std::complex LauDecayTimePdf::smearedGeneralIntegral(const std::complex& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu) { using namespace std::complex_literals; const Double_t x1 { (maxAbs - mu) / (2.0 * sigmaOverRoot2) }; const Double_t x0 { (minAbs - mu) / (2.0 * sigmaOverRoot2) }; const std::complex arg1 { 1i * (z - x1) }; const std::complex arg0 { 1i * (z - x0) }; std::complex integral = 0.0 + 0i; if(arg1.imag() < -5.0) {integral = RooMath::erf(x1) - std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);} else {integral = RooMath::erf(x1) - TMath::Exp(-(x1*x1)) * RooMath::faddeeva(arg1);} if(arg0.imag() < -5.0) {integral -= RooMath::erf(x0) - std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);} else {integral -= RooMath::erf(x0) - TMath::Exp(-(x0*x0)) * RooMath::faddeeva(arg0);} integral *= (sigmaOverRoot2 / (2.0 * z)); return integral; } void LauDecayTimePdf::calcNorm() { // If we're not doing per-event scaling then we only need to calculate things once // TODO - need to sort out UInt_t vs ULong_t everywhere!! const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ }; const UInt_t nEvents { needPerEventNormTerms ? 
static_cast(abscissaErrors_.size()) : 1 }; // Get all the up to date parameter values std::vector means { meanVals_ }; std::vector sigmas { sigmaVals_ }; std::vector fracs { fracVals_ }; for ( UInt_t iEvt{0}; iEvt < nEvents; ++iEvt ) { // first reset integrals to zero normTermExp_ = 0.0; normTermCos_ = 0.0; normTermSin_ = 0.0; normTermCosh_ = 0.0; normTermSinh_ = 0.0; // Scale the gaussian parameters by the per-event error on decay time (if appropriate) if ( needPerEventNormTerms ) { const Double_t abscissaErr { abscissaErrors_[iEvt] }; for (UInt_t i{0}; i doSmearing() ) {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , uniformEffVal, means, sigmas, fracs );} else {this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, uniformEffVal );} break; } case EfficiencyMethod::Binned : { // Efficiency varies as piecewise constant // Total integral is sum of integrals in each bin, each weighted by efficiency in that bin const Int_t nBins { effiHist_->GetNbinsX() }; for ( Int_t bin{1}; bin <= nBins; ++bin ) { const Double_t loEdge {effiHist_->GetBinLowEdge(bin)}; const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)}; const Double_t effVal {effiHist_->GetBinContent(bin)}; if ( this -> doSmearing() ) {this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );} else {this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );} } break; } case EfficiencyMethod::Spline : { // Efficiency varies as piecewise polynomial // Use methods from https://arxiv.org/abs/1407.0748 section 4 to calculate const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 }; if ( this->doSmearing() ) { if ( nonKnotChanged_ ) { if ( resoParChanged_ ) { this->calcMeanAndSigmaPowers( iEvt, means, sigmas ); } this->calcKVectors( iEvt ); } for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg) { this->calcSmearedSplinePartialIntegrals( iEvt, iSeg, means, sigmas, fracs ); } } else { for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg) { 
this->calcNonSmearedSplinePartialIntegrals( iSeg ); } } break; } } normTermsExp_[iEvt] = normTermExp_; normTermsCos_[iEvt] = normTermCos_; normTermsSin_[iEvt] = normTermSin_; normTermsCosh_[iEvt] = normTermCosh_; normTermsSinh_[iEvt] = normTermSinh_; } } // TODO - Mildly concerned this is void rather than returning the integrals // (but this would require refactoring for different return values). // As long as it doesn't get called outside of calcNorm() it should be fine - DPO // (TL: comment applies to all calc*PartialIntegrals functions.) void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight) { /* TODO - need to implement something for DecayTimeDiff everywhere if (method_ == DecayTimeDiff) { // TODO - there should be some TMath::Abs here surely? normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gammaVal_) - TMath::Exp(-minAbs*gammaVal_)); } */ const Double_t normTermExp { weight * this->nonSmearedExpIntegral(minAbs, maxAbs) }; normTermExp_ += normTermExp; if ( type_ == ExpTrig or type_ == ExpHypTrig ) { auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs); normTermCos_ += weight * cosIntegral; normTermSin_ += weight * sinIntegral; if ( type_ == ExpTrig ) { normTermCosh_ += normTermExp; } else { auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs); normTermCosh_ += weight * coshIntegral; normTermSinh_ += weight * sinhIntegral; } } } void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector& means, const std::vector& sigmas, const std::vector& fractions) { for (UInt_t i(0); ismearedGeneralIntegral( z, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; const Double_t normTermExp { weight * integral.real() }; normTermExp_ += fractions[i] * normTermExp; if ( type_ == ExpTrig or type_ == ExpHypTrig ) { const std::complex zTrig { gammaVal_ * sigmaOverRoot2, 
-deltaMVal_ * sigmaOverRoot2 }; const std::complex integralTrig { this->smearedGeneralIntegral( zTrig, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; const Double_t cosIntegral { integralTrig.real() }; const Double_t sinIntegral { integralTrig.imag() }; normTermCos_ += fractions[i] * weight * cosIntegral; normTermSin_ += fractions[i] * weight * sinIntegral; if ( type_ == ExpTrig ) { normTermCosh_ += fractions[i] * normTermExp; } else { // Heavy (H) eigenstate case const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 }; const std::complex integralH { this->smearedGeneralIntegral( zH, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; // Light (L) eigenstate case const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };; const std::complex integralL { this->smearedGeneralIntegral( zL, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; const std::complex coshIntegral { 0.5 * (integralH + integralL) }; const std::complex sinhIntegral { 0.5 * (integralH - integralL) }; normTermCosh_ += fractions[i] * weight * coshIntegral.real(); normTermSinh_ += fractions[i] * weight * sinhIntegral.real(); } } } } void LauDecayTimePdf::calcMeanAndSigmaPowers( const UInt_t iEvt, const std::vector& means, const std::vector& sigmas ) { // Calculate powers of mu and sigma/sqrt(2) needed by all terms in the smeared spline normalisation for (UInt_t i(0); i z; for (UInt_t i(0); igenerateKvector(z); if ( type_ == ExpTrig || type_ == ExpHypTrig ) { z.real( gammaVal_ * sigmaOverRoot2 ); z.imag( -deltaMVal_ * sigmaOverRoot2 ); trigTermKvecVals_[iEvt][i] = this->generateKvector(z); if ( type_ == ExpHypTrig ) { z.real( ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );; z.imag( 0.0 ); hypHTermKvecVals_[iEvt][i] = this->generateKvector(z); z.real( ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 ); z.imag( 0.0 ); hypLTermKvecVals_[iEvt][i] = this->generateKvector(z); } } } } void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t 
iEvt, const UInt_t splineIndex, const std::vector& means, const std::vector& sigmas, const std::vector& fractions) { using namespace std::complex_literals; const std::vector& xVals { effiFun_->getXValues() }; const Double_t minAbs = xVals[splineIndex]; const Double_t maxAbs = xVals[splineIndex+1]; const std::array coeffs { effiFun_->getCoefficients(splineIndex) }; std::complex z; for (UInt_t i(0); igenerateMvector(minAbs, maxAbs, z, sigmas[i], means[i]); } const Double_t normTermExp { this->smearedSplineNormalise(coeffs, expTermKvecVals_[iEvt][i], expTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() }; normTermExp_ += fractions[i] * normTermExp; if ( type_ == ExpTrig or type_ == ExpHypTrig ) { z.real( gammaVal_ * sigmaOverRoot2 ); z.imag( -deltaMVal_ * sigmaOverRoot2 ); if ( nonKnotChanged_ ) { trigTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]); } std::complex integral { this->smearedSplineNormalise(coeffs, trigTermKvecVals_[iEvt][i], trigTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]) }; const Double_t cosIntegral { integral.real() }; const Double_t sinIntegral { integral.imag() }; normTermCos_ += fractions[i] * cosIntegral; normTermSin_ += fractions[i] * sinIntegral; if ( type_ == ExpTrig ) { normTermCosh_ += fractions[i] * normTermExp; } else { const std::complex zH { ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 }; const std::complex zL { ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 }; if ( nonKnotChanged_ ) { hypHTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zH, sigmas[i], means[i]); hypLTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zL, sigmas[i], means[i]); } const Double_t integralH { this->smearedSplineNormalise(coeffs, hypHTermKvecVals_[iEvt][i], hypHTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() 
}; const Double_t integralL { this->smearedSplineNormalise(coeffs, hypLTermKvecVals_[iEvt][i], hypLTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() }; const Double_t coshIntegral { 0.5 * (integralH + integralL) }; const Double_t sinhIntegral { 0.5 * (integralH - integralL) }; normTermCosh_ += fractions[i] * coshIntegral; normTermSinh_ += fractions[i] * sinhIntegral; } } } } std::array,4> LauDecayTimePdf::generateKvector(const std::complex& z) { const std::complex zr { 1.0/z }; const std::complex zr2 { zr*zr }; std::array,4> K { 0.5*zr, 0.5*zr2, zr*(1.0+zr2), 3.0*zr2*(1.0+zr2) }; return K; } std::array,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex& z, const Double_t sigma, const Double_t mean) { using namespace std::complex_literals; std::array,4> M0 {0.,0.,0.,0.}; std::array,4> M1 {0.,0.,0.,0.}; std::array,4> M {0.,0.,0.,0.}; const Double_t x1 { (maxAbs - mean) / (LauConstants::root2 * sigma) }; const Double_t x0 { (minAbs - mean) / (LauConstants::root2 * sigma) }; //Values used a lot const Double_t ex2_1 { TMath::Exp(-(x1*x1)) }; const Double_t ex2_0 { TMath::Exp(-(x0*x0)) }; const Double_t sqrtPir { 1.0 / LauConstants::rootPi }; const std::complex arg1 { 1i * (z - x1) }; const std::complex arg0 { 1i * (z - x0) }; //fad = the faddeeva term times the ex2 value (done in different ways depending on the domain) std::complex fad1; std::complex fad0; if(arg1.imag() < -5.0) {fad1 = std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);} else {fad1 = ex2_1*RooMath::faddeeva(arg1);} if(arg0.imag() < -5.0) {fad0 = std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);} else {fad0 = ex2_0*RooMath::faddeeva(arg0);} //doing the actual functions for x1 M1[0] = RooMath::erf(x1) - fad1; M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1); M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1); M1[3] = 4. 
* (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1); //doing them again for x0 M0[0] = RooMath::erf(x0) - fad0; M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0); M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0); M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0); for(Int_t i = 0; i < 4; ++i){M[i] = M1[i] - M0[i];} return M; } std::complex LauDecayTimePdf::smearedSplineNormalise(const std::array& coeffs, const std::array,4>& K, const std::array,4>& M, const std::array& sigmaPowers, const std::array& meanPowers) const { using namespace std::complex_literals; //Triple sum to get N (eqn 31 and 29 in https://arxiv.org/pdf/1407.0748.pdf) std::complex N = 0. + 0i; for(Int_t k = 0; k < 4; ++k){ for(Int_t n = 0; n <=k; ++n){ for(Int_t i = 0; i <=n; ++i){ //The binomial coefficient terms const Double_t b { binom_[n][i]*binom_[k][n] }; N += sigmaPowers[n]*coeffs[k]*meanPowers[k-n]*K[i]*M[n-i]*b; }}} return N; } void LauDecayTimePdf::calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex) { using namespace std::complex_literals; const std::complex u { gammaVal_ }; const Double_t normTermExp { this->nonSmearedSplineNormalise(splineIndex, u, expTermIkVals_).real() }; normTermExp_ += normTermExp; if ( type_ == ExpTrig or type_ == ExpHypTrig ) { const std::complex uTrig { gammaVal_, -deltaMVal_ }; const std::complex integral { this->nonSmearedSplineNormalise(splineIndex, uTrig, trigTermIkVals_) }; const Double_t cosIntegral { integral.real() }; const Double_t sinIntegral { integral.imag() }; normTermCos_ += cosIntegral; normTermSin_ += sinIntegral; if ( type_ == ExpTrig ) { normTermCosh_ += normTermExp; } else { const std::complex uH { gammaVal_ - 0.5 * deltaGammaVal_ }; const std::complex uL { gammaVal_ + 0.5 * deltaGammaVal_ }; const Double_t integralH { this->nonSmearedSplineNormalise(splineIndex, uH, hypHTermIkVals_).real() }; const Double_t integralL { this->nonSmearedSplineNormalise(splineIndex, uL, hypLTermIkVals_).real() }; const Double_t 
coshIntegral { 0.5 * (integralH + integralL) }; const Double_t sinhIntegral { 0.5 * (integralH - integralL) }; normTermCosh_ += coshIntegral; normTermSinh_ += sinhIntegral; } } } std::complex LauDecayTimePdf::calcIk( const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex& u ) { //Taking mu = 0, this does not have to be the case in general auto G = [&u](const Int_t n){return -TMath::Factorial(n)/std::pow(u,n+1);};//power of n+1 used rather than n, this is due to maths error in the paper auto H = [&u](const Int_t n, const Double_t t){return std::pow(t,n)*std::exp(-u*t);}; std::complex ans { 0.0, 0.0 }; for (UInt_t j = 0; j <= k; ++j) {ans += binom_[k][j]*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );} return ans; } std::complex LauDecayTimePdf::nonSmearedSplineNormalise( const UInt_t splineIndex, const std::complex& u, std::vector,4>>& cache ) { // u = Gamma - iDeltam in general using namespace std::complex_literals; const std::vector& xVals = effiFun_ -> getXValues(); const Double_t minAbs = xVals[splineIndex]; const Double_t maxAbs = xVals[splineIndex+1]; std::array coeffs = effiFun_->getCoefficients(splineIndex); //sum to get N (eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 with the corrected maths error) std::complex N = 0. 
+ 0i; if ( nonKnotChanged_ ) { for(UInt_t i = 0; i < 4; ++i) { cache[splineIndex][i] = calcIk(i, minAbs, maxAbs, u); N += cache[splineIndex][i] * coeffs[i]; } } else { for(UInt_t i = 0; i < 4; ++i) { N += cache[splineIndex][i] * coeffs[i]; } } return N; } Double_t LauDecayTimePdf::generateError(Bool_t forceNew) { if (errHist_ && (forceNew || !abscissaErrorGenerated_)) { LauFitData errData = errHist_->generate(nullptr); abscissaError_ = errData.at(this->varErrName()); abscissaErrorGenerated_ = kTRUE; } else { while (forceNew || !abscissaErrorGenerated_) { abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_); if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) { abscissaErrorGenerated_ = kTRUE; forceNew = kFALSE; } } } return abscissaError_; } Double_t LauDecayTimePdf::generate(const LauKinematics* kinematics) { + // TODO, we should also be able to generate if we're Delta, Exp, or DeltaExp if( type_ != FuncType::Hist ) { std::cerr << "ERROR in LauDecayTimePdf::generate : Right now we can only produce if the type == Hist. Returning 0.;" << std::endl; // std::cout << "type_ = " << type_ << std::endl; return 0.; } // generateError SHOULD have been called before this // function but will call it here just to make sure // (has ns effect if has already been called) abscissaError_ = this->generateError(); LauFitData genAbscissa; if( pdfHist_ ){ genAbscissa = pdfHist_ -> generate( kinematics ); } else { std::cerr << "FATAL in LauDecayTimePdf : no pdfHist defined for a Hist style Decay Time Pdf!" 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } // mark that we need a new error to be generated next time abscissaErrorGenerated_ = kFALSE; return genAbscissa.at(this->varName()); } /* LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics) { // generateError SHOULD have been called before this // function but will call it here just to make sure // (has ns effect if has already been called) abscissaError_ = this->generateError(); // If the PDF is scaled by the per-event error then need to update the PDF height for each event Bool_t scale(kFALSE); for (std::vector::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) { scale |= (*iter); } for (std::vector::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) { scale |= (*iter); } if (scale || (!this->heightUpToDate() && !this->cachePDF())) { this->calcPDFHeight(kinematics); this->heightUpToDate(kTRUE); } // Generate the value of the abscissa. Bool_t gotAbscissa(kFALSE); Double_t genVal(0.0); Double_t genPDFVal(0.0); LauFitData genAbscissa; const Double_t xMin = this->minAbscissa(); const Double_t xMax = this->maxAbscissa(); const Double_t xRange = xMax - xMin; while (!gotAbscissa) { genVal = LauRandom::randomFun()->Rndm()*xRange + xMin; this->calcLikelihoodInfo(genVal, abscissaError_); genPDFVal = this->getUnNormLikelihood(); if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;} if (genPDFVal > this->getMaxHeight()) { std::cerr<<"Warning in LauDecayTimePdf::generate()." 
<<" genPDFVal = "<getMaxHeight()<<" for the abscissa = "<varName()] = genVal; // mark that we need a new error to be generated next time abscissaErrorGenerated_ = kFALSE; return genAbscissa; } */ void LauDecayTimePdf::setErrorHisto(const TH1* hist) { if ( errHist_ != nullptr ) { std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError()); } void LauDecayTimePdf::setHistoPdf(const TH1* hist) { if ( pdfHist_ != nullptr ) { std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<varName(), hist, this->minAbscissa(), this->maxAbscissa()); } void LauDecayTimePdf::setEffiHist(const TH1* hist) { if ( effiHist_ != nullptr ) { std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl; return; } if ( hist == nullptr ) { std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl; return; } // Check boundaries of histogram align with our abscissa's range const Double_t axisMin {hist->GetXaxis()->GetXmin()}; const Double_t axisMax {hist->GetXaxis()->GetXmax()}; if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) { std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n" << " : histogram range: " << axisMin << " - " << axisMax << "\n" << " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n" << " : Disregarding this histogram." << std::endl; return; } effiHist_ = dynamic_cast( hist->Clone() ); //Normalise the hist if the (relative) efficiencies have very large values if(effiHist_ -> GetMaximum() > 1.) { effiHist_ -> Scale( 1. 
/ effiHist_->Integral() ); //Normalise std::cout << "INFO in LauDecayTimePdf::setEffiHist : Supplied histogram for Decay Time Acceptance has values too large: normalising..." << std::endl; } } void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline) { if ( effiFun_ != nullptr ) { std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."< effis = effiFun_->getYValues(); effiPars_.resize( effis.size() ); effiParVals_.resize( effis.size() ); size_t index = 0; for( Double_t& effi : effis ) { effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data() ,index ), effi, 0.0, 1.0, kTRUE ); ++index; } } LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) { for ( std::vector::iterator iter = params_.begin(); iter != params_.end(); ++iter ) { if ((*iter)->name().Contains(parName)) { return (*iter); } } std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl; return nullptr; } const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const { for ( std::vector::const_iterator iter = params_.begin(); iter != params_.end(); ++iter ) { if ((*iter)->name().Contains(parName)) { return (*iter); } } std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." 
<< std::endl; return nullptr; } void LauDecayTimePdf::updatePulls() { for ( std::vector::iterator iter = params_.begin(); iter != params_.end(); ++iter ) { std::vector params = (*iter)->getPars(); for (std::vector::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) { if (!(*iter)->fixed()) { (*params_iter)->updatePull(); } } } } void LauDecayTimePdf::propagateParUpdates() { // If none of the parameters are floating there's nothing to do if ( nothingFloating_ ) { return; } // Otherwise, determine which of the floating parameters have changed (if any) and act accordingly static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;}; anyKnotChanged_ = anyKnotFloating_ and not std::equal(effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), checkEquality); // Update the acceptance spline if any of the knot values have changed if ( anyKnotChanged_ ) { effiFun_->updateYValues( effiPars_ ); } // Check also the physics and resolution parameters if ( nonKnotFloating_ ) { if ( physicsParFloating_ ) { tauChanged_ = tauFloating_ and not checkEquality(tau_, tauVal_); deltaMChanged_ = deltaMFloating_ and not checkEquality(deltaM_, deltaMVal_); deltaGammaChanged_ = deltaGammaFloating_ and not checkEquality(deltaGamma_, deltaGammaVal_); physicsParChanged_ = tauChanged_ || deltaMChanged_ || deltaGammaChanged_; } if ( resoParFloating_ ) { resoParChanged_ = kFALSE; resoParChanged_ |= not std::equal( mean_.begin(), mean_.end(), meanVals_.begin(), checkEquality ); resoParChanged_ |= not std::equal( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), checkEquality ); resoParChanged_ |= not std::equal( frac_.begin(), frac_.end(), fracVals_.begin()+1, checkEquality ); } nonKnotChanged_ = physicsParChanged_ or resoParChanged_; } // If nothing has changed, there's nothing to do if ( not ( anyKnotChanged_ or nonKnotChanged_ ) ) { return; } // Otherwise we need to update the cache 
this->updateCache(); } /* void LauDecayTimePdf::updateEffiSpline(const std::vector& effiPars) { if (effiPars.size() != effiFun_->getnKnots()){ std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<Exit(EXIT_FAILURE); } effiFun_->updateYValues(effiPars); } */ diff --git a/src/LauFlavTag.cc b/src/LauFlavTag.cc index a8166bd..26bd7cd 100644 --- a/src/LauFlavTag.cc +++ b/src/LauFlavTag.cc @@ -1,1154 +1,1154 @@ /* Copyright 2017 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauFlavTag.cc \brief File containing implementation of LauFlavTag class. 
*/ #include #include #include #include "TMath.h" #include "TString.h" #include "TSystem.h" #include "Lau1DHistPdf.hh" #include "LauAbsPdf.hh" #include "LauFlavTag.hh" #include "LauRandom.hh" ClassImp(LauFlavTag) LauFlavTag::LauFlavTag(const Bool_t useAveDelta, const Bool_t useEtaPrime, const std::map bkgndInfo) : useAveDelta_(useAveDelta), useEtaPrime_(useEtaPrime) { // Put map values into vectors for (const auto &bkgnd : bkgndInfo){ bkgndNames_.push_back(bkgnd.first); bkgndTypes_.push_back(bkgnd.second); std::cout<< "INFO in LauFlavTag::LauFlavTag : adding background " << bkgnd.first << " of type " << bkgnd.second < 0){ if (!useAveDelta_){ tagEffBkgnd_B0_.clear(); tagEffBkgnd_B0_.resize(nBkgnds); tagEffBkgnd_B0bar_.clear(); tagEffBkgnd_B0bar_.resize(nBkgnds); tagEffBkgnd_hist_B0_.clear(); tagEffBkgnd_hist_B0_.resize(nBkgnds); tagEffBkgnd_hist_B0bar_.clear(); tagEffBkgnd_hist_B0bar_.resize(nBkgnds); } else { tagEffBkgnd_ave_.clear(); tagEffBkgnd_ave_.resize(nBkgnds); tagEffBkgnd_delta_.clear(); tagEffBkgnd_delta_.resize(nBkgnds); tagEffBkgnd_hist_ave_.clear(); tagEffBkgnd_hist_ave_.resize(nBkgnds); tagEffBkgnd_hist_delta_.clear(); tagEffBkgnd_hist_delta_.resize(nBkgnds); } etaBkgndPdfs_.clear(); etaBkgndPdfs_.resize(nBkgnds); avgMistagBkgnd_.clear(); avgMistagBkgnd_.resize(nBkgnds); } } void LauFlavTag::addTagger(const TString& name, const TString& tagVarName, const TString& mistagVarName, LauAbsPdf* etapdf, const std::pair tagEff, const std::pair calib_p0, const std::pair calib_p1) { // Check that we don't already have a tagger with the same name if ( taggerPosition_.find(name) != taggerPosition_.end() ) { std::cerr << "ERROR in LauFlavTag::addTagger : tagger called " << name << " already added" << std::endl; gSystem->Exit(EXIT_FAILURE); } - + // Check that the PDF pointer is valid if ( not etapdf ) { std::cerr << "ERROR in LauFlavTag::addTagger : Eta PDF pointer is NULL" << std::endl; gSystem->Exit(EXIT_FAILURE); } // Find how many taggers have already been added 
const ULong_t position { tagVarNames_.size() }; // Update map to relate tagger name and position in the vectors taggerPosition_[name] = position; // Extend vectors this->extendVectors(tagVarName, mistagVarName); etaPdfs_.push_back(etapdf); Lau1DHistPdf* etahistpdf = dynamic_cast(etapdf); if (etahistpdf){ perEvtAvgMistag_.push_back(etahistpdf->getMean()); } else { std::cerr << "WARNING in LauFlavTag::addTagger : Couldn't determine average eta value from PDF. Setting it to 0.4." << std::endl; perEvtAvgMistag_.push_back(0.4); } //Calib parameters this->setupCalibParams(name,position,calib_p0,calib_p1); //Tagging efficiencies if (!useAveDelta_){ TString tagEff_b0Name("tagEff_b0_"+name); TString tagEff_b0barName("tagEff_b0bar_"+name); LauParameter* tageffb0 = new LauParameter(tagEff_b0Name,tagEff.first,0.0,1.0,kTRUE); tagEff_B0_.push_back(tageffb0); tagEff_B0_[position]->initValue(tagEff.first); tagEff_B0_[position]->genValue(tagEff.first); tagEff_B0_[position]->fixed(kTRUE); if (tagEff.second==-1.0){ tagEff_B0bar_.push_back(tagEff_B0_[position]->createClone(tagEff_b0barName)); } else { LauParameter* tageffb0bar = new LauParameter(tagEff_b0barName,tagEff.second,0.0,1.0,kTRUE); tagEff_B0bar_.push_back(tageffb0bar); tagEff_B0bar_[position]->initValue(tagEff.second); tagEff_B0bar_[position]->genValue(tagEff.second); tagEff_B0bar_[position]->fixed(kTRUE); } } else { //Use average and delta variables TString tagEff_aveName("tagEff_ave_"+name); TString tagEff_deltaName("tagEff_delta_"+name); LauParameter* tageffave = new LauParameter(tagEff_aveName,tagEff.first,0.0,1.0,kTRUE); tagEff_ave_.push_back(tageffave); tagEff_ave_[position]->initValue(tagEff.first); tagEff_ave_[position]->genValue(tagEff.first); tagEff_ave_[position]->fixed(kTRUE); LauParameter* tageffdelta = new LauParameter(tagEff_deltaName,tagEff.second,-1.0,1.0,kTRUE); tagEff_delta_.push_back(tageffdelta); tagEff_delta_[position]->initValue(tagEff.second); tagEff_delta_[position]->genValue(tagEff.second); 
tagEff_delta_[position]->fixed(kTRUE); } std::cout<<"INFO in LauFlavTag::addTagger : Added tagger with name "<< name << std::endl; } void LauFlavTag::addTagger(const TString& name, const TString& tagVarName, const TString& mistagVarName, LauAbsPdf* etapdf, const std::pair tagEff, const std::pair calib_p0, const std::pair calib_p1) { // Check that we don't already have a tagger with the same name if ( taggerPosition_.find(name) != taggerPosition_.end() ) { std::cerr << "ERROR in LauFlavTag::addTagger : tagger called " << name << " already added" << std::endl; gSystem->Exit(EXIT_FAILURE); } // Check that the PDF pointer is valid if ( not etapdf ) { std::cerr << "ERROR in LauFlavTag::addTagger : Eta PDF pointer is NULL" << std::endl; gSystem->Exit(EXIT_FAILURE); } // Find how many taggers have already been added const ULong_t position { tagVarNames_.size() }; // Update map to relate tagger name and position in the vectors taggerPosition_[name] = position; // Extend vectors this->extendVectors(tagVarName, mistagVarName); etaPdfs_.push_back(etapdf); Lau1DHistPdf* etahistpdf = dynamic_cast(etapdf); if (etahistpdf){ perEvtAvgMistag_.push_back(etahistpdf->getMean()); } else { std::cerr << "WARNING in LauFlavTag::addTagger : Couldn't determine average eta value from PDF. Setting it to 0.4." 
<< std::endl; perEvtAvgMistag_.push_back(0.4); } //Calib parameters this->setupCalibParams(name,position,calib_p0,calib_p1); //Tagging efficiencies if (!useAveDelta_){ tagEff_hist_B0_.push_back(tagEff.first); tagEff_B0_.push_back(nullptr); if (tagEff.second==nullptr){ tagEff_hist_B0bar_.push_back(tagEff.first); tagEff_B0bar_.push_back(nullptr); } else { tagEff_hist_B0bar_.push_back(tagEff.second); tagEff_B0bar_.push_back(nullptr); } } else { //Use average and delta variables tagEff_hist_ave_.push_back(tagEff.first); tagEff_hist_delta_.push_back(tagEff.second); tagEff_ave_.push_back(nullptr); tagEff_delta_.push_back(nullptr); } std::cout<<"INFO in LauFlavTag::addTagger : Added tagger with name "<< name << std::endl; } void LauFlavTag::extendVectors(const TString& tagVarName, const TString& mistagVarName){ tagVarNames_.push_back(tagVarName); mistagVarNames_.push_back(mistagVarName); curEvtTagFlv_.push_back(Flavour::Unknown); curEvtMistag_.push_back(Flavour::Unknown); if (bkgndNames_.size()>0){ if (!useAveDelta_){ //Loop over the outer vector and extend the inner vectors for (auto& innerVec : tagEffBkgnd_B0_){ innerVec.push_back(nullptr); } for (auto& innerVec : tagEffBkgnd_hist_B0_){ innerVec.push_back(nullptr); } for (auto& innerVec : tagEffBkgnd_B0bar_){ innerVec.push_back(nullptr); } for (auto& innerVec : tagEffBkgnd_hist_B0bar_){ innerVec.push_back(nullptr); } } else { //Loop over the outer vector and extend the inner vectors for (auto& innerVec : tagEffBkgnd_ave_){ innerVec.push_back(nullptr); } for (auto& innerVec : tagEffBkgnd_hist_ave_){ innerVec.push_back(nullptr); } for (auto& innerVec : tagEffBkgnd_delta_){ innerVec.push_back(nullptr); } for (auto& innerVec : tagEffBkgnd_hist_delta_){ innerVec.push_back(nullptr); } } for (auto& innerVec : etaBkgndPdfs_){ innerVec.push_back(nullptr); } for (auto& innerVec : avgMistagBkgnd_){ innerVec.push_back(0); } } } void LauFlavTag::setupCalibParams(const TString& name, const ULong_t position, const std::pair calib_p0, 
const std::pair calib_p1) { if (!useAveDelta_){ TString calib_p0_b0Name("calib_p0_b0_"+name); TString calib_p0_b0barName("calib_p0_b0bar_"+name); TString calib_p1_b0Name("calib_p1_b0_"+name); TString calib_p1_b0barName("calib_p1_b0bar_"+name); LauParameter* calibp0b0 = new LauParameter(calib_p0_b0Name,calib_p0.first,-10.0,10.0,kTRUE); calib_p0_B0_.push_back(calibp0b0); calib_p0_B0_[position]->initValue(calib_p0.first); calib_p0_B0_[position]->genValue(calib_p0.first); calib_p0_B0_[position]->fixed(kTRUE); LauParameter* calibp1b0 = new LauParameter(calib_p1_b0Name,calib_p1.first,0.0,1.5,kTRUE); calib_p1_B0_.push_back(calibp1b0); calib_p1_B0_[position]->initValue(calib_p1.first); calib_p1_B0_[position]->genValue(calib_p1.first); calib_p1_B0_[position]->fixed(kTRUE); if (calib_p0.second==-1.0 && calib_p1.second==-1.0){ calib_p0_B0bar_.push_back(calib_p0_B0_[position]->createClone(calib_p0_b0barName)); calib_p1_B0bar_.push_back(calib_p1_B0_[position]->createClone(calib_p1_b0barName)); } else { LauParameter* calibp0b0bar = new LauParameter(calib_p0_b0barName,calib_p0.second,-10.0,10.0,kTRUE); calib_p0_B0bar_.push_back(calibp0b0bar); calib_p0_B0bar_[position]->initValue(calib_p0.second); calib_p0_B0bar_[position]->genValue(calib_p0.second); calib_p0_B0bar_[position]->fixed(kTRUE); LauParameter* calibp1b0bar = new LauParameter(calib_p1_b0barName,calib_p1.second,0.0,1.5,kTRUE); calib_p1_B0bar_.push_back(calibp1b0bar); calib_p1_B0bar_[position]->initValue(calib_p1.second); calib_p1_B0bar_[position]->genValue(calib_p1.second); calib_p1_B0bar_[position]->fixed(kTRUE); } } else { //Use average and delta variables TString calib_p0_aveName("calib_p0_ave_"+name); TString calib_p0_deltaName("calib_p0_delta_"+name); TString calib_p1_aveName("calib_p1_ave_"+name); TString calib_p1_deltaName("calib_p1_delta_"+name); LauParameter* calibp0ave = new LauParameter(calib_p0_aveName,calib_p0.first,-10.0,10.0,kTRUE); calib_p0_ave_.push_back(calibp0ave); 
calib_p0_ave_[position]->initValue(calib_p0.first); calib_p0_ave_[position]->genValue(calib_p0.first); calib_p0_ave_[position]->fixed(kTRUE); LauParameter* calibp1ave = new LauParameter(calib_p1_aveName,calib_p1.first,0.0,1.5,kTRUE); calib_p1_ave_.push_back(calibp1ave); calib_p1_ave_[position]->initValue(calib_p1.first); calib_p1_ave_[position]->genValue(calib_p1.first); calib_p1_ave_[position]->fixed(kTRUE); LauParameter* calibp0delta = new LauParameter(calib_p0_deltaName,calib_p0.second,-10.0,10.0,kTRUE); calib_p0_delta_.push_back(calibp0delta); calib_p0_delta_[position]->initValue(calib_p0.second); calib_p0_delta_[position]->genValue(calib_p0.second); calib_p0_delta_[position]->fixed(kTRUE); LauParameter* calibp1delta = new LauParameter(calib_p1_deltaName,calib_p1.second,-10.0,10.0,kTRUE); calib_p1_delta_.push_back(calibp1delta); calib_p1_delta_[position]->initValue(calib_p1.second); calib_p1_delta_[position]->genValue(calib_p1.second); calib_p1_delta_[position]->fixed(kTRUE); } } void LauFlavTag::cacheInputFitVars(LauFitDataTree* inputFitData, const TString& decayTimeVarName) { evtTagFlv_.clear(); evtMistag_.clear(); evtTrueTagFlv_.clear(); evtDecayFlv_.clear(); evtDecayTime_.clear(); // Loop over the taggers to check the branches for (ULong_t i=0; i < tagVarNames_.size(); ++i){ if ( not inputFitData->haveBranch( tagVarNames_[i] ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << tagVarNames_[i] << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( not inputFitData->haveBranch( mistagVarNames_[i] ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << mistagVarNames_[i] << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } } if ( trueTagVarName_ != "" and not inputFitData->haveBranch( trueTagVarName_ ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << trueTagVarName_ << "\"." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } if ( decayFlvVarName_ != "" and not inputFitData->haveBranch( decayFlvVarName_ ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << decayFlvVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( decayTimeVarName != "" and not inputFitData->haveBranch( decayTimeVarName ) ) { std::cerr << "ERROR in LauFlavTag::cacheInputFitVars : Input data does not contain branch \"" << decayTimeVarName << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } const ULong_t nEvents { inputFitData->nEvents() }; evtTagFlv_.reserve( nEvents ); evtMistag_.reserve( nEvents ); evtTrueTagFlv_.reserve( nEvents ); evtDecayFlv_.reserve( nEvents ); evtDecayTime_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (ULong_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); // For untagged events see if we have a truth tag for normalisation modes Int_t curEvtTrueTagFlv { ( trueTagVarName_ != "" ) ? static_cast( dataValues.at( trueTagVarName_ ) ) : 0 }; if ( curEvtTrueTagFlv > 1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid true tag value " << curEvtTrueTagFlv << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTrueTagFlv = +1; } else if ( curEvtTrueTagFlv < -1 ){ std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid true tag value " << curEvtTrueTagFlv << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTrueTagFlv = -1; } curEvtTrueTagFlv_ = static_cast(curEvtTrueTagFlv); evtTrueTagFlv_.push_back(curEvtTrueTagFlv_); // Flavour at decay // TODO put this in a try catch block current error message is unhelpful if this throws Int_t curEvtDecayFlv { ( decayFlvVarName_ != "" ) ? 
static_cast( dataValues.at( decayFlvVarName_ ) ) : 0 }; if ( curEvtDecayFlv > 1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid decay flavour value " << curEvtDecayFlv << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtDecayFlv = +1; } else if ( curEvtDecayFlv < -1 ){ std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid decay flavour value " << curEvtDecayFlv << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtDecayFlv = -1; } curEvtDecayFlv_ = static_cast(curEvtDecayFlv); evtDecayFlv_.push_back(curEvtDecayFlv_); for (ULong_t i=0; i < tagVarNames_.size(); ++i){ Int_t curEvtTagFlv { static_cast( dataValues.at( tagVarNames_[i] ) ) }; if ( curEvtTagFlv > 1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTagFlv = +1; } else if ( curEvtTagFlv < -1 ) { std::cerr << "WARNING in LauFlavTag::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTagFlv = -1; } curEvtTagFlv_[i] = static_cast( curEvtTagFlv ); curEvtMistag_[i] = static_cast( dataValues.at( mistagVarNames_[i] ) ); // Calibrated mistag > 0.5 is just a tag flip - handled automatically in getCapitalOmega function if (curEvtMistag_[i] > 0.5){ std::cerr<<"WARNING in LauFlavTag::cacheInputFitVars : Mistag value "<( dataValues.at( decayTimeVarName ) ); evtDecayTime_.push_back(curEvtDecayTime_); } } void LauFlavTag::updateEventInfo(const ULong_t iEvt) { //Assign current event variables curEvtTagFlv_ = evtTagFlv_[iEvt]; curEvtMistag_ = evtMistag_[iEvt]; curEvtTrueTagFlv_ = evtTrueTagFlv_[iEvt]; curEvtDecayFlv_ = evtDecayFlv_[iEvt]; curEvtDecayTime_ = evtDecayTime_[iEvt]; } void LauFlavTag::generateEventInfo(const Flavour trueTagFlv, const Double_t curEvtDecayTime) { curEvtTrueTagFlv_ = trueTagFlv; curEvtDecayFlv_ = Flavour::Unknown; curEvtDecayTime_ = 
curEvtDecayTime; Double_t randNo{0.0}; Double_t tagEffB0{0.0}; Double_t tagEffB0bar{0.0}; const ULong_t nTaggers { this->getNTaggers() }; for ( ULong_t position{0}; positiongetEtaGen(position); if (this->getUseAveDelta()) { if (tagEff_ave_[position]==nullptr){ const Double_t ave = tagEff_hist_ave_[position]->GetBinContent(tagEff_hist_ave_[position]->FindFixBin(curEvtDecayTime_)); const Double_t delta = tagEff_hist_delta_[position]->GetBinContent(tagEff_hist_delta_[position]->FindFixBin(curEvtDecayTime_)); tagEffB0 = ave + 0.5 * delta; tagEffB0bar = ave - 0.5 * delta; } else { tagEffB0 = tagEff_ave_[position]->unblindValue() + 0.5 * tagEff_delta_[position]->unblindValue(); tagEffB0bar = tagEff_ave_[position]->unblindValue() - 0.5 * tagEff_delta_[position]->unblindValue(); } } else { if (tagEff_B0_[position]==nullptr){ tagEffB0 = tagEff_hist_B0_[position]->GetBinContent(tagEff_hist_B0_[position]->FindFixBin(curEvtDecayTime_)); tagEffB0bar = tagEff_hist_B0bar_[position]->GetBinContent(tagEff_hist_B0bar_[position]->FindFixBin(curEvtDecayTime_)); } else { tagEffB0 = tagEff_B0_[position]->unblindValue(); tagEffB0bar = tagEff_B0bar_[position]->unblindValue(); } } if (curEvtTrueTagFlv_ == Flavour::B) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0) { randNo = LauRandom::randomFun()->Rndm(); // Account for (calibrated) mistag if (randNo > this->getLittleOmega(position, Flavour::B)){ curEvtTagFlv_[position] = Flavour::B; } else { curEvtTagFlv_[position] = Flavour::Bbar; } } else { curEvtTagFlv_[position] = Flavour::Unknown; } } else if (curEvtTrueTagFlv_ == Flavour::Bbar) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0bar) { randNo = LauRandom::randomFun()->Rndm(); // Account for (calibrated) mistag if (randNo > this->getLittleOmega(position, Flavour::Bbar)){ curEvtTagFlv_[position] = Flavour::Bbar; } else { curEvtTagFlv_[position] = Flavour::B; } } else { 
curEvtTagFlv_[position] = Flavour::Unknown; } } else { std::cerr << "ERROR in LauFlavTag::generateEventInfo : Invalid true tag flavour, should be either B (+1) or Bbar (-1)" << std::endl; gSystem->Exit(EXIT_FAILURE); } } } void LauFlavTag::generateBkgndEventInfo(const Flavour trueTagFlv, const Double_t curEvtDecayTime, const ULong_t bkgndID) { if (bkgndID < 0 || bkgndID > bkgndNames_.size()){ std::cerr << "ERROR in LauFlavTag::generateBkgndEventInfo : Invalid backgrond class identifier" << std::endl; gSystem->Exit(EXIT_FAILURE); } curEvtTrueTagFlv_ = trueTagFlv; curEvtDecayFlv_ = Flavour::Unknown; curEvtDecayTime_ = curEvtDecayTime; Double_t randNo{0.0}; Double_t tagEffB0{0.0}; Double_t tagEffB0bar{0.0}; const ULong_t nTaggers { this->getNTaggers() }; for ( ULong_t position{0}; positiongetEtaGenBkgnd(position,bkgndID); //TODO If bkgnd is signal like should these parameters be clones of signal TagEff etc? //TODO Or call generateEventInfo() instead? if (this->getUseAveDelta()) { if (tagEffBkgnd_ave_[bkgndID][position]==nullptr){ const Double_t ave = tagEffBkgnd_hist_ave_[bkgndID][position]->GetBinContent(tagEffBkgnd_hist_ave_[bkgndID][position]->FindFixBin(curEvtDecayTime_)); const Double_t delta = tagEffBkgnd_hist_delta_[bkgndID][position]->GetBinContent(tagEffBkgnd_hist_delta_[bkgndID][position]->FindFixBin(curEvtDecayTime_)); tagEffB0 = ave + 0.5 * delta; tagEffB0bar = ave - 0.5 * delta; } else { tagEffB0 = tagEffBkgnd_ave_[bkgndID][position]->unblindValue() + 0.5 * tagEffBkgnd_delta_[bkgndID][position]->unblindValue(); tagEffB0bar = tagEffBkgnd_ave_[bkgndID][position]->unblindValue() - 0.5 * tagEffBkgnd_delta_[bkgndID][position]->unblindValue(); } } else { if (tagEffBkgnd_B0_[bkgndID][position]==nullptr){ tagEffB0 = tagEffBkgnd_hist_B0_[bkgndID][position]->GetBinContent(tagEffBkgnd_hist_B0_[bkgndID][position]->FindFixBin(curEvtDecayTime_)); tagEffB0bar = 
tagEffBkgnd_hist_B0bar_[bkgndID][position]->GetBinContent(tagEffBkgnd_hist_B0bar_[bkgndID][position]->FindFixBin(curEvtDecayTime_)); } else { tagEffB0 = tagEffBkgnd_B0_[bkgndID][position]->unblindValue(); tagEffB0bar = tagEffBkgnd_B0bar_[bkgndID][position]->unblindValue(); } } if (curEvtTrueTagFlv_ == Flavour::B) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0) { randNo = LauRandom::randomFun()->Rndm(); - // Account for mistag - use eta not littleOmega for now (littleOmega only for SignalLike bkgnd?) - //if (randNo > this->getLittleOmega(position, Flavour::B)){ - if (randNo > curEvtMistag_[position]){ + // Account for (calibrated) mistag + const Double_t omega { (bkgndTypes_[bkgndID] == BkgndType::Combinatorial) ? curEvtMistag_[position] : this->getLittleOmegaBkgnd(position, Flavour::B, bkgndID) }; + if (randNo > omega){ curEvtTagFlv_[position] = Flavour::B; } else { curEvtTagFlv_[position] = Flavour::Bbar; } } else { curEvtTagFlv_[position] = Flavour::Unknown; } } else if (curEvtTrueTagFlv_ == Flavour::Bbar) { randNo = LauRandom::randomFun()->Rndm(); // Try to tag in tageff% of cases if (randNo <= tagEffB0bar) { randNo = LauRandom::randomFun()->Rndm(); // Account for (calibrated) mistag - //if (randNo > this->getLittleOmega(position, Flavour::Bbar)){ - if (randNo > curEvtMistag_[position]){ + const Double_t omega { (bkgndTypes_[bkgndID] == BkgndType::Combinatorial) ? 
curEvtMistag_[position] : this->getLittleOmegaBkgnd(position, Flavour::Bbar, bkgndID) }; + if (randNo > omega){ curEvtTagFlv_[position] = Flavour::Bbar; } else { curEvtTagFlv_[position] = Flavour::B; } } else { curEvtTagFlv_[position] = Flavour::Unknown; } } else { std::cerr << "ERROR in LauFlavTag::generateBkgndEventInfo : Invalid true tag flavour, should be either B (+1) or Bbar (-1)" << std::endl; gSystem->Exit(EXIT_FAILURE); } } } Double_t LauFlavTag::getLittleOmega(const ULong_t position, const Flavour flag) const { if ( flag == Flavour::Unknown ){ std::cerr << "ERROR in LauFlavTag::getLittleOmega : Invalid flag, you must request either omega (+1) or omega bar (-1) to be returned" << std::endl; return 0.0; } Double_t calibp0(0.), calibp1(0.), calibp0bar(0.), calibp1bar(0.); //If we are floating average omega and delta omega we need to use those parameters instead if (useAveDelta_){ calibp0 = calib_p0_ave_[position]->unblindValue() + 0.5*calib_p0_delta_[position]->unblindValue(); calibp0bar = calib_p0_ave_[position]->unblindValue() - 0.5*calib_p0_delta_[position]->unblindValue(); calibp1 = calib_p1_ave_[position]->unblindValue() + 0.5*calib_p1_delta_[position]->unblindValue(); calibp1bar = calib_p1_ave_[position]->unblindValue() - 0.5*calib_p1_delta_[position]->unblindValue(); } else { calibp0 = calib_p0_B0_[position]->unblindValue(); calibp0bar = calib_p0_B0bar_[position]->unblindValue(); calibp1 = calib_p1_B0_[position]->unblindValue(); calibp1bar = calib_p1_B0bar_[position]->unblindValue(); } if ( flag == Flavour::B ){ return calibp0 + calibp1 * (curEvtMistag_[position] - perEvtAvgMistag_[position]); } else{ return calibp0bar + calibp1bar * (curEvtMistag_[position] - perEvtAvgMistag_[position]); } return 0.0; } Double_t LauFlavTag::getCapitalOmega(const ULong_t position, const Flavour flag) const { if ( flag == Flavour::Unknown ){ std::cerr << "ERROR in LauFlavTag::getCapitalOmega : Invalid flag, you must request either Omega (+1) or Omega bar (-1) to be 
returned" << std::endl; return 0.0; } //Delta functions to control which terms contribute Int_t deltap1(0), deltam1(0), delta0(0); if (curEvtTagFlv_[position] == Flavour::Bbar){ deltam1 = 1; } else if(curEvtTagFlv_[position] == Flavour::B){ deltap1 = 1; } else{ delta0 = 1; } //Efficiency Double_t eff(0.0); if (useAveDelta_){ if(flag==Flavour::B){ if (tagEff_ave_[position]==nullptr){ eff = tagEff_hist_ave_[position]->GetBinContent(tagEff_hist_ave_[position]->FindFixBin(curEvtDecayTime_)) + 0.5*tagEff_hist_delta_[position]->GetBinContent(tagEff_hist_delta_[position]->FindFixBin(curEvtDecayTime_)); } else { eff = tagEff_ave_[position]->unblindValue() + 0.5*tagEff_delta_[position]->unblindValue(); } } else { if (tagEff_ave_[position]==nullptr){ eff = tagEff_hist_ave_[position]->GetBinContent(tagEff_hist_ave_[position]->FindFixBin(curEvtDecayTime_)) - 0.5*tagEff_hist_delta_[position]->GetBinContent(tagEff_hist_delta_[position]->FindFixBin(curEvtDecayTime_)); } else { eff = tagEff_ave_[position]->unblindValue() - 0.5*tagEff_delta_[position]->unblindValue(); } } }else{ if(flag==Flavour::B){ if (tagEff_B0_[position]==nullptr){ eff = tagEff_hist_B0_[position]->GetBinContent(tagEff_hist_B0_[position]->FindFixBin(curEvtDecayTime_)); } else { eff = tagEff_B0_[position]->unblindValue(); } }else{ if (tagEff_B0bar_[position]==nullptr){ eff = tagEff_hist_B0bar_[position]->GetBinContent(tagEff_hist_B0bar_[position]->FindFixBin(curEvtDecayTime_)); } else { eff = tagEff_B0bar_[position]->unblindValue(); } } } //Little omega Double_t omega = this->getLittleOmega(position, flag); Double_t omegaPrime(0.); //Transform to omega prime - TODO isn't this the inverse, getLittleOmega is actually giving us omegaPrime and on the next line we convert back to omega? if (useEtaPrime_){ omegaPrime = (1/(1+TMath::Exp(-1.0*omega))); }else{ omegaPrime = omega; } //little omega must be between 0 and 1. Force this for now, if the fits keep getting stuck can look more closely at it. 
// --- Tail of LauFlavTag::getCapitalOmega (the function opens on an earlier line). ---
// Clamp the transformed mistag probability into its physical range [0,1].
if (omegaPrime < 0.0){
	std::cerr << "WARNING in LauFlavTag::getCapitalOmega : The value of little omega is less than 0, shifting to 0" << std::endl;
	omegaPrime = 0.0;
}
if (omegaPrime > 1.0){
	std::cerr << "WARNING in LauFlavTag::getCapitalOmega : The value of little omega is greater than 1, shifting to 1" << std::endl;
	omegaPrime = 1.0;
}
//eta PDF value
// NOTE(review): the template argument of std::vector was lost in extraction —
// presumably std::vector<Double_t>; confirm against the original source.
std::vector abs;
abs.push_back(curEvtMistag_[position]);
etaPdfs_[position]->calcLikelihoodInfo(abs);
Double_t h { etaPdfs_[position]->getLikelihood() };
const Double_t u { 2.0 }; // the PDF value for a uniform PDF between 0.0 and 0.5
//If h returns 0 for a tagged event, the event likelihood will be zero
if (h==0 && delta0==0){
	std::cerr << "WARNING in LauFlavTag::getCapitalOmega : The value of the eta PDF is zero at eta = " << curEvtMistag_[position] << ", shifting to 0.1" << std::endl;
	h=0.1;
}
//Put it together
// Tagged terms (deltap1/deltam1) are weighted by the eta-PDF value h; the
// untagged term (delta0) uses the uniform-PDF value u and (1 - efficiency).
if (flag == Flavour::B){
	return (deltap1*eff*(1-omegaPrime) + deltam1*eff*omegaPrime)*h + delta0*(1-eff)*u;
} else {
	return (deltam1*eff*(1-omegaPrime) + deltap1*eff*omegaPrime)*h + delta0*(1-eff)*u;
}
}

//! Calculate the calibrated mistag ("little omega") for a background class.
/*!
    omega = p0 + p1 * (eta - <eta>_bkgnd), using either the B0/B0bar or the
    average/delta calibration parameterisation depending on useAveDelta_.
    \param [in] position index of the tagger
    \param [in] flag Flavour::B selects omega, Flavour::Bbar selects omega-bar
    \param [in] classID the background class index
    \return the calibrated mistag value (0.0 if the flag is invalid)
*/
Double_t LauFlavTag::getLittleOmegaBkgnd(const ULong_t position, const Flavour flag, const UInt_t classID) const
{
	if ( flag == Flavour::Unknown ){
		std::cerr << "ERROR in LauFlavTag::getLittleOmegaBkgnd : Invalid flag, you must request either omega (+1) or omega bar (-1) to be returned" << std::endl;
		return 0.0;
	}
	Double_t calibp0(0.), calibp1(0.), calibp0bar(0.), calibp1bar(0.);
	//If we are floating average omega and delta omega we need to use those parameters instead
	if (useAveDelta_){
		calibp0 = calib_p0_ave_[position]->unblindValue() + 0.5*calib_p0_delta_[position]->unblindValue();
		calibp0bar = calib_p0_ave_[position]->unblindValue() - 0.5*calib_p0_delta_[position]->unblindValue();
		calibp1 = calib_p1_ave_[position]->unblindValue() + 0.5*calib_p1_delta_[position]->unblindValue();
		calibp1bar = calib_p1_ave_[position]->unblindValue() - 0.5*calib_p1_delta_[position]->unblindValue();
	} else {
		calibp0 = calib_p0_B0_[position]->unblindValue();
		calibp0bar = calib_p0_B0bar_[position]->unblindValue();
		calibp1 = calib_p1_B0_[position]->unblindValue();
		calibp1bar = calib_p1_B0bar_[position]->unblindValue();
	}
	if ( flag == Flavour::B ){
		return calibp0 + calibp1 * (curEvtMistag_[position] - avgMistagBkgnd_[classID][position]);
	}
	else{
		return calibp0bar + calibp1bar * (curEvtMistag_[position] - avgMistagBkgnd_[classID][position]);
	}
	return 0.0; // NOTE(review): unreachable — both branches above return
}

//! Calculate capital Omega (the per-event tagging factor) for a background class.
/*!
    Combines the tagging efficiency, the calibrated mistag and the eta-PDF value
    according to the background type (combinatorial, flavour-specific, ...).
    \param [in] position index of the tagger
    \param [in] flag Flavour::B selects Omega, Flavour::Bbar selects Omega-bar
    \param [in] classID the background class index
*/
Double_t LauFlavTag::getCapitalOmegaBkgnd(const ULong_t position, const Flavour flag, const UInt_t classID) const
{
	//Fill in with the various options of flag = +-1, type = signal-like, combinatorial etc
	if ( flag == Flavour::Unknown ){
		std::cerr << "ERROR in LauFlavTag::getCapitalOmegaBkgnd : Invalid flag, you must request either Omega (+1) or Omega bar (-1) to be returned" << std::endl;
		return 0.0;
	}
	//Delta functions to control which terms contribute
	Int_t deltap1(0), deltam1(0), delta0(0);
	if (curEvtTagFlv_[position] == Flavour::Bbar){
		deltam1 = 1;
	}
	else if(curEvtTagFlv_[position] == Flavour::B){
		deltap1 = 1;
	}
	else{
		delta0 = 1;
	}
	//Efficiency
	Double_t effB0(0.0), effB0bar(0.0);
	if (useAveDelta_){
		if (tagEffBkgnd_ave_[classID][position]==nullptr){
			// Histogram-based efficiencies, evaluated at the current decay time:
			// eff(B0) = ave + delta/2, eff(B0bar) = ave - delta/2
			effB0 = tagEffBkgnd_hist_ave_[classID][position]->GetBinContent(tagEffBkgnd_hist_ave_[classID][position]->FindFixBin(curEvtDecayTime_)) + 0.5*tagEffBkgnd_hist_delta_[classID][position]->GetBinContent(tagEffBkgnd_hist_delta_[classID][position]->FindFixBin(curEvtDecayTime_));
			effB0bar = tagEffBkgnd_hist_ave_[classID][position]->GetBinContent(tagEffBkgnd_hist_ave_[classID][position]->FindFixBin(curEvtDecayTime_)) - 0.5*tagEffBkgnd_hist_delta_[classID][position]->GetBinContent(tagEffBkgnd_hist_delta_[classID][position]->FindFixBin(curEvtDecayTime_));
		} else {
			// Parameter-based (possibly blinded) efficiencies
			effB0 = tagEffBkgnd_ave_[classID][position]->unblindValue() + 0.5*tagEffBkgnd_delta_[classID][position]->unblindValue();
			effB0bar = tagEffBkgnd_ave_[classID][position]->unblindValue() -
// --- Continuation of LauFlavTag::getCapitalOmegaBkgnd (begun on an earlier line). ---
			0.5*tagEffBkgnd_delta_[classID][position]->unblindValue();
		}
	}else{
		if (tagEffBkgnd_B0_[classID][position]==nullptr){
			// Separate B0/B0bar efficiency histograms, evaluated at the current decay time
			effB0 = tagEffBkgnd_hist_B0_[classID][position]->GetBinContent(tagEffBkgnd_hist_B0_[classID][position]->FindFixBin(curEvtDecayTime_));
			effB0bar = tagEffBkgnd_hist_B0bar_[classID][position]->GetBinContent(tagEffBkgnd_hist_B0bar_[classID][position]->FindFixBin(curEvtDecayTime_));
		} else {
			effB0 = tagEffBkgnd_B0_[classID][position]->unblindValue();
			effB0bar = tagEffBkgnd_B0bar_[classID][position]->unblindValue();
		}
	}
	//Need to know which background eta PDF to use - classID
	// NOTE(review): the template argument of std::vector was lost in extraction —
	// presumably std::vector<Double_t>; confirm against the original source.
	std::vector abs;
	abs.push_back(curEvtMistag_[position]);
	etaBkgndPdfs_[classID][position]->calcLikelihoodInfo(abs);
	Double_t h { etaBkgndPdfs_[classID][position]->getLikelihood() };
	const Double_t u { 2.0 }; // the PDF value for a uniform PDF between 0.0 and 0.5
	if (bkgndTypes_[classID] == BkgndType::Combinatorial){
		// Untagged term uses the mean of the two efficiencies
		return (deltap1*effB0 + deltam1*effB0bar)*h + delta0*(1.0 - 0.5*(effB0 + effB0bar))*u;
	}
	else if (bkgndTypes_[classID] == BkgndType::FlavourSpecific){
		Double_t omega = this->getLittleOmegaBkgnd(position, flag, classID);
		if (flag == Flavour::B){
			return (deltap1*effB0*(1-omega) + deltam1*effB0*omega)*h + delta0*(1-effB0)*u;
		} else {
			return (deltam1*effB0bar*(1-omega) + deltap1*effB0bar*omega)*h + delta0*(1-effB0bar)*u;
		}
	}
	else if (bkgndTypes_[classID] == BkgndType::SelfConjugate){
		// NOTE(review): identical to the FlavourSpecific branch — possibly
		// intentional, but confirm against the original source.
		Double_t omega = this->getLittleOmegaBkgnd(position, flag, classID);
		if (flag == Flavour::B){
			return (deltap1*effB0*(1-omega) + deltam1*effB0*omega)*h + delta0*(1-effB0)*u;
		} else {
			return (deltam1*effB0bar*(1-omega) + deltap1*effB0bar*omega)*h + delta0*(1-effB0bar)*u;
		}
	}
	else if (bkgndTypes_[classID] == BkgndType::NonSelfConjugate){
		return 1.0;
	} else {
		return 1.0;
	}
}

//! Set the per-background tagging-efficiency parameters and eta PDF for a tagger.
/*!
    NOTE(review): the std::pair template arguments were lost in extraction
    (presumably std::pair of LauParameter pointers for this overload).
*/
void LauFlavTag::setBkgndParams(const TString& bkgndName, const TString& taggerName, LauAbsPdf* etaPdf, std::pair tagEff)
{
	if (taggerPosition_.count(taggerName)==0){
		std::cerr << "ERROR in LauFlavTag::setBkgndParams : Tagger name not recognised please check your options" << std::endl;
		return;
	}
	Int_t bkgndID(-1);
	// NOTE(review): extraction damage — the loop condition, loop body and several
	// statements between "for (ULong_t i=0; i" and "name(" were lost (text inside
	// angle brackets was stripped). The fragment below is reproduced verbatim;
	// restore it from the original source.
	for (ULong_t i=0; iname("tagEff_ave_"+taggerName+"_bkgnd_"+bkgndName);
	tagEffBkgnd_ave_[bkgndID][position] = tagEffB0;
	tagEffB0bar->name("tagEff_delta_"+taggerName+"_bkgnd_"+bkgndName);
	tagEffB0bar->range(-1.0,1.0);
	tagEffBkgnd_delta_[bkgndID][position] = tagEffB0bar;
	} else {
		tagEffBkgnd_B0_[bkgndID][position] = tagEffB0;
		tagEffBkgnd_B0bar_[bkgndID][position] = tagEffB0bar;
	}
	std::cout << "INFO in LauFlavTag::setBkgndParams : Added efficiency parameters and eta PDF for background " << bkgndName << " for tagger " << taggerName << std::endl;
}

//! Set the per-background tagging-efficiency histograms and eta PDF for a tagger.
/*!
    Histogram overload. NOTE(review): the std::pair template arguments were lost
    in extraction (presumably std::pair of TH1 pointers).
*/
void LauFlavTag::setBkgndParams(const TString& bkgndName, const TString& taggerName, LauAbsPdf* etaPdf, std::pair tagEff)
{
	if (taggerPosition_.count(taggerName)==0){
		std::cerr << "ERROR in LauFlavTag::setBkgndParams : Tagger name not recognised please check your options" << std::endl;
		return;
	}
	if (tagEff.first==nullptr || tagEff.second==nullptr){
		std::cerr << "ERROR in LauFlavTag::setBkgndParams : Efficiency histogram(s) do not exist please check your options" << std::endl;
		return;
	}
	Int_t bkgndID(-1);
	// NOTE(review): extraction damage — the loop and several statements between
	// "for (ULong_t i=0; i" and "(etaPdf)" were lost, including the target type of
	// the dynamic_cast (presumably a 1D histogram PDF type). Reproduced verbatim;
	// restore from the original source.
	for (ULong_t i=0; i(etaPdf);
	if (etahistpdf){
		avgMistagBkgnd_[bkgndID][position] = etahistpdf->getMean();
	} else {
		std::cerr << "WARNING in LauFlavTag::setBkgndParams : Couldn't determine average eta value from PDF. Setting it to 0.4." << std::endl;
		avgMistagBkgnd_[bkgndID][position] = 0.4;
	}
	if (useAveDelta_){
		tagEffBkgnd_hist_ave_[bkgndID][position] = tagEff.first;
		tagEffBkgnd_hist_delta_[bkgndID][position] = tagEff.second;
	} else {
		tagEffBkgnd_hist_B0_[bkgndID][position] = tagEff.first;
		tagEffBkgnd_hist_B0bar_[bkgndID][position] = tagEff.second;
	}
	std::cout << "INFO in LauFlavTag::setBkgndParams : Added efficiency histograms and eta PDF for background " << bkgndName << " for tagger " << taggerName << std::endl;
}

//! Generate an eta (mistag) value for signal from the tagger's eta PDF.
/*!
    The generated value is clamped to [0.0, 0.5] and cached in curEvtMistag_.
    \param [in] position index of the tagger
    \return the generated (clamped) eta value
*/
Double_t LauFlavTag::getEtaGen(const ULong_t position)
{
	LauFitData data { etaPdfs_[position]->generate(nullptr) };
	Double_t etagen { data.at(etaPdfs_[position]->varName()) };
	if (etagen > 0.5){etagen = 0.5;}
	if (etagen < 0.0){etagen = 0.0;}
	curEvtMistag_[position] = etagen;
	return etagen;
}

//! Generate an eta (mistag) value for a background class from its eta PDF.
/*!
    The generated value is clamped to [0.0, 0.5] and cached in curEvtMistag_.
    \param [in] position index of the tagger
    \param [in] bkgndID the background class index
    \return the generated (clamped) eta value
*/
Double_t LauFlavTag::getEtaGenBkgnd(const ULong_t position, const ULong_t bkgndID)
{
	LauFitData data { etaBkgndPdfs_[bkgndID][position]->generate(nullptr) };
	Double_t etagen { data.at(etaBkgndPdfs_[bkgndID][position]->varName()) };
	if (etagen > 0.5){etagen = 0.5;}
	if (etagen < 0.0){etagen = 0.0;}
	curEvtMistag_[position] = etagen;
	return etagen;
}

//! Set the name of the true-tag branch in the input data
void LauFlavTag::setTrueTagVarName(TString trueTagVarName){
	trueTagVarName_ = std::move(trueTagVarName);
}

//! Set the name of the decay-flavour branch in the input data
void LauFlavTag::setDecayFlvVarName(TString decayFlvVarName){
	decayFlvVarName_ = std::move(decayFlvVarName);
}

//! Add Gaussian constraints (mean, width) to the P0 calibration parameters of a tagger.
/*!
    NOTE(review): the std::pair template arguments were lost in extraction —
    presumably std::pair<Double_t,Double_t>; confirm against the original source.
*/
void LauFlavTag::addP0GaussianConstraints(TString name, std::pair constraint1, std::pair constraint2){
	//Does key exist?
if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::addP0GaussianConstraints : Tagger name not recognised please check your options" << std::endl; std::cerr << "ERROR in LauFlavTag::addP0GaussianConstraints : Constraints have not been applied" << std::endl; return; } //Find position in the vector from the tagger name Double_t pos = taggerPosition_.at(name); if (!useAveDelta_){ calib_p0_B0_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p0_B0bar_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); }else{ calib_p0_ave_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p0_delta_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); } std::cout << "INFO in LauFlavTag::addP0GaussianConstraints : Added Gaussian constraints for the P0 calibration parameters of tagger " << name << std::endl; } void LauFlavTag::addP1GaussianConstraints(TString name, std::pair constraint1, std::pair constraint2){ //Does key exist? 
if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::addP1GaussianConstraints : Tagger name not recognised please check your options" << std::endl; std::cerr << "ERROR in LauFlavTag::addP1GaussianConstraints : Constraints have not been applied" << std::endl; return; } //Find position in the vector from the tagger name Double_t pos = taggerPosition_.at(name); if (!useAveDelta_){ calib_p1_B0_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p1_B0bar_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); }else{ calib_p1_ave_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); calib_p1_delta_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); } std::cout << "INFO in LauFlavTag::addP1GaussianConstraints : Added Gaussian constraints for the P1 calibration parameters of tagger " << name << std::endl; } void LauFlavTag::addTagEffGaussianConstraints(TString name, std::pair constraint1, std::pair constraint2){ //Does key exist? if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::addTagEffGaussianConstraints : Tagger name not recognised please check your options" << std::endl; std::cerr << "ERROR in LauFlavTag::addTagEffGaussianConstraints : Constraints have not been applied" << std::endl; return; } //Find position in the vector from the tagger name Double_t pos = taggerPosition_.at(name); if (tagEff_B0_[pos] == nullptr){ std::cerr << "ERROR in LauFlavTag::addTagEffGaussianConstraints : Cannot add Gaussian constraints to a histogram!" 
<< std::endl; return; } if (!useAveDelta_){ tagEff_B0_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); tagEff_B0bar_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); }else{ tagEff_ave_[pos]->addGaussianConstraint(constraint1.first,constraint1.second); tagEff_delta_[pos]->addGaussianConstraint(constraint2.first,constraint2.second); } std::cout << "INFO in LauFlavTag::addTagEffGaussianConstraints : Added Gaussian constraints for the tagging efficiency parameters of tagger " << name << std::endl; } void LauFlavTag::floatCalibParP0B0(const TString name){ if (useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0B0 : Trying to set calibration parameters for B0/B0bar not average/delta" << std::endl; return; } if (name==""){ for (auto& param : calib_p0_B0_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0B0 : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p0_B0_[position]->fixed(kFALSE); } } void LauFlavTag::floatCalibParP1B0(const TString name){ if (useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1B0 : Trying to set calibration parameters for B0/B0bar not average/delta" << std::endl; return; } if (name==""){ for (auto& param : calib_p1_B0_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? 
if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1B0 : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p1_B0_[position]->fixed(kFALSE); } } void LauFlavTag::floatCalibParP0B0bar(const TString name){ if (useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0B0bar : Trying to set calibration parameters for B0/B0bar not average/delta" << std::endl; return; } if (name==""){ for (auto& param : calib_p0_B0bar_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0B0bar : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p0_B0bar_[position]->fixed(kFALSE); } } void LauFlavTag::floatCalibParP1B0bar(const TString name){ if (useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1B0bar : Trying to set calibration parameters for B0/B0bar not average/delta" << std::endl; return; } if (name==""){ for (auto& param : calib_p1_B0bar_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? 
if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1B0bar : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p1_B0bar_[position]->fixed(kFALSE); } } void LauFlavTag::floatCalibParP0Ave(const TString name){ if (!useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0Ave : Trying to set calibration parameters for average/delta not B0/B0bar" << std::endl; return; } if (name==""){ for (auto& param : calib_p0_ave_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0Ave : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p0_ave_[position]->fixed(kFALSE); } } void LauFlavTag::floatCalibParP0Delta(const TString name){ if (!useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0Delta : Trying to set calibration parameters for average/delta not B0/B0bar" << std::endl; return; } if (name==""){ for (auto& param : calib_p0_delta_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? 
if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP0Delta : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p0_delta_[position]->fixed(kFALSE); } } void LauFlavTag::floatCalibParP1Ave(const TString name){ if (!useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1Ave : Trying to set calibration parameters for average/delta not B0/B0bar" << std::endl; return; } if (name==""){ for (auto& param : calib_p1_ave_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1Ave : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p1_ave_[position]->fixed(kFALSE); } } void LauFlavTag::floatCalibParP1Delta(const TString name){ if (!useAveDelta_){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1Delta : Trying to set calibration parameters for average/delta not B0/B0bar" << std::endl; return; } if (name==""){ for (auto& param : calib_p1_delta_){ if (param==nullptr){continue;} param->fixed(kFALSE); } } else { //Does key exist? 
if (taggerPosition_.count(name)==0){ std::cerr << "ERROR in LauFlavTag::floatCalibParP1Delta : Tagger name not recognised please check your options" << std::endl; return; } //Find position in the vector from the tagger name Double_t position = taggerPosition_.at(name); calib_p1_delta_[position]->fixed(kFALSE); } } void LauFlavTag::floatAllCalibPars(){ if (useAveDelta_){ this->floatCalibParP0Ave(); this->floatCalibParP0Delta(); this->floatCalibParP1Ave(); this->floatCalibParP1Delta(); } else { this->floatCalibParP0B0(); this->floatCalibParP1B0(); this->floatCalibParP0B0bar(); this->floatCalibParP1B0bar(); } } diff --git a/src/LauTimeDepFitModel.cc b/src/LauTimeDepFitModel.cc index 7be8e61..c63ba64 100644 --- a/src/LauTimeDepFitModel.cc +++ b/src/LauTimeDepFitModel.cc @@ -1,3110 +1,3264 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauTimeDepFitModel.cc \brief File containing implementation of LauTimeDepFitModel class. 
*/ #include #include #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauParamFixed.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepFitModel.hh" #include "LauFlavTag.hh" ClassImp(LauTimeDepFitModel) LauTimeDepFitModel::LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag) : LauAbsFitModel(), sigModelB0bar_(modelB0bar), sigModelB0_(modelB0), kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0), kinematicsB0_(modelB0 ? 
modelB0->getKinematics() : 0), usingBkgnd_(kFALSE), flavTag_(flavTag), curEvtTrueTagFlv_(LauFlavTag::Unknown), curEvtDecayFlv_(LauFlavTag::Unknown), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), nCalibPar_(0), nTagEffPar_(0), nEffiPar_(0), nAsymPar_(0), coeffsB0bar_(0), coeffsB0_(0), coeffPars_(0), fitFracB0bar_(0), fitFracB0_(0), fitFracAsymm_(0), acp_(0), meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0), meanEffB0_("meanEffB0",0.0,0.0,1.0), DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0), DPRateB0_("DPRateB0",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), cpevVarName_(""), cpEigenValue_(CPEven), evtCPEigenVals_(0), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdf_(), BkgndTypes_(), BkgndDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), sigExtraPdf_(), sigFlavTagPdf_(), bkgdFlavTagPdf_(), AProd_("AProd",0.0,-1.0,1.0,kTRUE), iterationsMax_(100000000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigFlavTagLike_(0.0), bkgdFlavTagLike_(0.0), sigTotalLike_(0.0) { // Set up ftag here? 
this->setBkgndClassNames(flavTag_->getBkgndNames()); BkgndTypes_ = flavTag_->getBkgndTypes(); if ( BkgndTypes_.size() > 0 ){usingBkgnd_ = kTRUE;} // Make sure that the integration scheme will be symmetrised sigModelB0bar_->forceSymmetriseIntegration(kTRUE); sigModelB0_->forceSymmetriseIntegration(kTRUE); } LauTimeDepFitModel::~LauTimeDepFitModel() { for ( LauAbsPdf* pdf : sigExtraPdf_ ) { delete pdf; } for(auto& data : bkgndTree_){ delete data; } } void LauTimeDepFitModel::setupBkgndVectors() { UInt_t nBkgnds = this->nBkgndClasses(); BkgndDPModelsB_.resize( nBkgnds ); BkgndDPModelsBbar_.resize( nBkgnds ); BkgndDecayTimePdfs_.resize( nBkgnds ); BkgndPdfs_.resize( nBkgnds ); bkgndEvents_.resize( nBkgnds ); bkgndAsym_.resize( nBkgnds ); bkgndTree_.resize( nBkgnds ); reuseBkgnd_.resize( nBkgnds ); bkgndDPLike_.resize( nBkgnds ); bkgndExtraLike_.resize( nBkgnds ); bkgndTotalLike_.resize( nBkgnds ); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The event LauParameter pointer is null." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBgkndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." 
<< std::endl; return; } nBkgndEvents->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID] = new LauParameter(nBkgndEvents->name()+"Asym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( bkgndAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." 
<< std::endl; return; } bkgndEvents_[bkgndID]->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID]->name( nBkgndEvents->name()+"Asym" ); if ( bkgndAsym->isLValue() ) { LauParameter* asym = dynamic_cast( bkgndAsym ); asym->range(-1.0, 1.0); } bkgndAsym_[bkgndID] = bkgndAsym; } void LauTimeDepFitModel::setSignalDtPdf(LauDecayTimePdf* pdf) { if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalDtPdf : The PDF pointer is null, not adding it."<validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDtPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDecayTimePdfs_[bkgndID] = pdf; usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* BModel, LauAbsBkgndDPModel* BbarModel) { if (BModel==nullptr) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the particle model." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
<< std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDPModelsB_[bkgndID] = BModel; if (BbarModel==nullptr) { std::cout << "INFO in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the anti-particle model. Using only the particle model." << std::endl; BkgndDPModelsBbar_[bkgndID] = nullptr; } else { BkgndDPModelsBbar_[bkgndID] = BbarModel; } usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setSignalPdfs(LauAbsPdf* pdf) { // These "extra variables" are assumed to be purely kinematical, like mES and DeltaE //or making use of Rest of Event information, and therefore independent of whether //the parent is a B0 or a B0bar. If this assupmtion doesn't hold, do modify this part! if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalPdfs : The PDF pointer is null."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
// NOTE(review): this chunk of the LauTimeDepFitModel implementation has been
// corrupted in extraction and is preserved verbatim below. Two distinct kinds
// of damage are visible:
//   1. every span between '<' and '>' has been stripped — template arguments
//      (e.g. "std::vector varNames", "std::vector> tageff_ave") and stream
//      sequences (e.g. "<<std::endl; gSystem->Exit(...)" fused into
//      "<Exit(EXIT_FAILURE)") are missing tokens that cannot be reconstructed
//      from this text alone;
//   2. the original line breaks have been collapsed, so a mid-line "//"
//      comment now swallows the code that followed it on subsequent original
//      lines, and an inline diff fragment ("-"/"+" around the decay-time
//      generation, Rndm()*(tMax-tMin)+tMin vs Uniform(tMin,tMax)) is embedded
//      in the text at the generateSignalEvent section.
// Functions apparently present (in order): setPhiMix, initialise,
// initialiseDPModels, calcInterferenceTermIntegrals, calcInterTermNorm,
// setAmpCoeffSet, calcAsymmetries, setSignalDPParameters,
// addParametersToFitList, setDecayTimeParameters, setExtraPdfParameters,
// setFitNEvents, setAsymParams, setTagEffParams, setCalibParams,
// setEffiParams, setExtraNtupleVars, setAsymmetries, finaliseFitResults,
// printFitFractions, writeOutTable, eventsToGenerate, genExpt,
// generateSignalEvent — presumably matching the upstream Laura++ source;
// TODO(review): restore this block from the upstream repository rather than
// attempting to repair it in place, since the missing '<...>' tokens would
// otherwise have to be guessed.
<< std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndPdfs_[bkgndID].push_back(pdf); usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos) { phiMix_.value(phiMix); phiMix_.initValue(phiMix); phiMix_.genValue(phiMix); phiMix_.fixed(fixPhiMix); const Double_t sinPhiMix = TMath::Sin(phiMix); sinPhiMix_.value(sinPhiMix); sinPhiMix_.initValue(sinPhiMix); sinPhiMix_.genValue(sinPhiMix); sinPhiMix_.fixed(fixPhiMix); const Double_t cosPhiMix = TMath::Cos(phiMix); cosPhiMix_.value(cosPhiMix); cosPhiMix_.initValue(cosPhiMix); cosPhiMix_.genValue(cosPhiMix); cosPhiMix_.fixed(fixPhiMix); useSinCos_ = useSinCos; phiMixComplex_.setRealPart(cosPhiMix); phiMixComplex_.setImagPart(-1.0*sinPhiMix); } void LauTimeDepFitModel::initialise() { // From the initial parameter values calculate the coefficients // so they can be passed to the signal model this->updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } // Flavour tagging //flavTag_->initialise(); // Decay-time PDFs signalDecayTimePdf_->initialise(); //Initialise for backgrounds if necessary for (auto& pdf : BkgndDecayTimePdfs_){ pdf->initialise(); } if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Next check that, if a given component is being used we've got the // right number of PDFs for all the variables involved // TODO - should probably check variable names and so on as well //UInt_t nsigpdfvars(0); //for ( LauPdfList::const_iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != 
sigExtraPdf_.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nsigpdfvars; // } // } //} //if (usingBkgnd_) { // for (LauBkgndPdfsList::const_iterator bgclass_iter = BkgndPdfsB0_.begin(); bgclass_iter != BkgndPdfsB0_.end(); ++bgclass_iter) { // UInt_t nbkgndpdfvars(0); // const LauPdfList& pdfList = (*bgclass_iter); // for ( LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nbkgndpdfvars; // } // } // } // if (nbkgndpdfvars != nsigpdfvars) { // std::cerr << "ERROR in LauTimeDepFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // } //} // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Handle flavour-tagging calibration parameters this->setCalibParams(); // Add tagging efficiency parameters this->setTagEffParams(); // Add the efficiency parameters this->setEffiParams(); //Asymmetry terms AProd and in setAsymmetries()? 
//this->setAsymParams(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_ + nCalibPar_ + nTagEffPar_ + nEffiPar_)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } if (sigModelB0_ == 0) { std::cerr<<"ERROR in LauTimeDepFitModel::initialiseDPModels : B0 signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp(); const UInt_t nAmpB0 = sigModelB0_->getnTotAmp(); if ( nAmpB0bar != nAmpB0 ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepFitModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); fifjEffSum_.clear(); fifjEffSum_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); // Add backgrounds if (usingBkgnd_ == kTRUE) { for (auto& model : BkgndDPModelsB_){ model->initialise(); } for (auto& model : BkgndDPModelsBbar_){ if (model != nullptr) { model->initialise(); } } } } void LauTimeDepFitModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos(); const std::vector& integralInfoListB0 = sigModelB0_->getIntegralInfos(); // TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc. 
LauComplex A, Abar, fifjEffSumTerm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions = integralInfoListB0bar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion]; const UInt_t nm13Points = integralInfoB0bar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight = integralInfoB0bar->getWeight(m13,m23); const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23); const Double_t effWeight = eff*weight; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A = integralInfoB0->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm = Abar*A.conj(); fifjEffSumTerm.rescale(effWeight); fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm; } } } } } } void LauTimeDepFitModel::calcInterTermNorm() { const std::vector& fNormB0bar = sigModelB0bar_->getFNorm(); const std::vector& fNormB0 = sigModelB0_->getFNorm(); LauComplex norm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj(); coeffTerm *= fifjEffSum_[iAmp][jAmp]; coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]); norm += coeffTerm; } } norm *= phiMixComplex_; interTermReNorm_ = 2.0*norm.re(); interTermImNorm_ = 2.0*norm.im(); } void LauTimeDepFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Is there a component called compName in the signal models? 
TString compName = coeffSet->name(); TString conjName = sigModelB0bar_->getConjResName(compName); const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters(); const LauDaughters* daughtersB0 = sigModelB0_->getDaughters(); const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 ); if ( ! sigModelB0bar_->hasResonance(compName) ) { if ( ! sigModelB0bar_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<name( compName ); } if ( conjugate ) { if ( ! sigModelB0_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<hasResonance(compName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<name() == compName) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffPars_.push_back(coeffSet); TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0)); acp_.push_back(coeffSet->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepFitModel::setAmpCoeffSet : Added coefficients for component \""<acp(); LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauTimeDepFitModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauTimeDepFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." 
<< std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; ++i) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (auto& param : pars){ if ( !param->clone() ) { fitVars.push_back(param); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters(); LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters(); for (auto& param : sigDPParsB0bar){ if ( resVars.insert(param).second ) { fitVars.push_back(param); ++nSigDPPar_; } } for (auto& param : sigDPParsB0){ if ( resVars.insert(param).second ) { fitVars.push_back(param); ++nSigDPPar_; } } } UInt_t LauTimeDepFitModel::addParametersToFitList(std::vector theVector) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (auto& pdf : theVector){ // grab the pdf and then its parameters LauAbsRValuePList& rvalues = pdf->getParameters(); // loop through the parameters for (auto& parlist : rvalues){ LauParameterPList params = parlist->getPars(); for (auto& par : params){ // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !par->clone() && ( !par->fixed() || (this->twoStageFit() && par->secondStage()) ) ) { fitVars.push_back(par); ++counter; } } } } return counter; } UInt_t LauTimeDepFitModel::addParametersToFitList(LauPdfList* theList) { UInt_t counter(0); counter += this->addFitParameters(*(theList)); return counter; } void LauTimeDepFitModel::setDecayTimeParameters() { nDecayTimePar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setDecayTimeParameters : Setting the initial fit parameters of the DecayTime Pdfs." 
<< std::endl; LauParameterPList& fitVars = this->fitPars(); // Loop over the Dt PDFs LauAbsRValuePList& rvalues = signalDecayTimePdf_->getParameters(); // loop through the parameters for (auto& parlist : rvalues){ LauParameterPList params = parlist->getPars(); for (auto& par : params){ // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !par->clone() && ( !par->fixed() || (this->twoStageFit() && par->secondStage()) ) ) { fitVars.push_back(par); ++nDecayTimePar_; } } } if (usingBkgnd_){ nDecayTimePar_ += this->addParametersToFitList(BkgndDecayTimePdfs_); } if (useSinCos_) { if ( not sinPhiMix_.fixed() ) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } } else { if ( not phiMix_.fixed() ) { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } } void LauTimeDepFitModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setExtraPdfParameters : Setting the initial fit parameters of the extra Pdfs." << std::endl; nExtraPdfPar_ += this->addFitParameters(sigExtraPdf_); if (usingBkgnd_ == kTRUE) { for (auto& pdf : BkgndPdfs_){ nExtraPdfPar_ += this->addFitParameters(pdf); } } } void LauTimeDepFitModel::setFitNEvents() { nNormPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setFitNEvents : Setting the initial fit parameters of the signal and background yields." 
<< std::endl; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for signal and background components..."<fixed() ) { fitVars.push_back(signalEvents_); ++nNormPar_; } } else { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for background components (and hence signal)..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // TODO arguably should delegate this //LauTagCatParamMap& signalTagCatFrac = flavTag_->getSignalTagCatFrac(); // tagging-category fractions for signal events //for (LauTagCatParamMap::iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // if (iter == signalTagCatFrac.begin()) { // continue; // } // LauParameter* par = &((*iter).second); // fitVars.push_back(par); // ++nNormPar_; //} // Backgrounds if (usingBkgnd_ == kTRUE) { for (auto& params : bkgndEvents_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { if (parameter->fixed()){continue;} if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } for (auto& params : bkgndAsym_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { if (parameter->fixed()){continue;} if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } } } void LauTimeDepFitModel::setAsymParams() { nAsymPar_ = 0; LauParameterPList& fitVars = this->fitPars(); if (!AProd_.fixed()){ fitVars.push_back(&AProd_); nAsymPar_+=1; } } void LauTimeDepFitModel::setTagEffParams() { nTagEffPar_ = 0; Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in 
LauTimeDepFitModel::setTagEffParams : Setting the initial fit parameters for flavour tagging efficiencies." << std::endl; if (useAltPars){ std::vector tageff_ave = flavTag_->getTagEffAve(); std::vector tageff_delta = flavTag_->getTagEffDelta(); LauParameterPList& fitVars = this->fitPars(); for(auto& eff : tageff_ave){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(auto& eff : tageff_delta){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } else { std::vector tageff_b0 = flavTag_->getTagEffB0(); std::vector tageff_b0bar = flavTag_->getTagEffB0bar(); LauParameterPList& fitVars = this->fitPars(); for(auto& eff : tageff_b0){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(auto& eff : tageff_b0bar){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } if (usingBkgnd_){ if (useAltPars){ std::vector> tageff_ave = flavTag_->getTagEffBkgndAve(); std::vector> tageff_delta = flavTag_->getTagEffBkgndDelta(); LauParameterPList& fitVars = this->fitPars(); for(auto& innerVec : tageff_ave){ for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } for(auto& innerVec : tageff_delta){ for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } } else { std::vector> tageff_b0 = flavTag_->getTagEffBkgndB0(); std::vector> tageff_b0bar = flavTag_->getTagEffBkgndB0bar(); LauParameterPList& fitVars = this->fitPars(); for(auto& innerVec : tageff_b0){ for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } for(auto& innerVec : tageff_b0bar){ for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } } } 
} void LauTimeDepFitModel::setCalibParams() { Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setCalibParams : Setting the initial fit parameters of the flavour tagging calibration parameters." << std::endl; if (useAltPars){ std::vector p0pars_ave = flavTag_->getCalibP0Ave(); std::vector p0pars_delta = flavTag_->getCalibP0Delta(); std::vector p1pars_ave = flavTag_->getCalibP1Ave(); std::vector p1pars_delta = flavTag_->getCalibP1Delta(); LauParameterPList& fitVars = this->fitPars(); for(auto& p0 : p0pars_ave){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p0 : p0pars_delta){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p1 : p1pars_ave){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(auto& p1 : p1pars_delta){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } else { std::vector p0pars_b0 = flavTag_->getCalibP0B0(); std::vector p0pars_b0bar = flavTag_->getCalibP0B0bar(); std::vector p1pars_b0 = flavTag_->getCalibP1B0(); std::vector p1pars_b0bar = flavTag_->getCalibP1B0bar(); LauParameterPList& fitVars = this->fitPars(); for(auto& p0 : p0pars_b0){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p0 : p0pars_b0bar){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p1 : p1pars_b0){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(auto& p1 : p1pars_b0bar){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } } void LauTimeDepFitModel::setEffiParams() { nEffiPar_ = 0; LauParameterPList& fitVars = this->fitPars(); LauParameterPList& effiPars = signalDecayTimePdf_->getEffiPars(); // If all of the knots are fixed we have nothing to do LauParamFixed isFixed; if ( std::all_of( effiPars.begin(), effiPars.end(), isFixed ) ) { return; } // If any knots are floating, add all knots (fixed or floating) for(auto& par : effiPars){ 
fitVars.push_back(par); ++nEffiPar_; } } void LauTimeDepFitModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 and B0bar fit fractions for each signal component fitFracB0bar_ = sigModelB0bar_->getFitFractions(); if (fitFracB0bar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue(); meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar); extraVars.push_back(meanEffB0bar_); Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue(); meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0); extraVars.push_back(meanEffB0_); // Also add in the DP rates Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue(); DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar); extraVars.push_back(DPRateB0bar_); Double_t initDPRateB0 = 
sigModelB0_->getDPRate().initValue(); DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0); extraVars.push_back(DPRateB0_); } void LauTimeDepFitModel::setAsymmetries(const Double_t AProd, const Bool_t AProdFix){ AProd_.value(AProd); AProd_.fixed(AProdFix); } void LauTimeDepFitModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. // Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters signalDecayTimePdf_->updatePulls(); // and for backgrounds if required if (usingBkgnd_){ for (auto& pdf : BkgndDecayTimePdfs_){ pdf->updatePulls(); } } if (useSinCos_) { if ( not sinPhiMix_.fixed() ) { sinPhiMix_.updatePull(); cosPhiMix_.updatePull(); } } else { this->checkMixingPhase(); } if (usingBkgnd_ == kTRUE) { for (auto& params : bkgndEvents_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } for (auto& params : bkgndAsym_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } } // Update the pulls on all the extra PDFs' parameters this->updateFitParameters(sigExtraPdf_); if (usingBkgnd_ == 
kTRUE) { for (auto& pdf : BkgndPdfs_){ this->updateFitParameters(pdf); } } // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo(); sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo(); LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions(); if (fitFracB0bar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. 
fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepFitModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B0bar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output<<"B0bar FitFraction for component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout<<"\\hline"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_.value()); fout << "$ & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << 
"$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(sigExtraPdf_, fout); if (usingBkgnd_ == kTRUE && !BkgndPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl; for (auto& pdf : BkgndPdfs_){ this->printFitParameters(pdf, fout); } } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepFitModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! std::cout<<"INFO in LauTimeDepFitModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepFitModel::LauGenInfo LauTimeDepFitModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } //TOD sigAysm doesn't do anything here? 
Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); //TODO fill in here if we care } else { Double_t rateB0bar = sigModelB0bar_->getDPRate().value(); Double_t rateB0 = sigModelB0_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } //for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // const LauParameter& par = iter->second; // Double_t eventsbyTagCat = par.value() * nEvts; // if (this->doPoissonSmearing()) { // eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); // } // eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); //} //nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. nEvtsGen["signal"] = std::make_pair( nEvts, evtWeight ); } std::cout<<"INFO in LauTimeDepFitModel::eventsToGenerate : Generating toy MC with:"<bkgndClassName(bkgndID)<<" background events = "<genValue()<eventsToGenerate(); Bool_t genOK(kTRUE); Int_t evtNum(0); const UInt_t nBkgnds = this->nBkgndClasses(); std::vector bkgndClassNames(nBkgnds); std::vector bkgndClassNamesGen(nBkgnds); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); bkgndClassNames[iBkgnd] = name; bkgndClassNamesGen[iBkgnd] = "gen"+name; } // Loop over the hypotheses and generate the appropriate number of // events for each one for (auto& hypo : nEvts){ // find the category of events (e.g. 
signal) const TString& evtCategory(hypo.first); // Type const TString& type(hypo.first); // Number of events Int_t nEvtsGen( hypo.second.first ); // get the event weight for this category const Double_t evtWeight( hypo.second.second ); for (Int_t iEvt(0); iEvtsetGenNtupleDoubleBranchValue( "evtWeight", evtWeight ); if (evtCategory == "signal") { this->setGenNtupleIntegerBranchValue("genSig",1); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 ); } // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { this->setGenNtupleIntegerBranchValue("genSig",0); UInt_t bkgndID(0); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { Int_t gen(0); if ( bkgndClassNames[iBkgnd] == type ) { gen = 1; bkgndID = iBkgnd; } this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen ); } genOK = this->generateBkgndEvent(bkgndID); } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. 
break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); const TString& trueTagVarName { flavTag_->getTrueTagVarName() }; if ( trueTagVarName != "" ) { this->setGenNtupleIntegerBranchValue(trueTagVarName, curEvtTrueTagFlv_); } if ( cpEigenValue_ == QFS ) { const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() }; if ( decayFlvVarName != "" ) { this->setGenNtupleIntegerBranchValue(decayFlvVarName, curEvtDecayFlv_); } } const std::vector& tagVarNames { flavTag_->getTagVarNames() }; const std::vector& mistagVarNames { flavTag_->getMistagVarNames() }; // Loop over the taggers - values set via generateSignalEvent const ULong_t nTaggers {flavTag_->getNTaggers()}; for (ULong_t i=0; isetGenNtupleIntegerBranchValue(tagVarNames[i], curEvtTagFlv_[i]); this->setGenNtupleDoubleBranchValue(mistagVarNames[i], curEvtMistag_[i]); } // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFitModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_->checkToyMC(kTRUE); sigModelB0_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); 
DPRateB0_.value(sigModelB0_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events if (reuseSignal_ || !genOK) { if (signalTree_) { signalTree_->clearUsedList(); } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = bkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } return genOK; } Bool_t LauTimeDepFitModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_->squareDP(); doSquareDP &= kinematicsB0_->squareDP(); LauKinematics* kinematics(kinematicsB0bar_); if (this->useDP()) { if (signalTree_) { signalTree_->getEmbeddedEvent(kinematics); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); if (signalTree_->haveBranch("mcMatch")) { Int_t match = TMath::Nint(signalTree_->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // Now generate from the combined DP / decay-time PDF while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown; curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else 
{ curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the DP position Double_t m13Sq{0.0}, m23Sq{0.0}; kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A and Abar for the given DP position sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq); // Generate decay time const Double_t tMin = signalDecayTimePdf_->minAbscissa(); const Double_t tMax = signalDecayTimePdf_->maxAbscissa(); - curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin; + curEvtDecayTime_ = LauRandom::randomFun()->Uniform(tMin,tMax); // Generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = signalDecayTimePdf_->generateError(kTRUE); // Calculate all the decay time info signalDecayTimePdf_->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar { sigModelB0bar_->getEvtDPAmp() }; const LauComplex& A { sigModelB0_->getEvtDPAmp() }; const Double_t ASq { A.abs2() }; const Double_t AbarSq { Abar.abs2() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // Also retrieve all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // and the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; if ( cpEigenValue_ == QFS) { // Calculate the total intensities for each flavour-specific final state const Double_t ATotSq { ( ASq * dtCosh + curEvtTrueTagFlv_ * ASq * dtCos ) * dpEff * dtEff }; const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dpEff * dtEff }; const Double_t ASumSq { ATotSq + AbarTotSq }; // Finally we throw the dice to see whether this event should be generated (and, if so, which final state) 
const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ASumSq / aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASumSq > aSqMaxVar_) {aSqMaxVar_ = ASumSq;} if ( randNum <= ATotSq / aSqMaxSet_ ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } else { // Calculate the DP terms const Double_t aSqSum { ASq + AbarSq }; const Double_t aSqDif { ASq - AbarSq }; const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; const Double_t interTermIm { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.im() : -2.0 * inter.im() }; const Double_t interTermRe { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.re() : -2.0 * inter.re() }; // Combine DP and decay-time info for all terms const Double_t coshTerm { aSqSum * dtCosh }; const Double_t sinhTerm { interTermRe * dtSinh }; const Double_t cosTerm { aSqDif * dtCos }; const Double_t sinTerm { interTermIm * dtSin }; // Sum to obtain the total and multiply by the efficiency // Multiplying the cos and sin terms by the true flavour at production const Double_t ATotSq { ( coshTerm + sinhTerm + curEvtTrueTagFlv_ * ( cosTerm - sinTerm ) ) * dpEff * dtEff }; //Finally we throw the dice to see whether this event should be generated const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ATotSq/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ATotSq > aSqMaxVar_) {aSqMaxVar_ = ATotSq;} // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } 
else { nGenLoop_++; } } } // end of while !generatedEvent loop } // end of if (signalTree_) else control } else { if ( signalTree_ ) { signalTree_->getEmbeddedEvent(0); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); } } // Check whether we have generated the toy MC OK. if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() ); this->generateExtraPdfValues(sigExtraPdf_, signalTree_); } // Check for problems with the embedding if (signalTree_ && (signalTree_->nEvents() == signalTree_->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } Bool_t LauTimeDepFitModel::generateBkgndEvent([[maybe_unused]] UInt_t bkgndID) { // Generate Bkgnd event - Bool_t genOK(kTRUE); - - //TODO DecayTime part? - //TODO Flavour tagging part? LauFlavTag::generateBkgndEventInfo() + Bool_t genOK{kTRUE}; + //TODO restore the ability to embed events from an external source //LauAbsBkgndDPModel* model(0); //LauEmbeddedData* embeddedData(0); //LauPdfList* extraPdfs(0); //LauKinematics* kinematics(0); //model = BkgndDPModels_[bkgndID]; //if (this->enableEmbedding()) { // // find the right embedded data for the current tagging category // LauTagCatEmbDataMap::const_iterator emb_iter = bkgndTree_[bkgndID].find(curEvtTagCat_); // embeddedData = (emb_iter != bkgndTree_[bkgndID].end()) ? 
emb_iter->second : 0; //} //extraPdfs = &BkgndPdfs_[bkgndID]; //kinematics = kinematicsB0bar_; //if (this->useDP()) { // if (embeddedData) { // embeddedData->getEmbeddedEvent(kinematics); // } else { // if (model == 0) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "ERROR in LauCPFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << bkgndClass << "\"." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // genOK = model->generate(); // } //} else { // if (embeddedData) { // embeddedData->getEmbeddedEvent(0); // } //} //if (genOK) { // this->generateExtraPdfValues(extraPdfs, embeddedData); //} //// Check for problems with the embedding //if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "WARNING in LauCPFitModel::generateBkgndEvent : Source of embedded " << bkgndClass << " events used up, clearing the list of used events." 
<< std::endl; // embeddedData->clearUsedList(); //} // - //TODO Placeholder to allow gen to run for examples - if( BkgndTypes_[bkgndID]!=LauFlavTag::Combinatorial ) - { - std::cout << "WARNING in LauTimeDepFitModel::generateBkgndEvent : Non-combinatorial cases aren't being dealt with right now, returning without generation" << std::endl; - return kFALSE; - } - Double_t random = LauRandom::randomFun()->Rndm(); - if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { - curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; - } else { - curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; - } + switch ( BkgndTypes_[bkgndID] ) { + case LauFlavTag::Combinatorial: + { - //generate decay time and dt error - curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); - //This only works for Hist cases - if (BkgndTypes_[bkgndID]!=LauFlavTag::Combinatorial){ - curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematicsB0_ ); - } else { - // Generate decay time for signal-like modes with dt independent of the DP position - } + // This doesn't really mean anything for combinatorial background + // TODO But maybe we need some sort of asymmetry parameter here? + Double_t random = LauRandom::randomFun()->Rndm(); + if ( random <= 0.5 ) { + curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; + } else { + curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; + } - if (BkgndTypes_[bkgndID]==LauFlavTag::Combinatorial){ - flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_ , curEvtDecayTime_, bkgndID); - } else if (BkgndTypes_[bkgndID]==LauFlavTag::FlavourSpecific){ - flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); - } - curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); - curEvtMistag_ = flavTag_->getCurEvtMistag(); + // generate the true decay flavour - again this doesn't make much sense for combinatorial so we just flip a coin + // TODO - we maybe need an asymmetry parameter here as well? 
+ if ( cpEigenValue_ == CPEigenvalue::QFS ) { + if (random <= 0.5 ) { + curEvtDecayFlv_ = LauFlavTag::Flavour::B; + } else { + curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; + } + } + + // generate the DP position + BkgndDPModelsB_[bkgndID]->generate(); - //TODO make all this work depending on B flavour later - //generate a DP point - BkgndDPModelsB_[bkgndID]->generate(); + // generate decay time and its error + curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); + curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematicsB0_ ); - //TODO this only works for comb. - if( cpEigenValue_ == CPEigenvalue::QFS ){curEvtDecayFlv_ = LauFlavTag::Flavour::B;} + // generate the flavour tagging response + flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_, bkgndID ); + curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); + curEvtMistag_ = flavTag_->getCurEvtMistag(); + + break; + } + + case LauFlavTag::FlavourSpecific: + { + const LauDecayTimePdf::FuncType dtType { BkgndDecayTimePdfs_[bkgndID]->getFuncType() }; + if ( dtType == LauDecayTimePdf::FuncType::ExpTrig or dtType == LauDecayTimePdf::FuncType::ExpHypTrig ) { + nGenLoop_ = 0; + Bool_t generatedEvent{kFALSE}; + do { + curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown; + curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; + + // First choose the true tag, accounting for the production asymmetry + // CONVENTION WARNING regarding meaning of sign of AProd + // TODO - we should have different AProd's for each background category since they can be from different b-hadron species + Double_t random = LauRandom::randomFun()->Rndm(); + if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { + curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; + } else { + curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; + } + + // Generate the DP position + Double_t m13Sq{0.0}, m23Sq{0.0}; + kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); + + // Next, calculate the total A^2 and Abar^2 for the given DP position + 
BkgndDPModelsB_[bkgndID]->calcLikelihoodInfo(m13Sq, m23Sq); + BkgndDPModelsBbar_[bkgndID]->calcLikelihoodInfo(m13Sq, m23Sq); + + // Generate decay time + const Double_t tMin = BkgndDecayTimePdfs_[bkgndID]->minAbscissa(); + const Double_t tMax = BkgndDecayTimePdfs_[bkgndID]->maxAbscissa(); + curEvtDecayTime_ = LauRandom::randomFun()->Uniform(tMin,tMax); + + // Generate the decay time error (NB the kTRUE forces the generation of a new value) + curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); + + // Calculate all the decay time info + BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); + + + // Retrieve the DP intensities + const Double_t ASq { BkgndDPModelsB_[bkgndID]->getUnNormValue() }; + const Double_t AbarSq { BkgndDPModelsBbar_[bkgndID]->getUnNormValue() }; + + // Also retrieve all the decay time terms + const Double_t dtCos { BkgndDecayTimePdfs_[bkgndID]->getCosTerm() }; + const Double_t dtCosh { BkgndDecayTimePdfs_[bkgndID]->getCoshTerm() }; + // and the decay time acceptance + const Double_t dtEff { BkgndDecayTimePdfs_[bkgndID]->getEffiTerm() }; + + if ( cpEigenValue_ == QFS) { + // Calculate the total intensities for each flavour-specific final state + const Double_t ATotSq { ( ASq * dtCosh + curEvtTrueTagFlv_ * ASq * dtCos ) * dtEff }; + const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dtEff }; + const Double_t ASumSq { ATotSq + AbarTotSq }; + + // Finally we throw the dice to see whether this event should be generated (and, if so, which final state) + const Double_t randNum = LauRandom::randomFun()->Rndm(); + if (randNum <= ASumSq / aSqMaxSet_ ) { + generatedEvent = kTRUE; + nGenLoop_ = 0; + if (ASumSq > aSqMaxVar_) {aSqMaxVar_ = ASumSq;} + + if ( randNum <= ATotSq / aSqMaxSet_ ) { + curEvtDecayFlv_ = LauFlavTag::Flavour::B; + } else { + curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; + } + + // Generate the flavour tagging information from the true tag + // (we 
do this after accepting the event to save time) + flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); + curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); + curEvtMistag_ = flavTag_->getCurEvtMistag(); + } else { + nGenLoop_++; + } + } else { + // Calculate the DP terms + const Double_t aSqSum { ASq + AbarSq }; + const Double_t aSqDif { ASq - AbarSq }; + + // Combine DP and decay-time info for all terms + const Double_t coshTerm { aSqSum * dtCosh }; + const Double_t cosTerm { aSqDif * dtCos }; + + // Sum to obtain the total and multiply by the efficiency + // Multiplying the cos term by the true flavour at production + const Double_t ATotSq { ( coshTerm + curEvtTrueTagFlv_ * cosTerm ) * dtEff }; + + // Finally we throw the dice to see whether this event should be generated + const Double_t randNum = LauRandom::randomFun()->Rndm(); + if (randNum <= ATotSq/aSqMaxSet_ ) { + generatedEvent = kTRUE; + nGenLoop_ = 0; + if (ATotSq > aSqMaxVar_) {aSqMaxVar_ = ATotSq;} + + // Generate the flavour tagging information from the true tag + // (we do this after accepting the event to save time) + flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); + curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); + curEvtMistag_ = flavTag_->getCurEvtMistag(); + } else { + nGenLoop_++; + } + } + + } while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_); + } else { + // Hist, Delta, Exp, DeltaExp decay-time types + + // First choose the true tag, accounting for the production asymmetry + // CONVENTION WARNING regarding meaning of sign of AProd + // TODO - we should have different AProd's for each background category since they can be from different b-hadron species + Double_t random = LauRandom::randomFun()->Rndm(); + if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { + curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; + } else { + curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; + } + + // Since there are no oscillations for these decay-time types, + // the true decay 
flavour must be equal to the true tag flavour + curEvtDecayFlv_ = curEvtTrueTagFlv_; + + // generate the DP position + if ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) { + BkgndDPModelsB_[bkgndID]->generate(); + } else { + BkgndDPModelsBbar_[bkgndID]->generate(); + } + + // generate decay time and its error + const LauKinematics* kinematics { ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) ? kinematicsB0_ : kinematicsB0bar_ }; + curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); + curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematics ); + + // generate the flavour tagging response + flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_, bkgndID ); + curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); + curEvtMistag_ = flavTag_->getCurEvtMistag(); + } + + break; + } + + case LauFlavTag::SelfConjugate: + break; + + case LauFlavTag::NonSelfConjugate: + break; + } return genOK; } void LauTimeDepFitModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleIntegerBranch("cpEigenvalue"); const TString& trueTagVarName { flavTag_->getTrueTagVarName() }; if ( trueTagVarName != "" ) { this->addGenNtupleIntegerBranch(trueTagVarName); } if ( cpEigenValue_ == QFS ) { const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() }; if ( decayFlvVarName != "" ) { this->addGenNtupleIntegerBranch(decayFlvVarName); } } const std::vector& tagVarNames { flavTag_->getTagVarNames() }; const std::vector& mistagVarNames { flavTag_->getMistagVarNames() }; const ULong_t nTaggers {flavTag_->getNTaggers()}; for (ULong_t position{0}; positionaddGenNtupleIntegerBranch(tagVarNames[position]); this->addGenNtupleDoubleBranch(mistagVarNames[position]); } if (this->useDP() == kTRUE) { // Let's add the decay time variables. 
this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varName()); this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varErrName()); this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { this->addGenNtupleDoubleBranch("reB0Amp"); this->addGenNtupleDoubleBranch("imB0Amp"); this->addGenNtupleDoubleBranch("reB0barAmp"); this->addGenNtupleDoubleBranch("imB0barAmp"); } } // Let's look at the extra variables for signal in one of the tagging categories for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { const std::vector varNames{ pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { this->addGenNtupleDoubleBranch( varName ); } } } } void LauTimeDepFitModel::setDPDtBranchValues() { // Store the decay time variables. 
this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varName(),curEvtDecayTime_); this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varErrName(),curEvtDecayTimeErr_); // CONVENTION WARNING // TODO check - for now use B0 for any tags //LauKinematics* kinematics(0); //if (curEvtTagFlv_[position]<0) { LauKinematics* kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar = sigModelB0bar_->getEvtDPAmp(); LauComplex A = sigModelB0_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0Amp", A.re()); this->setGenNtupleDoubleBranchValue("imB0Amp", A.im()); this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re()); this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0); 
this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0); } } } void LauTimeDepFitModel::generateExtraPdfValues(LauPdfList& extraPdfs, LauEmbeddedData* embeddedData) { // CONVENTION WARNING LauKinematics* kinematics = kinematicsB0_; //LauKinematics* kinematics(0); //if (curEvtTagFlv_<0) { // kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Generate from the extra PDFs for (auto& pdf : extraPdfs){ LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( pdf->varNames() ); } else { genValues = pdf->generate(kinematics); } for (auto& var : genValues){ TString varName = var.first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var.second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } void LauTimeDepFitModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0_->updateCoeffs(coeffsB0_); this->calcInterTermNorm(); } // Update the decay time normalisation if ( signalDecayTimePdf_ ) { signalDecayTimePdf_->propagateParUpdates(); } // TODO // - maybe also need to add an update of the background decay time PDFs here // Update the signal events from the background numbers if not doing an extended fit // And update the tagging category fractions this->updateSigEvents(); } void LauTimeDepFitModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. 
if (!this->doEMLFit()) { Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); - for (auto& nBkgndEvents : bkgndEvents_){ + for (auto& nBkgndEvents : bkgndEvents_){ if ( nBkgndEvents->isLValue() ) { LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*nTotEvts,2.0*nTotEvts); } } // Subtract background events (if any) from signal. if (usingBkgnd_ == kTRUE) { for (auto& nBkgndEvents : bkgndEvents_){ signalEvents -= nBkgndEvents->value(); } } if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } } void LauTimeDepFitModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); evtCPEigenVals_.clear(); const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else if ( cpEV == 0 ) { cpEigenValue_ = QFS; } else { std::cerr<<"WARNING in LauTimeDepFitModel::cacheInputFitVars : Unknown value: "<useDP() == kTRUE) { // DecayTime and SigmaDecayTime signalDecayTimePdf_->cacheInfo(*inputFitData); // cache all the backgrounds too for(auto& bg : BkgndDecayTimePdfs_) {bg->cacheInfo(*inputFitData);} } // Flavour tagging information 
flavTag_->cacheInputFitVars(inputFitData,signalDecayTimePdf_->varName()); // ...and then the extra PDFs if (not sigExtraPdf_.empty()){ this->cacheInfo(sigExtraPdf_, *inputFitData); } if(usingBkgnd_ == kTRUE){ for (auto& pdf : BkgndPdfs_){ this->cacheInfo(pdf, *inputFitData); } } if (this->useDP() == kTRUE) { sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); if (usingBkgnd_ == kTRUE) { for (auto& model : BkgndDPModelsB_){ model->fillDataTree(*inputFitData); } for (auto& model : BkgndDPModelsBbar_){ if (model != nullptr) { model->fillDataTree(*inputFitData); } } } } } Double_t LauTimeDepFitModel::getTotEvtLikelihood(const UInt_t iEvt) { // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds) this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds) this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); // TODO - consider what to do here - do we even want the option not to use the DP in this model? 
//if ( not this->useDP() ) { //signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); //} // Construct the total event likelihood Double_t likelihood { sigLike * signalEvents }; if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { // TODO // for combinatorial background (and perhaps others) this factor 0.5 needs to be here // to balance the factor 2 in the signal normalisation that arises from the sum over // tag decisions and integral over eta // for other (more signal-like) backgrounds where we need to think about things depending // on the tag decision and where there may be asymmetries as well this will (probably) arise naturally const Double_t bkgndEvents { 0.5 * bkgndEvents_[bkgndID]->unblindValue() }; likelihood += bkgndEvents*bkgndDPLike_[bkgndID]*bkgndExtraLike_[bkgndID]; } } return likelihood; } Double_t LauTimeDepFitModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); if (usingBkgnd_) { for ( const auto& yieldPar : bkgndEvents_ ) { eventSum += yieldPar->unblindValue(); } } return eventSum; } void LauTimeDepFitModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. if ( ! this->useDP() ) { // There's always going to be a term in the likelihood for the // signal, so we'd better not zero it. 
sigDPLike_ = 1.0; const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = 1.0; } else { bkgndDPLike_[bkgndID] = 0.0; } } return; } // Calculate event quantities // Get the DP dynamics, decay time, and flavour tagging to calculate // everything required for the likelihood calculation sigModelB0bar_->calcLikelihoodInfo(iEvt); sigModelB0_->calcLikelihoodInfo(iEvt); signalDecayTimePdf_->calcLikelihoodInfo(iEvt); flavTag_->updateEventInfo(iEvt); // Retrieve the amplitudes and efficiency from the dynamics LauComplex Abar { sigModelB0bar_->getEvtDPAmp() }; LauComplex A { sigModelB0_->getEvtDPAmp() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // If this is a QFS decay, one of the DP amplitudes needs to be zeroed if (cpEigenValue_ == QFS){ curEvtDecayFlv_ = flavTag_->getCurEvtDecayFlv(); if ( curEvtDecayFlv_ == +1 ) { Abar.zero(); } else if ( curEvtDecayFlv_ == -1 ) { A.zero(); } } // Next calculate the DP terms const Double_t aSqSum { A.abs2() + Abar.abs2() }; const Double_t aSqDif { A.abs2() - Abar.abs2() }; Double_t interTermRe { 0.0 }; Double_t interTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; if ( cpEigenValue_ == CPEven ) { interTermIm = 2.0 * inter.im(); interTermRe = 2.0 * inter.re(); } else { interTermIm = -2.0 * inter.im(); interTermRe = -2.0 * inter.re(); } } // First get all the decay time terms // TODO Backgrounds // Get the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; // Get all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // Get the decay time error term const Double_t dtErrLike { signalDecayTimePdf_->getErrTerm() }; // Get flavour 
tagging terms Double_t omega{1.0}; Double_t omegabar{1.0}; const ULong_t nTaggers { flavTag_->getNTaggers() }; for (ULong_t position{0}; positiongetCapitalOmega(position, LauFlavTag::Flavour::B); omegabar *= flavTag_->getCapitalOmega(position, LauFlavTag::Flavour::Bbar); } const Double_t prodAsym { AProd_.unblindValue() }; const Double_t ftOmegaHyp { ((1.0 - prodAsym)*omega + (1.0 + prodAsym)*omegabar) }; const Double_t ftOmegaTrig { ((1.0 - prodAsym)*omega - (1.0 + prodAsym)*omegabar) }; const Double_t coshTerm { ftOmegaHyp * dtCosh * aSqSum }; const Double_t sinhTerm { ftOmegaHyp * dtSinh * interTermRe }; const Double_t cosTerm { ftOmegaTrig * dtCos * aSqDif }; const Double_t sinTerm { ftOmegaTrig * dtSin * interTermIm }; // Combine all terms to get the total amplitude squared const Double_t ASq { coshTerm + sinhTerm + cosTerm - sinTerm }; // Calculate the DP and time normalisation const Double_t normASqSum { sigModelB0_->getDPNorm() + sigModelB0bar_->getDPNorm() }; const Double_t normASqDiff { sigModelB0_->getDPNorm() - sigModelB0bar_->getDPNorm() }; Double_t normInterTermRe { 0.0 }; Double_t normInterTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { // TODO - double check this sign flipping here (it's presumably right but...) normInterTermRe = ( cpEigenValue_ == CPOdd ) ? -1.0 * interTermReNorm_ : interTermReNorm_; normInterTermIm = ( cpEigenValue_ == CPOdd ) ? 
-1.0 * interTermImNorm_ : interTermImNorm_; } const Double_t normCoshTerm { signalDecayTimePdf_->getNormTermCosh() }; const Double_t normSinhTerm { signalDecayTimePdf_->getNormTermSinh() }; const Double_t normCosTerm { signalDecayTimePdf_->getNormTermCos() }; const Double_t normSinTerm { signalDecayTimePdf_->getNormTermSin() }; const Double_t normHyp { normASqSum * normCoshTerm + normInterTermRe * normSinhTerm }; const Double_t normTrig { - prodAsym * ( normASqDiff * normCosTerm + normInterTermIm * normSinTerm ) }; // Combine all terms to get the total normalisation const Double_t norm { 2.0 * ( normHyp + normTrig ) }; // Multiply the squared-amplitude by the efficiency (DP and decay time) and decay-time error likelihood // and normalise to obtain the signal likelihood sigDPLike_ = ( ASq * dpEff * dtEff * dtErrLike ) / norm; // Background part // TODO add them into the actual Likelihood calculatiions // TODO sort out B and Bbar backgrounds for the DP here // TODO need to include the flavour tagging parts here as well (per tagger and per background source) and will vary by Bkgnd type as well // TODO add new function as getEvtBkgndLikelihoods? // TODO normalisation? const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(iEvt); // If Bbar DP Model is a nullptr then only consider B DP Model if (BkgndDPModelsBbar_[bkgndID]==nullptr){ bkgndDPLike_[bkgndID] = BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt); } else { bkgndDPLike_[bkgndID] = 0.5*(BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt) + BkgndDPModelsBbar_[bkgndID]->getLikelihood(iEvt)); } Double_t omegaBkgnd{1.0}; Double_t omegabarBkgnd{1.0}; // Consider background type if (BkgndTypes_[bkgndID] == LauFlavTag::Combinatorial){ // For Histogram Dt Pdfs + // TODO - any other decay time function types that make sense for combinatorial? 
+ // - if so, maybe convert this to a switch with a default case to catch the types that don't make sense? if (BkgndDecayTimePdfs_[bkgndID]->getFuncType() == LauDecayTimePdf::Hist){ bkgndDPLike_[bkgndID]*= BkgndDecayTimePdfs_[bkgndID]->getHistTerm(); } else { bkgndDPLike_[bkgndID]*= BkgndDecayTimePdfs_[bkgndID]->getExpTerm()/BkgndDecayTimePdfs_[bkgndID]->getNormTermExp(); } // For flavour tagging for (ULong_t position{0}; positiongetCapitalOmegaBkgnd(position, LauFlavTag::Flavour::B, bkgndID); } bkgndDPLike_[bkgndID] *= omegaBkgnd; // TODO Add other bkg types } else if (BkgndTypes_[bkgndID] == LauFlavTag::FlavourSpecific){ // Get all the decay time terms const Double_t dtExpBkgnd { BkgndDecayTimePdfs_[bkgndID]->getExpTerm() }; const Double_t dtCosBkgnd { BkgndDecayTimePdfs_[bkgndID]->getCosTerm() }; const Double_t dtSinBkgnd { BkgndDecayTimePdfs_[bkgndID]->getSinTerm() }; const Double_t dtCoshBkgnd { BkgndDecayTimePdfs_[bkgndID]->getCoshTerm() }; const Double_t dtSinhBkgnd { BkgndDecayTimePdfs_[bkgndID]->getSinhTerm() }; // Get all norm terms const Double_t normExpTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermExp() }; const Double_t normCoshTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermCosh() }; const Double_t normSinhTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermSinh() }; const Double_t normCosTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermCos() }; const Double_t normSinTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermSin() }; // Use signal flavour tagging for (ULong_t position{0}; positiongetCapitalOmega(position, LauFlavTag::Flavour::B); omegabarBkgnd *= flavTag_->getCapitalOmega(position, LauFlavTag::Flavour::Bbar); } // Depends on the background source? Or use the signal one? 
const Double_t ftOmegaHypBkgnd { ((1.0 - prodAsym)*omegaBkgnd + (1.0 + prodAsym)*omegabarBkgnd) }; const Double_t ftOmegaTrigBkgnd { ((1.0 - prodAsym)*omegaBkgnd - (1.0 + prodAsym)*omegabarBkgnd) }; if (BkgndDecayTimePdfs_[bkgndID]->getFuncType() == LauDecayTimePdf::Exp){ //Exp only background bkgndDPLike_[bkgndID] *= ftOmegaHypBkgnd*dtExpBkgnd/normExpTermBkgnd; } else if (BkgndDecayTimePdfs_[bkgndID]->getFuncType() == LauDecayTimePdf::Hist){ bkgndDPLike_[bkgndID] *= ftOmegaHypBkgnd*BkgndDecayTimePdfs_[bkgndID]->getHistTerm(); } else { //ExpTrig or ExpHypTrig modes //TODO Check normalisation const Double_t coshTermBkgnd { ftOmegaHypBkgnd * dtCoshBkgnd }; const Double_t sinhTermBkgnd { ftOmegaHypBkgnd * dtSinhBkgnd }; const Double_t cosTermBkgnd { ftOmegaTrigBkgnd * dtCosBkgnd }; const Double_t sinTermBkgnd { ftOmegaTrigBkgnd * dtSinBkgnd }; const Double_t normBkgnd {normCoshTermBkgnd + normSinhTermBkgnd + normCosTermBkgnd - normSinTermBkgnd}; bkgndDPLike_[bkgndID] *= (coshTermBkgnd + sinhTermBkgnd + cosTermBkgnd - sinTermBkgnd)/normBkgnd; } } } else { bkgndDPLike_[bkgndID] = 0.0; } } } void LauTimeDepFitModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal if ( not sigExtraPdf_.empty() ) { sigExtraLike_ = this->prodPdfValue( sigExtraPdf_, iEvt ); } // Background const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_) { bkgndExtraLike_[bkgndID] = this->prodPdfValue( BkgndPdfs_[bkgndID], iEvt ); } else { bkgndExtraLike_[bkgndID] = 0.0; } } } //TODO obsolete? 
void LauTimeDepFitModel::getEvtFlavTagLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigFlavTagLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // Loop over taggers const ULong_t nTaggers { flavTag_->getNTaggers() }; for (ULong_t position{0}; positioncalcLikelihoodInfo(iEvt); sigFlavTagLike_ = sigFlavTagPdf_[position]->getLikelihood(); } } if (sigFlavTagLike_<=0){ std::cout<<"INFO in LauTimeDepFitModel::getEvtFlavTagLikelihood : Event with 0 FlavTag Liklihood"<antiparticleCoeff()); coeffsB0_.push_back(coeffPars_[i]->particleCoeff()); } } void LauTimeDepFitModel::checkMixingPhase() { Double_t phase = phiMix_.value(); Double_t genPhase = phiMix_.genValue(); // Check now whether the phase lies in the right range (-pi to pi). Bool_t withinRange(kFALSE); while (withinRange == kFALSE) { if (phase > -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. 
Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepFitModel::embedSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_) { std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Already embedding signal from file."<findBranches(); if (!dataOK) { delete signalTree_; signalTree_ = 0; std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Problem creating data tree for embedding."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); LauEmbeddedData* bkgTree = bkgndTree_[bkgndID]; if (bkgTree) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Already embedding background from a file." << std::endl; return; } bkgTree = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = bkgTree->findBranches(); if (!dataOK) { delete bkgTree; bkgTree = 0; std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Problem creating data tree for embedding." 
<< std::endl; return; } reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble; if (this->enableEmbedding() == kFALSE) { this->enableEmbedding(kTRUE); } } void LauTimeDepFitModel::setupSPlotNtupleBranches() { // add branches for storing the experiment number and the number of // the event within the current experiment this->addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species. this->addSPlotNtupleDoubleBranch("sigTotalLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "TotalLike"; this->addSPlotNtupleDoubleBranch(name); } } // Store the DP likelihoods if (this->useDP()) { this->addSPlotNtupleDoubleBranch("sigDPLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "DPLike"; this->addSPlotNtupleDoubleBranch(name); } } } // Store the likelihoods for each extra PDF this->addSPlotNtupleBranches(sigExtraPdf_, "sig"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); this->addSPlotNtupleBranches(BkgndPdfs_[iBkgnd], bkgndClass); } } } void LauTimeDepFitModel::addSPlotNtupleBranches(const LauPdfList& extraPdfs, const TString& prefix) { // Loop through each of the PDFs for ( const LauAbsPdf* pdf : extraPdfs ) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString name{prefix}; name += pdf->varName(); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars{""}; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { allVars += varName; TString name{prefix}; name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } } TString name{prefix}; name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr<<"WARNING in LauTimeDepFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<calcLikelihoodInfo(iEvt); extraLike = pdf->getLikelihood(); totalLike *= extraLike; // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString name{prefix}; name += pdf->varName(); name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars{""}; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { allVars += varName; TString name{prefix}; name += varName; name += "Like"; const Double_t indivLike = pdf->getLikelihood( varName ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } } TString name{prefix}; name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<useDP()) { nameSet.insert("DP"); } for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { // Loop over the variables involved in each PDF const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { // If they are not DP coordinates then add them if ( varName != "m13Sq" && varName != "m23Sq" ) { nameSet.insert( varName ); } } } return nameSet; } LauSPlot::NumbMap LauTimeDepFitModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (!par->fixed()) { numbMap[bkgndClass] = par->genValue(); if ( ! 
par->isLValue() ) { std::cerr << "WARNING in LauTimeDepFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl; } } } } return numbMap; } LauSPlot::NumbMap LauTimeDepFitModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (par->fixed()) { numbMap[bkgndClass] = par->genValue(); } } } return numbMap; } LauSPlot::TwoDMap LauTimeDepFitModel::twodimPDFs() const { LauSPlot::TwoDMap twodimMap; for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { // Count the number of input variables that are not DP variables UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sig", std::make_pair( varNames[0], varNames[1] ) ) ); } } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); for ( const LauAbsPdf* pdf : BkgndPdfs_[iBkgnd] ) { // Count the number of input variables that are not DP variables UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) ); } } } } return twodimMap; } void LauTimeDepFitModel::storePerEvtLlhds() { std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Storing per-event 
likelihood values..."<fitData(); // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it if (!this->useDP() && this->storeDPEff()) { sigModelB0bar_->initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(sigModelB0bar_); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { // Find out whether we have B0bar or B0 flavTag_->updateEventInfo(iEvt); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); // the DP information this->getEvtDPDtLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { sigModel->calcLikelihoodInfo(iEvt); } this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); } if (this->useDP()) { sigTotalLike_ = sigDPLike_; this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "DPLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]); } } } else { sigTotalLike_ = 1.0; } // the signal PDF values sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigExtraPdf_, "sig", iEvt); // the background PDF values if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); LauPdfList& pdfs = BkgndPdfs_[iBkgnd]; bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(pdfs, bkgndClass, iEvt); } } // the total likelihoods this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += 
"TotalLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]); } } // fill the tree this->fillSPlotNtupleBranches(); } std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Finished storing per-event likelihood values."<