diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc
index 79fd49d..219bab1 100644
--- a/src/LauDecayTimePdf.cc
+++ b/src/LauDecayTimePdf.cc
@@ -1,1645 +1,1645 @@

/*
Copyright 2006 University of Warwick

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/

/*! \file LauDecayTimePdf.cc
    \brief File containing implementation of LauDecayTimePdf class.
*/

#include <algorithm>
#include <array>
#include <complex>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"
#include "TH1.h"

#include "RooMath.h"

#include "Lau1DCubicSpline.hh"
#include "Lau1DHistPdf.hh"
#include "LauConstants.hh"
#include "LauComplex.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitDataTree.hh"
#include "LauParameter.hh"
#include "LauParamFixed.hh"
#include "LauRandom.hh"

ClassImp(LauDecayTimePdf)

LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
        Double_t minAbscissaVal, Double_t maxAbscissaVal,
        Double_t minAbscissaErr, Double_t maxAbscissaErr,
        FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale,
        const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
    varName_(theVarName),
    varErrName_(theVarErrName),
    params_(params),
    smear_(kTRUE),
    minAbscissa_(minAbscissaVal),
    maxAbscissa_(maxAbscissaVal),
    minAbscissaError_(minAbscissaErr),
    maxAbscissaError_(maxAbscissaErr),
    abscissaError_(0.0),
    abscissaErrorGenerated_(kFALSE),
    errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
    errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
    nGauss_(nGauss),
    mean_(nGauss_,nullptr),
    sigma_(nGauss_,nullptr),
    frac_(nGauss_-1,nullptr),
    tau_(nullptr),
    deltaM_(nullptr),
    deltaGamma_(nullptr),
    fracPrompt_(nullptr),
    type_(type),
    method_(method),
    effMethod_(effMethod),
    scaleMeans_(scale),
    scaleWidths_(scale),
    scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or<Bool_t>() ) ),
    expTerm_(0.0),
    cosTerm_(0.0),
    sinTerm_(0.0),
    coshTerm_(0.0),
    sinhTerm_(0.0),
    normTermExp_(0.0),
    normTermCosh_(0.0),
    normTermSinh_(0.0),
    errTerm_(0.0),
    effiTerm_(0.0),
    pdfTerm_(0.0),
    errHist_(nullptr),
    pdfHist_(nullptr),
    effiFun_(nullptr),
    effiHist_(nullptr),
    effiPars_(0),
    //meansFloating_(nGauss_,kTRUE),
    //sigmasFloating_(nGauss_,kTRUE),
    //fracsFloating_(nGauss_-1,kTRUE),
    //meansChanged_(nGauss_,kTRUE),
    //sigmasChanged_(nGauss_,kTRUE),
    //fracsChanged_(nGauss_-1,kTRUE),
    meanVals_(nGauss_,0.0),
    sigmaVals_(nGauss_,0.0),
    fracVals_(nGauss_-1,0.0)
{
}

LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
        Double_t minAbscissaVal, Double_t maxAbscissaVal,
        Double_t minAbscissaErr, Double_t maxAbscissaErr,
        FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths,
        const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
    varName_(theVarName),
    varErrName_(theVarErrName),
    params_(params),
    smear_(kTRUE),
    minAbscissa_(minAbscissaVal),
    maxAbscissa_(maxAbscissaVal),
    minAbscissaError_(minAbscissaErr),
    maxAbscissaError_(maxAbscissaErr),
    abscissaError_(0.0),
    abscissaErrorGenerated_(kFALSE),
    errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
    errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
    nGauss_(nGauss),
    mean_(nGauss_,nullptr),
    sigma_(nGauss_,nullptr),
    frac_(nGauss_-1,nullptr),
    tau_(nullptr),
    deltaM_(nullptr),
    deltaGamma_(nullptr),
    fracPrompt_(nullptr),
    type_(type),
    method_(method),
    effMethod_(effMethod),
    scaleMeans_(scaleMeans),
    scaleWidths_(scaleWidths),
    scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or<Bool_t>() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or<Bool_t>() ) ),
    expTerm_(0.0),
    cosTerm_(0.0),
    sinTerm_(0.0),
    coshTerm_(0.0),
    sinhTerm_(0.0),
    normTermExp_(0.0),
    normTermCosh_(0.0),
    normTermSinh_(0.0),
    errTerm_(0.0),
    effiTerm_(0.0),
    pdfTerm_(0.0),
    errHist_(nullptr),
    pdfHist_(nullptr),
    effiFun_(nullptr),
    effiHist_(nullptr),
    effiPars_(0),
    //meansFloating_(nGauss_,kTRUE),
    //sigmasFloating_(nGauss_,kTRUE),
    //fracsFloating_(nGauss_-1,kTRUE),
    //meansChanged_(nGauss_,kTRUE),
    //sigmasChanged_(nGauss_,kTRUE),
    //fracsChanged_(nGauss_-1,kTRUE),
    meanVals_(nGauss_,0.0),
    sigmaVals_(nGauss_,0.0),
    fracVals_(nGauss_-1,0.0)
{
}

LauDecayTimePdf::~LauDecayTimePdf()
{
    // Destructor
    delete errHist_; errHist_ = nullptr;
    delete pdfHist_; pdfHist_ = nullptr;
    delete effiFun_; effiFun_ = nullptr;
    delete effiHist_; effiHist_ = nullptr;
    for( auto& par : effiPars_ ){ delete par; par = nullptr; }
    effiPars_.clear();
}
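/*
 * Illustrative construction sketch (not part of the class implementation):
 * a single-Gaussian resolution model for an oscillating decay might be set
 * up roughly as follows, where tauPar, deltaMPar, meanPar and sigmaPar are
 * assumed to be LauParameters named "tau", "deltaM", "mean_0" and "sigma_0"
 * (the names that initialise() below looks up) and the numerical ranges are
 * placeholders:
 *
 *   std::vector<LauAbsRValue*> params { tauPar, deltaMPar, meanPar, sigmaPar };
 *   std::vector<Bool_t> scale { kFALSE };
 *   LauDecayTimePdf dtPdf( "decayTime", "decayTimeErr", params,
 *                          0.0, 15.0, 0.0, 0.5,
 *                          LauDecayTimePdf::ExpTrig, 1, scale,
 *                          LauDecayTimePdf::DecayTime,
 *                          LauDecayTimePdf::EfficiencyMethod::Spline );
 */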
void LauDecayTimePdf::initialise()
{
    // The parameters are:
    // - the mean and the sigma (bias and spread in resolution) of the gaussian(s)
    // - the mean lifetime, denoted tau, of the exponential decay
    // - the frequency of oscillation, denoted Delta m, of the cosine and sine terms
    // - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms
    //
    // The next two arguments specify the range in which the PDF is defined,
    // and the PDF will be normalised w.r.t. these limits.
    //
    // The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig), the number of gaussians
    // and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t

    // First check whether the scale vector is nGauss in size
    if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) {
        std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<std::endl;
        gSystem->Exit(EXIT_FAILURE);
    }

    if (type_ == Hist) {
        if (this->nParameters() != 0){
            std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<std::endl;
            gSystem->Exit(EXIT_FAILURE);
        }
    } else {
        TString meanName("mean_");
        TString sigmaName("sigma_");
        TString fracName("frac_");
        Bool_t foundParams(kTRUE);

        for (UInt_t i(0); i<nGauss_; ++i) {
            TString tempName(meanName); tempName += i;
            TString tempName2(sigmaName); tempName2 += i;
            TString tempName3(fracName); tempName3 += i;

            mean_[i] = this->findParameter(tempName);
            foundParams &= (mean_[i] != nullptr);

            sigma_[i] = this->findParameter(tempName2);
            foundParams &= (sigma_[i] != nullptr);

            if (i!=0) {
                frac_[i-1] = this->findParameter(tempName3);
                foundParams &= (frac_[i-1] != nullptr);
            }
        }

        if (type_ == Delta) {
            if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) {
                std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<std::endl;
                std::cerr<<"    - a mean and a sigma for each Gaussian, plus nGauss-1 fractions"<<std::endl;
                gSystem->Exit(EXIT_FAILURE);
            }
        } else if (type_ == Exp) {
            tau_ = this->findParameter("tau");
            foundParams &= (tau_ != nullptr);
            if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) {
                std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<std::endl;
                std::cerr<<"    - a mean and a sigma for each Gaussian, nGauss-1 fractions, plus the lifetime tau"<<std::endl;
                gSystem->Exit(EXIT_FAILURE);
            }
        } else if (type_ == DeltaExp) {
            tau_ = this->findParameter("tau");
            fracPrompt_ = this->findParameter("frac_prompt");
            foundParams &= (tau_ != nullptr);
            foundParams &= (fracPrompt_ != nullptr);
            if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
                std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<std::endl;
                std::cerr<<"    - a mean and a sigma for each Gaussian, nGauss-1 fractions, plus tau and frac_prompt"<<std::endl;
                gSystem->Exit(EXIT_FAILURE);
            }
        } else if (type_ == ExpTrig) {
            tau_ = this->findParameter("tau");
            deltaM_ = this->findParameter("deltaM");
            foundParams &= (tau_ != nullptr);
            foundParams &= (deltaM_ != nullptr);
            if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
                std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<std::endl;
                std::cerr<<"    - a mean and a sigma for each Gaussian, nGauss-1 fractions, plus tau and deltaM"<<std::endl;
                gSystem->Exit(EXIT_FAILURE);
            }
        } else if (type_ == ExpHypTrig) {
            tau_ = this->findParameter("tau");
            deltaM_ = this->findParameter("deltaM");
            deltaGamma_ = this->findParameter("deltaGamma");
            foundParams &= (tau_ != nullptr);
            foundParams &= (deltaM_ != nullptr);
            foundParams &= (deltaGamma_ != nullptr);
            if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) {
                std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<std::endl;
                std::cerr<<"    - a mean and a sigma for each Gaussian, nGauss-1 fractions, plus tau, deltaM and deltaGamma"<<std::endl;
                gSystem->Exit(EXIT_FAILURE);
            }
        }
    }

    // Setup the normalisation caches
    if ( effMethod_ == EfficiencyMethod::Spline ) {
        const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
        if ( not this->doSmearing() ) {
            expTermIkVals_.clear();   expTermIkVals_.resize(nSplineSegments);
            trigTermIkVals_.clear();  trigTermIkVals_.resize(nSplineSegments);
            hypHTermIkVals_.clear();  hypHTermIkVals_.resize(nSplineSegments);
            hypLTermIkVals_.clear();  hypLTermIkVals_.resize(nSplineSegments);
        } else {
            meanPowerVals_.clear();    meanPowerVals_.resize(nGauss_);
            sigmaPowerVals_.clear();   sigmaPowerVals_.resize(nGauss_);

            expTermKvecVals_.clear();  expTermKvecVals_.resize(nGauss_);
            trigTermKvecVals_.clear(); trigTermKvecVals_.resize(nGauss_);
            hypHTermKvecVals_.clear(); hypHTermKvecVals_.resize(nGauss_);
            hypLTermKvecVals_.clear(); hypLTermKvecVals_.resize(nGauss_);

            expTermMvecVals_.clear();  expTermMvecVals_.resize(nSplineSegments);
            for ( auto& innerVec : expTermMvecVals_ ) { innerVec.resize(nGauss_); }
            trigTermMvecVals_.clear(); trigTermMvecVals_.resize(nSplineSegments);
            for ( auto& innerVec : trigTermMvecVals_ ) { innerVec.resize(nGauss_); }
            hypHTermMvecVals_.clear(); hypHTermMvecVals_.resize(nSplineSegments);
            for ( auto& innerVec : hypHTermMvecVals_ ) { innerVec.resize(nGauss_); }
            hypLTermMvecVals_.clear(); hypLTermMvecVals_.resize(nSplineSegments);
            for ( auto& innerVec : hypLTermMvecVals_ ) { innerVec.resize(nGauss_); }
        }
    }

    // Force calculation of all relevant info by faking that all parameter values have changed
    nothingFloating_ = nothingChanged_ = kFALSE;
    anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
    nonKnotFloating_ = nonKnotChanged_ = kTRUE;
    physicsParFloating_ = physicsParChanged_ = kTRUE;
    tauFloating_ = tauChanged_ = ( tau_ != nullptr );
    deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
    deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
    resoParFloating_ = resoParChanged_ = kTRUE;
}

Double_t LauDecayTimePdf::effectiveResolution() const
{
    Double_t dilution = 0.;

    Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue();

    // Might be cleaner to just append this to the vector in the init step,
    // then the consistency can also be checked
    Double_t fracSum = 0;
    for (auto f : frac_) fracSum += f->unblindValue();

    Double_t lastFrac = 1. - fracSum;

    for (size_t i = 0; i < sigma_.size(); i++) {
        Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue();

        Double_t thisFrac = lastFrac;
        if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue();

        dilution += thisFrac * TMath::Exp(-dMSq * 0.5 * sigSq);
    }

    return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue();
}
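// The dilution formula above follows from smearing cos(deltaM * t) with a
// Gaussian of width sigma, which damps the oscillation amplitude by
// exp(-deltaM^2 sigma^2 / 2): summing that damping over the Gaussian
// components gives the dilution D, and the single width that would
// reproduce the same dilution is sigma_eff = sqrt(-2 ln D) / deltaM.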
void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData)
{
    // Check that the input data contains the decay time variable
    Bool_t hasBranch = inputData.haveBranch(this->varName());
    if (!hasBranch) {
        std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<std::endl;
        return;
    }

    // Check that the input data contains the decay time error variable (if needed)
    if ( scaleWithPerEventError_ ) {
        hasBranch = inputData.haveBranch(this->varErrName());
        if (!hasBranch) {
            std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<std::endl;
            return;
        }
    }

    // Pass the data to the decay-time error PDF for caching
    if ( errHist_ ) {
        errHist_->cacheInfo(inputData);
    }

    if (type_ == Hist) {
        // Pass the data to the decay-time PDF for caching
        if ( pdfHist_ ) {
            pdfHist_->cacheInfo(inputData);
        }
    } else {
        // Clear the vectors and reserve enough space in the event-wise caches
        const UInt_t nEvents = inputData.nEvents();
        abscissas_.clear(); abscissas_.resize(nEvents);
        abscissaErrors_.clear(); abscissaErrors_.resize(nEvents);
        expTerms_.clear(); expTerms_.resize(nEvents);
        cosTerms_.clear(); cosTerms_.resize(nEvents);
        sinTerms_.clear(); sinTerms_.resize(nEvents);
        coshTerms_.clear(); coshTerms_.resize(nEvents);
        sinhTerms_.clear(); sinhTerms_.resize(nEvents);
        normTermsExp_.clear(); normTermsExp_.resize(nEvents);
        normTermsCos_.clear(); normTermsCos_.resize(nEvents);
        normTermsSin_.clear(); normTermsSin_.resize(nEvents);
        normTermsCosh_.clear(); normTermsCosh_.resize(nEvents);
        normTermsSinh_.clear(); normTermsSinh_.resize(nEvents);
        effiTerms_.clear(); effiTerms_.resize(nEvents);

        // Determine the abscissa and abscissa error values for each event
        for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {

            const LauFitData& dataValues = inputData.getData(iEvt);

            const Double_t abscissa { dataValues.at(this->varName()) };
            if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
                std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<" is outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
                gSystem->Exit(EXIT_FAILURE);
            }
            abscissas_[iEvt] = abscissa;

            const Double_t abscissaErr { scaleWithPerEventError_ ? dataValues.at(this->varErrName()) : 0.0 };
            if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
                std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<" is outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
                gSystem->Exit(EXIT_FAILURE);
            }
            abscissaErrors_[iEvt] = abscissaErr;
        }

        // Force calculation of all info by faking that all parameter values have changed
        nothingFloating_ = nothingChanged_ = kFALSE;
        anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
        nonKnotFloating_ = nonKnotChanged_ = kTRUE;
        physicsParFloating_ = physicsParChanged_ = kTRUE;
        tauFloating_ = tauChanged_ = ( tau_ != nullptr );
        deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
        deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
        resoParFloating_ = resoParChanged_ = kTRUE;

        // Fill the rest of the cache
        this->updateCache();

        // Set the various "parameter-is-floating" flags, used to bookkeep the cache in propagateParUpdates
        LauParamFixed isFixed;
        nonKnotFloating_ = not std::all_of(params_.begin(), params_.end(), isFixed);
        anyKnotFloating_ = not std::all_of(effiPars_.begin(), effiPars_.end(), isFixed);
        nothingFloating_ = not (nonKnotFloating_ or anyKnotFloating_);

        std::cout << "INFO in LauDecayTimePdf::cacheInfo : nothing floating set to: " << (nothingFloating_ ? "True" : "False") << std::endl;
        std::cout << "INFO in LauDecayTimePdf::cacheInfo : any knot floating set to: " << (anyKnotFloating_ ? "True" : "False") << std::endl;
        std::cout << "INFO in LauDecayTimePdf::cacheInfo : non-knot floating set to: " << (nonKnotFloating_ ? "True" : "False") << std::endl;

        tauFloating_ = tau_ ? not tau_->fixed() : kFALSE;
        deltaMFloating_ = deltaM_ ? not deltaM_->fixed() : kFALSE;
        deltaGammaFloating_ = deltaGamma_ ? not deltaGamma_->fixed() : kFALSE;
        physicsParFloating_ = ( tauFloating_ or deltaMFloating_ or deltaGammaFloating_ );

        std::cout << "INFO in LauDecayTimePdf::cacheInfo : tau floating set to: " << (tauFloating_ ? "True" : "False") << std::endl;
        std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaM floating set to: " << (deltaMFloating_ ? "True" : "False") << std::endl;
        std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaGamma floating set to: " << (deltaGammaFloating_ ? "True" : "False") << std::endl;

        resoParFloating_ = kFALSE;
        for ( UInt_t i{0}; i < nGauss_; ++i ) {
            const Bool_t meanFloating { not mean_[i]->fixed() };
            const Bool_t sigmaFloating { not sigma_[i]->fixed() };
            resoParFloating_ |= (meanFloating or sigmaFloating);

            std::cout << "INFO in LauDecayTimePdf::cacheInfo : mean[" << i << "] floating set to: " << (meanFloating ? "True" : "False") << std::endl;
            std::cout << "INFO in LauDecayTimePdf::cacheInfo : sigma[" << i << "] floating set to: " << (sigmaFloating ? "True" : "False") << std::endl;

            if ( i < (nGauss_ - 1) ) {
                const Bool_t fracFloating { not frac_[i]->fixed() };
                resoParFloating_ |= fracFloating;

                std::cout << "INFO in LauDecayTimePdf::cacheInfo : frac[" << i << "] floating set to: " << (fracFloating ? "True" : "False") << std::endl;
            }
        }
    }
}

void LauDecayTimePdf::updateCache()
{
    // Get the updated values of all parameters
    static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();};
    if ( anyKnotChanged_ ) {
        std::transform( effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), assignValue );
    }
    if ( tauChanged_ ) {
        tauVal_ = tau_->unblindValue();
        gammaVal_ = 1.0 / tauVal_;
    }
    if ( deltaMChanged_ ) {
        deltaMVal_ = deltaM_->unblindValue();
    }
    if ( deltaGammaChanged_ ) {
        deltaGammaVal_ = deltaGamma_->unblindValue();
    }
    if ( resoParChanged_ ) {
        std::transform( mean_.begin(), mean_.end(), meanVals_.begin(), assignValue );
        std::transform( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), assignValue );
        std::transform( frac_.begin(), frac_.end(), fracVals_.begin(), assignValue );
    }

    // If we're not using per-event information for the decay time
    // error, just calculate the normalisation terms once
    if ( ! scaleWithPerEventError_ ) {
        this->calcNorm();
    }

    const UInt_t nEvents = abscissas_.size();
    for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {

        const Double_t abscissa = abscissas_[iEvt];
        const Double_t abscissaErr = abscissaErrors_[iEvt];

        // If none of the physics or resolution parameters have changed
        // we only need to recalculate the efficiency, otherwise we
        // need to also update the other terms
        if ( not nonKnotChanged_ ) {
            effiTerm_ = this->calcEffiTerm( abscissa );
            effiTerms_[iEvt] = effiTerm_;
        } else {
            this->calcLikelihoodInfo(abscissa, abscissaErr);
            expTerms_[iEvt] = expTerm_;
            cosTerms_[iEvt] = cosTerm_;
            sinTerms_[iEvt] = sinTerm_;
            coshTerms_[iEvt] = coshTerm_;
            sinhTerms_[iEvt] = sinhTerm_;
            effiTerms_[iEvt] = effiTerm_;
        }

        // If we are using per-event information for the decay
        // time error, need to calculate the normalisation
        // terms for every event
        if ( scaleWithPerEventError_ ) {
            this->calcNorm(abscissaErr);
        }

        normTermsExp_[iEvt] = normTermExp_;
        normTermsCos_[iEvt] = normTermCos_;
        normTermsSin_[iEvt] = normTermSin_;
        normTermsCosh_[iEvt] = normTermCosh_;
        normTermsSinh_[iEvt] = normTermSinh_;
    }

    // reset the "parameter-has-changed" flags
    anyKnotChanged_ = kFALSE;
    tauChanged_ = kFALSE;
    deltaMChanged_ = kFALSE;
    deltaGammaChanged_ = kFALSE;
    physicsParChanged_ = kFALSE;
    resoParChanged_ = kFALSE;
    nonKnotChanged_ = kFALSE;
}

void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt)
{
    // Extract all the terms and their normalisations
    if (type_ == Hist) {
        if ( pdfHist_ ) {
            pdfHist_->calcLikelihoodInfo(iEvt);
            pdfTerm_ = pdfHist_->getLikelihood();
        } else {
            pdfTerm_ = 1.0;
        }
    } else {
        expTerm_ = expTerms_[iEvt];
        cosTerm_ = cosTerms_[iEvt];
        sinTerm_ = sinTerms_[iEvt];
        coshTerm_ = coshTerms_[iEvt];
        sinhTerm_ = sinhTerms_[iEvt];
        normTermExp_ = normTermsExp_[iEvt];
        normTermCos_ = normTermsCos_[iEvt];
        normTermSin_ = normTermsSin_[iEvt];
        normTermCosh_ = normTermsCosh_[iEvt];
        normTermSinh_ = normTermsSinh_[iEvt];
    }

    // Extract the decay time error PDF value
    if ( errHist_ ) {
        errHist_->calcLikelihoodInfo(iEvt);
        errTerm_ = errHist_->getLikelihood();
    } else {
        errTerm_ = 1.0;
    }

    // Extract the decay time efficiency
    effiTerm_ = effiTerms_[iEvt];
}

void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa)
{
    // Check whether any of the gaussians should be scaled - if any of them should we need the per-event error
    if (scaleWithPerEventError_) {
        std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<<std::endl;
        gSystem->Exit(EXIT_FAILURE);
    }

    this->calcLikelihoodInfo(abscissa, 0.0);
}

Double_t LauDecayTimePdf::calcEffiTerm( const Double_t abscissa ) const
{
    Double_t effiTerm{1.0};

    switch( effMethod_ ) {
        case EfficiencyMethod::Spline : effiTerm = effiFun_ ? effiFun_->evaluate(abscissa) : 1.0; break;
        case EfficiencyMethod::Binned : effiTerm = effiHist_ ? effiHist_->GetBinContent(effiHist_->FindFixBin(abscissa)) : 1.0; break;
        case EfficiencyMethod::Flat   : effiTerm = 1.0; break;
    }

    // Clamp the efficiency to the physical range [0,1]
    if ( effiTerm > 1.0 ) {
        effiTerm = 1.0;
    } else if ( effiTerm < 0.0 ) {
        effiTerm = 0.0;
    }

    return effiTerm;
}

void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr)
{
    // Check that the decay time and the decay time error are in valid ranges
    if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
        std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<" is outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
        gSystem->Exit(EXIT_FAILURE);
    }
    if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
        std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<<abscissaErr<<" is outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
        gSystem->Exit(EXIT_FAILURE);
    }

    // Determine the decay time efficiency
    effiTerm_ = this->calcEffiTerm( abscissa );

    // For the histogram PDF just calculate that term and return
    if (type_ == Hist){
        if ( pdfHist_ ) {
            pdfHist_->calcLikelihoodInfo(abscissa);
            pdfTerm_ = pdfHist_->getLikelihood();
        } else {
            pdfTerm_ = 1.0;
        }
        return;
    }

    // If we're not using the resolution function, calculate the simple terms and return
    if (!this->doSmearing()) {
        this->calcNonSmearedTerms(abscissa);
        return;
    }

    // Get all the up to date parameter values for the resolution function
    std::vector<Double_t> frac(nGauss_);
    std::vector<Double_t> mean(nGauss_);
    std::vector<Double_t> sigma(nGauss_);
    Double_t fracPrompt(0.0);

    // TODO - why do we do the fractions this way around?
    frac[0] = 1.0;
    for (UInt_t i(0); i<nGauss_; ++i) {
        mean[i] = meanVals_[i];
        sigma[i] = sigmaVals_[i];
        if (i != 0) {
            frac[i] = fracVals_[i-1];
            frac[0] -= frac[i];
        }
    }
    if (type_ == DeltaExp) {
        fracPrompt = fracPrompt_->unblindValue();
    }

    // Scale the gaussian parameters by the per-event error on Delta t (if appropriate)
    for (UInt_t i(0); i<nGauss_; ++i) {
        if (scaleMeans_[i]) {
            mean[i] *= abscissaErr;
        }
        if (scaleWidths_[i]) {
            sigma[i] *= abscissaErr;
        }
    }

    // Calculate the gaussian function(s)
    Double_t value(0.0);
    std::vector<Double_t> x(nGauss_);
    const Double_t xMax = this->maxAbscissa();
    const Double_t xMin = this->minAbscissa();

    for (UInt_t i(0); i<nGauss_; ++i) {
        x[i] = abscissa - mean[i];
    }

    if (type_ == Delta || type_ == DeltaExp) {
        for (UInt_t i(0); i<nGauss_; ++i) {
            if (TMath::Abs(sigma[i]) > 1e-10) {
                Double_t exponent(0.0);
                Double_t norm(0.0);
                Double_t scale = LauConstants::root2*sigma[i];
                Double_t scale2 = LauConstants::rootPiBy2*sigma[i];
                exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]);
                norm = scale2*(TMath::Erf((xMax - mean[i])/scale) - TMath::Erf((xMin - mean[i])/scale));
                value += frac[i]*TMath::Exp(exponent)/norm;
            }
        }
    }

    if (type_ != Delta) {

        // Reset values of terms
        expTerm_ = 0.0;
        cosTerm_ = 0.0;
        sinTerm_ = 0.0;
        coshTerm_ = 0.0;
        sinhTerm_ = 0.0;

        // Calculate values of the PDF convoluted with each Gaussian for a given value of the abscissa
        for (UInt_t i(0); i<nGauss_; ++i) {

            const Double_t sigmaOverRoot2 { sigma[i] / LauConstants::root2 };

            const std::complex<Double_t> z { gammaVal_ * sigmaOverRoot2 };
            const Double_t expTerm { this->smearedGeneralTerm( z, abscissa, sigmaOverRoot2, mean[i] ).real() };
            expTerm_ += frac[i] * expTerm;

            // Typical case (1): B0/B0bar
            if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

                const std::complex<Double_t> zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
                const std::complex<Double_t> trigTerm { this->smearedGeneralTerm( zTrig, abscissa, sigmaOverRoot2, mean[i] ) };

                const Double_t cosTerm { trigTerm.real() };
                const Double_t sinTerm { trigTerm.imag() };

                cosTerm_ += frac[i] * cosTerm;
                sinTerm_ += frac[i] * sinTerm;

                if ( type_ == ExpTrig ) {
                    coshTerm_ += frac[i] * expTerm;
                } else {
                    const std::complex<Double_t> zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2 };
                    const std::complex<Double_t> zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2 };

                    const Double_t termH { this->smearedGeneralTerm( zH, abscissa, sigmaOverRoot2, mean[i] ).real() };
                    const Double_t termL { this->smearedGeneralTerm( zL, abscissa, sigmaOverRoot2, mean[i] ).real() };

                    const Double_t coshTerm { 0.5 * (termH + termL) };
                    const Double_t sinhTerm { 0.5 * (termH - termL) };

                    coshTerm_ += frac[i] * coshTerm;
                    sinhTerm_ += frac[i] * sinhTerm;
                }
            }
        }

        if (type_ == DeltaExp) {
            value *= fracPrompt;
            value += (1.0-fracPrompt)*expTerm_;
        } else {
            value = expTerm_;
        }
    }

    // Calculate the decay time error PDF value
    if ( errHist_ ) {
        std::vector<Double_t> absErrVec = {abscissaErr}; // otherwise seg fault
        errHist_->calcLikelihoodInfo(absErrVec);
        errTerm_ = errHist_->getLikelihood();
    } else {
        errTerm_ = 1.0;
    }
}
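// Note on the fraction convention used above (and in calcNorm below): only
// n-1 of the n Gaussian fractions are free parameters; they are assigned to
// Gaussians 1..n-1 and Gaussian 0 takes the remainder,
// frac[0] = 1 - (frac[1] + ... + frac[n-1]), so the fractions sum to unity.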
void LauDecayTimePdf::calcNonSmearedTerms(Double_t abscissa)
{
    // Reset values of terms
    errTerm_ = 1.0;
    expTerm_ = 0.0;
    cosTerm_ = 0.0;
    sinTerm_ = 0.0;
    coshTerm_ = 0.0;
    sinhTerm_ = 0.0;

    if ( type_ == Hist || type_ == Delta ){
        return;
    }

    if (method_ == DecayTime) {
        expTerm_ = TMath::Exp(-abscissa*gammaVal_);
    } else if (method_ == DecayTimeDiff) {
        expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gammaVal_);
    }

    // Calculate also the terms related to cosine and sine
    if (type_ == ExpTrig) {
        coshTerm_ = expTerm_;
        sinhTerm_ = 0.0;
        cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
        sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
    }
    // Calculate also the terms related to cosh, sinh, cosine, and sine
    else if (type_ == ExpHypTrig) {
        coshTerm_ = TMath::CosH(0.5*deltaGammaVal_*abscissa)*expTerm_;
        sinhTerm_ = TMath::SinH(0.5*deltaGammaVal_*abscissa)*expTerm_;
        cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
        sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
    }
}

std::complex<Double_t> LauDecayTimePdf::smearedGeneralTerm( const std::complex<Double_t>& z, const Double_t t, const Double_t sigmaOverRoot2, const Double_t mu )
{
    using namespace std::complex_literals;

    const Double_t xRe { (t - mu) / (2.0 * sigmaOverRoot2) };
    const std::complex<Double_t> x { xRe, 0.0 };

    const std::complex<Double_t> arg1 { 1i * (z - x) };
    const std::complex<Double_t> arg2 { -(x*x) - (arg1*arg1) };

    const std::complex<Double_t> conv { ( arg1.imag() < -5.0 ) ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * std::exp(-(xRe*xRe)) * RooMath::faddeeva(arg1) };

    return conv;
}
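// smearedGeneralTerm evaluates the convolution of exp(-z t') with a Gaussian
// of mean mu and width sigma, where z = Gamma * sigma/sqrt(2) in the pure
// exponential case and picks up an imaginary part -deltaM * sigma/sqrt(2)
// for the oscillating terms. With x = (t - mu)/(sqrt(2) sigma) the result
// can be written equivalently as
//   (1/2) exp(z^2 - 2 z x) erfc(z - x)  =  (1/2) exp(-x^2) w( i (z - x) ),
// where w is the Faddeeva function. The erfc form is used when the
// imaginary part of i(z - x) is large and negative, where evaluating w
// directly would be numerically unstable.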
std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs)
{
    // From 1407.0748, not clear whether complex is faster in this case

    const LauComplex denom { gammaVal_, -deltaMVal_ };
    const LauComplex exponent { -gammaVal_, deltaMVal_ };

    const LauComplex num0 { -exponent.scale(minAbs).exp() };
    const LauComplex num1 { -exponent.scale(maxAbs).exp() };

    const LauComplex integral { (num1 - num0) / denom };

    return {integral.re(), integral.im()};
}

Double_t LauDecayTimePdf::nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs)
{
    return tauVal_ * ( TMath::Exp(-minAbs*gammaVal_) - TMath::Exp(-maxAbs*gammaVal_) );
}

std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs)
{
    // Use exponential formulation rather than cosh, sinh.
    // Fewer terms (reused for each), but not guaranteed to be faster.

    const Double_t gammaH { gammaVal_ - 0.5 * deltaGammaVal_ };
    const Double_t gammaL { gammaVal_ + 0.5 * deltaGammaVal_ };

    const Double_t tauH { 1.0 / gammaH };
    const Double_t tauL { 1.0 / gammaL };

    const Double_t nL1 { -TMath::Exp(-gammaL * maxAbs) * tauL };
    const Double_t nH1 { -TMath::Exp(-gammaH * maxAbs) * tauH };
    const Double_t nL0 { -TMath::Exp(-gammaL * minAbs) * tauL };
    const Double_t nH0 { -TMath::Exp(-gammaH * minAbs) * tauH };

    const Double_t coshIntegral { 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ) };
    const Double_t sinhIntegral { 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ) };

    return {coshIntegral, sinhIntegral};
}

std::complex<Double_t> LauDecayTimePdf::smearedGeneralIntegral(const std::complex<Double_t>& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu)
{
    using namespace std::complex_literals;

    const Double_t x1 { (maxAbs - mu) / (2.0 * sigmaOverRoot2) };
    const Double_t x0 { (minAbs - mu) / (2.0 * sigmaOverRoot2) };

    const std::complex<Double_t> arg1 { 1i * (z - x1) };
    const std::complex<Double_t> arg0 { 1i * (z - x0) };

    std::complex<Double_t> integral = 0.0 + 0i;

    if (arg1.imag() < -5.0) {
        integral = RooMath::erf(x1) - std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);
    } else {
        integral = RooMath::erf(x1) - TMath::Exp(-(x1*x1)) * RooMath::faddeeva(arg1);
    }

    if (arg0.imag() < -5.0) {
        integral -= RooMath::erf(x0) - std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);
    } else {
        integral -= RooMath::erf(x0) - TMath::Exp(-(x0*x0)) * RooMath::faddeeva(arg0);
    }

    integral *= (sigmaOverRoot2 / (2.0 * z));

    return integral;
}
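// The helpers above use the elementary antiderivative
//   Int exp(-(Gamma - i deltaM) t) dt = -exp(-(Gamma - i deltaM) t) / (Gamma - i deltaM),
// whose real and imaginary parts at the range limits give the exp*cos and
// exp*sin integrals; the cosh and sinh integrals are half the sum and half
// the difference of two pure exponentials with rates
// Gamma_H = Gamma - deltaGamma/2 and Gamma_L = Gamma + deltaGamma/2.
// smearedGeneralIntegral is the same construction for the Gaussian-smeared
// case, built from erf and Faddeeva terms as in smearedGeneralTerm.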
void LauDecayTimePdf::calcNorm(const Double_t abscissaErr)
{
    /*
    if ( abscissaErr <= 0. and scaleWithPerEventError_ ) {
        std::cerr << "\033[1;31m IN CALCNORM: \033[0m" << std::endl;
        std::cerr << "\033[1;31m absErr: " << abscissaErr << "\033[0m" << std::endl; //DEBUG
    }
    */

    // first reset integrals to zero
    normTermExp_ = 0.0;
    normTermCos_ = 0.0;
    normTermSin_ = 0.0;
    normTermCosh_ = 0.0;
    normTermSinh_ = 0.0;

    // Get all the up to date parameter values
    std::vector<Double_t> fracs(nGauss_);
    std::vector<Double_t> means(nGauss_);
    std::vector<Double_t> sigmas(nGauss_);

    // TODO - why do we do the fractions this way around?
    fracs[0] = 1.0;
    for (UInt_t i(0); i<nGauss_; ++i) {
        means[i] = meanVals_[i];
        sigmas[i] = sigmaVals_[i];
        if (i != 0) {
            fracs[i] = fracVals_[i-1];
            fracs[0] -= fracs[i];
        }
    }

    // Scale the gaussian parameters by the per-event error on the decay time (if appropriate)
    for (UInt_t i(0); i<nGauss_; ++i) {
        if (scaleMeans_[i]) {
            means[i] *= abscissaErr;
        }
        if (scaleWidths_[i]) {
            sigmas[i] *= abscissaErr;
        }
    }

    switch ( effMethod_ ) {
        case EfficiencyMethod::Flat :
        {
            // Efficiency is flat, so a single full-range integral (weight 1) suffices
            const Double_t uniformEffVal {1.0};
            if ( this->doSmearing() ) {
                this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, uniformEffVal, means, sigmas, fracs );
            } else {
                this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, uniformEffVal );
            }
            break;
        }
        case EfficiencyMethod::Binned :
        {
            // Efficiency varies as piecewise constant
            // Total integral is sum of integrals in each bin, each weighted by efficiency in that bin
            const Int_t nBins { effiHist_->GetNbinsX() };
            for ( Int_t bin{1}; bin <= nBins; ++bin ) {
                const Double_t loEdge {effiHist_->GetBinLowEdge(bin)};
                const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)};
                const Double_t effVal {effiHist_->GetBinContent(bin)};
                if ( this->doSmearing() ) {
                    this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );
                } else {
                    this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );
                }
            }
            break;
        }
        case EfficiencyMethod::Spline :
        {
            // Efficiency varies as piecewise polynomial
            // Use methods from https://arxiv.org/abs/1407.0748 section 4 to calculate
            // TODO we shouldn't need the next line, let's try to ensure by this point that we don't need it
            if (not effiFun_) {
                std::cerr << "FATAL : no spline defined!";
                gSystem->Exit(EXIT_FAILURE);
            }
            const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
            if ( this->doSmearing() ) {
                if ( nonKnotChanged_ ) {
                    if ( resoParChanged_ ) {
                        this->calcMeanAndSigmaPowers( means, sigmas );
                    }
                    this->calcKVectors();
                }
                for (UInt_t i{0}; i < nSplineSegments; ++i) {
                    this->calcSmearedSplinePartialIntegrals( i, means, sigmas, fracs );
                }
            } else {
                for (UInt_t i{0}; i < nSplineSegments; ++i) {
                    this->calcNonSmearedSplinePartialIntegrals( i );
                }
            }
            break;
        }
    }

    // std::cout << "\033[1;34m In calcNorm: \033[0m" << std::endl; //DEBUG
    // std::cout << "\033[1;34m normTermExp : \033[0m" << normTermExp_ << std::endl; //DEBUG
    // std::cout << "\033[1;34m normTerm[Cos,Sin] : \033[0m[" << normTermCos_ << ", " << normTermSin_ << "]" << std::endl; //DEBUG
    // std::cout << "\033[1;34m normTerm[Cosh,Sinh]: \033[0m[" << normTermCosh_ << ", " << normTermSinh_ << "]" << std::endl; //DEBUG
}
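// In all three efficiency treatments above, each normalisation term is a
// weighted sum of partial integrals of the corresponding physics term: one
// full-range integral for a flat efficiency, one integral per histogram bin
// (weighted by the bin content) for a binned efficiency, and one integral
// per spline segment for the spline parameterisation.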
// TODO - Mildly concerned this is void rather than returning the integrals
//        (but this would require refactoring for different return values).
//        As long as it doesn't get called outside of calcNorm() it should be fine - DPO
//        (TL: comment applies to all calc*PartialIntegrals functions.)
void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight)
{
    /* TODO - need to implement something for DecayTimeDiff everywhere
    if (method_ == DecayTimeDiff) {
        // TODO - there should be some TMath::Abs here surely?
        normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gammaVal_) - TMath::Exp(-minAbs*gammaVal_));
    }
    */

    const Double_t normTermExp { weight * this->nonSmearedExpIntegral(minAbs, maxAbs) };
    normTermExp_ += normTermExp;

    if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

        auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
        normTermCos_ += weight * cosIntegral;
        normTermSin_ += weight * sinIntegral;

        if ( type_ == ExpTrig ) {
            normTermCosh_ += normTermExp;
        } else {
            auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs);
            normTermCosh_ += weight * coshIntegral;
            normTermSinh_ += weight * sinhIntegral;
        }
    }
}

void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
    for (UInt_t i(0); i<nGauss_; ++i) {

        const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };

        const std::complex<Double_t> z { gammaVal_ * sigmaOverRoot2 };
        const std::complex<Double_t> integral { this->smearedGeneralIntegral( z, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };

        const Double_t normTermExp { weight * integral.real() };
        normTermExp_ += fractions[i] * normTermExp;

        if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

            const std::complex<Double_t> zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
            const std::complex<Double_t> integralTrig { this->smearedGeneralIntegral( zTrig, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };

            const Double_t cosIntegral { integralTrig.real() };
            const Double_t sinIntegral { integralTrig.imag() };

            normTermCos_ += fractions[i] * weight * cosIntegral;
            normTermSin_ += fractions[i] * weight * sinIntegral;

            if ( type_ == ExpTrig ) {
                normTermCosh_ += fractions[i] * normTermExp;
            } else {
                // Heavy (H) eigenstate case
                const std::complex<Double_t> zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
                const std::complex<Double_t> integralH { this->smearedGeneralIntegral( zH, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };

                // Light (L) eigenstate case
                const std::complex<Double_t> zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
                const std::complex<Double_t> integralL { this->smearedGeneralIntegral( zL, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };

                const std::complex<Double_t> coshIntegral { 0.5 * (integralH + integralL) };
                const std::complex<Double_t> sinhIntegral { 0.5 * (integralH - integralL) };

                normTermCosh_ += fractions[i] * weight * coshIntegral.real();
                normTermSinh_ += fractions[i] * weight * sinhIntegral.real();
            }
        }
    }
}

void LauDecayTimePdf::calcMeanAndSigmaPowers( const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas )
{
    // Calculate powers of mu and sigma/sqrt(2) needed by all terms in the smeared spline normalisation
    for (UInt_t i(0); i<nGauss_; ++i) {
        const Double_t mu { means[i] };
        const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };

        meanPowerVals_[i]  = { 1.0, mu, mu*mu, mu*mu*mu };
        sigmaPowerVals_[i] = { 1.0, sigmaOverRoot2, sigmaOverRoot2*sigmaOverRoot2, sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2 };
    }
}

void LauDecayTimePdf::calcKVectors()
{
    std::complex<Double_t> z;

    for (UInt_t i(0); i<nGauss_; ++i) {
        const Double_t sigmaOverRoot2 { sigmaVals_[i] / LauConstants::root2 };

        z.real( gammaVal_ * sigmaOverRoot2 );
        z.imag( 0.0 );
        expTermKvecVals_[i] = this->generateKvector(z);

        if ( type_ == ExpTrig || type_ == ExpHypTrig ) {
            z.real( gammaVal_ * sigmaOverRoot2 );
            z.imag( -deltaMVal_ * sigmaOverRoot2 );
            trigTermKvecVals_[i] = this->generateKvector(z);

            if ( type_ == ExpHypTrig ) {
                z.real( ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
                z.imag( 0.0 );
                hypHTermKvecVals_[i] = this->generateKvector(z);

                z.real( ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
                z.imag( 0.0 );
                hypLTermKvecVals_[i] = this->generateKvector(z);
            }
        }
    }
}
void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
    using namespace std::complex_literals;

    const std::vector<Double_t>& xVals { effiFun_->getXValues() };
    const Double_t minAbs = xVals[splineIndex];
    const Double_t maxAbs = xVals[splineIndex+1];

    const std::array<Double_t,4> coeffs { effiFun_->getCoefficients(splineIndex) };

    std::complex<Double_t> z;

    for (UInt_t i(0); i<nGauss_; ++i) {

        const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };

        z.real( gammaVal_ * sigmaOverRoot2 );
        z.imag( 0.0 );
        if ( nonKnotChanged_ ) {
            expTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
        }
        const Double_t normTermExp { this->smearedSplineNormalise(coeffs, expTermKvecVals_[i], expTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).real() };
        normTermExp_ += fractions[i] * normTermExp;

        if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

            z.real( gammaVal_ * sigmaOverRoot2 );
            z.imag( -deltaMVal_ * sigmaOverRoot2 );
            if ( nonKnotChanged_ ) {
                trigTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
            }
            std::complex<Double_t> integral { this->smearedSplineNormalise(coeffs, trigTermKvecVals_[i], trigTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]) };

            const Double_t cosIntegral { integral.real() };
            const Double_t sinIntegral { integral.imag() };

            normTermCos_ += fractions[i] * cosIntegral;
            normTermSin_ += fractions[i] * sinIntegral;

            if ( type_ == ExpTrig ) {
                normTermCosh_ += fractions[i] * normTermExp;
            } else {
                const std::complex<Double_t> zH { ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
                const std::complex<Double_t> zL { ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
                if ( nonKnotChanged_ ) {
                    hypHTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, zH, sigmas[i], means[i]);
                    hypLTermMvecVals_[splineIndex][i] = this->generateMvector(minAbs, maxAbs, zL, sigmas[i], means[i]);
                }
                const Double_t integralH { this->smearedSplineNormalise(coeffs, hypHTermKvecVals_[i], hypHTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).real() };
                const Double_t integralL { this->smearedSplineNormalise(coeffs, hypLTermKvecVals_[i], hypLTermMvecVals_[splineIndex][i], sigmaPowerVals_[i], meanPowerVals_[i]).real() };

                const Double_t coshIntegral { 0.5 * (integralH + integralL) };
                const Double_t sinhIntegral { 0.5 * (integralH - integralL) };

                normTermCosh_ += fractions[i] * coshIntegral;
                normTermSinh_ += fractions[i] * sinhIntegral;
            }
        }
    }
}

std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateKvector(const std::complex<Double_t>& z)
{
    std::array<std::complex<Double_t>,4> K {0.,0.,0.,0.};

    const std::complex<Double_t> zr { 1./z };

    K[0] = 0.5*zr;
    K[1] = 0.5*zr*zr;
    K[2] = zr*(1.+zr*zr);
    K[3] = 3.*zr*zr*(1.+zr*zr);

    return K;
}
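// The K_n above are those of arXiv:1407.0748 (section 4):
//   K_0(z) = 1/(2z),  K_1(z) = 1/(2z^2),
//   K_2(z) = (1/z)(1 + 1/z^2),  K_3(z) = (3/z^2)(1 + 1/z^2).
// Together with the M_n integrals generated below they give the analytic
// convolution of each spline monomial t^k with a Gaussian resolution model.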
std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& z, const Double_t sigma, const Double_t mean)
{
    using namespace std::complex_literals;

    std::array<std::complex<Double_t>,4> M0 {0.,0.,0.,0.};
    std::array<std::complex<Double_t>,4> M1 {0.,0.,0.,0.};
    std::array<std::complex<Double_t>,4> M {0.,0.,0.,0.};

    const Double_t x1 { (maxAbs - mean) / (LauConstants::root2 * sigma) };
    const Double_t x0 { (minAbs - mean) / (LauConstants::root2 * sigma) };

    // Values used a lot
    const Double_t ex2_1 { TMath::Exp(-(x1*x1)) };
    const Double_t ex2_0 { TMath::Exp(-(x0*x0)) };
    const Double_t sqrtPir { 1.0 / LauConstants::rootPi };

    const std::complex<Double_t> arg1 { 1i * (z - x1) };
    const std::complex<Double_t> arg0 { 1i * (z - x0) };

    // fad = the faddeeva term times the ex2 value (done in different ways depending on the domain)
    std::complex<Double_t> fad1;
    std::complex<Double_t> fad0;

    if (arg1.imag() < -5.0) {
        fad1 = std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);
    } else {
        fad1 = ex2_1*RooMath::faddeeva(arg1);
    }

    if (arg0.imag() < -5.0) {
        fad0 = std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);
    } else {
        fad0 = ex2_0*RooMath::faddeeva(arg0);
    }

    // doing the actual functions for x1
    M1[0] = RooMath::erf(x1) - fad1;
    M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1);
    M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1);
    M1[3] = 4. * (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1);

    // doing them again for x0
    M0[0] = RooMath::erf(x0) - fad0;
    M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0);
    M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0);
    M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0);

    for (Int_t i = 0; i < 4; ++i) { M[i] = M1[i] - M0[i]; }

    return M;
}

std::complex<Double_t> LauDecayTimePdf::smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<Double_t>,4>& K, const std::array<std::complex<Double_t>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const
{
    using namespace std::complex_literals;

    // Triple sum to get N (eqn 31 and 29 in https://arxiv.org/pdf/1407.0748.pdf)
    std::complex<Double_t> N = 0. + 0i;
    for (Int_t k = 0; k < 4; ++k) {
        for (Int_t n = 0; n <= k; ++n) {
            for (Int_t i = 0; i <= n; ++i) {
                // The binomial coefficient terms
                const Double_t b { binom_[n][i]*binom_[k][n] };
                N += sigmaPowers[n]*coeffs[k]*meanPowers[k-n]*K[i]*M[n-i]*b;
            }
        }
    }
    return N;
}

void LauDecayTimePdf::calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex)
{
    using namespace std::complex_literals;

    const std::complex<Double_t> u { gammaVal_ };

    const Double_t normTermExp { this->nonSmearedSplineNormalise(splineIndex, u, expTermIkVals_).real() };
    normTermExp_ += normTermExp;

    if ( type_ == ExpTrig or type_ == ExpHypTrig ) {

        const std::complex<Double_t> uTrig { gammaVal_, -deltaMVal_ };
        const std::complex<Double_t> integral { this->nonSmearedSplineNormalise(splineIndex, uTrig, trigTermIkVals_) };

        const Double_t cosIntegral { integral.real() };
        const Double_t sinIntegral { integral.imag() };

        normTermCos_ += cosIntegral;
        normTermSin_ += sinIntegral;

        if ( type_ == ExpTrig ) {
            normTermCosh_ += normTermExp;
        } else {
            const std::complex<Double_t> uH { gammaVal_ - 0.5 * deltaGammaVal_ };
            const std::complex<Double_t> uL { gammaVal_ + 0.5 * deltaGammaVal_ };

            const Double_t integralH { this->nonSmearedSplineNormalise(splineIndex, uH, hypHTermIkVals_).real() };
-            const Double_t integralL { this->nonSmearedSplineNormalise(splineIndex, uL, hypHTermIkVals_).real() };
+            const Double_t integralL { this->nonSmearedSplineNormalise(splineIndex, uL, hypLTermIkVals_).real() };

            const Double_t coshIntegral { 0.5 * (integralH + integralL) };
            const Double_t sinhIntegral { 0.5 * (integralH - integralL) };

            normTermCosh_ += coshIntegral;
            normTermSinh_ += sinhIntegral;
        }
    }
}

std::complex<Double_t> LauDecayTimePdf::calcIk( const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& u )
{
    // Taking mu = 0, this does not have to be the case in general
    auto G = [&u](const Int_t n){ return -TMath::Factorial(n)/std::pow(u,n+1); }; // power of n+1 used rather than n, this is due to maths error in the paper
    auto H = [&u](const Int_t n, const Double_t t){ return std::pow(t,n)*std::exp(-u*t); };

    std::complex<Double_t> ans { 0.0, 0.0 };

    for (UInt_t j = 0; j <= k; ++j) {
        ans += binom_[k][j]*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );
    }

    return ans;
}
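// calcIk evaluates I_k = Int_{tMin}^{tMax} t^k exp(-u t) dt by repeated
// integration by parts, which gives
//   I_k = sum_{j=0}^{k} C(k,j) G(j) [ H(k-j, tMax) - H(k-j, tMin) ]
// with G(n) = -n!/u^(n+1) and H(n,t) = t^n exp(-u t). The normalisation of
// one spline segment is then sum_k c_k I_k, with c_k the segment's
// polynomial coefficients (see nonSmearedSplineNormalise below).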
std::complex<Double_t> LauDecayTimePdf::nonSmearedSplineNormalise( const UInt_t splineIndex, const std::complex<Double_t>& u, std::vector<std::array<std::complex<Double_t>,4>>& cache )
{
    // u = Gamma - iDeltam in general
    using namespace std::complex_literals;

    const std::vector<Double_t>& xVals = effiFun_->getXValues();
    const Double_t minAbs = xVals[splineIndex];
    const Double_t maxAbs = xVals[splineIndex+1];

    std::array<Double_t,4> coeffs = effiFun_->getCoefficients(splineIndex);

    // Sum to get N (eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 with the corrected maths error)
    std::complex<Double_t> N = 0. + 0i;
    if ( nonKnotChanged_ ) {
        for (UInt_t i = 0; i < 4; ++i) {
            cache[splineIndex][i] = calcIk(i, minAbs, maxAbs, u);
            N += cache[splineIndex][i] * coeffs[i];
        }
    } else {
        for (UInt_t i = 0; i < 4; ++i) {
            N += cache[splineIndex][i] * coeffs[i];
        }
    }
    return N;
}

Double_t LauDecayTimePdf::generateError(Bool_t forceNew)
{
    if (errHist_ && (forceNew || !abscissaErrorGenerated_)) {
        LauFitData errData = errHist_->generate(nullptr);
        abscissaError_ = errData.at(this->varErrName());
        abscissaErrorGenerated_ = kTRUE;
    } else {
        while (forceNew || !abscissaErrorGenerated_) {
            abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_);
            if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) {
                abscissaErrorGenerated_ = kTRUE;
                forceNew = kFALSE;
            }
        }
    }
    return abscissaError_;
}

/*
LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
    // generateError SHOULD have been called before this
    // function but will call it here just to make sure
    // (has no effect if it has already been called)
    abscissaError_ = this->generateError();

    // If the PDF is scaled by the per-event error then need to update the PDF height for each event
    Bool_t scale(kFALSE);
    for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
        scale |= (*iter);
    }
    for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
        scale |= (*iter);
    }
    if (scale || (!this->heightUpToDate() && !this->cachePDF())) {
        this->calcPDFHeight(kinematics);
        this->heightUpToDate(kTRUE);
    }

    // Generate the value of the abscissa.
    Bool_t gotAbscissa(kFALSE);
    Double_t genVal(0.0);
    Double_t genPDFVal(0.0);
    LauFitData genAbscissa;

    const Double_t xMin = this->minAbscissa();
    const Double_t xMax = this->maxAbscissa();
    const Double_t xRange = xMax - xMin;

    while (!gotAbscissa) {
        genVal = LauRandom::randomFun()->Rndm()*xRange + xMin;

        this->calcLikelihoodInfo(genVal, abscissaError_);
        genPDFVal = this->getUnNormLikelihood();

        if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) { gotAbscissa = kTRUE; }

        if (genPDFVal > this->getMaxHeight()) {
            std::cerr<<"Warning in LauDecayTimePdf::generate()."
                     <<" genPDFVal = "<<genPDFVal<<" is larger than the specified maximum = "<<this->getMaxHeight()<<" for the abscissa = "<<genVal<<"."<<std::endl;
        }
    }

    genAbscissa[this->varName()] = genVal;

    // mark that we need a new error to be generated next time
    abscissaErrorGenerated_ = kFALSE;

    return genAbscissa;
}
*/

void LauDecayTimePdf::setErrorHisto(const TH1* hist)
{
    if ( errHist_ != nullptr ) {
        std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<std::endl;
        return;
    }

    errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError());
}

void LauDecayTimePdf::setHistoPdf(const TH1* hist)
{
    if ( pdfHist_ != nullptr ) {
        std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<std::endl;
        return;
    }

    pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa());
}
void LauDecayTimePdf::setEffiHist(const TH1* hist)
{
    if ( effiHist_ != nullptr ) {
        std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl;
        return;
    }

    if ( hist == nullptr ) {
        std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl;
        return;
    }

    // Check boundaries of histogram align with our abscissa's range
    const Double_t axisMin {hist->GetXaxis()->GetXmin()};
    const Double_t axisMax {hist->GetXaxis()->GetXmax()};
    if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) {
        std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n"
                  << "                                        : histogram range: " << axisMin << " - " << axisMax << "\n"
                  << "                                        : abscissa range:  " << minAbscissa_ << " - " << maxAbscissa_ << "\n"
                  << "                                        : Disregarding this histogram." << std::endl;
        return;
    }

    effiHist_ = dynamic_cast<TH1*>( hist->Clone() );

    // Normalise the hist if the (relative) efficiencies have very large values
    if (effiHist_->GetMaximum() > 1.) {
        effiHist_->Scale( 1. / effiHist_->Integral() ); // Normalise
        std::cout << "INFO in LauDecayTimePdf::setEffiHist : Supplied histogram for Decay Time Acceptance has values too large: normalising..." << std::endl;
    }
}

void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline)
{
    if ( effiFun_ != nullptr ) {
        std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<std::endl;
        return;
    }

    effiFun_ = spline;

    std::vector<Double_t> effis = effiFun_->getYValues();
    effiPars_.resize( effis.size() );
    effiParVals_.resize( effis.size() );
    size_t index = 0;
    for( Double_t& effi : effis ) {
        effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data(), index ), effi, 0.0, 1.0, kTRUE );
        ++index;
    }
}
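// Note: the knot y-values extracted above are wrapped in LauParameter
// objects (one per knot, constrained to [0,1] and fixed by default) so that
// the acceptance spline can be floated in a fit; propagateParUpdates() below
// watches these parameters for changes and refreshes the spline and the
// cached normalisation terms accordingly.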
LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName)
{
    for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
        if ((*iter)->name().Contains(parName)) {
            return (*iter);
        }
    }
    std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
    return nullptr;
}

const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const
{
    for ( std::vector<LauAbsRValue*>::const_iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
        if ((*iter)->name().Contains(parName)) {
            return (*iter);
        }
    }
    std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
    return nullptr;
}

void LauDecayTimePdf::updatePulls()
{
    for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
        std::vector<LauParameter*> params = (*iter)->getPars();
        for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) {
            if (!(*iter)->fixed()) {
                (*params_iter)->updatePull();
            }
        }
    }
}

void LauDecayTimePdf::propagateParUpdates()
{
    // If none of the parameters are floating there's nothing to do
    if ( nothingFloating_ ) {
        return;
    }

    // Otherwise, determine which of the floating parameters have changed (if any) and act accordingly
    static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;};

    anyKnotChanged_ = anyKnotFloating_ and not std::equal(effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), checkEquality);

    // Update the acceptance spline if any of the knot values have changed
    if ( anyKnotChanged_ ) {
        effiFun_->updateYValues( effiPars_ );
    }

    // Check also the physics and resolution parameters
    if ( nonKnotFloating_ ) {
        if ( physicsParFloating_ ) {
            tauChanged_ = tauFloating_ and not checkEquality(tau_, tauVal_);
            deltaMChanged_ = deltaMFloating_ and not checkEquality(deltaM_, deltaMVal_);
            deltaGammaChanged_ = deltaGammaFloating_ and not checkEquality(deltaGamma_, deltaGammaVal_);
            physicsParChanged_ = tauChanged_ || deltaMChanged_ || deltaGammaChanged_;
        }
        if ( resoParFloating_ ) {
            resoParChanged_ = kFALSE;
            resoParChanged_ |= not std::equal( mean_.begin(), mean_.end(), meanVals_.begin(), checkEquality );
            resoParChanged_ |= not std::equal( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), checkEquality );
            resoParChanged_ |= not std::equal( frac_.begin(), frac_.end(), fracVals_.begin(), checkEquality );
        }
        nonKnotChanged_ = physicsParChanged_ or resoParChanged_;
    }

    // If nothing has changed, there's nothing to do
    if ( not ( anyKnotChanged_ or nonKnotChanged_ ) ) {
        return;
    }

    // Otherwise we need to update the cache
    this->updateCache();
}

/*
void LauDecayTimePdf::updateEffiSpline(const std::vector<LauParameter*>& effiPars)
{
    if (effiPars.size() != effiFun_->getnKnots()){
        std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<std::endl;
        gSystem->Exit(EXIT_FAILURE);
    }
    effiFun_->updateYValues(effiPars);
}
*/