
diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc
index 7e503cc..569ced9 100644
--- a/src/LauDecayTimePdf.cc
+++ b/src/LauDecayTimePdf.cc
@@ -1,1696 +1,1698 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauDecayTimePdf.cc
\brief File containing implementation of LauDecayTimePdf class.
*/
#include <complex>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>
#include <algorithm>
#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"
#include "TH1.h"
#include "RooMath.h"
#include "Lau1DCubicSpline.hh"
#include "Lau1DHistPdf.hh"
#include "LauConstants.hh"
#include "LauComplex.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitDataTree.hh"
#include "LauParameter.hh"
#include "LauParamFixed.hh"
#include "LauRandom.hh"
ClassImp(LauDecayTimePdf)
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
params_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
tau_(nullptr),
deltaM_(nullptr),
deltaGamma_(nullptr),
fracPrompt_(nullptr),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scale),
scaleWidths_(scale),
scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
effiPars_(0)
{
if (nGauss > 0)
{
frac_.assign(nGauss_-1,nullptr);
mean_.assign(nGauss_,nullptr);
sigma_.assign(nGauss_,nullptr);
meanVals_.assign(nGauss_,0.0);
sigmaVals_.assign(nGauss_,0.0);
fracVals_.assign(nGauss_,0.0);
}
}
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
params_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
tau_(nullptr),
deltaM_(nullptr),
deltaGamma_(nullptr),
fracPrompt_(nullptr),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scaleMeans),
scaleWidths_(scaleWidths),
scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or<Bool_t>() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
effiPars_(0)
{
if (nGauss > 0)
{
frac_.assign(nGauss_-1,nullptr);
mean_.assign(nGauss_,nullptr);
sigma_.assign(nGauss_,nullptr);
meanVals_.assign(nGauss_,0.0);
sigmaVals_.assign(nGauss_,0.0);
fracVals_.assign(nGauss_,0.0);
}
}
LauDecayTimePdf::~LauDecayTimePdf()
{
// Destructor
delete errHist_; errHist_ = nullptr;
delete pdfHist_; pdfHist_ = nullptr;
delete effiFun_; effiFun_ = nullptr;
delete effiHist_; effiHist_ = nullptr;
for( auto& par : effiPars_ ){ delete par; par = nullptr; }
effiPars_.clear();
}
void LauDecayTimePdf::initialise()
{
// The parameters are:
// - the mean and the sigma (bias and spread in resolution) of the gaussian(s)
// - the mean lifetime, denoted tau, of the exponential decay
// - the frequency of oscillation, denoted Delta m, of the cosine and sine terms
// - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms
//
// The next two arguments specify the range in which the PDF is defined,
// and the PDF will be normalised w.r.t. these limits.
//
// The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig ), the number of gaussians
// and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t
// First check whether the scale vector is nGauss in size
if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if (type_ == Hist) {
if (this->nParameters() != 0){
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else {
TString meanName("mean_");
TString sigmaName("sigma_");
TString fracName("frac_");
Bool_t foundParams(kTRUE);
for (UInt_t i(0); i<nGauss_; ++i) {
TString tempName(meanName); tempName += i;
TString tempName2(sigmaName); tempName2 += i;
TString tempName3(fracName); tempName3 += i;
mean_[i] = this->findParameter(tempName);
foundParams &= (mean_[i] != nullptr);
sigma_[i] = this->findParameter(tempName2);
foundParams &= (sigma_[i] != nullptr);
if (i!=0) {
frac_[i-1] = this->findParameter(tempName3);
foundParams &= (frac_[i-1] != nullptr);
}
}
if (type_ == Delta) {
if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == Exp) {
tau_ = this->findParameter("tau");
foundParams &= (tau_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == DeltaExp) {
tau_ = this->findParameter("tau");
fracPrompt_ = this->findParameter("frac_prompt");
foundParams &= (tau_ != nullptr);
foundParams &= (fracPrompt_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the fraction of the prompt part: \"frac_prompt\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
foundParams &= (tau_ != nullptr);
foundParams &= (deltaM_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpHypTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
deltaGamma_ = this->findParameter("deltaGamma");
foundParams &= (tau_ != nullptr);
foundParams &= (deltaM_ != nullptr);
foundParams &= (deltaGamma_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
std::cerr<<" - the width difference: \"deltaGamma\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
}
// Setup the normalisation caches
normTermsExp_.clear(); normTermsExp_.resize(1);
normTermsCos_.clear(); normTermsCos_.resize(1);
normTermsSin_.clear(); normTermsSin_.resize(1);
normTermsCosh_.clear(); normTermsCosh_.resize(1);
normTermsSinh_.clear(); normTermsSinh_.resize(1);
if ( effMethod_ == EfficiencyMethod::Spline ) {
const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
if ( not this->doSmearing() ) {
expTermIkVals_.clear(); expTermIkVals_.resize(nSplineSegments);
trigTermIkVals_.clear(); trigTermIkVals_.resize(nSplineSegments);
hypHTermIkVals_.clear(); hypHTermIkVals_.resize(nSplineSegments);
hypLTermIkVals_.clear(); hypLTermIkVals_.resize(nSplineSegments);
} else {
// Set outer vectors to size 1 (will be resized to nEvents in cacheInfo if necessary)
meanPowerVals_.clear(); meanPowerVals_.resize(1); meanPowerVals_.front().resize(nGauss_);
sigmaPowerVals_.clear(); sigmaPowerVals_.resize(1); sigmaPowerVals_.front().resize(nGauss_);
expTermKvecVals_.clear(); expTermKvecVals_.resize(1); expTermKvecVals_.front().resize(nGauss_);
trigTermKvecVals_.clear(); trigTermKvecVals_.resize(1); trigTermKvecVals_.front().resize(nGauss_);
hypHTermKvecVals_.clear(); hypHTermKvecVals_.resize(1); hypHTermKvecVals_.front().resize(nGauss_);
hypLTermKvecVals_.clear(); hypLTermKvecVals_.resize(1); hypLTermKvecVals_.front().resize(nGauss_);
expTermMvecVals_.clear(); expTermMvecVals_.resize(1); expTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : expTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
trigTermMvecVals_.clear(); trigTermMvecVals_.resize(1); trigTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : trigTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
hypHTermMvecVals_.clear(); hypHTermMvecVals_.resize(1); hypHTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : hypHTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
hypLTermMvecVals_.clear(); hypLTermMvecVals_.resize(1); hypLTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : hypLTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
}
}
// Force calculation of all relevant info by faking that all parameter values have changed
nothingFloating_ = nothingChanged_ = kFALSE;
anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
nonKnotFloating_ = nonKnotChanged_ = kTRUE;
physicsParFloating_ = physicsParChanged_ = kTRUE;
tauFloating_ = tauChanged_ = ( tau_ != nullptr );
deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
resoParFloating_ = resoParChanged_ = kTRUE;
}
Double_t LauDecayTimePdf::effectiveResolution() const
{
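// A brief sketch of what this accessor computes (all quantities as defined
// in this class): the per-Gaussian dilutions are combined as
//   D = sum_i f_i * exp( -0.5 * deltaM^2 * sigma_i^2 )
// and the effective single-Gaussian resolution is then
//   sigma_eff = sqrt( -2 * ln(D) ) / deltaM
// i.e. the width of a single Gaussian that would give the same dilution.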
Double_t dilution = 0.;
Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue();
// Might be cleaner to just append this to the vector in the init step,
// then the consistency can also be checked
Double_t fracSum = 0;
for (auto f : frac_) fracSum += f->unblindValue();
Double_t lastFrac = 1. - fracSum;
for (size_t i = 0; i < sigma_.size(); i++) {
Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue();
Double_t thisFrac = lastFrac;
if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue();
dilution += thisFrac * TMath::Exp(-dMSq * 0.5 * sigSq);
}
return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue();
}
void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData)
{
// Check that the input data contains the decay time variable
Bool_t hasBranch = inputData.haveBranch(this->varName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<std::endl;
return;
}
// If we're scaling by the per-event error, also check that the input data contains the decay time error variable
const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ };
if ( needPerEventNormTerms ) {
hasBranch = inputData.haveBranch(this->varErrName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<std::endl;
return;
}
// Pass the data to the decay-time error PDF for caching
if ( errHist_ ) {
errHist_->cacheInfo(inputData);
}
}
if (type_ == Hist) {
// Pass the data to the decay-time PDF for caching
if ( pdfHist_ ) {
pdfHist_->cacheInfo(inputData);
}
// Make cache of effiTerms
const UInt_t nEvents = inputData.nEvents();
// Efficiency is assumed to always be 1
effiTerms_.assign(nEvents,1.);
} else {
// Clear the vectors and reserve enough space in the caches of the terms
const UInt_t nEvents = inputData.nEvents();
abscissas_.clear(); abscissas_.resize(nEvents);
abscissaErrors_.clear(); abscissaErrors_.resize(nEvents);
expTerms_.clear(); expTerms_.resize(nEvents);
cosTerms_.clear(); cosTerms_.resize(nEvents);
sinTerms_.clear(); sinTerms_.resize(nEvents);
coshTerms_.clear(); coshTerms_.resize(nEvents);
sinhTerms_.clear(); sinhTerms_.resize(nEvents);
effiTerms_.clear(); effiTerms_.resize(nEvents);
// Also resize the normalisation cache elements if we're doing per-event resolution
if ( needPerEventNormTerms ) {
normTermsExp_.clear(); normTermsExp_.resize(nEvents);
normTermsCos_.clear(); normTermsCos_.resize(nEvents);
normTermsSin_.clear(); normTermsSin_.resize(nEvents);
normTermsCosh_.clear(); normTermsCosh_.resize(nEvents);
normTermsSinh_.clear(); normTermsSinh_.resize(nEvents);
if ( effMethod_ == EfficiencyMethod::Spline ) {
meanPowerVals_.resize(nEvents);
for ( auto& innerVec : meanPowerVals_ ) { innerVec.resize(nGauss_); }
sigmaPowerVals_.resize(nEvents);
for ( auto& innerVec : sigmaPowerVals_ ) { innerVec.resize(nGauss_); }
expTermKvecVals_.resize(nEvents);
for ( auto& innerVec : expTermKvecVals_ ) { innerVec.resize(nGauss_); }
trigTermKvecVals_.resize(nEvents);
for ( auto& innerVec : trigTermKvecVals_ ) { innerVec.resize(nGauss_); }
hypHTermKvecVals_.resize(nEvents);
for ( auto& innerVec : hypHTermKvecVals_ ) { innerVec.resize(nGauss_); }
hypLTermKvecVals_.resize(nEvents);
for ( auto& innerVec : hypLTermKvecVals_ ) { innerVec.resize(nGauss_); }
const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
expTermMvecVals_.resize(nEvents);
for ( auto& middleVec : expTermMvecVals_) {
middleVec.resize(nSplineSegments);
for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
trigTermMvecVals_.resize(nEvents);
for ( auto& middleVec : trigTermMvecVals_) {
middleVec.resize(nSplineSegments);
for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
hypHTermMvecVals_.resize(nEvents);
for ( auto& middleVec : hypHTermMvecVals_) {
middleVec.resize(nSplineSegments);
for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
hypLTermMvecVals_.resize(nEvents);
for ( auto& middleVec : hypLTermMvecVals_) {
middleVec.resize(nSplineSegments);
for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
}
}
// Determine the abscissa and abscissa error values for each event
for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) {
const LauFitData& dataValues = inputData.getData(iEvt);
const Double_t abscissa { dataValues.at(this->varName()) };
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissas_[iEvt] = abscissa ;
const Double_t abscissaErr { needPerEventNormTerms ? dataValues.at(this->varErrName()) : 0.0 };
if ( needPerEventNormTerms and ( abscissaErr > this->maxAbscissaError() or abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissaErrors_[iEvt] = abscissaErr;
}
// Force calculation of all info by faking that all parameter values have changed
nothingFloating_ = nothingChanged_ = kFALSE;
anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
nonKnotFloating_ = nonKnotChanged_ = kTRUE;
physicsParFloating_ = physicsParChanged_ = kTRUE;
tauFloating_ = tauChanged_ = ( tau_ != nullptr );
deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
resoParFloating_ = resoParChanged_ = kTRUE;
// Fill the rest of the cache
this->updateCache();
// Set the various "parameter-is-floating" flags, used to bookkeep the cache in propagateParUpdates
LauParamFixed isFixed;
nonKnotFloating_ = not std::all_of(params_.begin(), params_.end(), isFixed);
anyKnotFloating_ = not std::all_of(effiPars_.begin(), effiPars_.end(), isFixed);
nothingFloating_ = not (nonKnotFloating_ or anyKnotFloating_);
std::cout << "INFO in LauDecayTimePdf::cacheInfo : nothing floating set to: " << (nothingFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : any knot floating set to: " << (anyKnotFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : non-knot floating set to: " << (nonKnotFloating_ ? "True" : "False") << std::endl;
tauFloating_ = tau_ ? not tau_->fixed() : kFALSE;
deltaMFloating_ = deltaM_ ? not deltaM_->fixed() : kFALSE;
deltaGammaFloating_ = deltaGamma_ ? not deltaGamma_->fixed() : kFALSE;
physicsParFloating_ = ( tauFloating_ or deltaMFloating_ or deltaGammaFloating_ );
std::cout << "INFO in LauDecayTimePdf::cacheInfo : tau floating set to: " << (tauFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaM floating set to: " << (deltaMFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaGamma floating set to: " << (deltaGammaFloating_ ? "True" : "False") << std::endl;
resoParFloating_ = kFALSE;
for ( UInt_t i{0}; i < nGauss_; ++i )
{
const Bool_t meanFloating { not mean_[i]->fixed() };
const Bool_t sigmaFloating { not sigma_[i]->fixed() };
resoParFloating_ |= (meanFloating or sigmaFloating);
std::cout << "INFO in LauDecayTimePdf::cacheInfo : mean[" << i << "] floating set to: " << (meanFloating ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : sigma[" << i << "] floating set to: " << (sigmaFloating ? "True" : "False") << std::endl;
if ( i < (nGauss_ - 1) ) {
const Bool_t fracFloating { not frac_[i]->fixed() };
resoParFloating_ |= fracFloating;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : frac[" << i << "] floating set to: " << (fracFloating ? "True" : "False") << std::endl;
}
}
}
}
void LauDecayTimePdf::updateCache()
{
// Get the updated values of all parameters
static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();};
if ( anyKnotChanged_ ) {
std::transform( effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), assignValue );
}
if ( tauChanged_ ) {
tauVal_ = tau_->unblindValue();
gammaVal_ = 1.0 / tauVal_;
}
if ( deltaMChanged_ ) {
deltaMVal_ = deltaM_->unblindValue();
}
if ( deltaGammaChanged_ ) {
deltaGammaVal_ = deltaGamma_->unblindValue();
}
if ( resoParChanged_ ) {
std::transform( mean_.begin(), mean_.end(), meanVals_.begin(), assignValue );
std::transform( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), assignValue );
std::transform( frac_.begin(), frac_.end(), fracVals_.begin()+1, assignValue );
fracVals_[0] = std::accumulate( fracVals_.begin()+1, fracVals_.end(), 1.0, std::minus<Double_t>{} );
}
// Calculate the values of all terms for each event
// TODO - need to sort out UInt_t vs ULong_t everywhere!!
const UInt_t nEvents { static_cast<UInt_t>(abscissas_.size()) };
// If any of the physics or resolution parameters have changed we need
// to update everything, otherwise we only need to recalculate the
// efficiency
if ( nonKnotChanged_ ) {
for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) {
const Double_t abscissa { abscissas_[iEvt] };
const Double_t abscissaErr { abscissaErrors_[iEvt] };
this->calcLikelihoodInfo(abscissa, abscissaErr);
expTerms_[iEvt] = expTerm_;
cosTerms_[iEvt] = cosTerm_;
sinTerms_[iEvt] = sinTerm_;
coshTerms_[iEvt] = coshTerm_;
sinhTerms_[iEvt] = sinhTerm_;
effiTerms_[iEvt] = effiTerm_;
}
} else {
for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) {
const Double_t abscissa { abscissas_[iEvt] };
effiTerm_ = this->calcEffiTerm( abscissa );
effiTerms_[iEvt] = effiTerm_;
}
}
// Calculate the normalisation terms
this->calcNorm();
// reset the "parameter-has-changed" flags
anyKnotChanged_ = kFALSE;
tauChanged_ = kFALSE;
deltaMChanged_ = kFALSE;
deltaGammaChanged_ = kFALSE;
physicsParChanged_ = kFALSE;
resoParChanged_ = kFALSE;
nonKnotChanged_ = kFALSE;
}
void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt)
{
// Extract all the terms and their normalisations
const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ };
const UInt_t normTermElement { needPerEventNormTerms * iEvt };
if (type_ == Hist) {
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(iEvt);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
} else {
expTerm_ = expTerms_[iEvt];
cosTerm_ = cosTerms_[iEvt];
sinTerm_ = sinTerms_[iEvt];
coshTerm_ = coshTerms_[iEvt];
sinhTerm_ = sinhTerms_[iEvt];
normTermExp_ = normTermsExp_[normTermElement];
normTermCos_ = normTermsCos_[normTermElement];
normTermSin_ = normTermsSin_[normTermElement];
normTermCosh_ = normTermsCosh_[normTermElement];
normTermSinh_ = normTermsSinh_[normTermElement];
}
// Extract the decay time error PDF value
if ( needPerEventNormTerms and errHist_ ) {
errHist_->calcLikelihoodInfo(iEvt);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
// Extract the decay time efficiency
effiTerm_ = effiTerms_[iEvt];
}
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa)
{
// Check whether any of the gaussians should be scaled - if any of them should we need the per-event error
if (this->doSmearing() and scaleWithPerEventError_) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<<std::endl;
return;
}
this->calcLikelihoodInfo(abscissa, 0.0);
}
Double_t LauDecayTimePdf::calcEffiTerm( const Double_t abscissa ) const
{
Double_t effiTerm{1.0};
switch( effMethod_ )
{
case EfficiencyMethod::Spline : effiTerm = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break;
case EfficiencyMethod::Binned : effiTerm = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break;
case EfficiencyMethod::Flat : effiTerm = 1.0 ; break;
}
+ // TODO print warning messages for these, but not every time
if ( effiTerm > 1.0 ) { effiTerm = 1.0; }
- else if ( effiTerm < 0.0 ) { effiTerm = 0.0; }
+ // else if ( effiTerm < 0.0 ) { effiTerm = 0.0; }
+ else if ( effiTerm <= 0.0 ) { effiTerm = 1e-20; }
return effiTerm;
}
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr)
{
// Check that the decay time and the decay time error are in valid ranges
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( (this->doSmearing() and scaleWithPerEventError_) && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
// Determine the decay time efficiency
effiTerm_ = this->calcEffiTerm( abscissa );
// For the histogram PDF just calculate that term and return
if (type_ == Hist){
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(abscissa);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
return;
}
// If we're not using the resolution function, calculate the simple terms and return
if (!this->doSmearing()) {
this->calcNonSmearedTerms(abscissa);
return;
}
// Get all the up to date parameter values for the resolution function
std::vector<Double_t> mean { meanVals_ };
std::vector<Double_t> sigma { sigmaVals_ };
std::vector<Double_t> frac { fracVals_ };
// Scale the gaussian parameters by the per-event error on Delta t (if appropriate)
if ( scaleWithPerEventError_ ) {
for (UInt_t i{0}; i<nGauss_; ++i) {
if (scaleMeans_[i]) {
mean[i] *= abscissaErr;
}
if (scaleWidths_[i]) {
sigma[i] *= abscissaErr;
}
}
}
// Calculate x = ( t - mu ) / ( sqrt(2) * sigma ), used by all types
std::vector<Double_t> x(nGauss_);
for (UInt_t i{0}; i<nGauss_; ++i) {
x[i] = ( abscissa - mean[i] ) / ( LauConstants::root2 * sigma[i] );
}
// TODO - this parameter isn't accounted for in the caching bookkeeping yet
Double_t fracPrompt{0.0};
if (type_ == DeltaExp) {
fracPrompt = fracPrompt_->unblindValue();
}
Double_t value(0.0);
if (type_ == Delta || type_ == DeltaExp) {
// Calculate the gaussian function(s)
const Double_t xMax = this->maxAbscissa();
const Double_t xMin = this->minAbscissa();
for (UInt_t i(0); i<nGauss_; ++i) {
if (TMath::Abs(sigma[i]) > 1e-10) {
Double_t exponent(0.0);
Double_t norm(0.0);
Double_t scale = LauConstants::root2*sigma[i];
Double_t scale2 = LauConstants::rootPiBy2*sigma[i];
exponent = -x[i]*x[i];
norm = scale2*(TMath::Erf((xMax - mean[i])/scale)
- TMath::Erf((xMin - mean[i])/scale));
value += frac[i]*TMath::Exp(exponent)/norm;
}
}
}
if (type_ != Delta) {
// Reset values of terms
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
// Calculate values of the PDF convolved with each Gaussian for a given value of the abscissa
for (UInt_t i(0); i<nGauss_; ++i) {
const Double_t sigmaOverRoot2 { sigma[i] / LauConstants::root2 };
const std::complex z { gammaVal_ * sigmaOverRoot2 };
const Double_t expTerm { this->smearedGeneralTerm( z, x[i] ).real() };
expTerm_ += frac[i] * expTerm;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
const std::complex trigTerm { this->smearedGeneralTerm( zTrig, x[i] ) };
const Double_t cosTerm { trigTerm.real() };
const Double_t sinTerm { trigTerm.imag() };
cosTerm_ += frac[i] * cosTerm;
sinTerm_ += frac[i] * sinTerm;
if ( type_ == ExpTrig ) {
coshTerm_ += frac[i] * expTerm;
} else {
const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2 };
const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2 };
const Double_t termH { this->smearedGeneralTerm( zH, x[i] ).real() };
const Double_t termL { this->smearedGeneralTerm( zL, x[i] ).real() };
const Double_t coshTerm { 0.5 * (termH + termL) };
const Double_t sinhTerm { 0.5 * (termH - termL) };
coshTerm_ += frac[i] * coshTerm;
sinhTerm_ += frac[i] * sinhTerm;
}
}
}
if (type_ == DeltaExp) {
value *= fracPrompt;
value += (1.0-fracPrompt)*expTerm_;
} else {
value = expTerm_;
}
}
// Calculate the decay time error PDF value
if ( scaleWithPerEventError_ and errHist_ ) {
const std::vector<Double_t> absErrVec {abscissaErr};
errHist_->calcLikelihoodInfo(absErrVec);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
}
void LauDecayTimePdf::calcNonSmearedTerms(const Double_t abscissa)
{
// Reset values of terms
errTerm_ = 1.0;
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
if ( type_ == Hist || type_ == Delta ){
return;
}
if (method_ == DecayTime) {
expTerm_ = TMath::Exp(-abscissa*gammaVal_);
} else if (method_ == DecayTimeDiff) {
expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gammaVal_);
}
// Calculate also the terms related to cosine and sine
if (type_ == ExpTrig) {
coshTerm_ = expTerm_;
sinhTerm_ = 0.0;
cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
}
// Calculate also the terms related to cosh, sinh, cosine, and sine
else if (type_ == ExpHypTrig) {
coshTerm_ = TMath::CosH(0.5*deltaGammaVal_*abscissa)*expTerm_;
sinhTerm_ = TMath::SinH(0.5*deltaGammaVal_*abscissa)*expTerm_;
cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
}
}
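// A sketch of the relation implemented in smearedGeneralTerm below, with
// x = (t - mu) / (sqrt(2) * sigma) and z = (Gamma - i*deltaM) * sigma / sqrt(2):
// the convolution of exp(-Gamma*t) * [cos(deltaM*t) + i*sin(deltaM*t)] with a
// Gaussian resolution function can be written as
//   0.5 * exp(-x^2) * w( i*(z - x) )
// where w is the Faddeeva function. The equivalent erfc form is used in the
// region where the direct Faddeeva evaluation would be numerically unstable.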
std::complex<Double_t> LauDecayTimePdf::smearedGeneralTerm( const std::complex<Double_t>& z, const Double_t x )
{
using namespace std::complex_literals;
const std::complex arg1 { 1i * (z - x) };
const std::complex arg2 { -(x*x) - (arg1*arg1) };
const std::complex conv { ( arg1.imag() < -5.0 ) ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * std::exp(-(x*x)) * RooMath::faddeeva(arg1) };
return conv;
}
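// A sketch of the integral evaluated in nonSmearedCosSinIntegral below:
//   int_{t0}^{t1} exp(-Gamma*t) * [cos(deltaM*t) + i*sin(deltaM*t)] dt
//     = [ -exp((-Gamma + i*deltaM)*t) / (Gamma - i*deltaM) ]_{t0}^{t1}
// whose real and imaginary parts give the cos and sin normalisation terms.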
std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs)
{
// From 1407.0748, not clear whether complex is faster in this case
const LauComplex denom { gammaVal_, -deltaMVal_ };
const LauComplex exponent { -gammaVal_, deltaMVal_ };
const LauComplex num0 { -exponent.scale(minAbs).exp() };
const LauComplex num1 { -exponent.scale(maxAbs).exp() };
const LauComplex integral { (num1 - num0) / denom };
return {integral.re(), integral.im()};
}
Double_t LauDecayTimePdf::nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs)
{
return tauVal_ * ( TMath::Exp(-minAbs*gammaVal_) - TMath::Exp(-maxAbs*gammaVal_) );
}
std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs)
{
// Use exponential formulation rather than cosh, sinh.
// Fewer terms (reused for each), but not guaranteed to be faster.
const Double_t gammaH { gammaVal_ - 0.5 * deltaGammaVal_ };
const Double_t gammaL { gammaVal_ + 0.5 * deltaGammaVal_ };
const Double_t tauH { 1.0 / gammaH };
const Double_t tauL { 1.0 / gammaL };
const Double_t nL1 { -TMath::Exp(-gammaL * maxAbs) * tauL };
const Double_t nH1 { -TMath::Exp(-gammaH * maxAbs) * tauH };
const Double_t nL0 { -TMath::Exp(-gammaL * minAbs) * tauL };
const Double_t nH0 { -TMath::Exp(-gammaH * minAbs) * tauH };
const Double_t coshIntegral { 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ) };
const Double_t sinhIntegral { 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ) };
return {coshIntegral, sinhIntegral};
}
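// A sketch of what smearedGeneralIntegral below evaluates, using the same x
// and z conventions as smearedGeneralTerm: the integral of the smeared
// general term over [minAbs, maxAbs], which can be expressed as
//   ( sigma / (2*sqrt(2)*z) ) * [ erf(x) - exp(-x^2) * w( i*(z - x) ) ]_{x0}^{x1}
// again falling back to the erfc form where the Faddeeva evaluation is unstable.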
std::complex<Double_t> LauDecayTimePdf::smearedGeneralIntegral(const std::complex<Double_t>& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu)
{
using namespace std::complex_literals;
const Double_t x1 { (maxAbs - mu) / (2.0 * sigmaOverRoot2) };
const Double_t x0 { (minAbs - mu) / (2.0 * sigmaOverRoot2) };
const std::complex arg1 { 1i * (z - x1) };
const std::complex arg0 { 1i * (z - x0) };
std::complex integral = 0.0 + 0i;
if(arg1.imag() < -5.0)
{integral = RooMath::erf(x1) - std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);}
else
{integral = RooMath::erf(x1) - TMath::Exp(-(x1*x1)) * RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.0)
{integral -= RooMath::erf(x0) - std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);}
else
{integral -= RooMath::erf(x0) - TMath::Exp(-(x0*x0)) * RooMath::faddeeva(arg0);}
integral *= (sigmaOverRoot2 / (2.0 * z));
return integral;
}
void LauDecayTimePdf::calcNorm()
{
// If we're not doing per-event scaling then we only need to calculate things once
// TODO - need to sort out UInt_t vs ULong_t everywhere!!
const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ };
const UInt_t nEvents { needPerEventNormTerms ? static_cast<UInt_t>(abscissaErrors_.size()) : 1 };
// Get all the up to date parameter values
std::vector<Double_t> means { meanVals_ };
std::vector<Double_t> sigmas { sigmaVals_ };
std::vector<Double_t> fracs { fracVals_ };
for ( UInt_t iEvt{0}; iEvt < nEvents; ++iEvt ) {
// first reset integrals to zero
normTermExp_ = 0.0;
normTermCos_ = 0.0;
normTermSin_ = 0.0;
normTermCosh_ = 0.0;
normTermSinh_ = 0.0;
// Scale the gaussian parameters by the per-event error on decay time (if appropriate)
if ( needPerEventNormTerms ) {
const Double_t abscissaErr { abscissaErrors_[iEvt] };
for (UInt_t i{0}; i<nGauss_; ++i) {
if (scaleMeans_[i]) {
means[i] = meanVals_[i] * abscissaErr;
}
if (scaleWidths_[i]) {
sigmas[i] = sigmaVals_[i] * abscissaErr;
}
}
}
switch ( effMethod_ ) {
case EfficiencyMethod::Flat :
{
// No efficiency variation
// Simply calculate integrals over full range
const Double_t uniformEffVal {1.0};
if( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , uniformEffVal, means, sigmas, fracs );}
else
{this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, uniformEffVal );}
break;
}
case EfficiencyMethod::Binned :
{
// Efficiency varies as piecewise constant
// Total integral is sum of integrals in each bin, each weighted by efficiency in that bin
const Int_t nBins { effiHist_->GetNbinsX() };
for ( Int_t bin{1}; bin <= nBins; ++bin ) {
const Double_t loEdge {effiHist_->GetBinLowEdge(bin)};
const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)};
const Double_t effVal {effiHist_->GetBinContent(bin)};
if ( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );}
else
{this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );}
}
break;
}
case EfficiencyMethod::Spline :
{
// Efficiency varies as piecewise polynomial
// Use methods from https://arxiv.org/abs/1407.0748 section 4 to calculate
const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
if ( this->doSmearing() ) {
if ( nonKnotChanged_ ) {
if ( resoParChanged_ ) {
this->calcMeanAndSigmaPowers( iEvt, means, sigmas );
}
this->calcKVectors( iEvt );
}
for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg)
{
this->calcSmearedSplinePartialIntegrals( iEvt, iSeg, means, sigmas, fracs );
}
} else {
for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg)
{
this->calcNonSmearedSplinePartialIntegrals( iSeg );
}
}
break;
}
}
normTermsExp_[iEvt] = normTermExp_;
normTermsCos_[iEvt] = normTermCos_;
normTermsSin_[iEvt] = normTermSin_;
normTermsCosh_[iEvt] = normTermCosh_;
normTermsSinh_[iEvt] = normTermSinh_;
}
}
// TODO - Mildly concerned this is void rather than returning the integrals
// (but this would require refactoring for different return values).
// As long as it doesn't get called outside of calcNorm() it should be fine - DPO
// (TL: comment applies to all calc*PartialIntegrals functions.)
void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight)
{
/* TODO - need to implement something for DecayTimeDiff everywhere
if (method_ == DecayTimeDiff) {
// TODO - there should be some TMath::Abs here surely?
normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gammaVal_) - TMath::Exp(-minAbs*gammaVal_));
}
*/
const Double_t normTermExp { weight * this->nonSmearedExpIntegral(minAbs, maxAbs) };
normTermExp_ += normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
normTermCos_ += weight * cosIntegral;
normTermSin_ += weight * sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += normTermExp;
} else {
auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs);
normTermCosh_ += weight * coshIntegral;
normTermSinh_ += weight * sinhIntegral;
}
}
}
void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };
const std::complex z { gammaVal_ * sigmaOverRoot2, 0.0 };
const std::complex integral { this->smearedGeneralIntegral( z, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
const Double_t normTermExp { weight * integral.real() };
normTermExp_ += fractions[i] * normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
const std::complex integralTrig { this->smearedGeneralIntegral( zTrig, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
const Double_t cosIntegral { integralTrig.real() };
const Double_t sinIntegral { integralTrig.imag() };
normTermCos_ += fractions[i] * weight * cosIntegral;
normTermSin_ += fractions[i] * weight * sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += fractions[i] * normTermExp;
} else {
// Heavy (H) eigenstate case
const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
const std::complex integralH { this->smearedGeneralIntegral( zH, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
// Light (L) eigenstate case
const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
const std::complex integralL { this->smearedGeneralIntegral( zL, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
const std::complex coshIntegral { 0.5 * (integralH + integralL) };
const std::complex sinhIntegral { 0.5 * (integralH - integralL) };
normTermCosh_ += fractions[i] * weight * coshIntegral.real();
normTermSinh_ += fractions[i] * weight * sinhIntegral.real();
}
}
}
}
void LauDecayTimePdf::calcMeanAndSigmaPowers( const UInt_t iEvt, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas )
{
// Calculate powers of mu and sigma/sqrt(2) needed by all terms in the smeared spline normalisation
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t mu { means[i] };
const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };
meanPowerVals_[iEvt][i] = {1.0, mu, mu*mu, mu*mu*mu};
sigmaPowerVals_[iEvt][i] = {sigmaOverRoot2,
sigmaOverRoot2*sigmaOverRoot2,
sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2,
sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2};
}
}
void LauDecayTimePdf::calcKVectors( const UInt_t iEvt )
{
using namespace std::complex_literals;
std::complex<Double_t> z;
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t sigmaOverRoot2 { sigmaPowerVals_[iEvt][i][0] };
z = gammaVal_ * sigmaOverRoot2;
expTermKvecVals_[iEvt][i] = this->generateKvector(z);
if ( type_ == ExpTrig || type_ == ExpHypTrig ) {
z.real( gammaVal_ * sigmaOverRoot2 );
z.imag( -deltaMVal_ * sigmaOverRoot2 );
trigTermKvecVals_[iEvt][i] = this->generateKvector(z);
if ( type_ == ExpHypTrig ) {
z.real( ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
z.imag( 0.0 );
hypHTermKvecVals_[iEvt][i] = this->generateKvector(z);
z.real( ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
z.imag( 0.0 );
hypLTermKvecVals_[iEvt][i] = this->generateKvector(z);
}
}
}
}
void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t iEvt, const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
using namespace std::complex_literals;
const std::vector<Double_t>& xVals { effiFun_->getXValues() };
const Double_t minAbs = xVals[splineIndex];
const Double_t maxAbs = xVals[splineIndex+1];
const std::array<Double_t,4> coeffs { effiFun_->getCoefficients(splineIndex) };
std::complex<Double_t> z;
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t sigmaOverRoot2 { sigmaPowerVals_[iEvt][i][0] };
z = gammaVal_ * sigmaOverRoot2;
if ( nonKnotChanged_ ) {
expTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
}
const Double_t normTermExp { this->smearedSplineNormalise(coeffs, expTermKvecVals_[iEvt][i], expTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() };
normTermExp_ += fractions[i] * normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
z.real( gammaVal_ * sigmaOverRoot2 );
z.imag( -deltaMVal_ * sigmaOverRoot2 );
if ( nonKnotChanged_ ) {
trigTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
}
std::complex integral { this->smearedSplineNormalise(coeffs, trigTermKvecVals_[iEvt][i], trigTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]) };
const Double_t cosIntegral { integral.real() };
const Double_t sinIntegral { integral.imag() };
normTermCos_ += fractions[i] * cosIntegral;
normTermSin_ += fractions[i] * sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += fractions[i] * normTermExp;
} else {
const std::complex zH { ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
const std::complex zL { ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
if ( nonKnotChanged_ ) {
hypHTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zH, sigmas[i], means[i]);
hypLTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zL, sigmas[i], means[i]);
}
const Double_t integralH { this->smearedSplineNormalise(coeffs, hypHTermKvecVals_[iEvt][i], hypHTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() };
const Double_t integralL { this->smearedSplineNormalise(coeffs, hypLTermKvecVals_[iEvt][i], hypLTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() };
const Double_t coshIntegral { 0.5 * (integralH + integralL) };
const Double_t sinhIntegral { 0.5 * (integralH - integralL) };
normTermCosh_ += fractions[i] * coshIntegral;
normTermSinh_ += fractions[i] * sinhIntegral;
}
}
}
}
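// A brief note on the helpers below: generateKvector and generateMvector build
// the K and M factors used in the spline-acceptance normalisation method of
// arXiv:1407.0748 (cited in calcNorm above); smearedSplineNormalise then
// combines them with the spline coefficients and the cached powers of mu and
// sigma/sqrt(2) via the binomial sums of that method.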
std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateKvector(const std::complex<Double_t>& z)
{
const std::complex<Double_t> zr { 1.0/z };
const std::complex<Double_t> zr2 { zr*zr };
std::array<std::complex<Double_t>,4> K {
0.5*zr,
0.5*zr2,
zr*(1.0+zr2),
3.0*zr2*(1.0+zr2)
};
return K;
}
std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& z, const Double_t sigma, const Double_t mean)
{
using namespace std::complex_literals;
std::array<std::complex<Double_t>,4> M0 {0.,0.,0.,0.};
std::array<std::complex<Double_t>,4> M1 {0.,0.,0.,0.};
std::array<std::complex<Double_t>,4> M {0.,0.,0.,0.};
const Double_t x1 { (maxAbs - mean) / (LauConstants::root2 * sigma) };
const Double_t x0 { (minAbs - mean) / (LauConstants::root2 * sigma) };
//Values used a lot
const Double_t ex2_1 { TMath::Exp(-(x1*x1)) };
const Double_t ex2_0 { TMath::Exp(-(x0*x0)) };
const Double_t sqrtPir { 1.0 / LauConstants::rootPi };
const std::complex arg1 { 1i * (z - x1) };
const std::complex arg0 { 1i * (z - x0) };
//fad = the faddeeva term times the ex2 value (done in different ways depending on the domain)
std::complex<Double_t> fad1;
std::complex<Double_t> fad0;
if(arg1.imag() < -5.0)
{fad1 = std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);}
else
{fad1 = ex2_1*RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.0)
{fad0 = std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);}
else
{fad0 = ex2_0*RooMath::faddeeva(arg0);}
//doing the actual functions for x1
M1[0] = RooMath::erf(x1) - fad1;
M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1);
M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1);
M1[3] = 4. * (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1);
//doing them again for x0
M0[0] = RooMath::erf(x0) - fad0;
M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0);
M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0);
M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0);
for(Int_t i = 0; i < 4; ++i){M[i] = M1[i] - M0[i];}
return M;
}
std::complex<Double_t> LauDecayTimePdf::smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<Double_t>,4>& K, const std::array<std::complex<Double_t>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const
{
using namespace std::complex_literals;
//Triple sum to get N (eqn 31 and 29 in https://arxiv.org/pdf/1407.0748.pdf)
std::complex<Double_t> N = 0. + 0i;
for(Int_t k = 0; k < 4; ++k){
for(Int_t n = 0; n <=k; ++n){
for(Int_t i = 0; i <=n; ++i){
//The binomial coefficient terms
const Double_t b { binom_[n][i]*binom_[k][n] };
N += sigmaPowers[n]*coeffs[k]*meanPowers[k-n]*K[i]*M[n-i]*b;
}}}
return N;
}
void LauDecayTimePdf::calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex)
{
using namespace std::complex_literals;
const std::complex u { gammaVal_ };
const Double_t normTermExp { this->nonSmearedSplineNormalise(splineIndex, u, expTermIkVals_).real() };
normTermExp_ += normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
const std::complex uTrig { gammaVal_, -deltaMVal_ };
const std::complex integral { this->nonSmearedSplineNormalise(splineIndex, uTrig, trigTermIkVals_) };
const Double_t cosIntegral { integral.real() };
const Double_t sinIntegral { integral.imag() };
normTermCos_ += cosIntegral;
normTermSin_ += sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += normTermExp;
} else {
const std::complex uH { gammaVal_ - 0.5 * deltaGammaVal_ };
const std::complex uL { gammaVal_ + 0.5 * deltaGammaVal_ };
const Double_t integralH { this->nonSmearedSplineNormalise(splineIndex, uH, hypHTermIkVals_).real() };
const Double_t integralL { this->nonSmearedSplineNormalise(splineIndex, uL, hypLTermIkVals_).real() };
const Double_t coshIntegral { 0.5 * (integralH + integralL) };
const Double_t sinhIntegral { 0.5 * (integralH - integralL) };
normTermCosh_ += coshIntegral;
normTermSinh_ += sinhIntegral;
}
}
}
std::complex<Double_t> LauDecayTimePdf::calcIk( const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& u )
{
//Taking mu = 0, this does not have to be the case in general
auto G = [&u](const Int_t n){return -TMath::Factorial(n)/std::pow(u,n+1);};//power of n+1 used rather than n, due to a maths error in the paper
auto H = [&u](const Int_t n, const Double_t t){return std::pow(t,n)*std::exp(-u*t);};
std::complex ans { 0.0, 0.0 };
for (UInt_t j = 0; j <= k; ++j)
{ans += binom_[k][j]*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );}
return ans;
}
std::complex<Double_t> LauDecayTimePdf::nonSmearedSplineNormalise( const UInt_t splineIndex, const std::complex<Double_t>& u, std::vector<std::array<std::complex<Double_t>,4>>& cache )
{
// u = Gamma - iDeltam in general
using namespace std::complex_literals;
const std::vector<Double_t>& xVals = effiFun_ -> getXValues();
const Double_t minAbs = xVals[splineIndex];
const Double_t maxAbs = xVals[splineIndex+1];
std::array<Double_t,4> coeffs = effiFun_->getCoefficients(splineIndex);
//sum to get N (eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 with the corrected maths error)
std::complex<Double_t> N = 0. + 0i;
if ( nonKnotChanged_ ) {
for(UInt_t i = 0; i < 4; ++i) {
cache[splineIndex][i] = calcIk(i, minAbs, maxAbs, u);
N += cache[splineIndex][i] * coeffs[i];
}
} else {
for(UInt_t i = 0; i < 4; ++i) {
N += cache[splineIndex][i] * coeffs[i];
}
}
return N;
}
Double_t LauDecayTimePdf::generateError(Bool_t forceNew)
{
if (errHist_ && (forceNew || !abscissaErrorGenerated_)) {
LauFitData errData = errHist_->generate(nullptr);
abscissaError_ = errData.at(this->varErrName());
abscissaErrorGenerated_ = kTRUE;
} else {
while (forceNew || !abscissaErrorGenerated_) {
abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_);
if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) {
abscissaErrorGenerated_ = kTRUE;
forceNew = kFALSE;
}
}
}
return abscissaError_;
}
Double_t LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
if( type_ != FuncType::Hist )
{
std::cerr << "ERROR in LauDecayTimePdf::generate : Right now we can only produce if the type == Hist. Returning 0.;" << std::endl;
// std::cout << "type_ = " << type_ << std::endl;
return 0.;
}
// generateError SHOULD have been called before this
// function but will call it here just to make sure
// (has no effect if it has already been called)
abscissaError_ = this->generateError();
LauFitData genAbscissa;
if( pdfHist_ ){ genAbscissa = pdfHist_ -> generate( kinematics ); }
else
{
std::cerr << "FATAL in LauDecayTimePdf : no pdfHist defined for a Hist style Decay Time Pdf!" << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
// mark that we need a new error to be generated next time
abscissaErrorGenerated_ = kFALSE;
return genAbscissa.at(this->varName());
}
/*
LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
// generateError SHOULD have been called before this
// function but will call it here just to make sure
// (has no effect if it has already been called)
abscissaError_ = this->generateError();
// If the PDF is scaled by the per-event error then need to update the PDF height for each event
Bool_t scale(kFALSE);
for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
scale |= (*iter);
}
for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
scale |= (*iter);
}
if (scale || (!this->heightUpToDate() && !this->cachePDF())) {
this->calcPDFHeight(kinematics);
this->heightUpToDate(kTRUE);
}
// Generate the value of the abscissa.
Bool_t gotAbscissa(kFALSE);
Double_t genVal(0.0);
Double_t genPDFVal(0.0);
LauFitData genAbscissa;
const Double_t xMin = this->minAbscissa();
const Double_t xMax = this->maxAbscissa();
const Double_t xRange = xMax - xMin;
while (!gotAbscissa) {
genVal = LauRandom::randomFun()->Rndm()*xRange + xMin;
this->calcLikelihoodInfo(genVal, abscissaError_);
genPDFVal = this->getUnNormLikelihood();
if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;}
if (genPDFVal > this->getMaxHeight()) {
std::cerr<<"Warning in LauDecayTimePdf::generate()."
<<" genPDFVal = "<<genPDFVal<<" is larger than the specified PDF height "
<<this->getMaxHeight()<<" for the abscissa = "<<genVal<<". Need to reset height to be larger than "
<<genPDFVal<<" by using the setMaxHeight(Double_t) function"
<<" and re-run the Monte Carlo generation!"<<std::endl;
}
}
genAbscissa[this->varName()] = genVal;
// mark that we need a new error to be generated next time
abscissaErrorGenerated_ = kFALSE;
return genAbscissa;
}
*/
void LauDecayTimePdf::setErrorHisto(const TH1* hist)
{
if ( errHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<std::endl;
return;
}
errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError());
}
void LauDecayTimePdf::setHistoPdf(const TH1* hist)
{
if ( pdfHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<std::endl;
return;
}
pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa());
}
void LauDecayTimePdf::setEffiHist(const TH1* hist)
{
if ( effiHist_ != nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl;
return;
}
if ( hist == nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl;
return;
}
// Check boundaries of histogram align with our abscissa's range
const Double_t axisMin {hist->GetXaxis()->GetXmin()};
const Double_t axisMax {hist->GetXaxis()->GetXmax()};
if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n"
<< " : histogram range: " << axisMin << " - " << axisMax << "\n"
<< " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n"
<< " : Disregarding this histogram." << std::endl;
return;
}
effiHist_ = dynamic_cast<TH1*>( hist->Clone() );
//Normalise the hist if the (relative) efficiencies have very large values
if(effiHist_ -> GetMaximum() > 1.)
{
effiHist_ -> Scale( 1. / effiHist_->Integral() ); //Normalise
std::cout << "INFO in LauDecayTimePdf::setEffiHist : Supplied histogram for Decay Time Acceptance has values too large: normalising..." << std::endl;
}
}
void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline)
{
if ( effiFun_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<std::endl;
return;
}
if ( spline == nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : supplied efficiency spline pointer is null."<<std::endl;
return;
}
effiFun_ = spline;
std::vector<Double_t> effis = effiFun_->getYValues();
effiPars_.resize( effis.size() );
effiParVals_.resize( effis.size() );
size_t index = 0;
for( Double_t& effi : effis )
{
effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data() ,index ), effi, 0.0, 1.0, kTRUE );
++index;
}
}
LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName)
{
for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return nullptr;
}
const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const
{
for ( std::vector<LauAbsRValue*>::const_iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return nullptr;
}
void LauDecayTimePdf::updatePulls()
{
for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
std::vector<LauParameter*> params = (*iter)->getPars();
for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) {
if (!(*iter)->fixed()) {
(*params_iter)->updatePull();
}
}
}
}
void LauDecayTimePdf::propagateParUpdates()
{
// If none of the parameters are floating there's nothing to do
if ( nothingFloating_ ) {
return;
}
// Otherwise, determine which of the floating parameters have changed (if any) and act accordingly
static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;};
anyKnotChanged_ = anyKnotFloating_ and not std::equal(effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), checkEquality);
// Update the acceptance spline if any of the knot values have changed
if ( anyKnotChanged_ ) {
effiFun_->updateYValues( effiPars_ );
}
// Check also the physics and resolution parameters
if ( nonKnotFloating_ ) {
if ( physicsParFloating_ ) {
tauChanged_ = tauFloating_ and not checkEquality(tau_, tauVal_);
deltaMChanged_ = deltaMFloating_ and not checkEquality(deltaM_, deltaMVal_);
deltaGammaChanged_ = deltaGammaFloating_ and not checkEquality(deltaGamma_, deltaGammaVal_);
physicsParChanged_ = tauChanged_ || deltaMChanged_ || deltaGammaChanged_;
}
if ( resoParFloating_ ) {
resoParChanged_ = kFALSE;
resoParChanged_ |= not std::equal( mean_.begin(), mean_.end(), meanVals_.begin(), checkEquality );
resoParChanged_ |= not std::equal( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), checkEquality );
resoParChanged_ |= not std::equal( frac_.begin(), frac_.end(), fracVals_.begin()+1, checkEquality );
}
nonKnotChanged_ = physicsParChanged_ or resoParChanged_;
}
// If nothing has changed, there's nothing to do
if ( not ( anyKnotChanged_ or nonKnotChanged_ ) ) {
return;
}
// Otherwise we need to update the cache
this->updateCache();
}
/*
void LauDecayTimePdf::updateEffiSpline(const std::vector<LauParameter*>& effiPars)
{
if (effiPars.size() != effiFun_->getnKnots()){
std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
effiFun_->updateYValues(effiPars);
}
*/
