diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc
index 9c407dd..af90248 100644
--- a/src/LauDecayTimePdf.cc
+++ b/src/LauDecayTimePdf.cc
@@ -1,1544 +1,1553 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauDecayTimePdf.cc
\brief File containing implementation of LauDecayTimePdf class.
*/
#include <complex>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>
#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"
#include "TH1.h"
#include "RooMath.h"
#include "Lau1DCubicSpline.hh"
#include "Lau1DHistPdf.hh"
#include "LauConstants.hh"
#include "LauComplex.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitDataTree.hh"
#include "LauParameter.hh"
#include "LauRandom.hh"
ClassImp(LauDecayTimePdf)
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
param_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
mean_(nGauss_,0),
sigma_(nGauss_,0),
frac_(nGauss_-1,0),
tau_(0),
deltaM_(0),
deltaGamma_(0),
fracPrompt_(0),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scale),
scaleWidths_(scale),
scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
effiPars_(0)
{
this->initialise();
}
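// A minimal construction sketch (illustrative only: the variable names,
// parameter values and enum scoping below are assumptions; see
// LauDecayTimePdf.hh for the authoritative declarations). Parameter names
// must follow the conventions enforced in initialise(), e.g. "mean_0" and
// "sigma_0" for the first Gaussian and "tau" for an Exp-type PDF:
//
// std::vector<LauAbsRValue*> params {
// new LauParameter( "tau", 1.52, 0.1, 5.0, kTRUE ),
// new LauParameter( "mean_0", 0.0, -1.0, 1.0, kTRUE ),
// new LauParameter( "sigma_0", 1.0, 0.0, 5.0, kTRUE )
// };
// std::vector<Bool_t> scale { kTRUE }; // scale resolution by the per-event error
// LauDecayTimePdf pdf( "t", "tErr", params, 0.0, 15.0, 0.0, 0.5,
// LauDecayTimePdf::Exp, 1, scale,
// LauDecayTimePdf::DecayTime,
// LauDecayTimePdf::EfficiencyMethod::Flat );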
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
param_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
mean_(nGauss_,0),
sigma_(nGauss_,0),
frac_(nGauss_-1,0),
tau_(0),
deltaM_(0),
deltaGamma_(0),
fracPrompt_(0),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scaleMeans),
scaleWidths_(scaleWidths),
scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or<Bool_t>() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
effiPars_(0)
{
this->initialise();
}
LauDecayTimePdf::~LauDecayTimePdf()
{
// Destructor
delete errHist_; errHist_ = nullptr;
delete pdfHist_; pdfHist_ = nullptr;
delete effiFun_; effiFun_ = nullptr;
delete effiHist_; effiHist_ = nullptr;
for( auto& par : effiPars_ ){ delete par; par = nullptr; }
effiPars_.clear();
}
void LauDecayTimePdf::initialise()
{
// The parameters are:
// - the mean and the sigma (bias and spread in resolution) of the gaussian(s)
// - the mean lifetime, denoted tau, of the exponential decay
// - the frequency of oscillation, denoted Delta m, of the cosine and sine terms
// - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms
//
// The next two arguments specify the range in which the PDF is defined,
// and the PDF will be normalised w.r.t. these limits.
//
// The remaining arguments define the type of decay-time PDF (Hist, Delta, Exp, DeltaExp, ExpTrig or ExpHypTrig), the number of gaussians
// in the resolution function, whether or not the gaussian parameters should be scaled by the per-event errors on Delta t,
// the time-measurement method and the method for modelling the decay-time efficiency
// First check whether the scale vector is nGauss in size
if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if (type_ == Hist) {
if (this->nParameters() != 0){
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else {
TString meanName("mean_");
TString sigmaName("sigma_");
TString fracName("frac_");
Bool_t foundParams(kTRUE);
for (UInt_t i(0); i<nGauss_; ++i) {
TString tempName(meanName); tempName += i;
TString tempName2(sigmaName); tempName2 += i;
TString tempName3(fracName); tempName3 += i;
mean_[i] = this->findParameter(tempName);
foundParams &= (mean_[i] != 0);
sigma_[i] = this->findParameter(tempName2);
foundParams &= (sigma_[i] != 0);
if (i!=0) {
frac_[i-1] = this->findParameter(tempName3);
foundParams &= (frac_[i-1] != 0);
}
}
if (type_ == Delta) {
if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == Exp) {
tau_ = this->findParameter("tau");
foundParams &= (tau_ != 0);
if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == DeltaExp) {
tau_ = this->findParameter("tau");
fracPrompt_ = this->findParameter("frac_prompt");
foundParams &= (tau_ != 0);
foundParams &= (fracPrompt_ != 0);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the fraction of the prompt part: \"frac_prompt\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
foundParams &= (tau_ != 0);
foundParams &= (deltaM_ != 0);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpHypTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
deltaGamma_ = this->findParameter("deltaGamma");
foundParams &= (tau_ != 0);
foundParams &= (deltaM_ != 0);
foundParams &= (deltaGamma_ != 0);
if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
std::cerr<<" - the width difference: \"deltaGamma\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
}
}
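// Computes the effective (single-Gaussian-equivalent) resolution of the
// multi-Gaussian resolution function via the dilution of the oscillation:
// D = sum_i f_i * exp( -deltaM^2 * sigma_i^2 / 2 )
// Solving exp( -deltaM^2 * sigma_eff^2 / 2 ) = D for sigma_eff gives
// sigma_eff = sqrt( -2 ln D ) / deltaM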
Double_t LauDecayTimePdf::effectiveResolution() const
{
Double_t dilution = 0.;
Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue();
// Might be cleaner to just append this to the vector in the init step,
// then the consistency can also be checked
Double_t fracSum = 0;
for (auto f : frac_) fracSum += f->unblindValue();
Double_t lastFrac = 1. - fracSum;
for (size_t i = 0; i < sigma_.size(); i++) {
Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue();
Double_t thisFrac = lastFrac;
if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue();
dilution += thisFrac * TMath::Exp(-dMSq * sigSq / 2.);
}
return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue();
}
void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData)
{
// Check that the input data contains the decay time variable
Bool_t hasBranch = inputData.haveBranch(this->varName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<std::endl;
return;
}
// If we're scaling by the per-event error, also check that the input data contains the decay time error variable
if ( scaleWithPerEventError_ ) {
hasBranch = inputData.haveBranch(this->varErrName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<std::endl;
return;
}
}
// Pass the data to the decay-time error PDF for caching
if ( errHist_ ) {
errHist_->cacheInfo(inputData);
}
if (type_ == Hist) {
// Pass the data to the decay-time PDF for caching
if ( pdfHist_ ) {
pdfHist_->cacheInfo(inputData);
}
} else {
// determine whether we are caching our PDF value
//TODO
//Bool_t doCaching( this->nFixedParameters() == this->nParameters() );
//this->cachePDF( doCaching );
// clear the vectors and reserve enough space
const UInt_t nEvents = inputData.nEvents();
abscissas_.clear(); abscissas_.reserve(nEvents);
abscissaErrors_.clear(); abscissaErrors_.reserve(nEvents);
expTerms_.clear(); expTerms_.reserve(nEvents);
cosTerms_.clear(); cosTerms_.reserve(nEvents);
sinTerms_.clear(); sinTerms_.reserve(nEvents);
coshTerms_.clear(); coshTerms_.reserve(nEvents);
sinhTerms_.clear(); sinhTerms_.reserve(nEvents);
normTermsExp_.clear(); normTermsExp_.reserve(nEvents);
normTermsCos_.clear(); normTermsCos_.reserve(nEvents);
normTermsSin_.clear(); normTermsSin_.reserve(nEvents);
normTermsCosh_.clear(); normTermsCosh_.reserve(nEvents);
normTermsSinh_.clear(); normTermsSinh_.reserve(nEvents);
effiTerms_.clear(); effiTerms_.reserve(nEvents);
// If we're not using per-event information for the decay time
// error, just calculate the normalisation terms once
if ( ! scaleWithPerEventError_ ) {
this->calcNorm();
}
for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {
const LauFitData& dataValues = inputData.getData(iEvt);
const Double_t abscissa { dataValues.at(this->varName()) };
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissas_.push_back( abscissa );
const Double_t abscissaErr { scaleWithPerEventError_ ? dataValues.at(this->varErrName()) : 0.0 };
if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissaErrors_.push_back(abscissaErr);
// std::cout << "\033[1;31m IN CACHE INFO \033[0m" << std::endl;
// std::cout << "\033[1;31m absErr: " << abscissaErr << "\033[0m" << std::endl; //DEBUG
this->calcLikelihoodInfo(abscissa, abscissaErr);
// If we are using per-event information for the decay
// time error, need to calculate the normalisation
// terms for every event
if ( scaleWithPerEventError_ ) {
this->calcNorm(abscissaErr);
}
expTerms_.push_back(expTerm_);
cosTerms_.push_back(cosTerm_);
sinTerms_.push_back(sinTerm_);
coshTerms_.push_back(coshTerm_);
sinhTerms_.push_back(sinhTerm_);
normTermsExp_.push_back(normTermExp_);
normTermsCos_.push_back(normTermCos_);
normTermsSin_.push_back(normTermSin_);
normTermsCosh_.push_back(normTermCosh_);
normTermsSinh_.push_back(normTermSinh_);
effiTerms_.push_back(effiTerm_);
}
}
}
void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt)
{
// Extract all the terms and their normalisations
if (type_ == Hist) {
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(iEvt);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
} else {
expTerm_ = expTerms_[iEvt];
cosTerm_ = cosTerms_[iEvt];
sinTerm_ = sinTerms_[iEvt];
coshTerm_ = coshTerms_[iEvt];
sinhTerm_ = sinhTerms_[iEvt];
normTermExp_ = normTermsExp_[iEvt];
normTermCos_ = normTermsCos_[iEvt];
normTermSin_ = normTermsSin_[iEvt];
normTermCosh_ = normTermsCosh_[iEvt];
normTermSinh_ = normTermsSinh_[iEvt];
}
// Extract the decay time error PDF value
if ( errHist_ ) {
errHist_->calcLikelihoodInfo(iEvt);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
// Extract the decay time efficiency
effiTerm_ = effiTerms_[iEvt];
// TODO - Parameters can change in some cases, so we'll need to update things!
// - For the moment do the blunt force thing and recalculate everything for every event!
// - Need to make this intelligent!
const Double_t abscissa = abscissas_[iEvt];
const Double_t abscissaErr = abscissaErrors_[iEvt];
this->calcLikelihoodInfo(abscissa,abscissaErr);
this->calcNorm(abscissaErr);
}
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa)
{
// Check whether any of the gaussians should be scaled - if any of them should we need the per-event error
if (scaleWithPerEventError_) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<<std::endl;
return;
}
this->calcLikelihoodInfo(abscissa, 0.0);
}
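// Evaluates all per-event terms at the given decay time (and, where relevant,
// per-event decay-time error): first the efficiency term, then either the
// histogram PDF value (Hist type), the analytic terms without resolution
// (if smearing is disabled), or the terms convolved with each Gaussian of
// the resolution function, and finally the decay-time error PDF value.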
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr)
{
// Check that the decay time and the decay time error are in valid ranges
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( scaleWithPerEventError_ && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
// Determine the decay time efficiency
switch( effMethod_ )
{
case EfficiencyMethod::Spline : effiTerm_ = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break;
case EfficiencyMethod::Binned : effiTerm_ = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break;
case EfficiencyMethod::Flat : effiTerm_ = 1.0 ; break;
}
if ( effiTerm_ > 1.0 ) { effiTerm_ = 1.0; }
else if ( effiTerm_ < 0.0 ) { effiTerm_ = 0.0; }
// For the histogram PDF just calculate that term and return
if (type_ == Hist){
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(abscissa);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
return;
}
// If we're not using the resolution function, calculate the simple terms and return
if (!this->doSmearing()) {
this->calcNonSmearedTerms(abscissa);
return;
}
// Get all the up to date parameter values for the resolution function
std::vector<Double_t> frac(nGauss_);
std::vector<Double_t> mean(nGauss_);
std::vector<Double_t> sigma(nGauss_);
Double_t fracPrompt(0.0);
// TODO - why do we do the fractions this way around?
frac[0] = 1.0;
for (UInt_t i(0); i<nGauss_; ++i) {
mean[i] = mean_[i]->unblindValue();
sigma[i] = sigma_[i]->unblindValue();
if (i != 0) {
frac[i] = frac_[i-1]->unblindValue();
frac[0] -= frac[i];
}
}
if (type_ == DeltaExp) {
fracPrompt = fracPrompt_->unblindValue();
}
// Scale the gaussian parameters by the per-event error on Delta t (if appropriate)
for (UInt_t i(0); i<nGauss_; ++i) {
if (scaleMeans_[i]) {
mean[i] *= abscissaErr;
}
if (scaleWidths_[i]) {
sigma[i] *= abscissaErr;
}
}
// Calculate term needed by every type
std::vector<Double_t> x(nGauss_);
const Double_t xMax = this->maxAbscissa();
const Double_t xMin = this->minAbscissa();
for (UInt_t i(0); i<nGauss_; ++i) {
x[i] = abscissa - mean[i];
}
Double_t value(0.0);
if (type_ == Delta || type_ == DeltaExp) {
// Calculate the gaussian function(s)
for (UInt_t i(0); i<nGauss_; ++i) {
if (TMath::Abs(sigma[i]) > 1e-10) {
Double_t exponent(0.0);
Double_t norm(0.0);
Double_t scale = LauConstants::root2*sigma[i];
Double_t scale2 = LauConstants::rootPiBy2*sigma[i];
exponent = -0.5*x[i]*x[i]/(sigma[i]*sigma[i]);
norm = scale2*(TMath::Erf((xMax - mean[i])/scale)
- TMath::Erf((xMin - mean[i])/scale));
value += frac[i]*TMath::Exp(exponent)/norm;
}
}
}
if (type_ != Delta) {
// Reset values of terms
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
// Calculate values of the PDF convoluted with each Gaussian for a given value of the abscissa
for (UInt_t i(0); i<nGauss_; ++i) {
// We need this in all other cases
const Double_t expTerm = smearedExpTerm(abscissa, sigma[i], mean[i]);
expTerm_ += frac[i] * expTerm;
// Typical case (1): B0/B0bar
if ( type_ == ExpTrig ) {
// LHCb convention, i.e. convolution evaluated between 0 and inf
if (method_ == DecayTime) {
auto [cosTerm, sinTerm] = smearedCosSinTerm(abscissa, sigma[i], mean[i]);
coshTerm_ += frac[i] * expTerm;
cosTerm_ += frac[i] * cosTerm;
sinTerm_ += frac[i] * sinTerm;
} else {
// lhcb.web.cern.ch
}
}
// Typical case (2): B0s/B0sbar
else if ( type_ == ExpHypTrig ) {
// LHCb convention
if (method_ == DecayTime) {
auto [coshTerm, sinhTerm] = smearedCoshSinhTerm(abscissa, sigma[i], mean[i]);
auto [cosTerm, sinTerm] = smearedCosSinTerm(abscissa, sigma[i], mean[i]);
coshTerm_ += frac[i] * coshTerm;
sinhTerm_ += frac[i] * sinhTerm;
cosTerm_ += frac[i] * cosTerm;
sinTerm_ += frac[i] * sinTerm;
} else {
// lhcb.web.cern.ch
}
}
}
if (type_ == DeltaExp) {
value *= fracPrompt;
value += (1.0-fracPrompt)*expTerm_;
} else {
value = expTerm_;
}
}
// Calculate the decay time error PDF value
if ( errHist_ ) {
std::vector<Double_t> absErrVec = {abscissaErr}; //Otherwise seg fault
errHist_->calcLikelihoodInfo(absErrVec);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
}
void LauDecayTimePdf::calcNonSmearedTerms(Double_t abscissa)
{
// Reset values of terms
errTerm_ = 1.0;
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
if ( type_ == Hist || type_ == Delta ){
return;
}
const Double_t tau { tau_->unblindValue() };
const Double_t gamma { 1.0 / tau };
if (method_ == DecayTime) {
expTerm_ = TMath::Exp(-abscissa*gamma);
} else if (method_ == DecayTimeDiff) {
expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gamma);
}
// Calculate also the terms related to cosine and sine
if (type_ == ExpTrig) {
const Double_t deltaM = deltaM_->unblindValue();
coshTerm_ = expTerm_;
sinhTerm_ = 0.0;
cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_;
}
// Calculate also the terms related to cosh, sinh, cosine, and sine
else if (type_ == ExpHypTrig) {
const Double_t deltaM = deltaM_->unblindValue();
const Double_t deltaGamma = deltaGamma_->unblindValue();
coshTerm_ = TMath::CosH(0.5*deltaGamma*abscissa)*expTerm_;
sinhTerm_ = TMath::SinH(0.5*deltaGamma*abscissa)*expTerm_;
cosTerm_ = TMath::Cos(deltaM*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaM*abscissa)*expTerm_;
}
}
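// Convolutions of exp(-Gamma*t)*cos(deltaM*t) and exp(-Gamma*t)*sin(deltaM*t)
// (for t > 0) with a Gaussian of mean mu and width sigma, evaluated via the
// Faddeeva function w:
// x = (t - mu) / (sqrt(2)*sigma), z = (Gamma - i*deltaM)*sigma / sqrt(2)
// conv = 0.5 * exp(-x^2) * w( i*(z - x) )
// whose real and imaginary parts are the cos and sin convolutions.
// When Im( i*(z - x) ) < -5 the equivalent erfc form
// conv = 0.5 * exp(z^2 - 2*z*x) * erfc(z - x)
// is used instead, since w overflows in that region.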
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCosSinTerm(Double_t t, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
const Double_t x = (t - mu) / (LauConstants::root2 * sigma);
const std::complex z = std::complex(gamma * sigma / LauConstants::root2, -this->deltaM_->unblindValue() * sigma / LauConstants::root2);
const std::complex arg1 = std::complex(0., 1.) * (z - x);
const std::complex arg2 { -(x*x) - (arg1 * arg1) };
const std::complex conv = arg1.imag() < -5.? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * TMath::Exp(-(x * x)) * RooMath::faddeeva(arg1) ;
const Double_t cos_conv = conv.real();
const Double_t sin_conv = conv.imag();
return {cos_conv, sin_conv};
}
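// Convolutions of exp(-Gamma*t)*cosh(deltaGamma*t/2) and
// exp(-Gamma*t)*sinh(deltaGamma*t/2) with a Gaussian, obtained from the
// identity cosh/sinh = (e^{+} +/- e^{-})/2: the two single-exponential
// convolutions with Gamma_H = Gamma - deltaGamma/2 and
// Gamma_L = Gamma + deltaGamma/2 are combined as
// cosh_conv = 0.5*(conv_H + conv_L), sinh_conv = 0.5*(conv_H - conv_L)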
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCoshSinhTerm(Double_t t, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
Double_t gamma = 1. / this->tau_->unblindValue();
std::complex x((t - mu) / (LauConstants::root2 * sigma),0.);
Double_t xRe = x.real();
Double_t z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
Double_t z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
//Doing H
std::complex arg_H1(0., z_H - x.real());
std::complex arg_H2 = -(x*x) - (arg_H1 * arg_H1);
std::complex conv_H = arg_H1.imag() < -5. ? (0.5 * std::exp(arg_H2)) * RooMath::erfc(-1i * arg_H1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_H1);
//Doing L
std::complex arg_L1(0., z_L - x.real());
std::complex arg_L2 = -(x*x) - (arg_L1 * arg_L1);
std::complex conv_L = arg_L1.imag() < -5. ? (0.5 * std::exp(arg_L2)) * RooMath::erfc(-1i * arg_L1) : 0.5 * TMath::Exp(-( xRe * xRe )) * RooMath::faddeeva(arg_L1);
std::complex cosh_conv = 0.5 * (conv_H + conv_L);
std::complex sinh_conv = 0.5 * (conv_H - conv_L);
return {cosh_conv.real(), sinh_conv.real()};
}
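// Convolution of exp(-Gamma*t) (for t > 0) with a Gaussian of mean mu and
// width sigma: the same Faddeeva-function construction as above with a
// purely real z = Gamma*sigma/sqrt(2); only the real part is physical.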
Double_t LauDecayTimePdf::smearedExpTerm(Double_t t, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
const std::complex x((t - mu) / (LauConstants::root2 * sigma),0.);
const Double_t xRe = x.real();
const Double_t z = (gamma * sigma) / LauConstants::root2;
const std::complex arg1(0., z - x.real());
const std::complex arg2 = -(x * x) - (arg1 * arg1);
const std::complex conv = arg1.imag() < -5. ? 0.5 * (std::exp(arg2)) * RooMath::erfc(-1i * arg1) : 0.5 * TMath::Exp(-(xRe * xRe)) * RooMath::faddeeva(arg1) ;
return conv.real();
}
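// Analytic integrals of exp(-Gamma*t)*cos(deltaM*t) and
// exp(-Gamma*t)*sin(deltaM*t) over [minAbs, maxAbs], taken as the real and
// imaginary parts of
// int exp(-(Gamma - i*deltaM)*t) dt
// = ( exp(-(Gamma - i*deltaM)*minAbs) - exp(-(Gamma - i*deltaM)*maxAbs) ) / (Gamma - i*deltaM)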
std::pair<Double_t, Double_t> LauDecayTimePdf::nonSmearedCosSinIntegral(Double_t minAbs, Double_t maxAbs)
{
// From 1407.0748, not clear whether complex is faster in this case
Double_t gamma = 1. / this->tau_->unblindValue();
LauComplex denom = LauComplex(gamma, -this->deltaM_->unblindValue());
LauComplex exponent = LauComplex(-gamma, this->deltaM_->unblindValue());
LauComplex num0 = -exponent.scale(minAbs).exp();
LauComplex num1 = -exponent.scale(maxAbs).exp();
LauComplex integral = (num1 - num0) / denom;
return {integral.re(), integral.im()};
}
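// Gaussian-smeared version of the above: with x = (t - mu)/(sqrt(2)*sigma)
// and z = (Gamma - i*deltaM)*sigma/sqrt(2), the integral over [minAbs, maxAbs]
// is sigma/(2*sqrt(2)*z) * [ erf(x) - exp(-x^2)*w(i*(z - x)) ] evaluated
// between the two scaled limits, again switching to the erfc form where the
// Faddeeva function w would overflow.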
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCosSinIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
Double_t gamma = 1. / this->tau_->unblindValue();
Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
std::complex z = std::complex(gamma * sigma / LauConstants::root2, -this->deltaM_->unblindValue() * sigma / LauConstants::root2);
std::complex arg1 = std::complex(0., 1.) * (z - x1);
std::complex arg0 = std::complex(0., 1.) * (z - x0);
std::complex integral = 0. + 0i;
if(arg1.imag() < -5.)
{integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
else
{integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.)
{integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
else
{integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);}
integral *= (sigma / (2. * LauConstants::root2 * z));
Double_t cos_integral = integral.real();
Double_t sin_integral = integral.imag();
return {cos_integral, sin_integral};
}
Double_t LauDecayTimePdf::nonSmearedExpIntegral(Double_t minAbs, Double_t maxAbs)
{
const Double_t tau = tau_->unblindValue();
const Double_t Gamma = 1.0 / tau;
return tau * ( TMath::Exp(-minAbs*Gamma) - TMath::Exp(-maxAbs*Gamma) );
}
Double_t LauDecayTimePdf::smearedExpIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
const Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
const Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
const Double_t z = (gamma * sigma) / LauConstants::root2;
std::complex arg1(0., z - x1);
std::complex arg0(0., z - x0);
std::complex integral = 0. + 0i;
if(arg1.imag() < -5.)
{integral = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
else
{integral = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.)
{integral -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
else
{integral -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0);}
integral *= (sigma / (2. * LauConstants::root2 * z));
return integral.real();
}
std::pair<Double_t, Double_t> LauDecayTimePdf::nonSmearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs)
{
// Use the exponential formulation rather than cosh, sinh.
// Fewer terms (reused for each), but not guaranteed to be faster.
Double_t gamma = 1. / this->tau_->unblindValue();
Double_t gammaH = gamma - 0.5 * deltaGamma_->unblindValue();
Double_t gammaL = gamma + 0.5 * deltaGamma_->unblindValue(); // note: '+' for the light state, matching z_L in the smeared case
Double_t nL1 = -TMath::Exp(-gammaL * maxAbs) / gammaL;
Double_t nH1 = -TMath::Exp(-gammaH * maxAbs) / gammaH;
Double_t nL0 = -TMath::Exp(-gammaL * minAbs) / gammaL;
Double_t nH0 = -TMath::Exp(-gammaH * minAbs) / gammaH;
Double_t cosh_integral = 0.5 * ( (nH1 + nL1) - (nH0 + nL0) );
Double_t sinh_integral = 0.5 * ( (nH1 - nL1) - (nH0 - nL0) );
return {cosh_integral, sinh_integral};
}
std::pair<Double_t, Double_t> LauDecayTimePdf::smearedCoshSinhIntegral(Double_t minAbs, Double_t maxAbs, Double_t sigma, Double_t mu)
{
using namespace std::complex_literals;
Double_t gamma = 1. / this->tau_->unblindValue();
Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
Double_t z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
std::complex arg1_H(0., z_H - x1);
std::complex arg0_H(0., z_H - x0);
std::complex integral_H = 0. + 0i;
if(arg1_H.imag() < -5.)
{integral_H = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_H * arg1_H)) * RooMath::erfc(-1i * arg1_H);}
else
{integral_H = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_H);}
if(arg0_H.imag() < -5.)
{integral_H -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_H * arg0_H)) * RooMath::erfc(-1i * arg0_H);}
else
{integral_H -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_H);}
integral_H *= (sigma / (2. * LauConstants::root2 * z_H));
// Same for light (L)
Double_t z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigma) / LauConstants::root2;
std::complex arg1_L(0., z_L - x1);
std::complex arg0_L(0., z_L - x0);
std::complex integral_L = 0. + 0i;
if(arg1_L.imag() < -5.)
{integral_L = RooMath::erf(x1) - std::exp(-(x1 * x1) - (arg1_L * arg1_L)) * RooMath::erfc(-1i * arg1_L);}
else
{integral_L = RooMath::erf(x1) - TMath::Exp(-(x1 * x1)) * RooMath::faddeeva(arg1_L);}
if(arg0_L.imag() < -5.)
{integral_L -= RooMath::erf(x0) - std::exp(-(x0 * x0) - (arg0_L * arg0_L)) * RooMath::erfc(-1i * arg0_L);}
else
{integral_L -= RooMath::erf(x0) - TMath::Exp(-(x0 * x0)) * RooMath::faddeeva(arg0_L);}
integral_L *= (sigma / (2. * LauConstants::root2 * z_L));
std::complex cosh_integral = 0.5 * (integral_H + integral_L);
std::complex sinh_integral = 0.5 * (integral_H - integral_L);
return {cosh_integral.real(), sinh_integral.real()};
}
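// Calculate the normalisation integrals of all terms, folding in the
// decay-time efficiency according to the chosen method:
// - Flat : a single analytic integral over the full decay-time range
// - Binned : a sum of analytic integrals over each histogram bin, each
// weighted by the efficiency in that bin
// - Spline : analytic integrals of the terms multiplied by the cubic
// polynomial in each inter-knot interval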
void LauDecayTimePdf::calcNorm(const Double_t abscissaErr)
{
/*if( abscissaErr <= 0. and scaleWithPerEventError_)
{
std::cerr << "\033[1;31m IN CALCNORM: \33[0m" << std::endl;
std::cerr << "\033[1;31m absErr: " << abscissaErr << "\033[0m" << std::endl; //DEBUG
}*/
// first reset integrals to zero
normTermExp_ = 0.0;
normTermCos_ = 0.0;
normTermSin_ = 0.0;
normTermCosh_ = 0.0;
normTermSinh_ = 0.0;
// Get all the up to date parameter values
std::vector<Double_t> fracs(nGauss_);
std::vector<Double_t> means(nGauss_);
std::vector<Double_t> sigmas(nGauss_);
// TODO - why do we do the fractions this way around?
fracs[0] = 1.0;
for (UInt_t i(0); i<nGauss_; ++i) {
means[i] = mean_[i]->unblindValue();
sigmas[i] = sigma_[i]->unblindValue();
if (i != 0) {
fracs[i] = frac_[i-1]->unblindValue();
fracs[0] -= fracs[i];
}
}
// Scale the gaussian parameters by the per-event error on decay time (if appropriate)
for (UInt_t i(0); i<nGauss_; ++i) {
if (scaleMeans_[i]) {
means[i] *= abscissaErr;
}
if (scaleWidths_[i]) {
sigmas[i] *= abscissaErr;
}
}
// std::cout << "\033[1;31m IN CALCNORM: \33[0m" << std::endl;
// std::cout << "\033[1;31m sigmas: ["; for (Double_t s : sigmas) std::cout << s << ", "; std::cout << "\b\b] \33[0m\t"; //DEBUG
// std::cout << "\033[1;31m absErr: " << abscissaErr << "\033[0m" << std::endl; //DEBUG
switch ( effMethod_ ) {
case EfficiencyMethod::Flat :
// No efficiency variation
// Simply calculate integrals over full range
if( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1.0, means, sigmas, fracs);}
else
{this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, 1.0 );}
break;
case EfficiencyMethod::Binned :
// Efficiency varies as piecewise constant
// Total integral is sum of integrals in each bin, each weighted by efficiency in that bin
for ( Int_t bin{1}; bin <= effiHist_->GetNbinsX(); ++bin ) {
const Double_t loEdge {effiHist_->GetBinLowEdge(bin)};
const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)};
const Double_t effVal {effiHist_->GetBinContent(bin)};
if ( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );}
else
{this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );}
}
break;
case EfficiencyMethod::Spline :
// Efficiency varies as piecewise polynomial
// TODO - to be worked out what to do here
if(not effiFun_){std::cerr << "ERROR in LauDecayTimePdf::calcNorm : no efficiency spline defined!" << std::endl; gSystem->Exit(EXIT_FAILURE);}
for(size_t i = 0; i < effiFun_ -> getnKnots()-1; ++i)
{
if( this -> doSmearing() )
{this -> calcSmearedSplinePartialIntegrals( i, means, sigmas, fracs );}
else
{this -> calcNonSmearedSplinePartialIntegrals( i );}
}
// std::cout << "\033[1;31m Normalisation values: \n" << normTermExp_ << "\n" << normTermCos_ << "\n" << normTermSin_ << "\n" << normTermCosh_ << "\n" << normTermSinh_ << "\n\n\033[0m" << std::endl; //DEBUG
/*
std::cerr << "WARNING in LauDecayTimePdf::calcNorm : normalisation integrals for spline acceptance not yet implemented - effect of acceptance will be neglected!" << std::endl;
if ( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , 1.0, mean, sigma, frac);}
else
{this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, 1.0 );}
*/
break;
}
// TODO - should we check here that all terms we expect to use are now non-zero?
// std::cout << "\033[1;34m In calcNorm: \033[0m" << std::endl; //DEBUG
// std::cout << "\033[1;34m normTermExp : \033[0m" << normTermExp_ << std::endl; //DEBUG
// std::cout << "\033[1;34m normTerm[Cos,Sin] : \033[0m[" << normTermCos_ << ", " << normTermSin_ << "]" << std::endl; //DEBUG
// std::cout << "\033[1;34m normTerm[Cosh,Sinh]: \033[0m[" << normTermCosh_ << ", " << normTermSinh_ << "]" << std::endl; //DEBUG
}
// TODO - Mildly concerned this is void rather than returning the integrals
// (but this would require refactoring for different return values).
// As long as it doesn't get called outside of calcNorm() it should be fine - DPO
void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight)
{
Double_t normTermExp {0.0};
if (method_ == DecayTime) {
normTermExp = weight * this -> nonSmearedExpIntegral(minAbs, maxAbs);
} else if (method_ == DecayTimeDiff) {
const Double_t tau = tau_->unblindValue();
const Double_t Gamma = 1.0 / tau;
// TODO - there should be some TMath::Abs here surely?
normTermExp = weight * tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma));
}
normTermExp_ += normTermExp;
// Normalisation factor for B0 decays
if ( type_ == ExpTrig ) {
normTermCosh_ += normTermExp;
auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
normTermCos_ += weight * cosIntegral;
normTermSin_ += weight * sinIntegral;
}
// Normalisation factor for Bs decays
else if ( type_ == ExpHypTrig ) {
auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs);
normTermCosh_ += weight * coshIntegral;
normTermSinh_ += weight * sinhIntegral;
auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
normTermCos_ += weight * cosIntegral;
normTermSin_ += weight * sinIntegral;
}
}
void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
for (UInt_t i(0); i<nGauss_; ++i)
{
Double_t normTermExp {0.0};
if (method_ == DecayTime) {
normTermExp = weight * this -> smearedExpIntegral(minAbs, maxAbs, sigmas[i], means[i]);
} else if (method_ == DecayTimeDiff) {
const Double_t tau = tau_->unblindValue();
const Double_t Gamma = 1.0 / tau;
// TODO - this is neglecting resolution at the moment
// TODO - there should be some TMath::Abs here surely?
normTermExp = weight * tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma));
}
normTermExp_ += fractions[i] * normTermExp;
// Normalisation factor for B0 decays
if ( type_ == ExpTrig ) {
normTermCosh_ += fractions[i] * normTermExp;
auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]);
normTermCos_ += fractions[i] * weight * cosIntegral;
normTermSin_ += fractions[i] * weight * sinIntegral;
}
// Normalisation factor for Bs decays
else if ( type_ == ExpHypTrig ) {
auto [coshIntegral, sinhIntegral] = this->smearedCoshSinhIntegral(minAbs, maxAbs, sigmas[i], means[i]);
normTermCosh_ += fractions[i] * weight * coshIntegral;
normTermSinh_ += fractions[i] * weight * sinhIntegral;
auto [cosIntegral, sinIntegral] = this->smearedCosSinIntegral(minAbs, maxAbs, sigmas[i], means[i]);
normTermCos_ += fractions[i] * weight * cosIntegral;
normTermSin_ += fractions[i] * weight * sinIntegral;
}
}
}
void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
for (UInt_t i(0); i<nGauss_; ++i)
{
Double_t normTermExp {0.0};
if (method_ == DecayTime) {
const std::complex<double> z = (gamma * sigmas[i]) / LauConstants::root2;
normTermExp = this -> smearedSplineNormalise(splineIndex, z, sigmas[i], means[i]).first;
} else if (method_ == DecayTimeDiff) {//TODO this isn't implemented at all
//const Double_t tau = tau_->unblindValue();
//const Double_t Gamma = 1.0 / tau;
// TODO - this is neglecting resolution at the moment
// TODO - there should be some TMath::Abs here surely?
// normTermExp = tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma));
//
; // nop so the compiler doesn't complain
}
normTermExp_ += fractions[i] * normTermExp;
// Normalisation factor for B0 decays
if ( type_ == ExpTrig ) {
normTermCosh_ += fractions[i] * normTermExp;
const std::complex z = std::complex(gamma * sigmas[i] / LauConstants::root2, -this->deltaM_->unblindValue() * sigmas[i] / LauConstants::root2);
auto [cosIntegral, sinIntegral] = this -> smearedSplineNormalise(splineIndex, z, sigmas[i], means[i]);
normTermCos_ += fractions[i] * cosIntegral;
normTermSin_ += fractions[i] * sinIntegral;
}
// Normalisation factor for Bs decays
else if ( type_ == ExpHypTrig ) {
const std::complex z_H = ((gamma - deltaGamma_->unblindValue() / 2.) * sigmas[i]) / LauConstants::root2;
const std::complex z_L = ((gamma + deltaGamma_->unblindValue() / 2.) * sigmas[i]) / LauConstants::root2;
const Double_t N_H = this -> smearedSplineNormalise(splineIndex, z_H, sigmas[i], means[i]).first;
const Double_t N_L = this -> smearedSplineNormalise(splineIndex, z_L, sigmas[i], means[i]).first;
const Double_t coshIntegral = 0.5 * (N_H + N_L);
const Double_t sinhIntegral = 0.5 * (N_H - N_L);
normTermCosh_ += fractions[i] * coshIntegral;
normTermSinh_ += fractions[i] * sinhIntegral;
const std::complex z = std::complex(gamma * sigmas[i] / LauConstants::root2, -this->deltaM_->unblindValue() * sigmas[i] / LauConstants::root2);
auto [cosIntegral, sinIntegral] = this -> smearedSplineNormalise(splineIndex, z, sigmas[i], means[i]);
normTermCos_ += fractions[i] * cosIntegral;
normTermSin_ += fractions[i] * sinIntegral;
}
}
}
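// The K_n(z) factors entering the spline-acceptance normalisation sum in
// smearedSplineNormalise (cf. eq. 31 of arXiv:1407.0748):
// K_0 = 1/(2z), K_1 = 1/(2z^2), K_2 = (1/z)(1 + 1/z^2), K_3 = (3/z^2)(1 + 1/z^2)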
std::array<std::complex<double>,4> LauDecayTimePdf::generateKvector(const std::complex<double> z)
{
std::array<std::complex<double>,4> K = {0.,0.,0.,0.};
const std::complex<double> zr = 1./z;
K[0] = 0.5*zr;
K[1] = 0.5*zr*zr;
K[2] = zr*(1.+zr*zr);
K[3] = 3.*zr*zr*(1.+zr*zr);
return K;
}
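// The M_n(x; z) factors that partner the K_n(z) above: each component
// combines erf(x) with the (numerically stabilised) Faddeeva term
// exp(-x^2)*w(i*(z - x)), and the returned array holds the differences
// M_n(x1) - M_n(x0) between the scaled upper and lower integration limits.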
std::array<std::complex<double>,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<double> z, const Double_t sigma, const Double_t mu)
{
using namespace std::complex_literals;
std::array<std::complex<double>,4> M0 = {0.,0.,0.,0.};
std::array<std::complex<double>,4> M1 = {0.,0.,0.,0.};
std::array<std::complex<double>,4> M;
const Double_t x1 = (maxAbs - mu) / (LauConstants::root2 * sigma);
const Double_t x0 = (minAbs - mu) / (LauConstants::root2 * sigma);
// std::cout << "\033[1;31m x1 :" << x1 << ", x0:" << x0 <<" \33[0m\t"; //DEBUG
//Values used a lot
const Double_t ex2_1 = TMath::Exp(-(x1*x1));
const Double_t ex2_0 = TMath::Exp(-(x0*x0));
const Double_t sqrtPir = 1/LauConstants::rootPi;
const std::complex arg1 = (0.+1.i) * (z-x1);
const std::complex arg0 = (0.+1.i) * (z-x0);
//fad = the faddeeva term times the ex2 value (done in different ways depending on the domain)
std::complex<double> fad1;
std::complex<double> fad0;
if(arg1.imag() < -5.)
{fad1 = std::exp(-(x1 * x1) - (arg1 * arg1)) * RooMath::erfc(-1i * arg1);}
else
{fad1 = ex2_1*RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.)
{fad0 = std::exp(-(x0 * x0) - (arg0 * arg0)) * RooMath::erfc(-1i * arg0);}
else
{fad0 = ex2_0*RooMath::faddeeva(arg0);}
//doing the actual functions for x1
M1[0] = RooMath::erf(x1) - fad1;
M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1);
M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1);
M1[3] = 4. * (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1);
//doing them again for x0
M0[0] = RooMath::erf(x0) - fad0;
M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0);
M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0);
M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0);
for(Int_t i = 0; i < 4; ++i){M[i] = M1[i] - M0[i];}
return M;
}
std::pair<Double_t,Double_t> LauDecayTimePdf::smearedSplineNormalise(const UInt_t splineIndex, std::complex<double> z, const Double_t sigma, const Double_t mu)
{
using namespace std::complex_literals;
const std::vector<Double_t>& xVals = effiFun_ -> getXValues();
const Double_t minAbs = xVals[splineIndex];
const Double_t maxAbs = xVals[splineIndex+1];
std::array<Double_t,4> coeffs = effiFun_ -> getCoefficients(splineIndex);
std::array<std::complex<double>,4> K = this -> generateKvector(z);
// std::cout << "\033[1;31m zVal: "<< z<<" Kvector: ["; for (std::complex k : K) std::cout << k << ", "; std::cout << "\b\b] \33[0m\t"; //DEBUG
std::array<std::complex<double>,4> M = this -> generateMvector(minAbs, maxAbs, z, sigma, mu);
// std::cout << "\033[1;31m Mvector: ["; for (std::complex m : M) std::cout << m << ", "; std::cout << "\b\b] \33[0m" << std::endl; //DEBUG
// gSystem -> Exit(EXIT_SUCCESS);
//Double sum to get N (eqn 31 in https://arxiv.org/pdf/1407.0748.pdf)
std::complex<double> N = 0. + 0i;
/* this is the way to do it from eq 31
for(Int_t i = 0; i < 4; ++i)
{
for(Int_t j = 0; j <= i ; ++j)
{
Int_t ij = i+j;
if(ij > 3){continue;} // sum component = 0
Double_t A = coeffs[ij] * TMath::Binomial(ij,j) / TMath::Power(2,ij);
N += A * M[i] * K[j];
}
}
*/
// The above appears to contain a maths error; the version below was re-derived and seems to work better
for(Int_t k = 0; k < 4; ++k){
for(Int_t n = 0; n <= k; ++n){
for(Int_t i = 0; i <= n; ++i){
Double_t b = TMath::Binomial(n,i)*TMath::Binomial(k,n); // the binomial coefficient terms
N += std::pow(sigma/std::sqrt(2), n+1)*coeffs[k]*std::pow(mu,k-n)*K[i]*M[n-i]*b;
}}}
return std::make_pair( N.real(), N.imag() );
}
void LauDecayTimePdf::calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex)
{
using namespace std::complex_literals;
const Double_t gamma = 1. / this->tau_->unblindValue();
Double_t normTermExp {0.0};
if (method_ == DecayTime) {
const std::complex<double> u = gamma;
normTermExp = this -> nonSmearedSplineNormalise(splineIndex, u).first;
} else if (method_ == DecayTimeDiff) {//TODO this isn't implemented at all
//const Double_t tau = tau_->unblindValue();
//const Double_t Gamma = 1.0 / tau;
// TODO - this is neglecting resolution at the moment
// TODO - there should be some TMath::Abs here surely?
// normTermExp = tau * (2.0 - TMath::Exp(-maxAbs*Gamma) - TMath::Exp(-minAbs*Gamma));
//
; // nop so the compiler doesn't complain
}
normTermExp_ += normTermExp;
// Normalisation factor for B0 decays
if ( type_ == ExpTrig ) {
normTermCosh_ += normTermExp;
const std::complex u = std::complex(gamma, -this->deltaM_->unblindValue());
auto [cosIntegral, sinIntegral] = this -> nonSmearedSplineNormalise(splineIndex, u);
normTermCos_ += cosIntegral;
normTermSin_ += sinIntegral;
}
// Normalisation factor for Bs decays
else if ( type_ == ExpHypTrig ) {
const std::complex u_H = (gamma - deltaGamma_->unblindValue() / 2.);
const std::complex u_L = (gamma + deltaGamma_->unblindValue() / 2.);
const Double_t N_H = this -> nonSmearedSplineNormalise(splineIndex, u_H).first;
const Double_t N_L = this -> nonSmearedSplineNormalise(splineIndex, u_L).first;
const Double_t coshIntegral = 0.5 * (N_H + N_L);
const Double_t sinhIntegral = 0.5 * (N_H - N_L);
normTermCosh_ += coshIntegral;
normTermSinh_ += sinhIntegral;
const std::complex u = std::complex(gamma, -this->deltaM_->unblindValue());
auto [cosIntegral, sinIntegral] = this -> nonSmearedSplineNormalise(splineIndex, u);
normTermCos_ += cosIntegral;
normTermSin_ += sinIntegral;
}
}
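// I_k is the basic spline-normalisation integral without resolution
// (cf. Appendix B.1 of arXiv:1407.0748):
// I_k(a, b; u) = int_a^b t^k exp(-u*t) dt
// = sum_{j=0..k} C(k,j) * ( -j!/u^{j+1} ) * [ t^{k-j} exp(-u*t) ]_a^b
// with u = Gamma - i*deltaM (or the real Gamma_{H,L} for the hyperbolic terms)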
std::complex<double> LauDecayTimePdf::I_k(const Int_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<double> u /*= Gamma - iDeltam*/)
{
//Taking mu = 0, this does not have to be the case in general
auto G = [&u](const Int_t n){return -TMath::Factorial(n)/std::pow(u,n+1);};//power of n+1 used rather than n; this is due to a maths error in the paper
auto H = [&u](const Int_t n, const Double_t t){return std::pow(t,n)*std::exp(-u*t);};
std::complex<double> ans = 0;
for (Int_t j = 0; j <= k; ++j)
{ans += TMath::Binomial(k,j)*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );}
return ans;
}
std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedSplineNormalise(const UInt_t splineIndex, std::complex<double> u /*= Gamma - iDeltam*/)
{
using namespace std::complex_literals;
const std::vector<Double_t>& xVals = effiFun_ -> getXValues();
const Double_t minAbs = xVals[splineIndex];
const Double_t maxAbs = xVals[splineIndex+1];
std::array<Double_t,4> coeffs = effiFun_ -> getCoefficients(splineIndex);
//sum to get N (eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 with the corrected maths error)
std::complex<double> N = 0. + 0i;
for(Int_t i = 0; i < 4; ++i){N += I_k(i, minAbs, maxAbs, u) * coeffs[i];}
return std::make_pair( N.real(), N.imag() );
}
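// Generate a per-event decay-time error: sample from the error histogram
// PDF if one has been supplied, otherwise draw from a Landau distribution
// with the configured MPV and width, repeating until the value lies inside
// the allowed error range.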
Double_t LauDecayTimePdf::generateError(Bool_t forceNew)
{
if (errHist_ && (forceNew || !abscissaErrorGenerated_)) {
LauFitData errData = errHist_->generate(nullptr);
abscissaError_ = errData.at(this->varErrName());
abscissaErrorGenerated_ = kTRUE;
} else {
while (forceNew || !abscissaErrorGenerated_) {
abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_);
if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) {
abscissaErrorGenerated_ = kTRUE;
forceNew = kFALSE;
}
}
}
return abscissaError_;
}
/*
LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
// generateError SHOULD have been called before this
// function but will call it here just to make sure
// (has no effect if it has already been called)
abscissaError_ = this->generateError();
// If the PDF is scaled by the per-event error then need to update the PDF height for each event
Bool_t scale(kFALSE);
for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
scale |= (*iter);
}
for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
scale |= (*iter);
}
if (scale || (!this->heightUpToDate() && !this->cachePDF())) {
this->calcPDFHeight(kinematics);
this->heightUpToDate(kTRUE);
}
// Generate the value of the abscissa.
Bool_t gotAbscissa(kFALSE);
Double_t genVal(0.0);
Double_t genPDFVal(0.0);
LauFitData genAbscissa;
const Double_t xMin = this->minAbscissa();
const Double_t xMax = this->maxAbscissa();
const Double_t xRange = xMax - xMin;
while (!gotAbscissa) {
genVal = LauRandom::randomFun()->Rndm()*xRange + xMin;
this->calcLikelihoodInfo(genVal, abscissaError_);
genPDFVal = this->getUnNormLikelihood();
if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;}
if (genPDFVal > this->getMaxHeight()) {
std::cerr<<"Warning in LauDecayTimePdf::generate()."
<<" genPDFVal = "<<genPDFVal<<" is larger than the specified PDF height "
<<this->getMaxHeight()<<" for the abscissa = "<<genVal<<". Need to reset height to be larger than "
<<genPDFVal<<" by using the setMaxHeight(Double_t) function"
<<" and re-run the Monte Carlo generation!"<<std::endl;
}
}
genAbscissa[this->varName()] = genVal;
// mark that we need a new error to be generated next time
abscissaErrorGenerated_ = kFALSE;
return genAbscissa;
}
*/
void LauDecayTimePdf::setErrorHisto(const TH1* hist)
{
if ( errHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<std::endl;
return;
}
errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError());
}
void LauDecayTimePdf::setHistoPdf(const TH1* hist)
{
if ( pdfHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<std::endl;
return;
}
pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa());
}
void LauDecayTimePdf::setEffiHist(const TH1* hist)
{
if ( effiHist_ != nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl;
return;
}
if ( hist == nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl;
return;
}
// Check boundaries of histogram align with our abscissa's range
const Double_t axisMin {hist->GetXaxis()->GetXmin()};
const Double_t axisMax {hist->GetXaxis()->GetXmax()};
if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n"
<< " : histogram range: " << axisMin << " - " << axisMax << "\n"
<< " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n"
<< " : Disregarding this histogram." << std::endl;
return;
}
effiHist_ = dynamic_cast<TH1*>( hist->Clone() );
//Normalise the hist if the (relative) efficiencies have very large values
if(effiHist_ -> GetMaximum() > 1.)
{
effiHist_ -> Scale( 1. / effiHist_->Integral() ); //Normalise
std::cout << "INFO in LauDecayTimePdf::setEffiHist : Supplied histogram for Decay Time Acceptance has values too large: normalising..." << std::endl;
}
}
void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline)
{
if ( effiFun_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<std::endl;
return;
}
effiFun_ = spline;
std::vector<Double_t> effis = effiFun_->getYValues();
effiPars_.resize( effis.size() );
size_t index = 0;
for( Double_t& effi : effis )
{
effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data(), index ), effi, 0.0, 1.0, kTRUE );
++index;
}
}
LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName)
{
for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return nullptr;
}
const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const
{
for ( std::vector<LauAbsRValue*>::const_iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return nullptr;
}
void LauDecayTimePdf::updatePulls()
{
for ( std::vector<LauAbsRValue*>::iterator iter = param_.begin(); iter != param_.end(); ++iter ) {
std::vector<LauParameter*> params = (*iter)->getPars();
for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) {
if (!(*iter)->fixed()) {
(*params_iter)->updatePull();
}
}
}
}
void LauDecayTimePdf::propagateParUpdates()
{
switch( effMethod_ )
{
case EfficiencyMethod::Spline:
if( effiFun_ ){ effiFun_ -> updateYValues( effiPars_ ); }
else {std::cerr << "WARNING in LauDecayTimePdf::propagateParUpdates: No efficiency Spline found!" << std::endl;}
break;
case EfficiencyMethod::Binned:
case EfficiencyMethod::Flat:
break;
}
}
void LauDecayTimePdf::updateEffiSpline(const std::vector<LauParameter*>& effiPars)
{
if (effiPars.size() != effiFun_->getnKnots()){
std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
effiFun_->updateYValues(effiPars);
}