diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh
index 03949bd..8eb363e 100644
--- a/inc/LauDecayTimePdf.hh
+++ b/inc/LauDecayTimePdf.hh
@@ -1,737 +1,740 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauDecayTimePdf.hh
\brief File containing declaration of LauDecayTimePdf class.
*/
/*! \class LauDecayTimePdf
\brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time.
LauDecayTimePdf is a class that provides the PDFs for describing the
time-dependence of the various terms in a particle/antiparticle decay to a
common final state. The various terms have the form of exponentially
decaying trigonometric or hyperbolic functions convolved with an N-Gaussian
resolution function.
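Schematically, for the ExpTrig and ExpHypTrig types the basic building blocks
are (a sketch of the conventions used by the term getters declared below):
\f$ e^{-t/\tau}\cos(\Delta m\,t),\; e^{-t/\tau}\sin(\Delta m\,t),\; e^{-t/\tau}\cosh(\tfrac{1}{2}\Delta\Gamma\,t),\; e^{-t/\tau}\sinh(\tfrac{1}{2}\Delta\Gamma\,t) \f$,
each convolved with the resolution function \f$ R(t) = \sum_{i=1}^{N} f_i\, G(t;\mu_i,\sigma_i) \f$
and multiplied by the decay-time efficiency.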
*/
#ifndef LAU_DECAYTIME_PDF
#define LAU_DECAYTIME_PDF
#include <vector>
#include <array>
#include <complex>
#include "TString.h"
#include "LauAbsRValue.hh"
#include "LauFitDataTree.hh"
#include "LauComplex.hh"
class TH1;
class Lau1DHistPdf;
class Lau1DCubicSpline;
// TODO - Should this have Pdf in the name?
// - Audit function names and public/private access category
// - Audit what should be given to constructor and what can be set later (maybe different constructors for different scenarios, e.g. smeared with per-event error/smeared with avg error/not smeared)
class LauDecayTimePdf final {
public:
// TODO - can we think of better names?
//! The functional form of the decay time PDF
enum FuncType {
Hist, //!< Hist PDF for fixed background
Delta, //!< Delta function - for prompt background
Exp, //!< Exponential function - for non-prompt background or charged B's
DeltaExp, //!< Delta + Exponential function - for background with prompt and non-prompt parts
ExpTrig, //!< Exponential function with Delta m driven mixing - for neutral B_d's
ExpHypTrig //!< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's
};
//! How is the decay time measured - absolute or difference?
enum TimeMeasurementMethod {
DecayTime, //!< Absolute measurement of decay time, e.g. LHCb scenario
DecayTimeDiff //!< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario
};
//! How is the TD efficiency information going to be given?
enum EfficiencyMethod {
Spline, //!< As a cubic spline
Binned, //!< As a histogram (TH1D/TH1F)
Flat //!< As a flat distribution (constant)
};
//! Constructor
/*!
\param [in] theVarName the name of the decay time variable in the input data
\param [in] theVarErrName the name of the decay time error variable in the input data
\param [in] params the parameters of the PDF
\param [in] minAbscissaVal the minimum value of the abscissa
\param [in] maxAbscissaVal the maximum value of the abscissa
\param [in] minAbscissaErr the minimum value of the abscissa error
\param [in] maxAbscissaErr the maximum value of the abscissa error
\param [in] type the functional form of the PDF
\param [in] nGauss the number of Gaussians in the resolution function
\param [in] scale controls whether the Gaussian parameters are scaled by the per-event error
\param [in] method set the type of the time measurement used in the given experiment
*/
LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
const Double_t minAbscissaVal, const Double_t maxAbscissaVal,
const Double_t minAbscissaErr, const Double_t maxAbscissaErr,
const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline);
//! Constructor
/*!
\param [in] theVarName the name of the decay time variable in the input data
\param [in] theVarErrName the name of the decay time error variable in the input data
\param [in] params the parameters of the PDF
\param [in] minAbscissaVal the minimum value of the abscissa
\param [in] maxAbscissaVal the maximum value of the abscissa
\param [in] minAbscissaErr the minimum value of the abscissa error
\param [in] maxAbscissaErr the maximum value of the abscissa error
\param [in] type the functional form of the PDF
\param [in] nGauss the number of Gaussians in the resolution function
\param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error
\param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error
\param [in] method set the type of the time measurement used in the given experiment
*/
LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
const Double_t minAbscissaVal, const Double_t maxAbscissaVal,
const Double_t minAbscissaErr, const Double_t maxAbscissaErr,
const FuncType type, const UInt_t nGauss, const std::vector<Bool_t>& scaleMeans,
const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline);
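// A hedged usage sketch (illustrative, not part of the original header): an
// ExpTrig signal PDF with a single Gaussian resolution function whose width
// is scaled by the per-event decay-time error. The LauParameter constructor
// signature (name, value, min, max, fixed) is assumed from standard Laura++
// usage; variable names, ranges and starting values are examples only.
std::vector<LauAbsRValue*> dtPars;
dtPars.push_back( new LauParameter( "tau", 1.520, 0.5, 5.0, kTRUE ) ); // lifetime (ps)
dtPars.push_back( new LauParameter( "deltaM", 0.5065, 0.0, 1.0, kTRUE ) ); // mixing frequency (ps^-1)
dtPars.push_back( new LauParameter( "mean_0", 0.0, -2.0, 2.0, kTRUE ) ); // resolution bias
dtPars.push_back( new LauParameter( "sigma_0", 1.2, 0.0, 5.0, kTRUE ) ); // scale factor for the per-event error
const std::vector<Bool_t> scale { kTRUE }; // one entry per Gaussian
LauDecayTimePdf* dtPdf = new LauDecayTimePdf( "decaytime", "decaytimeErr", dtPars,
                                              0.0, 15.0, 0.0, 0.5,
                                              LauDecayTimePdf::ExpTrig, 1, scale,
                                              LauDecayTimePdf::DecayTime );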
//! Copy constructor (deleted)
LauDecayTimePdf(const LauDecayTimePdf& other) = delete;
//! Copy assignment operator (deleted)
LauDecayTimePdf& operator=(const LauDecayTimePdf& other) = delete;
//! Move constructor (deleted)
LauDecayTimePdf(LauDecayTimePdf&& other) = delete;
//! Move assignment operator (deleted)
LauDecayTimePdf& operator=(LauDecayTimePdf&& other) = delete;
//! Destructor
~LauDecayTimePdf();
// TODO - Do we need this?
// - If so, should it be a hist or a LauAbsPdf?
// - Or should there be a dedicated constructor for this scenario?
//! Set the Histogram PDF in case of fixed background PDF
void setHistoPdf(const TH1* hist);
// TODO - should this be a LauAbsPdf instead?
//! Set the histogram to be used for generation of per-event decay time errors
/*!
If not set, generation will fall back to using a Landau distribution
\param [in] hist the histogram of the distribution
*/
void setErrorHisto(const TH1* hist);
// TODO - do we still want this option?
//! Set the parameters of the Landau distribution used to generate the per-event decay time errors
/*!
\param [in] mpv the MPV (most probable value) of the distribution
\param [in] sigma the width of the distribution
*/
void setErrorDistTerms(const Double_t mpv, const Double_t sigma) {
errorDistMPV_ = mpv;
errorDistSigma_ = sigma;
}
// TODO - should we remove the EfficiencyMethod argument from the constructor, default to Flat and have these functions modify it?
//! Set the efficiency function in the form of a histogram
/*!
\param [in] hist the histogram of efficiencies
*/
void setEffiHist(const TH1* hist);
//! Set the efficiency function in the form of a spline
/*!
\param [in] spline the efficiency spline function
*/
void setEffiSpline(Lau1DCubicSpline* spline);
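// A hedged sketch of the Binned efficiency option, given a LauDecayTimePdf*
// dtPdf constructed with EfficiencyMethod::Binned (histogram name, binning
// and contents are illustrative):
TH1D* effiHist = new TH1D( "effiHist", "decay-time efficiency", 30, 0.0, 15.0 );
// ... fill each bin with the selection efficiency (values in [0,1]) ...
dtPdf->setEffiHist( effiHist );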
//! Retrieve the name of the decay time variable
const TString& varName() const {return varName_;}
//! Retrieve the name of the error variable
const TString& varErrName() const {return varErrName_;}
// TODO - this should probably be set at construction time
//! Turn on or off the resolution function
void doSmearing(Bool_t smear) {smear_ = smear;}
//! Determine if the resolution function is turned on or off
Bool_t doSmearing() const {return smear_;}
// TODO - we don't use this at the moment - remove it?
//! Calculate single effective decay time resolution from multiple Gaussian resolution functions
/*!
\return effective resolution
*/
Double_t effectiveResolution() const;
//! Cache information from data
/*!
\param [in] inputData the dataset to be used to calculate everything
*/
void cacheInfo(const LauFitDataTree& inputData);
//! Calculate the likelihood (and all associated information) given value of the abscissa
/*!
\param [in] abscissa the value of the abscissa
*/
void calcLikelihoodInfo(const Double_t abscissa);
//! Calculate the likelihood (and all associated information) given value of the abscissa and its error
/*!
\param [in] abscissa the value of the abscissa
\param [in] abscissaErr the error on the abscissa
*/
void calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr);
//! Retrieve the likelihood (and all associated information) given the event number
/*!
\param [in] iEvt the event number
*/
void calcLikelihoodInfo(const UInt_t iEvt);
//! Determine the efficiency value for the given abscissa
/*!
\param [in] abscissa the value of the abscissa
\return the corresponding efficiency value
*/
Double_t calcEffiTerm( const Double_t abscissa ) const;
//! Get FuncType from model
FuncType getFuncType() const {return type_;}
// TODO - should maybe do away with exp term (and its norm) since it's just the cosh term when DG=0 and it's confusing to have both
// - counter argument is to keep it for backgrounds that have a lifetime-like behaviour
//! Get the exponential term
Double_t getExpTerm() const {return expTerm_;}
//! Get the cos(Dm*t) term (multiplied by the exponential)
Double_t getCosTerm() const {return cosTerm_;}
//! Get the sin(Dm*t) term (multiplied by the exponential)
Double_t getSinTerm() const {return sinTerm_;}
//! Get the cosh(DG/2*t) term (multiplied by the exponential)
Double_t getCoshTerm() const {return coshTerm_;}
//! Get the sinh(DG/2*t) term (multiplied by the exponential)
Double_t getSinhTerm() const {return sinhTerm_;}
+ //! Get the histogram PDF term (for the Hist PDF type)
+ Double_t getHistTerm() const {return pdfTerm_;}
+
//! Get the normalisation related to the exponential term only
Double_t getNormTermExp() const {return normTermExp_;}
//! Get the normalisation related to the cos term only
Double_t getNormTermCos() const {return normTermCos_;}
//! Get the normalisation related to the sin term only
Double_t getNormTermSin() const {return normTermSin_;}
//! Get the first term in the normalisation (from integrating the cosh)
Double_t getNormTermCosh() const {return normTermCosh_;}
//! Get the second term in the normalisation (from integrating the sinh)
Double_t getNormTermSinh() const {return normTermSinh_;}
//! Get the error probability density from the error distribution
Double_t getErrTerm() const {return errTerm_;}
//! Get the efficiency probability density from the efficiency distribution
Double_t getEffiTerm() const {return effiTerm_;}
//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
/*!
\return the parameters of the PDF
*/
const std::vector<LauAbsRValue*>& getParameters() const { return params_; }
//! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit
/*!
\return the parameters of the PDF
*/
std::vector<LauAbsRValue*>& getParameters() { return params_; }
//! Update the pulls for all parameters
void updatePulls();
//! Calculate the normalisation of all terms
void calcNorm();
//! Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency
/*!
This form is to be used for the case where decay time resolution is neglected
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] weight the weight for this range, typically the efficiency value
*/
void calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight);
//! Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency
/*!
This form is to be used for the case where decay time resolution is accounted for
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] weight the weight for this range, typically the efficiency value
\param [in] means the mean values of each Gaussian in the resolution function
\param [in] sigmas the width values of each Gaussian in the resolution function
\param [in] fractions the fractional weight of each Gaussian in the resolution function
*/
void calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions);
//! Calculate the normalisation integrals in the given range for the case of spline efficiency
/*!
This form is to be used for the case where decay time resolution is accounted for
\param [in] iEvt the event number (for the case of using per-event decay-time error)
\param [in] splineIndex the index of the spline segment being integrated
\param [in] means the mean values of each Gaussian in the resolution function
\param [in] sigmas the width values of each Gaussian in the resolution function
\param [in] fractions the fractional weight of each Gaussian in the resolution function
*/
void calcSmearedSplinePartialIntegrals(const UInt_t iEvt, const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions);
//! Calculate the normalisation integrals in the given range for the case of spline efficiency
/*!
This form is to be used for the case where decay time resolution is neglected
\param [in] splineIndex the index of the spline segment being integrated
*/
void calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex);
//! Calculate normalisation for non-smeared cos and sin terms
/*!
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\return pair of {cosTermIntegral, sinTermIntegral}
*/
std::pair<Double_t, Double_t> nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs);
//! Calculate normalisation for non-smeared cosh and sinh terms
/*!
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\return pair of {coshTermIntegral, sinhTermIntegral}
*/
std::pair<Double_t, Double_t> nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs);
//! Calculate normalisation for non-smeared exponential term
/*!
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\return integral
*/
Double_t nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs);
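// For reference (a sketch, not from the original header): the three
// non-smeared integrals above all reduce to one complex exponential
// integral. With u = Gamma - i*DeltaM,
//
//   integral_{minAbs}^{maxAbs} exp(-u*t) dt = ( exp(-u*minAbs) - exp(-u*maxAbs) ) / u ,
//
// whose real and imaginary parts give the cos and sin integrals; replacing
// u by Gamma -/+ DeltaGamma/2 and taking half the sum (difference) of the
// two results gives the cosh (sinh) integral; u = Gamma alone gives the
// plain exponential integral.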
//! Calculate normalisation for decay-time resolution smeared terms
/*!
Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748
\param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2)
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] sigmaOverRoot2 width of the Gaussian resolution function, divided by sqrt(2)
\param [in] mu mean of the Gaussian resolution function
\return complex integral
*/
std::complex<Double_t> smearedGeneralIntegral(const std::complex<Double_t>& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu);
//! Calculate decay-time resolution smeared terms
/*!
Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748
\param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2)
\param [in] x the scaled decay time: ( t - mu ) / ( sqrt(2) * sigma )
\return complex smeared term
*/
std::complex<Double_t> smearedGeneralTerm(const std::complex<Double_t>& z, const Double_t x);
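// For reference (a sketch of the Section 3 result of arXiv:1407.0748, in
// the conventions for z and x given above): the convolution of the decaying
// oscillatory exponential with a single Gaussian G(t; mu, sigma) is
//
//   integral_{0}^{inf} exp( -(Gamma - i*DeltaM)*t' ) G(t - t'; mu, sigma) dt'
//     = ( exp(-x^2) / 2 ) * w( i*(z - x) ) ,
//
// where w is the Faddeeva function (RooMath::faddeeva in ROOT); the cos/sin
// and cosh/sinh terms are then recovered from the real and imaginary parts
// of suitable combinations of this complex quantity.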
//! Calculate and cache powers of means and sigmas for each Gaussian in the resolution function
/*!
\param [in] iEvt the event number (for the case of using per-event decay-time error)
\param [in] means mean of each Gaussian in the resolution function
\param [in] sigmas width of each Gaussian in the resolution function
*/
void calcMeanAndSigmaPowers( const UInt_t iEvt, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas );
//! Calculate and cache K-vectors for each term and for each Gaussian in the resolution function
/*!
\param [in] iEvt the event number (for the case of using per-event decay-time error)
*/
void calcKVectors( const UInt_t iEvt );
//! Generate the K vector used in eqn 31 from arXiv:1407.0748
/*!
\param [in] z the complex z value, which differs between the exp, trig and hyperbolic terms
\return size 4 array of vector values
*/
std::array<std::complex<Double_t>,4> generateKvector(const std::complex<Double_t>& z);
//! Generate the M vector used in eqn 31 from arXiv:1407.0748
/*!
Uses the Faddeeva function method from
https://arxiv.org/abs/1407.0748
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2)
\param [in] sigma width of the Gaussian resolution function
\param [in] mu mean of the Gaussian resolution function
\return size 4 array of vector values
*/
std::array<std::complex<Double_t>,4> generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& z, const Double_t sigma, const Double_t mu = 0.);
//! Calculate the normalisation of a given term in a particular spline segment
/*!
\param [in] coeffs spline coefficients in this segment
\param [in] K K-vector for this term
- \param [in] M M-vector for this term
+ \param [in] M M-vector for this term
\param [in] sigmaPowers powers of the width of the Gaussian resolution function
\param [in] meanPowers powers of the mean of the Gaussian resolution function
\return the complex normalisation
*/
std::complex<Double_t> smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<Double_t>,4>& K, const std::array<std::complex<Double_t>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const;
//! Calculate integrals of each power of t within a given spline segment
/*!
\param [in] k the power of t
\param [in] minAbs lower bound for the integral domain
\param [in] maxAbs upper bound for the integral domain
\param [in] u the complex expression with general form: (Gamma - i Delta_m)
\return the complex integral
*/
std::complex<Double_t> calcIk(const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& u);
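// For reference (a sketch): the I_k obey an integration-by-parts recursion,
//
//   I_k(u) = integral_{minAbs}^{maxAbs} t^k exp(-u*t) dt
//          = ( minAbs^k exp(-u*minAbs) - maxAbs^k exp(-u*maxAbs) ) / u + (k/u) * I_{k-1}(u) ,
//
// with I_0(u) = ( exp(-u*minAbs) - exp(-u*maxAbs) ) / u, so only k = 0..3
// are needed to integrate a cubic spline segment.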
//! Calculate the normalisation of a given term in a particular spline segment
/*!
\param [in] splineIndex the index of the spline segment being integrated
\param [in] u the complex expression with general form: (Gamma - i Delta_m)
\param [in] cache cached results of calcIk, to be used and/or updated as appropriate
\return the complex normalisation
*/
std::complex<Double_t> nonSmearedSplineNormalise(const UInt_t splineIndex, const std::complex<Double_t>& u, std::vector<std::array<std::complex<Double_t>,4>>& cache);
//! Generate a value for the decay time error
/*!
If scaling by the per-event error, this should be called before calling generate
\param [in] forceNew forces generation of a new value
\return the generated error value
*/
Double_t generateError(const Bool_t forceNew = kFALSE);
//TODO not clear that this is really needed, perhaps for background? commented out for now
//! Generate an event from the PDF
/*!
\param [in] kinematics used by some PDFs to determine the DP position, on which they have dependence
*/
//LauFitData generate(const LauKinematics* kinematics);
//! Retrieve the decay time minimum value
Double_t minAbscissa() const {return minAbscissa_;}
//! Retrieve the decay time maximum value
Double_t maxAbscissa() const {return maxAbscissa_;}
//! Retrieve the decay time error minimum value
Double_t minAbscissaError() const {return minAbscissaError_;}
//! Retrieve the decay time error maximum value
Double_t maxAbscissaError() const {return maxAbscissaError_;}
// TODO - can we delete this?
// NB calcPDFHeight only calculates the gaussian information for the (type_ == Delta) case
//! Calculate the maximum height of the PDF
//void calcPDFHeight( const LauKinematics* kinematics );
//! Get efficiency parameters to float in the fit
std::vector<LauParameter*>& getEffiPars() {return effiPars_;}
//! Propagate any parameter updates to the decay-time efficiency and recalculate the normalisation if necessary
void propagateParUpdates();
// TODO - can we delete this?
//! Update spline Y values when floating the decay time acceptance
/*!
\param [in] params the vector of LauParameters describing the Y values
*/
//void updateEffiSpline(const std::vector<LauParameter*>& params);
//! Set up the initial state correctly - called by the fit model's initialise function
void initialise();
protected:
//! Calculate the pure physics terms with no resolution function applied
void calcNonSmearedTerms(const Double_t abscissa);
//! Retrieve the number of PDF parameters
/*!
\return the number of PDF parameters
*/
UInt_t nParameters() const {return params_.size();}
//! Retrieve the specified parameter
/*!
\param [in] parName the parameter to retrieve
*/
LauAbsRValue* findParameter(const TString& parName);
//! Retrieve the specified parameter
/*!
\param [in] parName the parameter to retrieve
*/
const LauAbsRValue* findParameter(const TString& parName) const;
//! Update the cache values for all events
void updateCache();
private:
//! Name of the variable
const TString varName_;
//! Name of the error variable
const TString varErrName_;
//! The parameters of the PDF
std::vector<LauAbsRValue*> params_;
// TODO - should probably set this at construction time (can then be const)
//! Smear with the resolution model or not
Bool_t smear_;
//! The minimum value of the decay time
const Double_t minAbscissa_;
//! The maximum value of the decay time
const Double_t maxAbscissa_;
//! The minimum value of the decay time error
const Double_t minAbscissaError_;
//! The maximum value of the decay time error
const Double_t maxAbscissaError_;
//! The current value of the decay time error
Double_t abscissaError_;
//! Flag whether a value for the decay time error has been generated
Bool_t abscissaErrorGenerated_;
//! Value of the MPV of the Landau dist used to generate the Delta t error
Double_t errorDistMPV_;
//! Value of the width of the Landau dist used to generate the Delta t error
Double_t errorDistSigma_;
//! The number of Gaussians in the resolution model
const UInt_t nGauss_;
// Parameters of the Gaussian(s) that account for the resolution:
//! mean (offset) of each Gaussian in the resolution function
std::vector<LauAbsRValue*> mean_;
//! spread (sigma) of each Gaussian in the resolution function
std::vector<LauAbsRValue*> sigma_;
//! fraction of each Gaussian in the resolution function
std::vector<LauAbsRValue*> frac_;
// Parameters of the physics decay time distribution
//! Lifetime parameter
LauAbsRValue* tau_;
//! Mass difference parameter
LauAbsRValue* deltaM_;
//! Width difference parameter
LauAbsRValue* deltaGamma_;
//! Parameter for the fraction of prompt events in DeltaExp
LauAbsRValue* fracPrompt_;
//! Which type of decay time function is this?
const FuncType type_;
//! Are we using absolute decay time or decay time difference?
const TimeMeasurementMethod method_;
//! Which method for eff(decaytime) input are we using?
const EfficiencyMethod effMethod_;
//! Scale the mean of each Gaussian by the per-event decay time error?
const std::vector<Bool_t> scaleMeans_;
//! Scale the sigma of each Gaussian by the per-event decay time error?
const std::vector<Bool_t> scaleWidths_;
//! Is anything being scaled by the per-event decay time error?
const Bool_t scaleWithPerEventError_;
//! The exp(-G*t) term
Double_t expTerm_;
//! The cos(Dm*t) term (multiplied by the exponential)
Double_t cosTerm_;
//! The sin(Dm*t) term (multiplied by the exponential)
Double_t sinTerm_;
//! The cosh(DG/2*t) term (multiplied by the exponential)
Double_t coshTerm_;
//! The sinh(DG/2*t) term (multiplied by the exponential)
Double_t sinhTerm_;
//! Normalisation of the exponential term
Double_t normTermExp_;
//! Normalisation of the cos term
Double_t normTermCos_;
//! Normalisation of the sin term
Double_t normTermSin_;
//! Normalisation of the cosh term
Double_t normTermCosh_;
//! Normalisation of the sinh term
Double_t normTermSinh_;
//! Error PDF (NB there is no equivalent cache since the PDF errHist_ keeps a cache)
Double_t errTerm_;
//! Efficiency
Double_t effiTerm_;
//TODO : to be deleted? or needed for backgrounds?
//! Hist PDF term (NB there is no equivalent cache since the PDF pdfHist_ keeps a cache)
Double_t pdfTerm_;
//! The cache of the decay times
std::vector<Double_t> abscissas_;
//! The cache of the per-event errors on the decay time
std::vector<Double_t> abscissaErrors_;
//! The cache of the exponential terms
std::vector<Double_t> expTerms_;
//! The cache of the exponential * cosh(DG/2*t) terms
std::vector<Double_t> coshTerms_;
//! The cache of the exponential * sinh(DG/2*t) terms
std::vector<Double_t> sinhTerms_;
//! The cache of the exponential * cos(Dm*t) terms
std::vector<Double_t> cosTerms_;
//! The cache of the exponential * sin(Dm*t) terms
std::vector<Double_t> sinTerms_;
//! The cache of the exponential normalisation
std::vector<Double_t> normTermsExp_;
//! The cache of the cosh term normalisation
std::vector<Double_t> normTermsCosh_;
//! The cache of the sinh term normalisation
std::vector<Double_t> normTermsSinh_;
//! The cache of the cos term normalisation
std::vector<Double_t> normTermsCos_;
//! The cache of the sin term normalisation
std::vector<Double_t> normTermsSin_;
//! The cache of the efficiency
std::vector<Double_t> effiTerms_;
//! Histogram PDF for abscissa error distribution
Lau1DHistPdf* errHist_;
//! Histogram PDF for abscissa distribution
Lau1DHistPdf* pdfHist_;
//! Efficiency function in the form of a cubic spline
Lau1DCubicSpline* effiFun_;
//! Efficiency function in the form of a histogram
TH1* effiHist_;
//! Vector of parameters to float acceptance
std::vector<LauParameter*> effiPars_;
// Caching / bookkeeping
//! Binomial coefficients
// TODO - would prefer this to use std::array but cling doesn't like it
static constexpr Double_t binom_[4][4] = {
{1., 0., 0., 0.},
{1., 1., 0., 0.},
{1., 2., 1., 0.},
{1., 3., 3., 1.}
};
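// NB binom_[n][k] is the binomial coefficient C(n,k) for n <= 3 (Pascal's
// triangle), presumably used to expand (a+b)^n = sum_k C(n,k) a^(n-k) b^k
// when converting between powers of t and powers of (t - mu) in the spline
// normalisation.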
Bool_t nothingFloating_{kFALSE};
Bool_t anyKnotFloating_{kTRUE};
Bool_t nonKnotFloating_{kTRUE};
Bool_t physicsParFloating_{kTRUE};
Bool_t tauFloating_{kTRUE};
Bool_t deltaMFloating_{kTRUE};
Bool_t deltaGammaFloating_{kTRUE};
Bool_t resoParFloating_{kTRUE};
//std::vector<Bool_t> meansFloating_;
//std::vector<Bool_t> sigmasFloating_;
//std::vector<Bool_t> fracsFloating_;
Bool_t nothingChanged_{kFALSE};
Bool_t anyKnotChanged_{kTRUE};
Bool_t nonKnotChanged_{kTRUE};
Bool_t physicsParChanged_{kTRUE};
Bool_t tauChanged_{kTRUE};
Bool_t deltaMChanged_{kTRUE};
Bool_t deltaGammaChanged_{kTRUE};
Bool_t resoParChanged_{kTRUE};
//std::vector<Bool_t> meansChanged_;
//std::vector<Bool_t> sigmasChanged_;
//std::vector<Bool_t> fracsChanged_;
Double_t tauVal_{0.0};
Double_t gammaVal_{0.0};
Double_t deltaMVal_{0.0};
Double_t deltaGammaVal_{0.0};
std::vector<Double_t> meanVals_;
std::vector<Double_t> sigmaVals_;
std::vector<Double_t> fracVals_;
std::vector<Double_t> effiParVals_;
// vector has size nSplineSegments, array has 0th - 3rd powers
std::vector<std::array<std::complex<Double_t>,4>> expTermIkVals_;
std::vector<std::array<std::complex<Double_t>,4>> trigTermIkVals_;
std::vector<std::array<std::complex<Double_t>,4>> hypHTermIkVals_;
std::vector<std::array<std::complex<Double_t>,4>> hypLTermIkVals_;
// outer vector has nEvents entries, inner vector has nGauss_ entries, array has 0th - 3rd or 1st - 4th powers, respectively
std::vector<std::vector<std::array<Double_t,4>>> meanPowerVals_;
std::vector<std::vector<std::array<Double_t,4>>> sigmaPowerVals_;
// outer vector has nEvents entries, inner vector has nGauss_ entries, array holds the 4 entries of the K-vector
std::vector<std::vector<std::array<std::complex<Double_t>,4>>> expTermKvecVals_;
std::vector<std::vector<std::array<std::complex<Double_t>,4>>> trigTermKvecVals_;
std::vector<std::vector<std::array<std::complex<Double_t>,4>>> hypHTermKvecVals_;
std::vector<std::vector<std::array<std::complex<Double_t>,4>>> hypLTermKvecVals_;
// outer vector has nEvents entries, middle vector has nSplineSegments entries, inner vector has nGauss_ entries, array holds the 4 entries of the M-vector
std::vector<std::vector<std::vector<std::array<std::complex<Double_t>,4>>>> expTermMvecVals_;
std::vector<std::vector<std::vector<std::array<std::complex<Double_t>,4>>>> trigTermMvecVals_;
std::vector<std::vector<std::vector<std::array<std::complex<Double_t>,4>>>> hypHTermMvecVals_;
std::vector<std::vector<std::vector<std::array<std::complex<Double_t>,4>>>> hypLTermMvecVals_;
ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF
};
#endif
diff --git a/inc/LauTimeDepFitModel.hh b/inc/LauTimeDepFitModel.hh
index ca89358..4875749 100644
--- a/inc/LauTimeDepFitModel.hh
+++ b/inc/LauTimeDepFitModel.hh
@@ -1,715 +1,715 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauTimeDepFitModel.hh
\brief File containing declaration of LauTimeDepFitModel class.
*/
/*! \class LauTimeDepFitModel
\brief Class for defining a time-dependent fit model.
LauTimeDepFitModel is a class that allows the user to define a three-body
Dalitz plot according to the isobar model, i.e. as a set of
resonances with complex amplitudes that can interfere with each other.
It extends the LauSimpleFitModel and LauCPFitModel models in that it allows
the fitting of time-dependent particle/antiparticle decays to
flavour-conjugate Dalitz plots, including their interference through mixing.
*/
#ifndef LAU_TIMEDEP_FIT_MODEL
#define LAU_TIMEDEP_FIT_MODEL
#include <vector>
#include <map>
#include <set>
#include <iostream>
#include "TString.h"
#include "TStopwatch.h"
#include "TSystem.h"
#include "LauAbsFitModel.hh"
#include "LauConstants.hh"
#include "LauEmbeddedData.hh"
#include "LauParameter.hh"
#include "LauFlavTag.hh"
#include "LauCategoryFlavTag.hh"
class LauAbsBkgndDPModel;
class LauAbsCoeffSet;
class LauAbsPdf;
class LauDecayTimePdf;
class LauIsobarDynamics;
class LauKinematics;
class LauScfMap;
class LauTimeDepFitModel : public LauAbsFitModel {
public:
//! Possible CP eigenvalues (the intrinsic CP of the final state particles)
enum CPEigenvalue {
CPOdd = -1, /*!< CP odd final state */
QFS = 0, /*!< Quasi Flavour Specific final state */
CPEven = 1 /*!< CP even final state */
};
//! Constructor
/*!
\param [in] modelB0bar DP model for the antiparticle
\param [in] modelB0 DP model for the particle
\param [in] flavTag flavour tagging information
*/
LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag);
//! Destructor
virtual ~LauTimeDepFitModel();
//! Set the signal event yield
/*!
\param [in] nSigEvents contains the signal yield and option to fix it
*/
virtual void setNSigEvents(LauParameter* nSigEvents);
//! Set the signal event yield and asymmetry
/*!
\param [in] nSigEvents contains the signal yield and option to fix it
\param [in] sigAsym contains the signal asymmetry and option to fix it
*/
virtual void setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym);
//! Set the number of background events
/*!
The name of the parameter must be that of the corresponding background category (so that it can be correctly assigned)
\param [in] nBkgndEvents contains the name, yield and option to fix the yield of the background
*/
virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents);
//! Set the background event yield and asymmetry
/*!
\param [in] nBkgndEvents contains the background yield and option to fix it
\param [in] bkgndAsym contains the background asymmetry and option to fix it
*/
virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym);
//! Set the background DP models
/*!
\param [in] bkgndClass the name of the background class
\param [in] model the DP model of the background
*/
void setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* model);
//! Switch on/off storage of amplitude info in generated ntuple
void storeGenAmpInfo(Bool_t storeInfo) { storeGenAmpInfo_ = storeInfo; }
//! Set CP eigenvalue
/*!
The CP eigenvalue can be supplied on an event-by-event
basis, e.g. if the data contains daughters that are D0
mesons that can decay to either K+ K- (CP even) or KS pi0
(CP odd).
This method allows you to set the default value that should
be used if the data does not contain this information as
well as the name of the variable in the data that will
specify this information.
If completely unspecified all events will be assumed to be
CP even.
\param [in] defaultCPEV the default value for the CP eigenvalue
\param [in] cpevVarName the variable name in the data tree that specifies the CP eigenvalue
*/
inline void setCPEigenvalue( const CPEigenvalue defaultCPEV, const TString& cpevVarName = "" )
{
cpEigenValue_ = defaultCPEV;
cpevVarName_ = cpevVarName;
}
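// Usage sketch, given a LauTimeDepFitModel& model (the branch name
// "cpEigenvalue" is illustrative):
model.setCPEigenvalue( LauTimeDepFitModel::CPOdd, "cpEigenvalue" );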
//! Set the DP amplitude coefficients
/*!
\param [in] coeffSet the set of coefficients
*/
void setAmpCoeffSet(LauAbsCoeffSet* coeffSet);
//! Set the signal decay time PDF
/*!
\param [in] pdf the signal decay time PDF
*/
void setSignalDtPdf(LauDecayTimePdf* pdf);
//! Set a background decay time PDF
/*!
\param [in] bkgndClass the name of the background class for which the PDF should be used
\param [in] pdf the background decay time PDF
*/
- void setBackgroundDtPdf(LauDecayTimePdf* pdf);
+ void setBkgndDtPdf(const TString& bkgndClass, LauDecayTimePdf* pdf);
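// The renamed setter is keyed on the background class name, mirroring
// setBkgndDPModels and setBkgndPdf. Usage sketch, given a
// LauTimeDepFitModel& model and a LauDecayTimePdf* combDtPdf (the class
// name "comb" is illustrative):
model.setBkgndDtPdf( "comb", combDtPdf );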
//! Set the signal PDF for a given variable
/*!
\param [in] pdf the PDF to be added to the signal model
*/
void setSignalPdfs(LauAbsPdf* pdf);
//! Set the background PDF
/*!
\param [in] bkgndClass the name of the background class
\param [in] pdf the PDF to be added to the background model
- */
+ */
void setBkgndPdf(const TString& bkgndClass, LauAbsPdf* pdf);
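//! Set the signal PDF for the flavour-tagging variable (e.g. the mistag eta)
/*!
\param [in] tagCat the tagging category for which the PDF should be used
\param [in] pdf the PDF to be added to the signal model
*/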
void setSignalFlavTagPdfs( const Int_t tagCat, LauAbsPdf* pdf);
-
+
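//! Set the flavour-tagging PDF for a given background class
/*!
\param [in] name the name of the background class
\param [in] pdf the PDF to be added to the background model
*/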
void setBkgdFlavTagPdfs( const TString name, LauAbsPdf* pdf);
//! Embed full simulation events for the signal, rather than generating toy from the PDFs
/*!
\param [in] fileName the name of the file containing the events
\param [in] treeName the name of the tree
\param [in] reuseEventsWithinEnsemble whether the events may be reused within the ensemble of experiments
\param [in] reuseEventsWithinExperiment whether the events may be reused within a given experiment
*/
void embedSignal(const TString& fileName, const TString& treeName,
const Bool_t reuseEventsWithinEnsemble, const Bool_t reuseEventsWithinExperiment = kFALSE);
void embedBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName,
- Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE);
+ Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE);
//! Set the value of the mixing phase
/*!
\param [in] phiMix the value of the mixing phase
\param [in] fixPhiMix whether the value should be fixed or floated
\param [in] useSinCos whether to use the sine and cosine as separate parameters or to just use the mixing phase itself
*/
void setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos = kFALSE);
//! Initialise the fit
virtual void initialise();
//! Initialise the signal DP model
virtual void initialiseDPModels();
//! Recalculate the normalisation of the signal DP models
virtual void recalculateNormalisation();
//! Update the coefficients
virtual void updateCoeffs();
// Toy MC generation and fitting overloaded functions
virtual Bool_t genExpt();
//! Set the maximum value of A squared to be used in the accept/reject
/*!
\param [in] value the new value
*/
inline void setASqMaxValue(const Double_t value) {aSqMaxSet_ = value;}
//! Weight events based on the DP model
/*!
\param [in] dataFileName the name of the data file
\param [in] dataTreeName the name of the data tree
*/
virtual void weightEvents( const TString& dataFileName, const TString& dataTreeName );
//! Calculate things that depend on the fit parameters after they have been updated by Minuit
virtual void propagateParUpdates();
//! Read in the input fit data variables, e.g. m13Sq and m23Sq
virtual void cacheInputFitVars();
//! Check the initial fit parameters
virtual void checkInitFitParams();
//! Get the fit results and store them
/*!
\param [in] tablePrefixName prefix for the name of the output file
*/
virtual void finaliseFitResults(const TString& tablePrefixName);
//! Save the PDF plots for all the resonances of experiment number fitExp
/*!
TODO - not working in this model!!
\param [in] label prefix for the file name to be saved
*/
virtual void savePDFPlots(const TString& label);
//! Save the PDF plots for the sum of resonances corresponding to the given spin for experiment number fitExp
/*!
TODO - not working in this model!!
\param [in] label prefix for the file name to be saved
\param [in] spin spin of the wave to be saved
*/
virtual void savePDFPlotsWave(const TString& label, const Int_t& spin);
//! Print the fit fractions, total DP rate and mean efficiency
/*!
\param [out] output the stream to which to print
*/
virtual void printFitFractions(std::ostream& output);
//! Print the asymmetries
/*!
\param [out] output the stream to which to print
*/
virtual void printAsymmetries(std::ostream& output);
//! Write the fit results in latex table format
/*!
\param [in] outputFile the name of the output file
*/
virtual void writeOutTable(const TString& outputFile);
//! Store the per event likelihood values
virtual void storePerEvtLlhds();
// Methods to do with calculating the likelihood functions
// and manipulating the fitting parameters.
//! Get the total likelihood for each event
/*!
\param [in] iEvt the event number
*/
virtual Double_t getTotEvtLikelihood(const UInt_t iEvt);
//! Calculate the signal and background likelihoods for the DP for a given event
/*!
\param [in] iEvt the event number
*/
virtual void getEvtDPDtLikelihood(const UInt_t iEvt);
//! Determine the signal and background likelihood for the extra variables for a given event
/*!
\param [in] iEvt the event number
*/
virtual void getEvtExtraLikelihoods(const UInt_t iEvt);
-
+
virtual void getEvtFlavTagLikelihood(const UInt_t iEvt);
//! Get the total number of events
/*!
\return the total number of events
*/
virtual Double_t getEventSum() const;
//! Set the fit parameters for the DP model
void setSignalDPParameters();
//! Set the fit parameters for the decay time PDFs
void setDecayTimeParameters();
//! Set the fit parameters for the extra PDFs
void setExtraPdfParameters();
//! Set the initial yields
void setFitNEvents();
- //! Set the calibration parameters
- void setCalibParams();
+ //! Set the calibration parameters
+ void setCalibParams();
//! Set the tagging efficiency parameters
void setTagEffParams();
- //! Set the efficiency parameters
- void setEffiParams();
+ //! Set the efficiency parameters
+ void setEffiParams();
- //! Set the asymmetry parameters
- void setAsymParams();
+ //! Set the asymmetry parameters
+ void setAsymParams();
//! Set the tagging asymmetry parameters
void setFlavTagAsymParams();
//! Set-up other parameters that are derived from the fit results, e.g. fit fractions
void setExtraNtupleVars();
//! Set production and detection asymmetries
void setAsymmetries(const Double_t AProd, const Bool_t AProdFix);
//! Randomise the initial fit parameters
void randomiseInitFitPars();
//! Method to set up the storage for background-related quantities called by setBkgndClassNames
virtual void setupBkgndVectors();
//! Calculate the CP asymmetries
/*!
\param [in] initValues is this before or after the fit
*/
void calcAsymmetries(const Bool_t initValues = kFALSE);
//! Finalise value of mixing phase
void checkMixingPhase();
//! Return the signal decay time PDF
typedef std::map< Int_t, LauDecayTimePdf*> LauTagCatDtPdfMap;
LauDecayTimePdf* getSignalDecayTimePdf(){return signalDecayTimePdf_;}
//! Return the vector of background decay time PDFs
- std::vector<LauDecayTimePdf*> getBackgroundDecayTimePdfs(){return backgroundDecayTimePdfs_;}
+ std::vector<LauDecayTimePdf*> getBackgroundDecayTimePdfs(){return BkgndDecayTimePdfs_;}
protected:
- typedef std::map< Int_t, LauParameter> LauTagCatParamMap;
+ typedef std::map< Int_t, LauParameter> LauTagCatParamMap;
typedef std::map< Int_t, LauPdfList > LauTagCatPdfListMap;
typedef std::map< Int_t, LauAbsPdf* > LauTagCatPdfMap;
typedef std::map< TString, LauAbsPdf* > LauBkgdPdfMap;
typedef std::map< Int_t, Int_t > LauTagCatEventsMap;
typedef std::map< Int_t, LauEmbeddedData* > LauTagCatEmbDataMap;
//typedef std::map< Int_t, std::pair<Int_t,Double_t> > LauTaggerGenInfo;
//typedef std::map< std::pair<TString,Int_t>, LauTaggerGenInfo > LauGenInfo;
typedef std::map< TString, std::pair<Int_t,Double_t> > LauGenInfo;
-
+
typedef std::vector<LauTagCatEmbDataMap> LauTagCatEmbDataMapList;
typedef std::vector<LauAbsBkgndDPModel*> LauBkgndDPModelList;
typedef std::vector<LauPdfList> LauBkgndPdfsList;
typedef std::vector<LauAbsRValue*> LauBkgndYieldList;
typedef std::vector<Bool_t> LauBkgndReuseEventsList;
//! Determine the number of events to generate for each hypothesis
LauGenInfo eventsToGenerate();
//! Generate signal event
Bool_t generateSignalEvent();
//! Generate background event
/*!
\param [in] bgID ID number of the background class
*/
Bool_t generateBkgndEvent(UInt_t bgID);
//! Setup the required ntuple branches
void setupGenNtupleBranches();
//! Store all of the DP and decay time information
void setDPDtBranchValues();
//! Generate from the extra PDFs
/*!
\param [in] extraPdfs the list of extra PDFs
\param [in] embeddedData the embedded data sample
*/
void generateExtraPdfValues(LauPdfList& extraPdfs, LauEmbeddedData* embeddedData);
//! Add sPlot branches for the extra PDFs
/*!
\param [in] extraPdfs the list of extra PDFs
\param [in] prefix the list of prefixes for the branch names
*/
void addSPlotNtupleBranches(const LauPdfList& extraPdfs, const TString& prefix);
//! Set the branches for the sPlot ntuple with extra PDFs
/*!
\param [in] extraPdfs the list of extra PDFs
\param [in] prefix the list of prefixes for the branch names
\param [in] iEvt the event number
*/
Double_t setSPlotNtupleBranchValues(LauPdfList& extraPdfs, const TString& prefix, const UInt_t iEvt);
//! Update the signal events after Minuit sets background parameters
void updateSigEvents();
//! Add branches to store experiment number and the event number within the experiment
virtual void setupSPlotNtupleBranches();
//! Returns the names of all variables in the fit
virtual LauSPlot::NameSet variableNames() const;
//! Returns the names and yields of species that are free in the fit
virtual LauSPlot::NumbMap freeSpeciesNames() const;
//! Returns the names and yields of species that are fixed in the fit
virtual LauSPlot::NumbMap fixdSpeciesNames() const;
//! Returns the species and variables for all 2D PDFs in the fit
virtual LauSPlot::TwoDMap twodimPDFs() const;
//! Check if the signal is split into well-reconstructed and mis-reconstructed types
virtual Bool_t splitSignal() const { return kFALSE; }
//! Check if the mis-reconstructed signal is to be smeared in the DP
virtual Bool_t scfDPSmear() const { return kFALSE; }
//! Add the parameters from each PDF into the fit
/*!
\param [in] theList the list of PDFs
*/
UInt_t addParametersToFitList(LauPdfList* theList);
//! Add the parameters from each decay time PDF into the fit
/*!
\param [in] theVector the vector of decay time PDFs
*/
UInt_t addParametersToFitList(std::vector<LauDecayTimePdf*> theVector);
//! Calculate the component integrals of the interference term
void calcInterferenceTermIntegrals();
//! Calculate the total integral of the interference term
void calcInterTermNorm();
private:
//! Dalitz plot PDF for the antiparticle
LauIsobarDynamics* sigModelB0bar_;
//! Dalitz plot PDF for the particle
LauIsobarDynamics* sigModelB0_;
//! Kinematics object for antiparticle
LauKinematics* kinematicsB0bar_;
//! Kinematics object for particle
LauKinematics* kinematicsB0_;
//! The background Dalitz plot models
- LauBkgndDPModelList BkgndDPModels_;
+ LauBkgndDPModelList BkgndDPModels_;
//! The background PDFs
- LauBkgndPdfsList BkgndPdfs_;
+ LauBkgndPdfsList BkgndPdfs_;
//! Background boolean
Bool_t usingBkgnd_;
//! LauFlavTag object for flavour tagging
LauFlavTag* flavTag_;
//! Flavour tag for current event
std::vector<LauFlavTag::Flavour> curEvtTagFlv_;
//! Per event mistag for current event
std::vector<Double_t> curEvtMistag_;
//! Per event transformed mistag for current event
std::vector<Double_t> curEvtMistagPrime_;
//! True tag flavour (i.e. flavour at production) for the current event (only relevant for toy generation)
LauFlavTag::Flavour curEvtTrueTagFlv_;
//! Flavour at decay for the current event (only relevant for QFS)
LauFlavTag::Flavour curEvtDecayFlv_;
//! Number of signal components
UInt_t nSigComp_;
//! Number of signal DP parameters
UInt_t nSigDPPar_;
//! Number of decay time PDF parameters
UInt_t nDecayTimePar_;
//! Number of extra PDF parameters
UInt_t nExtraPdfPar_;
//! Number of normalisation parameters (yields, asymmetries)
UInt_t nNormPar_;
//! Number of calibration parameters (p0, p1)
UInt_t nCalibPar_;
//! Number of tagging efficiency parameters
UInt_t nTagEffPar_;
//! Number of decay-time efficiency parameters (e.g. spline knot values)
UInt_t nEffiPar_;
//! Number of asymmetry parameters
UInt_t nAsymPar_;
//! Number of tagging asymmetry parameters
UInt_t nTagAsymPar_;
//! The complex coefficients for antiparticle
std::vector<LauComplex> coeffsB0bar_;
//! The complex coefficients for particle
std::vector<LauComplex> coeffsB0_;
//! Magnitudes and Phases or Real and Imaginary Parts
std::vector<LauAbsCoeffSet*> coeffPars_;
//! The integrals of the efficiency corrected amplitude cross terms for each pair of amplitude components
/*!
Calculated as the sum of A* x Abar x efficiency
*/
std::vector< std::vector<LauComplex> > fifjEffSum_;
//! The normalisation for the term 2.0*Re(A*Abar*phiMix)
Double_t interTermReNorm_;
//! The normalisation for the term 2.0*Im(A*Abar*phiMix)
Double_t interTermImNorm_;
//! The antiparticle fit fractions
LauParArray fitFracB0bar_;
//! The particle fit fractions
LauParArray fitFracB0_;
//! The fit fraction asymmetries
std::vector<LauParameter> fitFracAsymm_;
//! A_CP parameter
std::vector<LauParameter> acp_;
//! The mean efficiency for the antiparticle
LauParameter meanEffB0bar_;
//! The mean efficiency for the particle
LauParameter meanEffB0_;
//! The average DP rate for the antiparticle
LauParameter DPRateB0bar_;
//! The average DP rate for the particle
LauParameter DPRateB0_;
//! Signal yields
LauParameter* signalEvents_;
//! Signal asymmetry
LauParameter* signalAsym_;
//! Background yield(s)
- LauBkgndYieldList bkgndEvents_;
+ LauBkgndYieldList bkgndEvents_;
//! Background asymmetries(s)
LauBkgndYieldList bkgndAsym_;
//! CP eigenvalue variable name
TString cpevVarName_;
//! CP eigenvalue for current event
CPEigenvalue cpEigenValue_;
//! Vector to store event CP eigenvalues
std::vector<CPEigenvalue> evtCPEigenVals_;
//! The mass difference between the neutral mass eigenstates
LauParameter deltaM_;
//! The width difference between the neutral mass eigenstates
LauParameter deltaGamma_;
//! The lifetime
LauParameter tau_;
//! The mixing phase
LauParameter phiMix_;
//! The sine of the mixing phase
LauParameter sinPhiMix_;
//! The cosine of the mixing phase
LauParameter cosPhiMix_;
//! Flag whether to use the sine and cosine of the mixing phase or the phase itself as the fit parameters
Bool_t useSinCos_;
//! e^{-i*phiMix}
LauComplex phiMixComplex_;
//! Signal decay time PDF
LauDecayTimePdf* signalDecayTimePdf_;
//! Background decay time PDFs (one per background class)
- std::vector<LauDecayTimePdf*> backgroundDecayTimePdfs_;
+ std::vector<LauDecayTimePdf*> BkgndDecayTimePdfs_;
//! Decay time for the current event
Double_t curEvtDecayTime_;
//! Decay time error for the current event
Double_t curEvtDecayTimeErr_;
//! PDFs for other variables
LauPdfList sigExtraPdf_;
//! eta PDFs for each TagCat
LauPdfList sigFlavTagPdf_;
//! eta PDFs for each background
LauBkgdPdfMap bkgdFlavTagPdf_;
//! Production asymmetry between B0 and B0bar
LauParameter AProd_;
// Toy generation stuff
//! The maximum allowed number of attempts when generating an event
Int_t iterationsMax_;
//! The number of unsuccessful attempts to generate an event so far
Int_t nGenLoop_;
//! The value of A squared for the current event
Double_t ASq_;
//! The maximum value of A squared that has been seen so far while generating
Double_t aSqMaxVar_;
//! The maximum allowed value of A squared
Double_t aSqMaxSet_;
//! Flag for storage of amplitude info in generated ntuple
Bool_t storeGenAmpInfo_;
//! The signal event tree for embedding fully simulated events
LauEmbeddedData* signalTree_;
//! The background event tree for embedding fully simulated events
std::vector<LauEmbeddedData*> bkgndTree_;
//! Boolean to control reuse of embedded signal events
Bool_t reuseSignal_;
//! Vector of booleans to reuse background events
LauBkgndReuseEventsList reuseBkgnd_;
// Likelihood values
//! Signal DP likelihood value
Double_t sigDPLike_;
//! Signal likelihood from extra PDFs
Double_t sigExtraLike_;
-
+
Double_t sigFlavTagLike_;
Double_t bkgdFlavTagLike_;
//! Total signal likelihood
Double_t sigTotalLike_;
//! Background DP likelihood value(s)
std::vector<Double_t> bkgndDPLike_;
//! Background likelihood value(s) from extra PDFs
std::vector<Double_t> bkgndExtraLike_;
//! Total background likelihood(s)
std::vector<Double_t> bkgndTotalLike_;
-
+
ClassDef(LauTimeDepFitModel,0) // Time-dependent neutral model
};
#endif
diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc
index 886efbd..1755dc3 100644
--- a/src/LauDecayTimePdf.cc
+++ b/src/LauDecayTimePdf.cc
@@ -1,1670 +1,1668 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauDecayTimePdf.cc
\brief File containing implementation of LauDecayTimePdf class.
*/
#include <complex>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>
#include <algorithm>
#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"
#include "TH1.h"
#include "RooMath.h"
#include "Lau1DCubicSpline.hh"
#include "Lau1DHistPdf.hh"
#include "LauConstants.hh"
#include "LauComplex.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitDataTree.hh"
#include "LauParameter.hh"
#include "LauParamFixed.hh"
#include "LauRandom.hh"
ClassImp(LauDecayTimePdf)
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
params_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
- mean_(nGauss_,nullptr),
- sigma_(nGauss_,nullptr),
- frac_(nGauss_-1,nullptr),
tau_(nullptr),
deltaM_(nullptr),
deltaGamma_(nullptr),
fracPrompt_(nullptr),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scale),
scaleWidths_(scale),
scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
- effiPars_(0),
- //meansFloating_(nGauss_,kTRUE),
- //sigmasFloating_(nGauss_,kTRUE),
- //fracsFloating_(nGauss_-1,kTRUE),
- //meansChanged_(nGauss_,kTRUE),
- //sigmasChanged_(nGauss_,kTRUE),
- //fracsChanged_(nGauss_-1,kTRUE),
- meanVals_(nGauss_,0.0),
- sigmaVals_(nGauss_,0.0),
- fracVals_(nGauss_,0.0)
+ effiPars_(0)
{
+ if (nGauss > 0)
+ {
+ frac_.assign(nGauss_-1,nullptr);
+ mean_.assign(nGauss_,nullptr);
+ sigma_.assign(nGauss_,nullptr);
+ meanVals_.assign(nGauss_,0.0);
+ sigmaVals_.assign(nGauss_,0.0);
+ fracVals_.assign(nGauss_,0.0);
+ }
}
LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector<LauAbsRValue*>& params,
Double_t minAbscissaVal, Double_t maxAbscissaVal,
Double_t minAbscissaErr, Double_t maxAbscissaErr,
FuncType type, UInt_t nGauss, const std::vector<Bool_t>& scaleMeans, const std::vector<Bool_t>& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) :
varName_(theVarName),
varErrName_(theVarErrName),
params_(params),
smear_(kTRUE),
minAbscissa_(minAbscissaVal),
maxAbscissa_(maxAbscissaVal),
minAbscissaError_(minAbscissaErr),
maxAbscissaError_(maxAbscissaErr),
abscissaError_(0.0),
abscissaErrorGenerated_(kFALSE),
errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286
errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102
nGauss_(nGauss),
- mean_(nGauss_,nullptr),
- sigma_(nGauss_,nullptr),
- frac_(nGauss_-1,nullptr),
tau_(nullptr),
deltaM_(nullptr),
deltaGamma_(nullptr),
fracPrompt_(nullptr),
type_(type),
method_(method),
effMethod_(effMethod),
scaleMeans_(scaleMeans),
scaleWidths_(scaleWidths),
scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or<Bool_t>() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or<Bool_t>() ) ),
expTerm_(0.0),
cosTerm_(0.0),
sinTerm_(0.0),
coshTerm_(0.0),
sinhTerm_(0.0),
normTermExp_(0.0),
normTermCosh_(0.0),
normTermSinh_(0.0),
errTerm_(0.0),
effiTerm_(0.0),
pdfTerm_(0.0),
errHist_(nullptr),
pdfHist_(nullptr),
effiFun_(nullptr),
effiHist_(nullptr),
- effiPars_(0),
- //meansFloating_(nGauss_,kTRUE),
- //sigmasFloating_(nGauss_,kTRUE),
- //fracsFloating_(nGauss_-1,kTRUE),
- //meansChanged_(nGauss_,kTRUE),
- //sigmasChanged_(nGauss_,kTRUE),
- //fracsChanged_(nGauss_-1,kTRUE),
- meanVals_(nGauss_,0.0),
- sigmaVals_(nGauss_,0.0),
- fracVals_(nGauss_,0.0)
+ effiPars_(0)
{
+ if (nGauss > 0)
+ {
+ frac_.assign(nGauss_-1,nullptr);
+ mean_.assign(nGauss_,nullptr);
+ sigma_.assign(nGauss_,nullptr);
+ meanVals_.assign(nGauss_,0.0);
+ sigmaVals_.assign(nGauss_,0.0);
+ fracVals_.assign(nGauss_,0.0);
+ }
}
LauDecayTimePdf::~LauDecayTimePdf()
{
// Destructor
delete errHist_; errHist_ = nullptr;
delete pdfHist_; pdfHist_ = nullptr;
delete effiFun_; effiFun_ = nullptr;
delete effiHist_; effiHist_ = nullptr;
for( auto& par : effiPars_ ){ delete par; par = nullptr; }
effiPars_.clear();
}
void LauDecayTimePdf::initialise()
{
// The parameters are:
// - the mean and the sigma (bias and spread in resolution) of the gaussian(s)
// - the mean lifetime, denoted tau, of the exponential decay
// - the frequency of oscillation, denoted Delta m, of the cosine and sine terms
// - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms
//
// The next two arguments specify the range in which the PDF is defined,
// and the PDF will be normalised w.r.t. these limits.
//
// The final three arguments define the type of decay time PDF (Hist, Delta, Exp, DeltaExp, ExpTrig or ExpHypTrig), the number of Gaussians
// and whether or not the Gaussian parameters should be scaled by the per-event errors on Delta t
// First check whether the scale vector is nGauss in size
if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if (type_ == Hist) {
if (this->nParameters() != 0){
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else {
TString meanName("mean_");
TString sigmaName("sigma_");
TString fracName("frac_");
Bool_t foundParams(kTRUE);
for (UInt_t i(0); i<nGauss_; ++i) {
TString tempName(meanName); tempName += i;
TString tempName2(sigmaName); tempName2 += i;
TString tempName3(fracName); tempName3 += i;
mean_[i] = this->findParameter(tempName);
foundParams &= (mean_[i] != nullptr);
sigma_[i] = this->findParameter(tempName2);
foundParams &= (sigma_[i] != nullptr);
if (i!=0) {
frac_[i-1] = this->findParameter(tempName3);
foundParams &= (frac_[i-1] != nullptr);
}
}
if (type_ == Delta) {
if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == Exp) {
tau_ = this->findParameter("tau");
foundParams &= (tau_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == DeltaExp) {
tau_ = this->findParameter("tau");
fracPrompt_ = this->findParameter("frac_prompt");
foundParams &= (tau_ != nullptr);
foundParams &= (fracPrompt_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the fraction of the prompt part: \"frac_prompt\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
foundParams &= (tau_ != nullptr);
foundParams &= (deltaM_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
} else if (type_ == ExpHypTrig) {
tau_ = this->findParameter("tau");
deltaM_ = this->findParameter("deltaM");
deltaGamma_ = this->findParameter("deltaGamma");
foundParams &= (tau_ != nullptr);
foundParams &= (deltaM_ != nullptr);
foundParams &= (deltaGamma_ != nullptr);
if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) {
std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<<std::endl;
std::cerr<<" - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\""<<std::endl;
std::cerr<<" - nGauss-1 fractions: \"frac_i\", where i!=0"<<std::endl;
std::cerr<<" - the lifetime of the exponential decay: \"tau\""<<std::endl;
std::cerr<<" - the oscillation frequency: \"deltaM\"."<<std::endl;
std::cerr<<" - the width difference: \"deltaGamma\"."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
}
// Setup the normalisation caches
normTermsExp_.clear(); normTermsExp_.resize(1);
normTermsCos_.clear(); normTermsCos_.resize(1);
normTermsSin_.clear(); normTermsSin_.resize(1);
normTermsCosh_.clear(); normTermsCosh_.resize(1);
normTermsSinh_.clear(); normTermsSinh_.resize(1);
if ( effMethod_ == EfficiencyMethod::Spline ) {
const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
if ( not this->doSmearing() ) {
expTermIkVals_.clear(); expTermIkVals_.resize(nSplineSegments);
trigTermIkVals_.clear(); trigTermIkVals_.resize(nSplineSegments);
hypHTermIkVals_.clear(); hypHTermIkVals_.resize(nSplineSegments);
hypLTermIkVals_.clear(); hypLTermIkVals_.resize(nSplineSegments);
} else {
// Set outer vectors to size 1 (will be resized to nEvents in cacheInfo if necessary)
meanPowerVals_.clear(); meanPowerVals_.resize(1); meanPowerVals_.front().resize(nGauss_);
sigmaPowerVals_.clear(); sigmaPowerVals_.resize(1); sigmaPowerVals_.front().resize(nGauss_);
expTermKvecVals_.clear(); expTermKvecVals_.resize(1); expTermKvecVals_.front().resize(nGauss_);
trigTermKvecVals_.clear(); trigTermKvecVals_.resize(1); trigTermKvecVals_.front().resize(nGauss_);
hypHTermKvecVals_.clear(); hypHTermKvecVals_.resize(1); hypHTermKvecVals_.front().resize(nGauss_);
hypLTermKvecVals_.clear(); hypLTermKvecVals_.resize(1); hypLTermKvecVals_.front().resize(nGauss_);
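// The M-vector caches are indexed as [event][spline segment][Gaussian]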
expTermMvecVals_.clear(); expTermMvecVals_.resize(1); expTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : expTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
trigTermMvecVals_.clear(); trigTermMvecVals_.resize(1); trigTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : trigTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
hypHTermMvecVals_.clear(); hypHTermMvecVals_.resize(1); hypHTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : hypHTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
hypLTermMvecVals_.clear(); hypLTermMvecVals_.resize(1); hypLTermMvecVals_.front().resize(nSplineSegments);
for ( auto& innerVec : hypLTermMvecVals_.front() ) { innerVec.resize(nGauss_); }
}
}
// Force calculation of all relevant info by faking that all parameter values have changed
nothingFloating_ = nothingChanged_ = kFALSE;
anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
nonKnotFloating_ = nonKnotChanged_ = kTRUE;
physicsParFloating_ = physicsParChanged_ = kTRUE;
tauFloating_ = tauChanged_ = ( tau_ != nullptr );
deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
resoParFloating_ = resoParChanged_ = kTRUE;
}
Double_t LauDecayTimePdf::effectiveResolution() const
{
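// The effective single-Gaussian resolution of the nGauss model:
// dilution D = sum_i f_i * exp( -0.5 * DeltaM^2 * sigma_i^2 ),
// effective width sigma_eff = sqrt( -2 ln D ) / DeltaM,
// i.e. the width of the single Gaussian that would give the same dilution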
Double_t dilution = 0.;
Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue();
// Might be cleaner to just append this to the vector in the init step;
// then the consistency can also be checked
Double_t fracSum = 0;
for (auto f : frac_) fracSum += f->unblindValue();
Double_t firstFrac = 1. - fracSum;
for (size_t i = 0; i < sigma_.size(); i++) {
Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue();
// Convention from initialise(): frac_[i-1] is the fraction of Gaussian i,
// with the first Gaussian's fraction derived as 1 minus the sum of the rest
Double_t thisFrac = (i == 0) ? firstFrac : frac_[i-1]->unblindValue();
dilution += thisFrac * TMath::Exp(-dMSq * 0.5 * sigSq);
}
return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue();
}
void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData)
{
// Check that the input data contains the decay time variable
Bool_t hasBranch = inputData.haveBranch(this->varName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varName()<<"\"."<<std::endl;
return;
}
// If we're scaling by the per-event error, also check that the input data contains the decay time error variable
const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ };
if ( needPerEventNormTerms ) {
hasBranch = inputData.haveBranch(this->varErrName());
if (!hasBranch) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<<this->varErrName()<<"\"."<<std::endl;
return;
}
// Pass the data to the decay-time error PDF for caching
if ( errHist_ ) {
errHist_->cacheInfo(inputData);
}
}
if (type_ == Hist) {
// Pass the data to the decay-time PDF for caching
if ( pdfHist_ ) {
pdfHist_->cacheInfo(inputData);
}
+ // Make cache of effiTerms
+ const UInt_t nEvents = inputData.nEvents();
+ // Efficiency is assumed to always be 1 in this case
+ effiTerms_.assign(nEvents,1.);
} else {
// Clear the vectors and reserve enough space in the caches of the terms
const UInt_t nEvents = inputData.nEvents();
abscissas_.clear(); abscissas_.resize(nEvents);
abscissaErrors_.clear(); abscissaErrors_.resize(nEvents);
expTerms_.clear(); expTerms_.resize(nEvents);
cosTerms_.clear(); cosTerms_.resize(nEvents);
sinTerms_.clear(); sinTerms_.resize(nEvents);
coshTerms_.clear(); coshTerms_.resize(nEvents);
sinhTerms_.clear(); sinhTerms_.resize(nEvents);
effiTerms_.clear(); effiTerms_.resize(nEvents);
// Also resize the normalisation cache elements if we're doing per-event resolution
if ( needPerEventNormTerms ) {
normTermsExp_.clear(); normTermsExp_.resize(nEvents);
normTermsCos_.clear(); normTermsCos_.resize(nEvents);
normTermsSin_.clear(); normTermsSin_.resize(nEvents);
normTermsCosh_.clear(); normTermsCosh_.resize(nEvents);
normTermsSinh_.clear(); normTermsSinh_.resize(nEvents);
if ( effMethod_ == EfficiencyMethod::Spline ) {
meanPowerVals_.resize(nEvents);
for ( auto& innerVec : meanPowerVals_ ) { innerVec.resize(nGauss_); }
sigmaPowerVals_.resize(nEvents);
for ( auto& innerVec : sigmaPowerVals_ ) { innerVec.resize(nGauss_); }
expTermKvecVals_.resize(nEvents);
for ( auto& innerVec : expTermKvecVals_ ) { innerVec.resize(nGauss_); }
trigTermKvecVals_.resize(nEvents);
for ( auto& innerVec : trigTermKvecVals_ ) { innerVec.resize(nGauss_); }
hypHTermKvecVals_.resize(nEvents);
for ( auto& innerVec : hypHTermKvecVals_ ) { innerVec.resize(nGauss_); }
hypLTermKvecVals_.resize(nEvents);
for ( auto& innerVec : hypLTermKvecVals_ ) { innerVec.resize(nGauss_); }
const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
expTermMvecVals_.resize(nEvents);
for ( auto& middleVec : expTermMvecVals_) {
middleVec.resize(nSplineSegments);
- for ( auto& innerVec : middleVec ) {
+ for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
trigTermMvecVals_.resize(nEvents);
for ( auto& middleVec : trigTermMvecVals_) {
middleVec.resize(nSplineSegments);
- for ( auto& innerVec : middleVec ) {
+ for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
hypHTermMvecVals_.resize(nEvents);
for ( auto& middleVec : hypHTermMvecVals_) {
middleVec.resize(nSplineSegments);
- for ( auto& innerVec : middleVec ) {
+ for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
hypLTermMvecVals_.resize(nEvents);
for ( auto& middleVec : hypLTermMvecVals_) {
middleVec.resize(nSplineSegments);
- for ( auto& innerVec : middleVec ) {
+ for ( auto& innerVec : middleVec ) {
innerVec.resize(nGauss_);
}
}
}
}
// Determine the abscissa and abscissa error values for each event
for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) {
const LauFitData& dataValues = inputData.getData(iEvt);
const Double_t abscissa { dataValues.at(this->varName()) };
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissas_[iEvt] = abscissa ;
const Double_t abscissaErr { needPerEventNormTerms ? dataValues.at(this->varErrName()) : 0.0 };
if ( needPerEventNormTerms and ( abscissaErr > this->maxAbscissaError() or abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
abscissaErrors_[iEvt] = abscissaErr;
}
// Force calculation of all info by faking that all parameter values have changed
nothingFloating_ = nothingChanged_ = kFALSE;
anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty();
nonKnotFloating_ = nonKnotChanged_ = kTRUE;
physicsParFloating_ = physicsParChanged_ = kTRUE;
tauFloating_ = tauChanged_ = ( tau_ != nullptr );
deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr );
deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr );
resoParFloating_ = resoParChanged_ = kTRUE;
// Fill the rest of the cache
this->updateCache();
// Set the various "parameter-is-floating" flags, used for cache bookkeeping in propagateParUpdates
LauParamFixed isFixed;
nonKnotFloating_ = not std::all_of(params_.begin(), params_.end(), isFixed);
anyKnotFloating_ = not std::all_of(effiPars_.begin(), effiPars_.end(), isFixed);
nothingFloating_ = not (nonKnotFloating_ or anyKnotFloating_);
std::cout << "INFO in LauDecayTimePdf::cacheInfo : nothing floating set to: " << (nothingFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : any knot floating set to: " << (anyKnotFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : non-knot floating set to: " << (nonKnotFloating_ ? "True" : "False") << std::endl;
tauFloating_ = tau_ ? not tau_->fixed() : kFALSE;
deltaMFloating_ = deltaM_ ? not deltaM_->fixed() : kFALSE;
deltaGammaFloating_ = deltaGamma_ ? not deltaGamma_->fixed() : kFALSE;
physicsParFloating_ = ( tauFloating_ or deltaMFloating_ or deltaGammaFloating_ );
std::cout << "INFO in LauDecayTimePdf::cacheInfo : tau floating set to: " << (tauFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaM floating set to: " << (deltaMFloating_ ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaGamma floating set to: " << (deltaGammaFloating_ ? "True" : "False") << std::endl;
resoParFloating_ = kFALSE;
for ( UInt_t i{0}; i < nGauss_; ++i )
{
const Bool_t meanFloating { not mean_[i]->fixed() };
const Bool_t sigmaFloating { not sigma_[i]->fixed() };
resoParFloating_ |= (meanFloating or sigmaFloating);
std::cout << "INFO in LauDecayTimePdf::cacheInfo : mean[" << i << "] floating set to: " << (meanFloating ? "True" : "False") << std::endl;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : sigma[" << i << "] floating set to: " << (sigmaFloating ? "True" : "False") << std::endl;
if ( i < (nGauss_ - 1) ) {
const Bool_t fracFloating { not frac_[i]->fixed() };
resoParFloating_ |= fracFloating;
std::cout << "INFO in LauDecayTimePdf::cacheInfo : frac[" << i << "] floating set to: " << (fracFloating ? "True" : "False") << std::endl;
}
}
}
}
void LauDecayTimePdf::updateCache()
{
// Get the updated values of all parameters
static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();};
if ( anyKnotChanged_ ) {
std::transform( effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), assignValue );
}
if ( tauChanged_ ) {
tauVal_ = tau_->unblindValue();
gammaVal_ = 1.0 / tauVal_;
}
if ( deltaMChanged_ ) {
deltaMVal_ = deltaM_->unblindValue();
}
if ( deltaGammaChanged_ ) {
deltaGammaVal_ = deltaGamma_->unblindValue();
}
if ( resoParChanged_ ) {
std::transform( mean_.begin(), mean_.end(), meanVals_.begin(), assignValue );
std::transform( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), assignValue );
std::transform( frac_.begin(), frac_.end(), fracVals_.begin()+1, assignValue );
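// The first fraction is not an independent parameter:
// derive it as 1 minus the sum of all the others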
fracVals_[0] = std::accumulate( fracVals_.begin()+1, fracVals_.end(), 1.0, std::minus<Double_t>{} );
}
// Calculate the values of all terms for each event
// TODO - need to sort out UInt_t vs ULong_t everywhere!!
const UInt_t nEvents { static_cast<UInt_t>(abscissas_.size()) };
// If any of the physics or resolution parameters have changed we need
// to update everything, otherwise we only need to recalculate the
// efficiency
if ( nonKnotChanged_ ) {
for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) {
const Double_t abscissa { abscissas_[iEvt] };
const Double_t abscissaErr { abscissaErrors_[iEvt] };
this->calcLikelihoodInfo(abscissa, abscissaErr);
expTerms_[iEvt] = expTerm_;
cosTerms_[iEvt] = cosTerm_;
sinTerms_[iEvt] = sinTerm_;
coshTerms_[iEvt] = coshTerm_;
sinhTerms_[iEvt] = sinhTerm_;
effiTerms_[iEvt] = effiTerm_;
}
} else {
for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) {
const Double_t abscissa { abscissas_[iEvt] };
effiTerm_ = this->calcEffiTerm( abscissa );
effiTerms_[iEvt] = effiTerm_;
}
}
// Calculate the normalisation terms
this->calcNorm();
// reset the "parameter-has-changed" flags
anyKnotChanged_ = kFALSE;
tauChanged_ = kFALSE;
deltaMChanged_ = kFALSE;
deltaGammaChanged_ = kFALSE;
physicsParChanged_ = kFALSE;
resoParChanged_ = kFALSE;
nonKnotChanged_ = kFALSE;
}
void LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt)
{
// Extract all the terms and their normalisations
const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ };
const UInt_t normTermElement { needPerEventNormTerms * iEvt };
if (type_ == Hist) {
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(iEvt);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
} else {
expTerm_ = expTerms_[iEvt];
cosTerm_ = cosTerms_[iEvt];
sinTerm_ = sinTerms_[iEvt];
coshTerm_ = coshTerms_[iEvt];
sinhTerm_ = sinhTerms_[iEvt];
normTermExp_ = normTermsExp_[normTermElement];
normTermCos_ = normTermsCos_[normTermElement];
normTermSin_ = normTermsSin_[normTermElement];
normTermCosh_ = normTermsCosh_[normTermElement];
normTermSinh_ = normTermsSinh_[normTermElement];
}
// Extract the decay time error PDF value
if ( needPerEventNormTerms and errHist_ ) {
errHist_->calcLikelihoodInfo(iEvt);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
// Extract the decay time efficiency
effiTerm_ = effiTerms_[iEvt];
}
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa)
{
// Check whether any of the gaussians should be scaled - if any of them should we need the per-event error
if (this->doSmearing() and scaleWithPerEventError_) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<<std::endl;
return;
}
this->calcLikelihoodInfo(abscissa, 0.0);
}
Double_t LauDecayTimePdf::calcEffiTerm( const Double_t abscissa ) const
{
Double_t effiTerm{1.0};
switch( effMethod_ )
{
case EfficiencyMethod::Spline : effiTerm = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break;
case EfficiencyMethod::Binned : effiTerm = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break;
case EfficiencyMethod::Flat : effiTerm = 1.0 ; break;
}
if ( effiTerm > 1.0 ) { effiTerm = 1.0; }
else if ( effiTerm < 0.0 ) { effiTerm = 0.0; }
return effiTerm;
}
void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr)
{
// Check that the decay time and the decay time error are in valid ranges
if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<<abscissa<<
" outside allowed range: ["<<this->minAbscissa()<<","<<this->maxAbscissa()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( (this->doSmearing() and scaleWithPerEventError_) && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) {
std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<<abscissaErr<<
" outside allowed range: ["<<this->minAbscissaError()<<","<<this->maxAbscissaError()<<"]."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
// Determine the decay time efficiency
effiTerm_ = this->calcEffiTerm( abscissa );
// For the histogram PDF just calculate that term and return
if (type_ == Hist){
if ( pdfHist_ ) {
pdfHist_->calcLikelihoodInfo(abscissa);
pdfTerm_ = pdfHist_->getLikelihood();
} else {
pdfTerm_ = 1.0;
}
return;
}
// If we're not using the resolution function, calculate the simple terms and return
if (!this->doSmearing()) {
this->calcNonSmearedTerms(abscissa);
return;
}
// Get all the up-to-date parameter values for the resolution function
std::vector<Double_t> mean { meanVals_ };
std::vector<Double_t> sigma { sigmaVals_ };
std::vector<Double_t> frac { fracVals_ };
// Scale the gaussian parameters by the per-event error on Delta t (if appropriate)
if ( scaleWithPerEventError_ ) {
for (UInt_t i{0}; i<nGauss_; ++i) {
if (scaleMeans_[i]) {
mean[i] *= abscissaErr;
}
if (scaleWidths_[i]) {
sigma[i] *= abscissaErr;
}
}
}
// Calculate x = ( t - mu ) / ( sqrt(2) * sigma ), used by all types
std::vector<Double_t> x(nGauss_);
for (UInt_t i{0}; i<nGauss_; ++i) {
x[i] = ( abscissa - mean[i] ) / ( LauConstants::root2 * sigma[i] );
}
// TODO - this parameter isn't accounted for in the caching bookkeeping yet
Double_t fracPrompt{0.0};
if (type_ == DeltaExp) {
fracPrompt = fracPrompt_->unblindValue();
}
Double_t value(0.0);
if (type_ == Delta || type_ == DeltaExp) {
// Calculate the gaussian function(s)
const Double_t xMax = this->maxAbscissa();
const Double_t xMin = this->minAbscissa();
for (UInt_t i(0); i<nGauss_; ++i) {
if (TMath::Abs(sigma[i]) > 1e-10) {
Double_t exponent(0.0);
Double_t norm(0.0);
Double_t scale = LauConstants::root2*sigma[i];
Double_t scale2 = LauConstants::rootPiBy2*sigma[i];
exponent = -x[i]*x[i];
norm = scale2*(TMath::Erf((xMax - mean[i])/scale)
- TMath::Erf((xMin - mean[i])/scale));
value += frac[i]*TMath::Exp(exponent)/norm;
}
}
}
if (type_ != Delta) {
// Reset values of terms
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
// Calculate values of the PDF convolved with each Gaussian for a given value of the abscissa
for (UInt_t i(0); i<nGauss_; ++i) {
const Double_t sigmaOverRoot2 { sigma[i] / LauConstants::root2 };
const std::complex z { gammaVal_ * sigmaOverRoot2 };
const Double_t expTerm { this->smearedGeneralTerm( z, x[i] ).real() };
expTerm_ += frac[i] * expTerm;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
const std::complex trigTerm { this->smearedGeneralTerm( zTrig, x[i] ) };
const Double_t cosTerm { trigTerm.real() };
const Double_t sinTerm { trigTerm.imag() };
cosTerm_ += frac[i] * cosTerm;
sinTerm_ += frac[i] * sinTerm;
if ( type_ == ExpTrig ) {
coshTerm_ += frac[i] * expTerm;
} else {
const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2 };
const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2 };
const Double_t termH { this->smearedGeneralTerm( zH, x[i] ).real() };
const Double_t termL { this->smearedGeneralTerm( zL, x[i] ).real() };
const Double_t coshTerm { 0.5 * (termH + termL) };
const Double_t sinhTerm { 0.5 * (termH - termL) };
coshTerm_ += frac[i] * coshTerm;
sinhTerm_ += frac[i] * sinhTerm;
}
}
}
if (type_ == DeltaExp) {
value *= fracPrompt;
value += (1.0-fracPrompt)*expTerm_;
} else {
value = expTerm_;
}
}
// Calculate the decay time error PDF value
if ( scaleWithPerEventError_ and errHist_ ) {
const std::vector<Double_t> absErrVec {abscissaErr};
errHist_->calcLikelihoodInfo(absErrVec);
errTerm_ = errHist_->getLikelihood();
} else {
errTerm_ = 1.0;
}
}
void LauDecayTimePdf::calcNonSmearedTerms(const Double_t abscissa)
{
// Reset values of terms
errTerm_ = 1.0;
expTerm_ = 0.0;
cosTerm_ = 0.0;
sinTerm_ = 0.0;
coshTerm_ = 0.0;
sinhTerm_ = 0.0;
if ( type_ == Hist || type_ == Delta ){
return;
}
if (method_ == DecayTime) {
expTerm_ = TMath::Exp(-abscissa*gammaVal_);
} else if (method_ == DecayTimeDiff) {
expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gammaVal_);
}
// Calculate also the terms related to cosine and sine
if (type_ == ExpTrig) {
coshTerm_ = expTerm_;
sinhTerm_ = 0.0;
cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
}
// Calculate also the terms related to cosh, sinh, cosine, and sine
else if (type_ == ExpHypTrig) {
coshTerm_ = TMath::CosH(0.5*deltaGammaVal_*abscissa)*expTerm_;
sinhTerm_ = TMath::SinH(0.5*deltaGammaVal_*abscissa)*expTerm_;
cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_;
sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_;
}
}
std::complex<Double_t> LauDecayTimePdf::smearedGeneralTerm( const std::complex<Double_t>& z, const Double_t x )
{
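// Gaussian smearing of the general decay term exp(-zt'), with complex z
// encoding Gamma (and, via its imaginary part, DeltaM or DeltaGamma):
// the convolution evaluates to 0.5 * exp(-x^2) * w( i(z-x) ), where w is the
// Faddeeva function. Where the Faddeeva evaluation becomes unstable (large
// negative imaginary argument) the equivalent erfc formulation is used,
// since w(zeta) = exp(-zeta^2) * erfc(-i zeta)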
using namespace std::complex_literals;
const std::complex arg1 { 1i * (z - x) };
const std::complex arg2 { -(x*x) - (arg1*arg1) };
const std::complex conv { ( arg1.imag() < -5.0 ) ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * std::exp(-(x*x)) * RooMath::faddeeva(arg1) };
return conv;
}
std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs)
{
// From arXiv:1407.0748; not clear whether the complex formulation is faster in this case
const LauComplex denom { gammaVal_, -deltaMVal_ };
const LauComplex exponent { -gammaVal_, deltaMVal_ };
const LauComplex num0 { -exponent.scale(minAbs).exp() };
const LauComplex num1 { -exponent.scale(maxAbs).exp() };
const LauComplex integral { (num1 - num0) / denom };
return {integral.re(), integral.im()};
}
Double_t LauDecayTimePdf::nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs)
{
return tauVal_ * ( TMath::Exp(-minAbs*gammaVal_) - TMath::Exp(-maxAbs*gammaVal_) );
}
std::pair<Double_t,Double_t> LauDecayTimePdf::nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs)
{
// Use the exponential formulation rather than cosh, sinh.
// Fewer terms (reused for each), but not guaranteed to be faster.
const Double_t gammaH { gammaVal_ - 0.5 * deltaGammaVal_ };
const Double_t gammaL { gammaVal_ + 0.5 * deltaGammaVal_ };
const Double_t tauH { 1.0 / gammaH };
const Double_t tauL { 1.0 / gammaL };
const Double_t nL1 { -TMath::Exp(-gammaL * maxAbs) * tauL };
const Double_t nH1 { -TMath::Exp(-gammaH * maxAbs) * tauH };
const Double_t nL0 { -TMath::Exp(-gammaL * minAbs) * tauL };
const Double_t nH0 { -TMath::Exp(-gammaH * minAbs) * tauH };
const Double_t coshIntegral { 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ) };
const Double_t sinhIntegral { 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ) };
return {coshIntegral, sinhIntegral};
}
std::complex<Double_t> LauDecayTimePdf::smearedGeneralIntegral(const std::complex<Double_t>& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu)
{
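// Analytic integral of the smeared general term between minAbs and maxAbs:
// at each limit the antiderivative is erf(x) - exp(-x^2) * w( i(z-x) ),
// with the overall factor sigma/(2*sqrt(2)*z) applied at the end; the erfc
// branch again protects against unstable Faddeeva evaluations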
using namespace std::complex_literals;
const Double_t x1 { (maxAbs - mu) / (2.0 * sigmaOverRoot2) };
const Double_t x0 { (minAbs - mu) / (2.0 * sigmaOverRoot2) };
const std::complex arg1 { 1i * (z - x1) };
const std::complex arg0 { 1i * (z - x0) };
std::complex integral = 0.0 + 0i;
if(arg1.imag() < -5.0)
{integral = RooMath::erf(x1) - std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);}
else
{integral = RooMath::erf(x1) - TMath::Exp(-(x1*x1)) * RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.0)
{integral -= RooMath::erf(x0) - std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);}
else
{integral -= RooMath::erf(x0) - TMath::Exp(-(x0*x0)) * RooMath::faddeeva(arg0);}
integral *= (sigmaOverRoot2 / (2.0 * z));
return integral;
}
void LauDecayTimePdf::calcNorm()
{
// If we're not doing per-event scaling then we only need to calculate things once
// TODO - need to sort out UInt_t vs ULong_t everywhere!!
const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ };
const UInt_t nEvents { needPerEventNormTerms ? static_cast<UInt_t>(abscissaErrors_.size()) : 1 };
// Get all the up-to-date parameter values
std::vector<Double_t> means { meanVals_ };
std::vector<Double_t> sigmas { sigmaVals_ };
std::vector<Double_t> fracs { fracVals_ };
for ( UInt_t iEvt{0}; iEvt < nEvents; ++iEvt ) {
// first reset integrals to zero
normTermExp_ = 0.0;
normTermCos_ = 0.0;
normTermSin_ = 0.0;
normTermCosh_ = 0.0;
normTermSinh_ = 0.0;
// Scale the gaussian parameters by the per-event error on decay time (if appropriate)
if ( needPerEventNormTerms ) {
const Double_t abscissaErr { abscissaErrors_[iEvt] };
for (UInt_t i{0}; i<nGauss_; ++i) {
if (scaleMeans_[i]) {
means[i] = meanVals_[i] * abscissaErr;
}
if (scaleWidths_[i]) {
sigmas[i] = sigmaVals_[i] * abscissaErr;
}
}
}
switch ( effMethod_ ) {
case EfficiencyMethod::Flat :
{
// No efficiency variation
// Simply calculate integrals over full range
const Double_t uniformEffVal {1.0};
if( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , uniformEffVal, means, sigmas, fracs );}
else
{this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, uniformEffVal );}
break;
}
case EfficiencyMethod::Binned :
{
// Efficiency varies as piecewise constant
// Total integral is sum of integrals in each bin, each weighted by efficiency in that bin
const Int_t nBins { effiHist_->GetNbinsX() };
for ( Int_t bin{1}; bin <= nBins; ++bin ) {
const Double_t loEdge {effiHist_->GetBinLowEdge(bin)};
const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)};
const Double_t effVal {effiHist_->GetBinContent(bin)};
if ( this -> doSmearing() )
{this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );}
else
{this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );}
}
break;
}
case EfficiencyMethod::Spline :
{
// Efficiency varies as piecewise polynomial
// Use methods from https://arxiv.org/abs/1407.0748 section 4 to calculate
const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 };
if ( this->doSmearing() ) {
if ( nonKnotChanged_ ) {
if ( resoParChanged_ ) {
this->calcMeanAndSigmaPowers( iEvt, means, sigmas );
}
this->calcKVectors( iEvt );
}
for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg)
{
this->calcSmearedSplinePartialIntegrals( iEvt, iSeg, means, sigmas, fracs );
}
} else {
for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg)
{
this->calcNonSmearedSplinePartialIntegrals( iSeg );
}
}
break;
}
}
normTermsExp_[iEvt] = normTermExp_;
normTermsCos_[iEvt] = normTermCos_;
normTermsSin_[iEvt] = normTermSin_;
normTermsCosh_[iEvt] = normTermCosh_;
normTermsSinh_[iEvt] = normTermSinh_;
}
}
// TODO - Mildly concerned this is void rather than returning the integrals
// (but this would require refactoring for different return values).
// As long as it doesn't get called outside of calcNorm() it should be fine - DPO
// (TL: comment applies to all calc*PartialIntegrals functions.)
void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight)
{
/* TODO - need to implement something for DecayTimeDiff everywhere
if (method_ == DecayTimeDiff) {
// TODO - there should be some TMath::Abs here surely?
normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gammaVal_) - TMath::Exp(-minAbs*gammaVal_));
}
*/
const Double_t normTermExp { weight * this->nonSmearedExpIntegral(minAbs, maxAbs) };
normTermExp_ += normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs);
normTermCos_ += weight * cosIntegral;
normTermSin_ += weight * sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += normTermExp;
} else {
auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs);
normTermCosh_ += weight * coshIntegral;
normTermSinh_ += weight * sinhIntegral;
}
}
}
void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };
const std::complex z { gammaVal_ * sigmaOverRoot2, 0.0 };
const std::complex integral { this->smearedGeneralIntegral( z, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
const Double_t normTermExp { weight * integral.real() };
normTermExp_ += fractions[i] * normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 };
const std::complex integralTrig { this->smearedGeneralIntegral( zTrig, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
const Double_t cosIntegral { integralTrig.real() };
const Double_t sinIntegral { integralTrig.imag() };
normTermCos_ += fractions[i] * weight * cosIntegral;
normTermSin_ += fractions[i] * weight * sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += fractions[i] * normTermExp;
} else {
// Heavy (H) eigenstate case
const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
const std::complex integralH { this->smearedGeneralIntegral( zH, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
// Light (L) eigenstate case
const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };
const std::complex integralL { this->smearedGeneralIntegral( zL, minAbs, maxAbs, sigmaOverRoot2, means[i] ) };
const std::complex coshIntegral { 0.5 * (integralH + integralL) };
const std::complex sinhIntegral { 0.5 * (integralH - integralL) };
normTermCosh_ += fractions[i] * weight * coshIntegral.real();
normTermSinh_ += fractions[i] * weight * sinhIntegral.real();
}
}
}
}
void LauDecayTimePdf::calcMeanAndSigmaPowers( const UInt_t iEvt, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas )
{
// Calculate powers of mu and sigma/sqrt(2) needed by all terms in the smeared spline normalisation
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t mu { means[i] };
const Double_t sigmaOverRoot2 { sigmas[i] / LauConstants::root2 };
meanPowerVals_[iEvt][i] = {1.0, mu, mu*mu, mu*mu*mu};
sigmaPowerVals_[iEvt][i] = {sigmaOverRoot2,
sigmaOverRoot2*sigmaOverRoot2,
sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2,
sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2*sigmaOverRoot2};
}
}
void LauDecayTimePdf::calcKVectors( const UInt_t iEvt )
{
using namespace std::complex_literals;
std::complex<Double_t> z;
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t sigmaOverRoot2 { sigmaPowerVals_[iEvt][i][0] };
z = gammaVal_ * sigmaOverRoot2;
expTermKvecVals_[iEvt][i] = this->generateKvector(z);
if ( type_ == ExpTrig || type_ == ExpHypTrig ) {
z.real( gammaVal_ * sigmaOverRoot2 );
z.imag( -deltaMVal_ * sigmaOverRoot2 );
trigTermKvecVals_[iEvt][i] = this->generateKvector(z);
if ( type_ == ExpHypTrig ) {
z.real( ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
z.imag( 0.0 );
hypHTermKvecVals_[iEvt][i] = this->generateKvector(z);
z.real( ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 );
z.imag( 0.0 );
hypLTermKvecVals_[iEvt][i] = this->generateKvector(z);
}
}
}
}
void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t iEvt, const UInt_t splineIndex, const std::vector<Double_t>& means, const std::vector<Double_t>& sigmas, const std::vector<Double_t>& fractions)
{
using namespace std::complex_literals;
const std::vector<Double_t>& xVals { effiFun_->getXValues() };
const Double_t minAbs = xVals[splineIndex];
const Double_t maxAbs = xVals[splineIndex+1];
const std::array<Double_t,4> coeffs { effiFun_->getCoefficients(splineIndex) };
std::complex<Double_t> z;
for (UInt_t i(0); i<nGauss_; ++i)
{
const Double_t sigmaOverRoot2 { sigmaPowerVals_[iEvt][i][0] };
z = gammaVal_ * sigmaOverRoot2;
if ( nonKnotChanged_ ) {
expTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
}
const Double_t normTermExp { this->smearedSplineNormalise(coeffs, expTermKvecVals_[iEvt][i], expTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() };
normTermExp_ += fractions[i] * normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
z.real( gammaVal_ * sigmaOverRoot2 );
z.imag( -deltaMVal_ * sigmaOverRoot2 );
if ( nonKnotChanged_ ) {
trigTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]);
}
std::complex integral { this->smearedSplineNormalise(coeffs, trigTermKvecVals_[iEvt][i], trigTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]) };
const Double_t cosIntegral { integral.real() };
const Double_t sinIntegral { integral.imag() };
normTermCos_ += fractions[i] * cosIntegral;
normTermSin_ += fractions[i] * sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += fractions[i] * normTermExp;
} else {
const std::complex zH { ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
const std::complex zL { ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 };
if ( nonKnotChanged_ ) {
hypHTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zH, sigmas[i], means[i]);
hypLTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zL, sigmas[i], means[i]);
}
const Double_t integralH { this->smearedSplineNormalise(coeffs, hypHTermKvecVals_[iEvt][i], hypHTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() };
const Double_t integralL { this->smearedSplineNormalise(coeffs, hypLTermKvecVals_[iEvt][i], hypLTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() };
const Double_t coshIntegral { 0.5 * (integralH + integralL) };
const Double_t sinhIntegral { 0.5 * (integralH - integralL) };
normTermCosh_ += fractions[i] * coshIntegral;
normTermSinh_ += fractions[i] * sinhIntegral;
}
}
}
}
std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateKvector(const std::complex<Double_t>& z)
{
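// The first four K_n(z) factors of arXiv:1407.0748 (section 4), used in the
// spline-weighted normalisation:
// K_0 = 1/(2z), K_1 = 1/(2z^2), K_2 = (1/z)(1 + 1/z^2), K_3 = (3/z^2)(1 + 1/z^2)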
const std::complex<Double_t> zr { 1.0/z };
const std::complex<Double_t> zr2 { zr*zr };
std::array<std::complex<Double_t>,4> K {
0.5*zr,
0.5*zr2,
zr*(1.0+zr2),
3.0*zr2*(1.0+zr2)
};
return K;
}
std::array<std::complex<Double_t>,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& z, const Double_t sigma, const Double_t mean)
{
using namespace std::complex_literals;
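// The M-vector of arXiv:1407.0748 (section 4): evaluate the first four M_n at
// the scaled segment boundaries x1 and x0 and return the differences
// M_n(x1) - M_n(x0); each M_n combines a Gaussian term exp(-x^2)/sqrt(pi)
// with the Faddeeva-based "fad" term defined below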
std::array<std::complex<Double_t>,4> M0 {0.,0.,0.,0.};
std::array<std::complex<Double_t>,4> M1 {0.,0.,0.,0.};
std::array<std::complex<Double_t>,4> M {0.,0.,0.,0.};
const Double_t x1 { (maxAbs - mean) / (LauConstants::root2 * sigma) };
const Double_t x0 { (minAbs - mean) / (LauConstants::root2 * sigma) };
// Values used repeatedly below
const Double_t ex2_1 { TMath::Exp(-(x1*x1)) };
const Double_t ex2_0 { TMath::Exp(-(x0*x0)) };
const Double_t sqrtPir { 1.0 / LauConstants::rootPi };
const std::complex arg1 { 1i * (z - x1) };
const std::complex arg0 { 1i * (z - x0) };
// fad = the Faddeeva term times the corresponding exp(-x^2) value (computed differently depending on the domain, for numerical stability)
std::complex<Double_t> fad1;
std::complex<Double_t> fad0;
if(arg1.imag() < -5.0)
{fad1 = std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);}
else
{fad1 = ex2_1*RooMath::faddeeva(arg1);}
if(arg0.imag() < -5.0)
{fad0 = std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);}
else
{fad0 = ex2_0*RooMath::faddeeva(arg0);}
// Evaluate the M functions at x1
M1[0] = RooMath::erf(x1) - fad1;
M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1);
M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1);
M1[3] = 4. * (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1);
// Evaluate the M functions again at x0
M0[0] = RooMath::erf(x0) - fad0;
M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0);
M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0);
M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0);
for(Int_t i = 0; i < 4; ++i){M[i] = M1[i] - M0[i];}
return M;
}
std::complex<Double_t> LauDecayTimePdf::smearedSplineNormalise(const std::array<Double_t,4>& coeffs, const std::array<std::complex<Double_t>,4>& K, const std::array<std::complex<Double_t>,4>& M, const std::array<Double_t,4>& sigmaPowers, const std::array<Double_t,4>& meanPowers) const
{
using namespace std::complex_literals;
// Triple sum to get N (eqns 29 and 31 in https://arxiv.org/pdf/1407.0748.pdf)
std::complex<Double_t> N = 0. + 0i;
for(Int_t k = 0; k < 4; ++k){
for(Int_t n = 0; n <= k; ++n){
for(Int_t i = 0; i <= n; ++i){
// The binomial coefficient terms
const Double_t b { binom_[n][i]*binom_[k][n] };
N += sigmaPowers[n]*coeffs[k]*meanPowers[k-n]*K[i]*M[n-i]*b;
}
}
}
return N;
}
void LauDecayTimePdf::calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex)
{
using namespace std::complex_literals;
const std::complex u { gammaVal_ };
const Double_t normTermExp { this->nonSmearedSplineNormalise(splineIndex, u, expTermIkVals_).real() };
normTermExp_ += normTermExp;
if ( type_ == ExpTrig or type_ == ExpHypTrig ) {
const std::complex uTrig { gammaVal_, -deltaMVal_ };
const std::complex integral { this->nonSmearedSplineNormalise(splineIndex, uTrig, trigTermIkVals_) };
const Double_t cosIntegral { integral.real() };
const Double_t sinIntegral { integral.imag() };
normTermCos_ += cosIntegral;
normTermSin_ += sinIntegral;
if ( type_ == ExpTrig ) {
normTermCosh_ += normTermExp;
} else {
const std::complex uH { gammaVal_ - 0.5 * deltaGammaVal_ };
const std::complex uL { gammaVal_ + 0.5 * deltaGammaVal_ };
const Double_t integralH { this->nonSmearedSplineNormalise(splineIndex, uH, hypHTermIkVals_).real() };
const Double_t integralL { this->nonSmearedSplineNormalise(splineIndex, uL, hypLTermIkVals_).real() };
const Double_t coshIntegral { 0.5 * (integralH + integralL) };
const Double_t sinhIntegral { 0.5 * (integralH - integralL) };
normTermCosh_ += coshIntegral;
normTermSinh_ += sinhIntegral;
}
}
}
std::complex<Double_t> LauDecayTimePdf::calcIk( const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex<Double_t>& u )
{
// Taking mu = 0; this does not have to be the case in general
auto G = [&u](const Int_t n){return -TMath::Factorial(n)/std::pow(u,n+1);}; // power n+1 used rather than n: this corrects a maths error in the paper
auto H = [&u](const Int_t n, const Double_t t){return std::pow(t,n)*std::exp(-u*t);};
std::complex ans { 0.0, 0.0 };
for (UInt_t j = 0; j <= k; ++j)
{ans += binom_[k][j]*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );}
return ans;
}
std::complex<Double_t> LauDecayTimePdf::nonSmearedSplineNormalise( const UInt_t splineIndex, const std::complex<Double_t>& u, std::vector<std::array<std::complex<Double_t>,4>>& cache )
{
// u = Gamma - iDeltam in general
using namespace std::complex_literals;
const std::vector<Double_t>& xVals = effiFun_ -> getXValues();
const Double_t minAbs = xVals[splineIndex];
const Double_t maxAbs = xVals[splineIndex+1];
std::array<Double_t,4> coeffs = effiFun_->getCoefficients(splineIndex);
// Sum to get N (eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 with the corrected maths error)
std::complex<Double_t> N = 0. + 0i;
if ( nonKnotChanged_ ) {
for(UInt_t i = 0; i < 4; ++i) {
cache[splineIndex][i] = calcIk(i, minAbs, maxAbs, u);
N += cache[splineIndex][i] * coeffs[i];
}
} else {
for(UInt_t i = 0; i < 4; ++i) {
N += cache[splineIndex][i] * coeffs[i];
}
}
return N;
}
Double_t LauDecayTimePdf::generateError(Bool_t forceNew)
{
if (errHist_ && (forceNew || !abscissaErrorGenerated_)) {
LauFitData errData = errHist_->generate(nullptr);
abscissaError_ = errData.at(this->varErrName());
abscissaErrorGenerated_ = kTRUE;
} else {
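// No error histogram supplied: generate the decay-time error from a Landau
// distribution with the MPV and sigma set at construction, repeating until
// the generated value falls within the allowed range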
while (forceNew || !abscissaErrorGenerated_) {
abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_);
if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) {
abscissaErrorGenerated_ = kTRUE;
forceNew = kFALSE;
}
}
}
return abscissaError_;
}
/*
LauFitData LauDecayTimePdf::generate(const LauKinematics* kinematics)
{
// generateError SHOULD have been called before this
// function but will call it here just to make sure
// (has no effect if it has already been called)
abscissaError_ = this->generateError();
// If the PDF is scaled by the per-event error then need to update the PDF height for each event
Bool_t scale(kFALSE);
for (std::vector<Bool_t>::const_iterator iter = scaleMeans_.begin(); iter != scaleMeans_.end(); ++iter) {
scale |= (*iter);
}
for (std::vector<Bool_t>::const_iterator iter = scaleWidths_.begin(); iter != scaleWidths_.end(); ++iter) {
scale |= (*iter);
}
if (scale || (!this->heightUpToDate() && !this->cachePDF())) {
this->calcPDFHeight(kinematics);
this->heightUpToDate(kTRUE);
}
// Generate the value of the abscissa.
Bool_t gotAbscissa(kFALSE);
Double_t genVal(0.0);
Double_t genPDFVal(0.0);
LauFitData genAbscissa;
const Double_t xMin = this->minAbscissa();
const Double_t xMax = this->maxAbscissa();
const Double_t xRange = xMax - xMin;
while (!gotAbscissa) {
genVal = LauRandom::randomFun()->Rndm()*xRange + xMin;
this->calcLikelihoodInfo(genVal, abscissaError_);
genPDFVal = this->getUnNormLikelihood();
if (LauRandom::randomFun()->Rndm() <= genPDFVal/this->getMaxHeight()) {gotAbscissa = kTRUE;}
if (genPDFVal > this->getMaxHeight()) {
std::cerr<<"Warning in LauDecayTimePdf::generate()."
<<" genPDFVal = "<<genPDFVal<<" is larger than the specified PDF height "
<<this->getMaxHeight()<<" for the abscissa = "<<genVal<<". Need to reset height to be larger than "
<<genPDFVal<<" by using the setMaxHeight(Double_t) function"
<<" and re-run the Monte Carlo generation!"<<std::endl;
}
}
genAbscissa[this->varName()] = genVal;
// mark that we need a new error to be generated next time
abscissaErrorGenerated_ = kFALSE;
return genAbscissa;
}
*/
void LauDecayTimePdf::setErrorHisto(const TH1* hist)
{
if ( errHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<<std::endl;
return;
}
errHist_ = new Lau1DHistPdf(this->varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError());
}
void LauDecayTimePdf::setHistoPdf(const TH1* hist)
{
if ( pdfHist_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<<std::endl;
return;
}
pdfHist_ = new Lau1DHistPdf(this->varName(), hist, this->minAbscissa(), this->maxAbscissa());
}
void LauDecayTimePdf::setEffiHist(const TH1* hist)
{
if ( effiHist_ != nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl;
return;
}
if ( hist == nullptr ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." << std::endl;
return;
}
// Check boundaries of histogram align with our abscissa's range
const Double_t axisMin {hist->GetXaxis()->GetXmin()};
const Double_t axisMax {hist->GetXaxis()->GetXmax()};
if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) {
std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n"
<< " : histogram range: " << axisMin << " - " << axisMax << "\n"
<< " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n"
<< " : Disregarding this histogram." << std::endl;
return;
}
effiHist_ = dynamic_cast<TH1*>( hist->Clone() );
// Normalise the histogram if the (relative) efficiencies have values greater than 1
if(effiHist_ -> GetMaximum() > 1.)
{
effiHist_ -> Scale( 1. / effiHist_->Integral() ); //Normalise
std::cout << "INFO in LauDecayTimePdf::setEffiHist : Supplied histogram for Decay Time Acceptance has values too large: normalising..." << std::endl;
}
}
void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline)
{
if ( effiFun_ != nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."<<std::endl;
return;
}
if ( spline == nullptr ) {
std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : supplied efficiency spline pointer is null."<<std::endl;
return;
}
effiFun_ = spline;
std::vector<Double_t> effis = effiFun_->getYValues();
effiPars_.resize( effis.size() );
effiParVals_.resize( effis.size() );
size_t index = 0;
for( Double_t& effi : effis )
{
effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data(), index ), effi, 0.0, 1.0, kTRUE );
++index;
}
}
LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName)
{
for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return nullptr;
}
const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const
{
for ( std::vector<LauAbsRValue*>::const_iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
if ((*iter)->name().Contains(parName)) {
return (*iter);
}
}
std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl;
return nullptr;
}
void LauDecayTimePdf::updatePulls()
{
for ( std::vector<LauAbsRValue*>::iterator iter = params_.begin(); iter != params_.end(); ++iter ) {
std::vector<LauParameter*> params = (*iter)->getPars();
for (std::vector<LauParameter*>::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) {
if (!(*iter)->fixed()) {
(*params_iter)->updatePull();
}
}
}
}
void LauDecayTimePdf::propagateParUpdates()
{
// If none of the parameters are floating there's nothing to do
if ( nothingFloating_ ) {
return;
}
// Otherwise, determine which of the floating parameters have changed (if any) and act accordingly
static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;};
anyKnotChanged_ = anyKnotFloating_ and not std::equal(effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), checkEquality);
// Update the acceptance spline if any of the knot values have changed
if ( anyKnotChanged_ ) {
effiFun_->updateYValues( effiPars_ );
}
// Check also the physics and resolution parameters
if ( nonKnotFloating_ ) {
if ( physicsParFloating_ ) {
tauChanged_ = tauFloating_ and not checkEquality(tau_, tauVal_);
deltaMChanged_ = deltaMFloating_ and not checkEquality(deltaM_, deltaMVal_);
deltaGammaChanged_ = deltaGammaFloating_ and not checkEquality(deltaGamma_, deltaGammaVal_);
physicsParChanged_ = tauChanged_ || deltaMChanged_ || deltaGammaChanged_;
}
if ( resoParFloating_ ) {
resoParChanged_ = kFALSE;
resoParChanged_ |= not std::equal( mean_.begin(), mean_.end(), meanVals_.begin(), checkEquality );
resoParChanged_ |= not std::equal( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), checkEquality );
resoParChanged_ |= not std::equal( frac_.begin(), frac_.end(), fracVals_.begin()+1, checkEquality );
}
nonKnotChanged_ = physicsParChanged_ or resoParChanged_;
}
// If nothing has changed, there's nothing to do
if ( not ( anyKnotChanged_ or nonKnotChanged_ ) ) {
return;
}
// Otherwise we need to update the cache
this->updateCache();
}
/*
void LauDecayTimePdf::updateEffiSpline(const std::vector<LauParameter*>& effiPars)
{
if (effiPars.size() != effiFun_->getnKnots()){
std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
effiFun_->updateYValues(effiPars);
}
*/
diff --git a/src/LauTimeDepFitModel.cc b/src/LauTimeDepFitModel.cc
index a3a67ed..b2d8526 100644
--- a/src/LauTimeDepFitModel.cc
+++ b/src/LauTimeDepFitModel.cc
@@ -1,2902 +1,2917 @@
/*
Copyright 2006 University of Warwick
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Laura++ package authors:
John Back
Paul Harrison
Thomas Latham
*/
/*! \file LauTimeDepFitModel.cc
\brief File containing implementation of LauTimeDepFitModel class.
*/
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <map>
#include <vector>
#include "TFile.h"
#include "TMinuit.h"
#include "TRandom.h"
#include "TSystem.h"
#include "TVirtualFitter.h"
#include "LauAbsBkgndDPModel.hh"
#include "LauAbsCoeffSet.hh"
#include "LauAbsPdf.hh"
#include "LauAsymmCalc.hh"
#include "LauComplex.hh"
#include "LauConstants.hh"
#include "LauDPPartialIntegralInfo.hh"
#include "LauDaughters.hh"
#include "LauDecayTimePdf.hh"
#include "LauFitNtuple.hh"
#include "LauGenNtuple.hh"
#include "LauIsobarDynamics.hh"
#include "LauKinematics.hh"
#include "LauParamFixed.hh"
#include "LauPrint.hh"
#include "LauRandom.hh"
#include "LauScfMap.hh"
#include "LauTimeDepFitModel.hh"
#include "LauFlavTag.hh"
ClassImp(LauTimeDepFitModel)
LauTimeDepFitModel::LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag) : LauAbsFitModel(),
sigModelB0bar_(modelB0bar),
sigModelB0_(modelB0),
kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0),
kinematicsB0_(modelB0 ? modelB0->getKinematics() : 0),
usingBkgnd_(kFALSE),
flavTag_(flavTag),
curEvtTrueTagFlv_(LauFlavTag::Unknown),
curEvtDecayFlv_(LauFlavTag::Unknown),
nSigComp_(0),
nSigDPPar_(0),
nDecayTimePar_(0),
nExtraPdfPar_(0),
nNormPar_(0),
nCalibPar_(0),
nTagEffPar_(0),
nEffiPar_(0),
nAsymPar_(0),
coeffsB0bar_(0),
coeffsB0_(0),
coeffPars_(0),
fitFracB0bar_(0),
fitFracB0_(0),
fitFracAsymm_(0),
acp_(0),
meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0),
meanEffB0_("meanEffB0",0.0,0.0,1.0),
DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0),
DPRateB0_("DPRateB0",0.0,0.0,100.0),
signalEvents_(0),
signalAsym_(0),
cpevVarName_(""),
cpEigenValue_(CPEven),
evtCPEigenVals_(0),
deltaM_("deltaM",0.0),
deltaGamma_("deltaGamma",0.0),
tau_("tau",LauConstants::tauB0),
phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE),
sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -1.0, 1.0, kFALSE),
cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -1.0, 1.0, kFALSE),
useSinCos_(kFALSE),
phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)),
signalDecayTimePdf_(),
- backgroundDecayTimePdfs_(),
+ BkgndDecayTimePdfs_(),
curEvtDecayTime_(0.0),
curEvtDecayTimeErr_(0.0),
sigExtraPdf_(),
sigFlavTagPdf_(),
bkgdFlavTagPdf_(),
AProd_("AProd",0.0,-1.0,1.0,kTRUE),
iterationsMax_(100000000),
nGenLoop_(0),
ASq_(0.0),
aSqMaxVar_(0.0),
aSqMaxSet_(1.25),
storeGenAmpInfo_(kFALSE),
signalTree_(),
reuseSignal_(kFALSE),
sigDPLike_(0.0),
sigExtraLike_(0.0),
sigFlavTagLike_(0.0),
bkgdFlavTagLike_(0.0),
sigTotalLike_(0.0)
{
// Set up ftag here?
// Make sure that the integration scheme will be symmetrised
sigModelB0bar_->forceSymmetriseIntegration(kTRUE);
sigModelB0_->forceSymmetriseIntegration(kTRUE);
}
LauTimeDepFitModel::~LauTimeDepFitModel()
{
for ( LauAbsPdf* pdf : sigExtraPdf_ ) {
delete pdf;
}
for (std::vector<LauEmbeddedData*>::iterator iter = bkgndTree_.begin(); iter != bkgndTree_.end(); ++iter){
- delete *(iter);
+ delete *(iter);
}
}
void LauTimeDepFitModel::setupBkgndVectors()
{
UInt_t nBkgnds = this->nBkgndClasses();
BkgndDPModels_.resize( nBkgnds );
+ BkgndDecayTimePdfs_.resize( nBkgnds );
BkgndPdfs_.resize( nBkgnds );
bkgndEvents_.resize( nBkgnds );
bkgndAsym_.resize( nBkgnds );
bkgndTree_.resize( nBkgnds );
reuseBkgnd_.resize( nBkgnds );
bkgndDPLike_.resize( nBkgnds );
bkgndExtraLike_.resize( nBkgnds );
bkgndTotalLike_.resize( nBkgnds );
}
void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents)
{
if ( nSigEvents == 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The LauParameter pointer is null." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( signalEvents_ != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl;
return;
}
if ( signalAsym_ != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl;
return;
}
signalEvents_ = nSigEvents;
signalEvents_->name("signalEvents");
Double_t value = nSigEvents->value();
signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0));
signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE);
}
void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym)
{
if ( nSigEvents == 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The event LauParameter pointer is null." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( sigAsym == 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( signalEvents_ != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl;
return;
}
if ( signalAsym_ != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl;
return;
}
signalEvents_ = nSigEvents;
signalEvents_->name("signalEvents");
Double_t value = nSigEvents->value();
signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0));
signalAsym_ = sigAsym;
signalAsym_->name("signalAsym");
signalAsym_->range(-1.0,1.0);
}
void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents)
{
if ( nBkgndEvents == 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBgkndEvents : The background yield LauParameter pointer is null." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl;
std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() );
if ( bkgndEvents_[bkgndID] != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl;
return;
}
if ( bkgndAsym_[bkgndID] != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl;
return;
}
nBkgndEvents->name( nBkgndEvents->name()+"Events" );
if ( nBkgndEvents->isLValue() ) {
Double_t value = nBkgndEvents->value();
LauParameter* yield = dynamic_cast<LauParameter*>( nBkgndEvents );
yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0));
}
bkgndEvents_[bkgndID] = nBkgndEvents;
bkgndAsym_[bkgndID] = new LauParameter(nBkgndEvents->name()+"Asym",0.0,-1.0,1.0,kTRUE);
}
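// Usage sketch (the class name "comb" is illustrative only): after the
// background classes have been declared via setBkgndClassNames,
//     model.setNBkgndEvents( new LauParameter( "comb", 500.0, 0.0, 2000.0, kFALSE ) );
// registers the yield for class "comb"; note that the parameter's name
// must match the declared background class name exactly.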
void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym)
{
if ( nBkgndEvents == 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( bkgndAsym == 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background asym LauParameter pointer is null." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl;
std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() );
if ( bkgndEvents_[bkgndID] != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl;
return;
}
if ( bkgndAsym_[bkgndID] != 0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl;
return;
}
nBkgndEvents->name( nBkgndEvents->name()+"Events" );
if ( nBkgndEvents->isLValue() ) {
Double_t value = nBkgndEvents->value();
LauParameter* yield = dynamic_cast<LauParameter*>( nBkgndEvents );
yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0));
}
bkgndEvents_[bkgndID] = nBkgndEvents;
bkgndAsym->name( nBkgndEvents->name()+"Asym" );
if ( bkgndAsym->isLValue() ) {
LauParameter* asym = dynamic_cast<LauParameter*>( bkgndAsym );
asym->range(-1.0, 1.0);
}
bkgndAsym_[bkgndID] = bkgndAsym;
}
void LauTimeDepFitModel::setSignalDtPdf(LauDecayTimePdf* pdf)
{
if (pdf==0) {
std::cerr<<"ERROR in LauTimeDepFitModel::setSignalDtPdf : The PDF pointer is null, not adding it."<<std::endl;
return;
}
signalDecayTimePdf_ = pdf;
}
-void LauTimeDepFitModel::setBackgroundDtPdf(LauDecayTimePdf* pdf)
+void LauTimeDepFitModel::setBkgndDtPdf(const TString& bkgndClass, LauDecayTimePdf* pdf)
{
// TODO If these are all histograms shouldn't need to add much more code in other functions
if (pdf==0) {
- std::cerr<<"ERROR in LauTimeDepFitModel::setBackgroundDtPdf : The PDF pointer is null, not adding it."<<std::endl;
+ std::cerr<<"ERROR in LauTimeDepFitModel::setBkgndDtPdf : The PDF pointer is null, not adding it."<<std::endl;
return;
}
- backgroundDecayTimePdfs_.push_back(pdf);
+
+ // check that this background name is valid
+ if ( ! this->validBkgndClass( bkgndClass) ) {
+ std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDtPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl;
+ std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
+ return;
+ }
+ UInt_t bkgndID = this->bkgndClassID( bkgndClass );
+ BkgndDecayTimePdfs_[bkgndID] = pdf;
+
usingBkgnd_ = kTRUE;
}
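// Usage sketch (names illustrative): one decay-time PDF per declared
// background class, e.g.
//     model.setBkgndDtPdf( "comb", combDtPdf );
// The PDF is stored at the slot given by bkgndClassID("comb"), keeping it
// aligned with the DP model and extra PDFs registered for the same class.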
void LauTimeDepFitModel::setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* model)
{
if (model==0) {
std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null." << std::endl;
return;
}
// check that this background name is valid
if ( ! this->validBkgndClass( bkgndClass) ) {
std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModel : Invalid background class \"" << bkgndClass << "\"." << std::endl;
std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
return;
}
UInt_t bkgndID = this->bkgndClassID( bkgndClass );
BkgndDPModels_[bkgndID] = model;
usingBkgnd_ = kTRUE;
}
void LauTimeDepFitModel::setSignalPdfs(LauAbsPdf* pdf)
{
// These "extra variables" are assumed to be purely kinematical, like mES and DeltaE
//or making use of Rest of Event information, and therefore independent of whether
//the parent is a B0 or a B0bar. If this assupmtion doesn't hold, do modify this part!
if (pdf==0) {
std::cerr<<"ERROR in LauTimeDepFitModel::setSignalPdfs : The PDF pointer is null."<<std::endl;
return;
}
sigExtraPdf_.push_back(pdf);
}
void LauTimeDepFitModel::setBkgndPdf(const TString& bkgndClass, LauAbsPdf* pdf)
{
if (pdf==0) {
std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : PDF pointer is null." << std::endl;
return;
}
// check that this background name is valid
if ( ! this->validBkgndClass( bkgndClass ) ) {
std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl;
std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
return;
}
UInt_t bkgndID = this->bkgndClassID( bkgndClass );
BkgndPdfs_[bkgndID].push_back(pdf);
usingBkgnd_ = kTRUE;
}
void LauTimeDepFitModel::setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos)
{
phiMix_.value(phiMix); phiMix_.initValue(phiMix); phiMix_.genValue(phiMix); phiMix_.fixed(fixPhiMix);
const Double_t sinPhiMix = TMath::Sin(phiMix);
sinPhiMix_.value(sinPhiMix); sinPhiMix_.initValue(sinPhiMix); sinPhiMix_.genValue(sinPhiMix); sinPhiMix_.fixed(fixPhiMix);
const Double_t cosPhiMix = TMath::Cos(phiMix);
cosPhiMix_.value(cosPhiMix); cosPhiMix_.initValue(cosPhiMix); cosPhiMix_.genValue(cosPhiMix); cosPhiMix_.fixed(fixPhiMix);
useSinCos_ = useSinCos;
phiMixComplex_.setRealPart(cosPhiMix);
phiMixComplex_.setImagPart(-1.0*sinPhiMix);
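// NB the stored complex phase is
//     phiMixComplex_ = cos(phiMix) - i*sin(phiMix) = exp( -i*phiMix )
// which is the factor that multiplies Abar*conj(A) in the interference terms.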
}
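// Configuration sketch (the names of the user-supplied objects are
// illustrative only, not part of the class interface):
//     model.setNSigEvents( nSigYield );
//     model.setSignalDtPdf( signalDtPdf );
//     model.setAmpCoeffSet( coeffSet ); // once per DP component
//     model.setPhiMix( 2.0*LauConstants::beta, kFALSE, kTRUE );
// initialise() is then typically invoked by the fitting framework rather
// than called directly by the user.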
void LauTimeDepFitModel::initialise()
{
// From the initial parameter values calculate the coefficients
// so they can be passed to the signal model
this->updateCoeffs();
// Initialisation
if (this->useDP() == kTRUE) {
this->initialiseDPModels();
}
// Flavour tagging
//flavTag_->initialise();
// Decay-time PDFs
signalDecayTimePdf_->initialise();
if (!this->useDP() && sigExtraPdf_.empty()) {
std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Signal model doesn't exist for any variable."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if (this->useDP() == kTRUE) {
// Check that we have all the Dalitz-plot models
if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) {
std::cerr<<"ERROR in LauTimeDepFitModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
// Next check that, if a given component is being used we've got the
// right number of PDFs for all the variables involved
// TODO - should probably check variable names and so on as well
//UInt_t nsigpdfvars(0);
//for ( LauPdfList::const_iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter ) {
// std::vector<TString> varNames = (*pdf_iter)->varNames();
// for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
// if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
// ++nsigpdfvars;
// }
// }
//}
//if (usingBkgnd_) {
// for (LauBkgndPdfsList::const_iterator bgclass_iter = BkgndPdfsB0_.begin(); bgclass_iter != BkgndPdfsB0_.end(); ++bgclass_iter) {
// UInt_t nbkgndpdfvars(0);
// const LauPdfList& pdfList = (*bgclass_iter);
// for ( LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) {
// std::vector<TString> varNames = (*pdf_iter)->varNames();
// for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
// if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
// ++nbkgndpdfvars;
// }
// }
// }
// if (nbkgndpdfvars != nsigpdfvars) {
// std::cerr << "ERROR in LauTimeDepFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." << std::endl;
// gSystem->Exit(EXIT_FAILURE);
// }
// }
//}
// Clear the vectors of parameter information so we can start from scratch
this->clearFitParVectors();
// Set the fit parameters for signal and background models
this->setSignalDPParameters();
// Set the fit parameters for the decay time models
this->setDecayTimeParameters();
// Set the fit parameters for the extra PDFs
this->setExtraPdfParameters();
// Set the initial bg and signal events
this->setFitNEvents();
// Handle flavour-tagging calibration parameters
this->setCalibParams();
// Add tagging efficiency parameters
this->setTagEffParams();
// Add the efficiency parameters
this->setEffiParams();
//Asymmetry terms AProd and in setAsymmetries()?
//this->setAsymParams();
// Check that we have the expected number of fit variables
const LauParameterPList& fitVars = this->fitPars();
if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_ + nCalibPar_ + nTagEffPar_ + nEffiPar_)) {
std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Number of fit parameters not of expected size."<<std::endl;
std::cerr << fitVars.size() << " != " << nSigDPPar_ << " + " << nDecayTimePar_ << " + " << nExtraPdfPar_ << " + " << nNormPar_ << " + " << nCalibPar_ << " + " << nTagEffPar_ << " + " << nEffiPar_ << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
this->setExtraNtupleVars();
}
void LauTimeDepFitModel::recalculateNormalisation()
{
sigModelB0bar_->recalculateNormalisation();
sigModelB0_->recalculateNormalisation();
sigModelB0bar_->modifyDataTree();
sigModelB0_->modifyDataTree();
this->calcInterferenceTermIntegrals();
}
void LauTimeDepFitModel::initialiseDPModels()
{
if (sigModelB0bar_ == 0) {
std::cerr<<"ERROR in LauTimeDepFitModel::initialiseDPModels : B0bar signal DP model doesn't exist"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if (sigModelB0_ == 0) {
std::cerr<<"ERROR in LauTimeDepFitModel::initialiseDPModels : B0 signal DP model doesn't exist"<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
// Need to check that the number of components we have matches the number that the dynamics has
const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp();
const UInt_t nAmpB0 = sigModelB0_->getnTotAmp();
if ( nAmpB0bar != nAmpB0 ) {
std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( nAmpB0bar != nSigComp_ ) {
std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
std::cout<<"INFO in LauTimeDepFitModel::initialiseDPModels : Initialising signal DP model"<<std::endl;
sigModelB0bar_->initialise(coeffsB0bar_);
sigModelB0_->initialise(coeffsB0_);
fifjEffSum_.clear();
fifjEffSum_.resize(nSigComp_);
for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) {
fifjEffSum_[iAmp].resize(nSigComp_);
}
// calculate the integrals of the A*Abar terms
this->calcInterferenceTermIntegrals();
this->calcInterTermNorm();
// Add backgrounds
if (usingBkgnd_ == kTRUE) {
for (LauBkgndDPModelList::iterator iter = BkgndDPModels_.begin(); iter != BkgndDPModels_.end(); ++iter) {
(*iter)->initialise();
}
}
}
void LauTimeDepFitModel::calcInterferenceTermIntegrals()
{
const std::vector<LauDPPartialIntegralInfo*>& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos();
const std::vector<LauDPPartialIntegralInfo*>& integralInfoListB0 = sigModelB0_->getIntegralInfos();
// TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc.
LauComplex A, Abar, fifjEffSumTerm;
for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) {
for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) {
fifjEffSum_[iAmp][jAmp].zero();
}
}
const UInt_t nIntegralRegions = integralInfoListB0bar.size();
for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) {
const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion];
const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion];
const UInt_t nm13Points = integralInfoB0bar->getnm13Points();
const UInt_t nm23Points = integralInfoB0bar->getnm23Points();
for (UInt_t m13 = 0; m13 < nm13Points; ++m13) {
for (UInt_t m23 = 0; m23 < nm23Points; ++m23) {
const Double_t weight = integralInfoB0bar->getWeight(m13,m23);
const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23);
const Double_t effWeight = eff*weight;
for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) {
A = integralInfoB0->getAmplitude(m13, m23, iAmp);
for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) {
Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp);
fifjEffSumTerm = Abar*A.conj();
fifjEffSumTerm.rescale(effWeight);
fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm;
}
}
}
}
}
}
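// NB the net result of the loops above is, for each pair of components (i,j),
//     fifjEffSum_[i][j] = sum_points eff * weight * Abar_j * conj( A_i )
// i.e. the efficiency-weighted overlap of B0 amplitude i with B0bar
// amplitude j over the DP integration grid(s).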
void LauTimeDepFitModel::calcInterTermNorm()
{
const std::vector<Double_t>& fNormB0bar = sigModelB0bar_->getFNorm();
const std::vector<Double_t>& fNormB0 = sigModelB0_->getFNorm();
LauComplex norm;
for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) {
for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) {
LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj();
coeffTerm *= fifjEffSum_[iAmp][jAmp];
coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]);
norm += coeffTerm;
}
}
norm *= phiMixComplex_;
interTermReNorm_ = 2.0*norm.re();
interTermImNorm_ = 2.0*norm.im();
}
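// Schematically, the sum computed above is
//     N = exp(-i*phiMix) * sum_{i,j} cbar_j * conj(c_i) * fNormB0bar_j * fNormB0_i * fifjEffSum_[i][j]
// and interTermReNorm_ / interTermImNorm_ store 2*Re(N) and 2*Im(N), the
// normalisation coefficients for the interference (sinh/sin) terms.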
void LauTimeDepFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet)
{
// Is there a component called compName in the signal models?
TString compName = coeffSet->name();
TString conjName = sigModelB0bar_->getConjResName(compName);
const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters();
const LauDaughters* daughtersB0 = sigModelB0_->getDaughters();
const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 );
if ( ! sigModelB0bar_->hasResonance(compName) ) {
if ( ! sigModelB0bar_->hasResonance(conjName) ) {
std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<<compName<<"\"."<<std::endl;
return;
}
std::cerr<<"WARNING in LauTimeDepFitModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<<compName<<"\" but does contain the conjugate \""<<conjName<<"\", resetting name to use the conjugate."<<std::endl;
TString tmp = compName;
compName = conjName;
conjName = tmp;
coeffSet->name( compName );
}
if ( conjugate ) {
if ( ! sigModelB0_->hasResonance(conjName) ) {
std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<<conjName<<"\"."<<std::endl;
return;
}
} else {
if ( ! sigModelB0_->hasResonance(compName) ) {
std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<<compName<<"\"."<<std::endl;
return;
}
}
// Do we already have it in our list of names?
for (std::vector<LauAbsCoeffSet*>::const_iterator iter=coeffPars_.begin(); iter!=coeffPars_.end(); ++iter) {
if ((*iter)->name() == compName) {
std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : Have already set coefficients for \""<<compName<<"\"."<<std::endl;
return;
}
}
coeffSet->index(nSigComp_);
coeffPars_.push_back(coeffSet);
TString parName = coeffSet->baseName(); parName += "FitFracAsym";
fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0));
acp_.push_back(coeffSet->acp());
++nSigComp_;
std::cout<<"INFO in LauTimeDepFitModel::setAmpCoeffSet : Added coefficients for component \""<<compName<<"\" to the fit model."<<std::endl;
}
void LauTimeDepFitModel::calcAsymmetries(Bool_t initValues)
{
// Calculate the CP asymmetries
// Also calculate the fit fraction asymmetries
for (UInt_t i = 0; i < nSigComp_; i++) {
acp_[i] = coeffPars_[i]->acp();
LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value());
Double_t asym = asymmCalc.getAsymmetry();
fitFracAsymm_[i].value(asym);
if (initValues) {
fitFracAsymm_[i].genValue(asym);
fitFracAsymm_[i].initValue(asym);
}
}
}
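// NB for each component the stored fit-fraction asymmetry is, schematically,
//     A_FF = ( FFbar_ii - FF_ii ) / ( FFbar_ii + FF_ii )
// as built by LauAsymmCalc from the diagonal B0bar and B0 fit fractions.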
void LauTimeDepFitModel::setSignalDPParameters()
{
// Set the fit parameters for the signal model.
nSigDPPar_ = 0;
if ( ! this->useDP() ) {
return;
}
std::cout << "INFO in LauTimeDepFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl;
// Place isobar coefficient parameters in vector of fit variables
LauParameterPList& fitVars = this->fitPars();
for (UInt_t i = 0; i < nSigComp_; ++i) {
LauParameterPList pars = coeffPars_[i]->getParameters();
for (LauParameterPList::iterator iter = pars.begin(); iter != pars.end(); ++iter) {
if ( !(*iter)->clone() ) {
fitVars.push_back(*iter);
++nSigDPPar_;
}
}
}
// Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector
// Need to make sure that they are unique because some might appear in both DP models
LauParameterPSet& resVars = this->resPars();
resVars.clear();
LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters();
LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters();
for ( LauParameterPList::iterator iter = sigDPParsB0bar.begin(); iter != sigDPParsB0bar.end(); ++iter ) {
if ( resVars.insert(*iter).second ) {
fitVars.push_back(*iter);
++nSigDPPar_;
}
}
for ( LauParameterPList::iterator iter = sigDPParsB0.begin(); iter != sigDPParsB0.end(); ++iter ) {
if ( resVars.insert(*iter).second ) {
fitVars.push_back(*iter);
++nSigDPPar_;
}
}
}
UInt_t LauTimeDepFitModel::addParametersToFitList(std::vector<LauDecayTimePdf*> theVector)
{
UInt_t counter(0);
LauParameterPList& fitVars = this->fitPars();
// loop through the vector of PDFs
for (std::vector<LauDecayTimePdf*>::iterator iter = theVector.begin(); iter != theVector.end(); ++iter) {
// grab the pdf and then its parameters
LauDecayTimePdf* thePdf = *iter;
LauAbsRValuePList& rvalues = thePdf->getParameters();
// loop through the parameters
for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) {
LauParameterPList params = (*pars_iter)->getPars();
for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) {
// for each "original" parameter add it to the list of fit parameters and increment the counter
if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() ||
(this->twoStageFit() && (*params_iter)->secondStage()) ) ) {
fitVars.push_back(*params_iter);
++counter;
}
}
}
}
return counter;
}
UInt_t LauTimeDepFitModel::addParametersToFitList(LauPdfList* theList)
{
UInt_t counter(0);
counter += this->addFitParameters(*(theList));
return counter;
}
void LauTimeDepFitModel::setDecayTimeParameters()
{
nDecayTimePar_ = 0;
std::cout << "INFO in LauTimeDepFitModel::setDecayTimeParameters : Setting the initial fit parameters of the DecayTime Pdfs." << std::endl;
LauParameterPList& fitVars = this->fitPars();
// Loop over the Dt PDFs
LauAbsRValuePList& rvalues = signalDecayTimePdf_->getParameters();
// loop through the parameters
for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) {
LauParameterPList params = (*pars_iter)->getPars();
for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) {
// for each "original" parameter add it to the list of fit parameters and increment the counter
if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() ||
(this->twoStageFit() && (*params_iter)->secondStage()) ) ) {
fitVars.push_back(*params_iter);
++nDecayTimePar_;
}
}
}
if (usingBkgnd_){
- nDecayTimePar_ += this->addParametersToFitList(backgroundDecayTimePdfs_);
+ nDecayTimePar_ += this->addParametersToFitList(BkgndDecayTimePdfs_);
}
if (useSinCos_) {
if ( not sinPhiMix_.fixed() ) {
fitVars.push_back(&sinPhiMix_);
fitVars.push_back(&cosPhiMix_);
nDecayTimePar_ += 2;
}
} else {
if ( not phiMix_.fixed() ) {
fitVars.push_back(&phiMix_);
++nDecayTimePar_;
}
}
}
void LauTimeDepFitModel::setExtraPdfParameters()
{
// Include the parameters of the PDF for each tagging category in the fit
// NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE)
// With the new "cloned parameter" scheme only "original" parameters are passed to the fit.
// Their clones are updated automatically when the originals are updated.
nExtraPdfPar_ = 0;
std::cout << "INFO in LauTimeDepFitModel::setExtraPdfParameters : Setting the initial fit parameters of the extra Pdfs." << std::endl;
nExtraPdfPar_ += this->addFitParameters(sigExtraPdf_);
if (usingBkgnd_ == kTRUE) {
for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) {
nExtraPdfPar_ += this->addFitParameters(*iter);
}
}
}
void LauTimeDepFitModel::setFitNEvents()
{
nNormPar_ = 0;
std::cout << "INFO in LauTimeDepFitModel::setFitNEvents : Setting the initial fit parameters of the signal and ackground yields." << std::endl;
// Initialise the total number of events to be the sum of all the hypotheses
Double_t nTotEvts = signalEvents_->value();
this->eventsPerExpt(TMath::FloorNint(nTotEvts));
LauParameterPList& fitVars = this->fitPars();
// if doing an extended ML fit add the signal fraction into the fit parameters
if (this->doEMLFit()) {
std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for signal and background components..."<<std::endl;
fitVars.push_back(signalEvents_);
++nNormPar_;
} else {
std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for background components (and hence signal)..."<<std::endl;
}
// if not using the DP in the model we need an explicit signal asymmetry parameter
if (this->useDP() == kFALSE) {
fitVars.push_back(signalAsym_);
++nNormPar_;
}
// TODO arguably should delegate this
//LauTagCatParamMap& signalTagCatFrac = flavTag_->getSignalTagCatFrac();
// tagging-category fractions for signal events
//for (LauTagCatParamMap::iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) {
// if (iter == signalTagCatFrac.begin()) {
// continue;
// }
// LauParameter* par = &((*iter).second);
// fitVars.push_back(par);
// ++nNormPar_;
//}
// Backgrounds
if (usingBkgnd_ == kTRUE) {
for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) {
std::vector<LauParameter*> parameters = (*iter)->getPars();
for ( LauParameter* parameter : parameters ) {
if(!parameter->clone()) {
fitVars.push_back(parameter);
++nNormPar_;
}
}
}
for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) {
std::vector<LauParameter*> parameters = (*iter)->getPars();
for ( LauParameter* parameter : parameters ) {
if(!parameter->clone()) {
fitVars.push_back(parameter);
++nNormPar_;
}
}
}
}
}
void LauTimeDepFitModel::setAsymParams()
{
nAsymPar_ = 0;
LauParameterPList& fitVars = this->fitPars();
if (!AProd_.fixed()){
fitVars.push_back(&AProd_);
nAsymPar_+=1;
}
}
void LauTimeDepFitModel::setTagEffParams()
{
nTagEffPar_ = 0;
Bool_t useAltPars = flavTag_->getUseAveDelta();
std::cout << "INFO in LauTimeDepFitModel::setTagEffParams : Setting the initial fit parameters for flavour tagging efficiencies." << std::endl;
if (useAltPars){
std::vector<LauParameter*> tageff_ave = flavTag_->getTagEffAve();
std::vector<LauParameter*> tageff_delta = flavTag_->getTagEffDelta();
LauParameterPList& fitVars = this->fitPars();
for(std::vector<LauParameter*>::iterator iter = tageff_ave.begin(); iter != tageff_ave.end(); ++iter){
LauParameter* eff = *iter;
if (eff->fixed()){continue;}
fitVars.push_back(eff);
++nTagEffPar_;
}
for(std::vector<LauParameter*>::iterator iter = tageff_delta.begin(); iter != tageff_delta.end(); ++iter){
LauParameter* eff = *iter;
if (eff->fixed()){continue;}
fitVars.push_back(eff);
++nTagEffPar_;
}
} else {
std::vector<LauParameter*> tageff_b0 = flavTag_->getTagEffB0();
std::vector<LauParameter*> tageff_b0bar = flavTag_->getTagEffB0bar();
LauParameterPList& fitVars = this->fitPars();
for(std::vector<LauParameter*>::iterator iter = tageff_b0.begin(); iter != tageff_b0.end(); ++iter){
LauParameter* eff = *iter;
if (eff->fixed()){continue;}
fitVars.push_back(eff);
++nTagEffPar_;
}
for(std::vector<LauParameter*>::iterator iter = tageff_b0bar.begin(); iter != tageff_b0bar.end(); ++iter){
LauParameter* eff = *iter;
if (eff->fixed()){continue;}
fitVars.push_back(eff);
++nTagEffPar_;
}
}
}
void LauTimeDepFitModel::setCalibParams()
{
Bool_t useAltPars = flavTag_->getUseAveDelta();
std::cout << "INFO in LauTimeDepFitModel::setCalibParams : Setting the initial fit parameters of the flavour tagging calibration parameters." << std::endl;
if (useAltPars){
std::vector<LauParameter*> p0pars_ave = flavTag_->getCalibP0Ave();
std::vector<LauParameter*> p0pars_delta = flavTag_->getCalibP0Delta();
std::vector<LauParameter*> p1pars_ave = flavTag_->getCalibP1Ave();
std::vector<LauParameter*> p1pars_delta = flavTag_->getCalibP1Delta();
LauParameterPList& fitVars = this->fitPars();
for(std::vector<LauParameter*>::iterator iter = p0pars_ave.begin(); iter != p0pars_ave.end(); ++iter){
LauParameter* p0 = *iter;
if (p0->fixed()){continue;}
fitVars.push_back(p0);
++nCalibPar_;
}
for(std::vector<LauParameter*>::iterator iter = p0pars_delta.begin(); iter != p0pars_delta.end(); ++iter){
LauParameter* p0 = *iter;
if (p0->fixed()){continue;}
fitVars.push_back(p0);
++nCalibPar_;
}
for(std::vector<LauParameter*>::iterator iter = p1pars_ave.begin(); iter != p1pars_ave.end(); ++iter){
LauParameter* p1 = *iter;
if (p1->fixed()){continue;}
fitVars.push_back(p1);
++nCalibPar_;
}
for(std::vector<LauParameter*>::iterator iter = p1pars_delta.begin(); iter != p1pars_delta.end(); ++iter){
LauParameter* p1 = *iter;
if (p1->fixed()){continue;}
fitVars.push_back(p1);
++nCalibPar_;
}
} else {
std::vector<LauParameter*> p0pars_b0 = flavTag_->getCalibP0B0();
std::vector<LauParameter*> p0pars_b0bar = flavTag_->getCalibP0B0bar();
std::vector<LauParameter*> p1pars_b0 = flavTag_->getCalibP1B0();
std::vector<LauParameter*> p1pars_b0bar = flavTag_->getCalibP1B0bar();
LauParameterPList& fitVars = this->fitPars();
for(std::vector<LauParameter*>::iterator iter = p0pars_b0.begin(); iter != p0pars_b0.end(); ++iter){
LauParameter* p0 = *iter;
if (p0->fixed()){continue;}
fitVars.push_back(p0);
++nCalibPar_;
}
for(std::vector<LauParameter*>::iterator iter = p0pars_b0bar.begin(); iter != p0pars_b0bar.end(); ++iter){
LauParameter* p0 = *iter;
if (p0->fixed()){continue;}
fitVars.push_back(p0);
++nCalibPar_;
}
for(std::vector<LauParameter*>::iterator iter = p1pars_b0.begin(); iter != p1pars_b0.end(); ++iter){
LauParameter* p1 = *iter;
if (p1->fixed()){continue;}
fitVars.push_back(p1);
++nCalibPar_;
}
for(std::vector<LauParameter*>::iterator iter = p1pars_b0bar.begin(); iter != p1pars_b0bar.end(); ++iter){
LauParameter* p1 = *iter;
if (p1->fixed()){continue;}
fitVars.push_back(p1);
++nCalibPar_;
}
}
}
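// The eight loops above (and those in setTagEffParams) repeat one pattern;
// a local helper along these lines (a sketch, not part of the class
// interface) would remove the duplication:
//
//     auto addFloating = [&fitVars, this]( const std::vector<LauParameter*>& pars ) {
//         for ( LauParameter* par : pars ) {
//             if ( ! par->fixed() ) { fitVars.push_back( par ); ++nCalibPar_; }
//         }
//     };
//     addFloating( flavTag_->getCalibP0Ave() );
//     addFloating( flavTag_->getCalibP0Delta() );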
void LauTimeDepFitModel::setEffiParams()
{
nEffiPar_ = 0;
LauParameterPList& fitVars = this->fitPars();
LauParameterPList& effiPars = signalDecayTimePdf_->getEffiPars();
// If all of the knots are fixed we have nothing to do
LauParamFixed isFixed;
if ( std::all_of( effiPars.begin(), effiPars.end(), isFixed ) ) {
return;
}
// If any knots are floating, add all knots (fixed or floating)
for(LauParameterPList::iterator iter = effiPars.begin(); iter != effiPars.end(); ++iter){
LauParameter* par = *iter;
fitVars.push_back(par);
++nEffiPar_;
}
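// NB as soon as one knot floats, the complete knot set (fixed knots
// included) is registered, so the spline coefficients always enter the
// fit list as a single, complete block.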
}
void LauTimeDepFitModel::setExtraNtupleVars()
{
// Set-up other parameters derived from the fit results, e.g. fit fractions.
if (this->useDP() != kTRUE) {
return;
}
// First clear the vectors so we start from scratch
this->clearExtraVarVectors();
LauParameterList& extraVars = this->extraPars();
// Add the B0 and B0bar fit fractions for each signal component
fitFracB0bar_ = sigModelB0bar_->getFitFractions();
if (fitFracB0bar_.size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<<fitFracB0bar_.size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
for (UInt_t i(0); i<nSigComp_; ++i) {
if (fitFracB0bar_[i].size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<<fitFracB0bar_[i].size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
for (UInt_t i(0); i<nSigComp_; ++i) {
for (UInt_t j = i; j < nSigComp_; j++) {
TString name = fitFracB0bar_[i][j].name();
name.Insert( name.Index("FitFrac"), "B0bar" );
fitFracB0bar_[i][j].name(name);
extraVars.push_back(fitFracB0bar_[i][j]);
}
}
fitFracB0_ = sigModelB0_->getFitFractions();
if (fitFracB0_.size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<<fitFracB0_.size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
for (UInt_t i(0); i<nSigComp_; ++i) {
if (fitFracB0_[i].size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<<fitFracB0_[i].size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
for (UInt_t i(0); i<nSigComp_; ++i) {
for (UInt_t j = i; j < nSigComp_; j++) {
TString name = fitFracB0_[i][j].name();
name.Insert( name.Index("FitFrac"), "B0" );
fitFracB0_[i][j].name(name);
extraVars.push_back(fitFracB0_[i][j]);
}
}
// Calculate the ACPs and FitFrac asymmetries
this->calcAsymmetries(kTRUE);
// Add the Fit Fraction asymmetry for each signal component
for (UInt_t i = 0; i < nSigComp_; i++) {
extraVars.push_back(fitFracAsymm_[i]);
}
// Add the calculated CP asymmetry for each signal component
for (UInt_t i = 0; i < nSigComp_; i++) {
extraVars.push_back(acp_[i]);
}
// Now add in the DP efficiency values
Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue();
meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar);
extraVars.push_back(meanEffB0bar_);
Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue();
meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0);
extraVars.push_back(meanEffB0_);
// Also add in the DP rates
Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue();
DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar);
extraVars.push_back(DPRateB0bar_);
Double_t initDPRateB0 = sigModelB0_->getDPRate().initValue();
DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0);
extraVars.push_back(DPRateB0_);
}
void LauTimeDepFitModel::setAsymmetries(const Double_t AProd, const Bool_t AProdFix){
AProd_.value(AProd);
AProd_.fixed(AProdFix);
}
void LauTimeDepFitModel::finaliseFitResults(const TString& tablePrefixName)
{
// Retrieve parameters from the fit results for calculations and toy generation
// and eventually store these in output root ntuples/text files
// Now take the fit parameters and update them as necessary
// i.e. to make mag > 0.0, phase in the right range.
// This function will also calculate any other values, such as the
// fit fractions, using any errors provided by fitParErrors as appropriate.
// Also obtain the pull values: (measured - generated)/(average error)
if (this->useDP() == kTRUE) {
for (UInt_t i = 0; i < nSigComp_; ++i) {
// Check whether we have "a > 0.0", and phases in the right range
coeffPars_[i]->finaliseValues();
}
}
// update the pulls on the event fractions and asymmetries
if (this->doEMLFit()) {
signalEvents_->updatePull();
}
if (this->useDP() == kFALSE) {
signalAsym_->updatePull();
}
// Finalise the pulls on the decay time parameters
signalDecayTimePdf_->updatePulls();
// and for backgrounds if required
if (usingBkgnd_){
- for (std::vector<LauDecayTimePdf*>::iterator iter = backgroundDecayTimePdfs_.begin(); iter != backgroundDecayTimePdfs_.end(); ++iter) {
+ for (std::vector<LauDecayTimePdf*>::iterator iter = BkgndDecayTimePdfs_.begin(); iter != BkgndDecayTimePdfs_.end(); ++iter) {
LauDecayTimePdf* pdf = *iter;
pdf->updatePulls();
}
}
if (useSinCos_) {
if ( not sinPhiMix_.fixed() ) {
sinPhiMix_.updatePull();
cosPhiMix_.updatePull();
}
} else {
this->checkMixingPhase();
}
if (usingBkgnd_ == kTRUE) {
for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) {
std::vector<LauParameter*> parameters = (*iter)->getPars();
for ( LauParameter* parameter : parameters ) {
parameter->updatePull();
}
}
for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) {
std::vector<LauParameter*> parameters = (*iter)->getPars();
for ( LauParameter* parameter : parameters ) {
parameter->updatePull();
}
}
}
// Update the pulls on all the extra PDFs' parameters
this->updateFitParameters(sigExtraPdf_);
if (usingBkgnd_ == kTRUE) {
for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) {
this->updateFitParameters(*iter);
}
}
// Fill the fit results to the ntuple
// update the coefficients and then calculate the fit fractions and ACP's
if (this->useDP() == kTRUE) {
this->updateCoeffs();
sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo();
sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo();
LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions();
if (fitFracB0bar.size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<<fitFracB0bar.size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
for (UInt_t i(0); i<nSigComp_; ++i) {
if (fitFracB0bar[i].size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<<fitFracB0bar[i].size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
LauParArray fitFracB0 = sigModelB0_->getFitFractions();
if (fitFracB0.size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<<fitFracB0.size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
for (UInt_t i(0); i<nSigComp_; ++i) {
if (fitFracB0[i].size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<<fitFracB0[i].size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
for (UInt_t i(0); i<nSigComp_; ++i) {
for (UInt_t j(i); j<nSigComp_; ++j) {
fitFracB0bar_[i][j].value(fitFracB0bar[i][j].value());
fitFracB0_[i][j].value(fitFracB0[i][j].value());
}
}
meanEffB0bar_.value(sigModelB0bar_->getMeanEff().value());
meanEffB0_.value(sigModelB0_->getMeanEff().value());
DPRateB0bar_.value(sigModelB0bar_->getDPRate().value());
DPRateB0_.value(sigModelB0_->getDPRate().value());
this->calcAsymmetries();
// Then store the final fit parameters, and any extra parameters for
// the signal model (e.g. fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate)
this->clearExtraVarVectors();
LauParameterList& extraVars = this->extraPars();
for (UInt_t i(0); i<nSigComp_; ++i) {
for (UInt_t j(i); j<nSigComp_; ++j) {
extraVars.push_back(fitFracB0bar_[i][j]);
}
}
for (UInt_t i(0); i<nSigComp_; ++i) {
for (UInt_t j(i); j<nSigComp_; ++j) {
extraVars.push_back(fitFracB0_[i][j]);
}
}
for (UInt_t i(0); i<nSigComp_; ++i) {
extraVars.push_back(fitFracAsymm_[i]);
}
for (UInt_t i(0); i<nSigComp_; ++i) {
extraVars.push_back(acp_[i]);
}
extraVars.push_back(meanEffB0bar_);
extraVars.push_back(meanEffB0_);
extraVars.push_back(DPRateB0bar_);
extraVars.push_back(DPRateB0_);
this->printFitFractions(std::cout);
this->printAsymmetries(std::cout);
}
const LauParameterPList& fitVars = this->fitPars();
const LauParameterList& extraVars = this->extraPars();
LauFitNtuple* ntuple = this->fitNtuple();
ntuple->storeParsAndErrors(fitVars, extraVars);
// find out the correlation matrix for the parameters
ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix());
// Fill the data into ntuple
ntuple->updateFitNtuple();
// Print out the partial fit fractions, phases and the
// averaged efficiency, reweighted by the dynamics (and anything else)
if (this->writeLatexTable()) {
TString sigOutFileName(tablePrefixName);
sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex";
this->writeOutTable(sigOutFileName);
}
}
void LauTimeDepFitModel::printFitFractions(std::ostream& output)
{
// Print out Fit Fractions, total DP rate and mean efficiency
// First for the B0bar events
for (UInt_t i = 0; i < nSigComp_; i++) {
const TString compName(coeffPars_[i]->name());
output<<"B0bar FitFraction for component "<<i<<" ("<<compName<<") = "<<fitFracB0bar_[i][i]<<std::endl;
}
output<<"B0bar overall DP rate (integral of matrix element squared) = "<<DPRateB0bar_<<std::endl;
output<<"B0bar average efficiency weighted by whole DP dynamics = "<<meanEffB0bar_<<std::endl;
// Then for the B0 sample
for (UInt_t i = 0; i < nSigComp_; i++) {
const TString compName(coeffPars_[i]->name());
const TString conjName(sigModelB0bar_->getConjResName(compName));
output<<"B0 FitFraction for component "<<i<<" ("<<conjName<<") = "<<fitFracB0_[i][i]<<std::endl;
}
output<<"B0 overall DP rate (integral of matrix element squared) = "<<DPRateB0_<<std::endl;
output<<"B0 average efficiency weighted by whole DP dynamics = "<<meanEffB0_<<std::endl;
}
void LauTimeDepFitModel::printAsymmetries(std::ostream& output)
{
for (UInt_t i = 0; i < nSigComp_; i++) {
const TString compName(coeffPars_[i]->name());
output<<"Fit Fraction asymmetry for component "<<i<<" ("<<compName<<") = "<<fitFracAsymm_[i]<<std::endl;
}
for (UInt_t i = 0; i < nSigComp_; i++) {
const TString compName(coeffPars_[i]->name());
output<<"ACP for component "<<i<<" ("<<compName<<") = "<<acp_[i].value()<<" +- "<<acp_[i].error()<<std::endl;
}
}
void LauTimeDepFitModel::writeOutTable(const TString& outputFile)
{
// Write out the results of the fit to a tex-readable table
std::ofstream fout(outputFile);
LauPrint print;
std::cout<<"INFO in LauTimeDepFitModel::writeOutTable : Writing out results of the fit to the tex file "<<outputFile<<std::endl;
if (this->useDP() == kTRUE) {
// print the fit coefficients in one table
coeffPars_.front()->printTableHeading(fout);
for (UInt_t i = 0; i < nSigComp_; i++) {
coeffPars_[i]->printTableRow(fout);
}
fout<<"\\hline"<<std::endl;
fout<<"\\end{tabular}"<<std::endl<<std::endl;
// print the fit fractions in another
fout<<"\\begin{tabular}{|l|c|c|c|c|}"<<std::endl;
fout<<"\\hline"<<std::endl;
fout<<"Component & \\Bzb\\ Fit Fraction & \\Bz\\ Fit Fraction & Fit Fraction Asymmetry & $A_{\\CP}$ \\\\"<<std::endl;
fout<<"\\hline"<<std::endl;
Double_t fitFracSumB0bar(0.0);
Double_t fitFracSumB0(0.0);
for (UInt_t i = 0; i < nSigComp_; i++) {
TString resName = coeffPars_[i]->name();
resName = resName.ReplaceAll("_", "\\_");
fout<<resName<<" & $";
Double_t fitFracB0bar = fitFracB0bar_[i][i].value();
fitFracSumB0bar += fitFracB0bar;
print.printFormat(fout, fitFracB0bar);
fout << "$ & $" << std::endl;
Double_t fitFracB0 = fitFracB0_[i][i].value();
fitFracSumB0 += fitFracB0;
print.printFormat(fout, fitFracB0);
fout << "$ & $" << std::endl;
Double_t fitFracAsymm = fitFracAsymm_[i].value();
print.printFormat(fout, fitFracAsymm);
fout << "$ & $" << std::endl;
Double_t acp = acp_[i].value();
Double_t acpErr = acp_[i].error();
print.printFormat(fout, acp);
fout<<" \\pm ";
print.printFormat(fout, acpErr);
fout<<"$ \\\\"<<std::endl;
}
fout<<"\\hline"<<std::endl;
// Also print out sum of fit fractions
fout << "Fit Fraction Sum = & $";
print.printFormat(fout, fitFracSumB0bar);
fout << "$ & $";
print.printFormat(fout, fitFracSumB0);
fout << "$ & & \\\\" << std::endl;
fout << "\\hline \n\\hline" << std::endl;
fout << "DP rate = & $";
print.printFormat(fout, DPRateB0bar_.value());
fout << "$ & $";
print.printFormat(fout, DPRateB0_.value());
fout << "$ & & \\\\" << std::endl;
fout << "$< \\varepsilon > =$ & $";
print.printFormat(fout, meanEffB0bar_.value());
fout << "$ & $";
print.printFormat(fout, meanEffB0_.value());
fout << "$ & & \\\\" << std::endl;
if (useSinCos_) {
fout << "$\\sinPhiMix =$ & $";
print.printFormat(fout, sinPhiMix_.value());
fout << " \\pm ";
print.printFormat(fout, sinPhiMix_.error());
fout << "$ & & & & & & & \\\\" << std::endl;
fout << "$\\cosPhiMix =$ & $";
print.printFormat(fout, cosPhiMix_.value());
fout << " \\pm ";
print.printFormat(fout, cosPhiMix_.error());
fout << "$ & & & & & & & \\\\" << std::endl;
} else {
fout << "$\\phiMix =$ & $";
print.printFormat(fout, phiMix_.value());
fout << " \\pm ";
print.printFormat(fout, phiMix_.error());
fout << "$ & & & & & & & \\\\" << std::endl;
}
fout << "\\hline \n\\end{tabular}" << std::endl;
}
if (!sigExtraPdf_.empty()) {
fout<<"\\begin{tabular}{|l|c|}"<<std::endl;
fout<<"\\hline"<<std::endl;
fout<<"\\Extra Signal PDFs' Parameters: & \\\\"<<std::endl;
this->printFitParameters(sigExtraPdf_, fout);
if (usingBkgnd_ == kTRUE && !BkgndPdfs_.empty()) {
fout << "\\hline" << std::endl;
fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl;
for (LauBkgndPdfsList::const_iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) {
this->printFitParameters(*iter, fout);
}
}
fout<<"\\hline \n\\end{tabular}"<<std::endl<<std::endl;
}
}
void LauTimeDepFitModel::checkInitFitParams()
{
// Update the number of signal events to be total-sum(background events)
this->updateSigEvents();
// Check whether we want to have randomised initial fit parameters for the signal model
if (this->useRandomInitFitPars() == kTRUE) {
this->randomiseInitFitPars();
}
}
void LauTimeDepFitModel::randomiseInitFitPars()
{
// Only randomise those parameters that are not fixed!
std::cout<<"INFO in LauTimeDepFitModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<<std::endl;
for (UInt_t i = 0; i < nSigComp_; i++) {
coeffPars_[i]->randomiseInitValues();
}
phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi);
if (useSinCos_) {
sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue()));
cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue()));
}
}
LauTimeDepFitModel::LauGenInfo LauTimeDepFitModel::eventsToGenerate()
{
// Determine the number of events to generate for each hypothesis
// If we're smearing then smear each one individually
// NB this smearing has to be done separately for each tagging category as well
LauGenInfo nEvtsGen;
// Signal
// If we're including the DP and decay time we can't decide on the tag
// yet, since it depends on the whole DP+dt PDF, however, if
// we're not then we need to decide.
Double_t evtWeight(1.0);
Double_t nEvts = signalEvents_->genValue();
if ( nEvts < 0.0 ) {
evtWeight = -1.0;
nEvts = TMath::Abs( nEvts );
}
Double_t sigAsym(0.0);
if (this->useDP() == kFALSE) {
sigAsym = signalAsym_->genValue();
//TODO fill in here if we care
} else {
Double_t rateB0bar = sigModelB0bar_->getDPRate().value();
Double_t rateB0 = sigModelB0_->getDPRate().value();
if ( rateB0bar+rateB0 > 1e-30) {
sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0);
}
//for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) {
// const LauParameter& par = iter->second;
// Double_t eventsbyTagCat = par.value() * nEvts;
// if (this->doPoissonSmearing()) {
// eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat);
// }
// eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight );
//}
//nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later.
nEvtsGen["signal"] = std::make_pair( nEvts, evtWeight );
}
std::cout<<"INFO in LauTimeDepFitModel::eventsToGenerate : Generating toy MC with:"<<std::endl;
std::cout<<" : Signal asymmetry = "<<sigAsym<<" and number of signal events = "<<signalEvents_->genValue()<<std::endl;
return nEvtsGen;
}
Bool_t LauTimeDepFitModel::genExpt()
{
// Routine to generate toy Monte Carlo events according to the various models we have defined.
// Determine the number of events to generate for each hypothesis
LauGenInfo nEvts = this->eventsToGenerate();
Bool_t genOK(kTRUE);
Int_t evtNum(0);
const UInt_t nBkgnds = this->nBkgndClasses();
std::vector<TString> bkgndClassNames(nBkgnds);
std::vector<TString> bkgndClassNamesGen(nBkgnds);
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
TString name( this->bkgndClassName(iBkgnd) );
bkgndClassNames[iBkgnd] = name;
bkgndClassNamesGen[iBkgnd] = "gen"+name;
}
// Loop over the hypotheses and generate the appropriate number of
// events for each one
for (LauGenInfo::const_iterator iter = nEvts.begin(); iter != nEvts.end(); ++iter) {
// find the category of events to generate (e.g. signal or a background class name)
const TString& evtCategory(iter->first);
// Number of events
Int_t nEvtsGen( iter->second.first );
// get the event weight for this category
const Double_t evtWeight( iter->second.second );
for (Int_t iEvt(0); iEvt<nEvtsGen; ++iEvt) {
this->setGenNtupleDoubleBranchValue( "evtWeight", evtWeight );
if (evtCategory == "signal") {
this->setGenNtupleIntegerBranchValue("genSig",1);
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 );
}
// All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_
// In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_
genOK = this->generateSignalEvent();
} else {
this->setGenNtupleIntegerBranchValue("genSig",0);
UInt_t bkgndID(0);
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
Int_t gen(0);
if ( bkgndClassNames[iBkgnd] == evtCategory ) {
gen = 1;
bkgndID = iBkgnd;
}
this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen );
}
genOK = this->generateBkgndEvent(bkgndID);
}
if (!genOK) {
// If there was a problem with the generation then break out and return.
// The problem model will have adjusted itself so that all should be OK next time.
break;
}
if (this->useDP() == kTRUE) {
this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple
}
// Store the event's tag and tagging category
this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_);
const TString& trueTagVarName { flavTag_->getTrueTagVarName() };
if ( trueTagVarName != "" ) {
this->setGenNtupleIntegerBranchValue(trueTagVarName, curEvtTrueTagFlv_);
}
if ( cpEigenValue_ == QFS ) {
const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() };
if ( decayFlvVarName != "" ) {
this->setGenNtupleIntegerBranchValue(decayFlvVarName, curEvtDecayFlv_);
}
}
const std::vector<TString>& tagVarNames { flavTag_->getTagVarNames() };
const std::vector<TString>& mistagVarNames { flavTag_->getMistagVarNames() };
// Loop over the taggers - values set via generateSignalEvent
const ULong_t nTaggers {flavTag_->getNTaggers()};
for (ULong_t i=0; i<nTaggers; ++i){
this->setGenNtupleIntegerBranchValue(tagVarNames[i], curEvtTagFlv_[i]);
this->setGenNtupleDoubleBranchValue(mistagVarNames[i], curEvtMistag_[i]);
}
// Store the event number (within this experiment)
// and then increment it
this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum);
++evtNum;
// Write the values into the tree
this->fillGenNtupleBranches();
// Print an occasional progress message
if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFitModel::genExpt : Generated event number "<<iEvt<<" out of "<<nEvtsGen<<" "<<evtCategory<<" events."<<std::endl;}
}
} //end of loop over species and tagFlv.
if (this->useDP() && genOK) {
sigModelB0bar_->checkToyMC(kTRUE);
sigModelB0_->checkToyMC(kTRUE);
std::cout<<"aSqMaxSet = "<<aSqMaxSet_<<" and aSqMaxVar = "<<aSqMaxVar_<<std::endl;
// Get the fit fractions if they're to be written into the latex table
if (this->writeLatexTable()) {
LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions();
if (fitFracB0bar.size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<<fitFracB0bar.size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
for (UInt_t i(0); i<nSigComp_; ++i) {
if (fitFracB0bar[i].size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<<fitFracB0bar[i].size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
LauParArray fitFracB0 = sigModelB0_->getFitFractions();
if (fitFracB0.size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<<fitFracB0.size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
for (UInt_t i(0); i<nSigComp_; ++i) {
if (fitFracB0[i].size() != nSigComp_) {
std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<<fitFracB0[i].size()<<std::endl;
gSystem->Exit(EXIT_FAILURE);
}
}
for (UInt_t i(0); i<nSigComp_; ++i) {
for (UInt_t j(i); j<nSigComp_; ++j) {
fitFracB0bar_[i][j].value(fitFracB0bar[i][j].value());
fitFracB0_[i][j].value(fitFracB0[i][j].value());
}
}
meanEffB0bar_.value(sigModelB0bar_->getMeanEff().value());
meanEffB0_.value(sigModelB0_->getMeanEff().value());
DPRateB0bar_.value(sigModelB0bar_->getDPRate().value());
DPRateB0_.value(sigModelB0_->getDPRate().value());
}
}
// If we're reusing embedded events or if the generation is being
// reset then clear the lists of used events
if (reuseSignal_ || !genOK) {
if (signalTree_) {
signalTree_->clearUsedList();
}
}
for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
LauEmbeddedData* data = bkgndTree_[bkgndID];
if (reuseBkgnd_[bkgndID] || !genOK) {
if (data) {
data->clearUsedList();
}
}
}
return genOK;
}
Bool_t LauTimeDepFitModel::generateSignalEvent()
{
// Generate signal event, including SCF if necessary.
// DP:DecayTime generation follows.
// If it's ok, we then generate mES, DeltaE, Fisher/NN...
Bool_t genOK(kTRUE);
Bool_t generatedEvent(kFALSE);
Bool_t doSquareDP = kinematicsB0bar_->squareDP();
doSquareDP &= kinematicsB0_->squareDP();
LauKinematics* kinematics(kinematicsB0bar_);
if (this->useDP()) {
if (signalTree_) {
signalTree_->getEmbeddedEvent(kinematics);
//curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv"));
curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName());
curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName());
if (signalTree_->haveBranch("mcMatch")) {
Int_t match = TMath::Nint(signalTree_->getValue("mcMatch"));
if (match) {
this->setGenNtupleIntegerBranchValue("genTMSig",1);
this->setGenNtupleIntegerBranchValue("genSCFSig",0);
} else {
this->setGenNtupleIntegerBranchValue("genTMSig",0);
this->setGenNtupleIntegerBranchValue("genSCFSig",1);
}
}
} else {
nGenLoop_ = 0;
// Now generate from the combined DP / decay-time PDF
while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) {
curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown;
curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown;
// First choose the true tag, accounting for the production asymmetry
// CONVENTION WARNING regarding meaning of sign of AProd
Double_t random = LauRandom::randomFun()->Rndm();
if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) {
curEvtTrueTagFlv_ = LauFlavTag::Flavour::B;
} else {
curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar;
}
// Generate the DP position
Double_t m13Sq{0.0}, m23Sq{0.0};
kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq);
// Next, calculate the total A and Abar for the given DP position
sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq);
sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq);
// Generate decay time
const Double_t tMin = signalDecayTimePdf_->minAbscissa();
const Double_t tMax = signalDecayTimePdf_->maxAbscissa();
curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin;
// Generate the decay time error (NB the kTRUE forces the generation of a new value)
curEvtDecayTimeErr_ = signalDecayTimePdf_->generateError(kTRUE);
// Calculate all the decay time info
signalDecayTimePdf_->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_);
// Retrieve the amplitudes and efficiency from the dynamics
const LauComplex& Abar { sigModelB0bar_->getEvtDPAmp() };
const LauComplex& A { sigModelB0_->getEvtDPAmp() };
const Double_t ASq { A.abs2() };
const Double_t AbarSq { Abar.abs2() };
const Double_t dpEff { sigModelB0bar_->getEvtEff() };
// Also retrieve all the decay time terms
const Double_t dtCos { signalDecayTimePdf_->getCosTerm() };
const Double_t dtSin { signalDecayTimePdf_->getSinTerm() };
const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() };
const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() };
// and the decay time acceptance
const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() };
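// Accept/reject on the (unnormalised) time-dependent intensity. With
// inter = Abar * conj(A) * exp(-i*phiMix), eta = +1 (CP-even) or -1 (CP-odd),
// and q the true production flavour, the CP-eigenstate case below computes
//   I = [ (|A|^2+|Abar|^2)*coshTerm + 2*eta*Re(inter)*sinhTerm
//         + q*( (|A|^2-|Abar|^2)*cosTerm - 2*eta*Im(inter)*sinTerm ) ] * dpEff * dtEff
// while for QFS final states only the cosh and cos terms contribute.
// The candidate is kept if a uniform random number falls below I / aSqMaxSet_.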
if ( cpEigenValue_ == QFS) {
// Calculate the total intensities for each flavour-specific final state
const Double_t ATotSq { ( ASq * dtCosh + curEvtTrueTagFlv_ * ASq * dtCos ) * dpEff * dtEff };
const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dpEff * dtEff };
const Double_t ASumSq { ATotSq + AbarTotSq };
// Finally we throw the dice to see whether this event should be generated (and, if so, which final state)
const Double_t randNum = LauRandom::randomFun()->Rndm();
if (randNum <= ASumSq / aSqMaxSet_ ) {
generatedEvent = kTRUE;
nGenLoop_ = 0;
if (ASumSq > aSqMaxVar_) {aSqMaxVar_ = ASumSq;}
if ( randNum <= ATotSq / aSqMaxSet_ ) {
curEvtDecayFlv_ = LauFlavTag::Flavour::B;
} else {
curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar;
}
// Generate the flavour tagging information from the true tag
// (we do this after accepting the event to save time)
flavTag_->generateEventInfo( curEvtTrueTagFlv_ );
curEvtTagFlv_ = flavTag_->getCurEvtTagFlv();
curEvtMistag_ = flavTag_->getCurEvtMistag();
} else {
nGenLoop_++;
}
} else {
// Calculate the DP terms
const Double_t aSqSum { ASq + AbarSq };
const Double_t aSqDif { ASq - AbarSq };
const LauComplex inter { Abar * A.conj() * phiMixComplex_ };
const Double_t interTermIm { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.im() : -2.0 * inter.im() };
const Double_t interTermRe { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.re() : -2.0 * inter.re() };
// Combine DP and decay-time info for all terms
const Double_t coshTerm { aSqSum * dtCosh };
const Double_t sinhTerm { interTermRe * dtSinh };
const Double_t cosTerm { aSqDif * dtCos };
const Double_t sinTerm { interTermIm * dtSin };
// Sum to obtain the total and multiply by the efficiency
// Multiplying the cos and sin terms by the true flavour at production
const Double_t ATotSq { ( coshTerm + sinhTerm + curEvtTrueTagFlv_ * ( cosTerm - sinTerm ) ) * dpEff * dtEff };
//Finally we throw the dice to see whether this event should be generated
const Double_t randNum = LauRandom::randomFun()->Rndm();
if (randNum <= ATotSq/aSqMaxSet_ ) {
generatedEvent = kTRUE;
nGenLoop_ = 0;
if (ATotSq > aSqMaxVar_) {aSqMaxVar_ = ATotSq;}
// Generate the flavour tagging information from the true tag
// (we do this after accepting the event to save time)
flavTag_->generateEventInfo( curEvtTrueTagFlv_ );
curEvtTagFlv_ = flavTag_->getCurEvtTagFlv();
curEvtMistag_ = flavTag_->getCurEvtMistag();
} else {
nGenLoop_++;
}
}
} // end of while !generatedEvent loop
} // end of if (signalTree_) else control
} else {
if ( signalTree_ ) {
signalTree_->getEmbeddedEvent(0);
//curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv"));
curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName());
curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName());
}
}
// Check whether we have generated the toy MC OK.
if (nGenLoop_ >= iterationsMax_) {
aSqMaxSet_ = 1.01 * aSqMaxVar_;
genOK = kFALSE;
std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "<<aSqMaxSet_<<std::endl;
} else if (aSqMaxVar_ > aSqMaxSet_) {
aSqMaxSet_ = 1.01 * aSqMaxVar_;
genOK = kFALSE;
std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<<aSqMaxSet_<<std::endl;
}
if (genOK) {
// Some variables, like Fisher or NN, might use m13Sq and m23Sq from the kinematics.
// kinematicsB0bar_ is up to date, so update kinematicsB0_ to match.
kinematicsB0_->updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() );
this->generateExtraPdfValues(sigExtraPdf_, signalTree_);
}
// Check for problems with the embedding
if (signalTree_ && (signalTree_->nEvents() == signalTree_->nUsedEvents())) {
std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<<std::endl;
signalTree_->clearUsedList();
}
return genOK;
}
Bool_t LauTimeDepFitModel::generateBkgndEvent([[maybe_unused]] UInt_t bkgndID)
{
// Generate Bkgnd event
Bool_t genOK(kTRUE);
//LauAbsBkgndDPModel* model(0);
//LauEmbeddedData* embeddedData(0);
//LauPdfList* extraPdfs(0);
//LauKinematics* kinematics(0);
//model = BkgndDPModels_[bkgndID];
//if (this->enableEmbedding()) {
// // find the right embedded data for the current tagging category
// LauTagCatEmbDataMap::const_iterator emb_iter = bkgndTree_[bkgndID].find(curEvtTagCat_);
// embeddedData = (emb_iter != bkgndTree_[bkgndID].end()) ? emb_iter->second : 0;
//}
//extraPdfs = &BkgndPdfs_[bkgndID];
//kinematics = kinematicsB0bar_;
//if (this->useDP()) {
// if (embeddedData) {
// embeddedData->getEmbeddedEvent(kinematics);
// } else {
// if (model == 0) {
// const TString& bkgndClass = this->bkgndClassName(bkgndID);
// std::cerr << "ERROR in LauCPFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << bkgndClass << "\"." << std::endl;
// gSystem->Exit(EXIT_FAILURE);
// }
// genOK = model->generate();
// }
//} else {
// if (embeddedData) {
// embeddedData->getEmbeddedEvent(0);
// }
//}
//if (genOK) {
// this->generateExtraPdfValues(extraPdfs, embeddedData);
//}
//// Check for problems with the embedding
//if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) {
// const TString& bkgndClass = this->bkgndClassName(bkgndID);
// std::cerr << "WARNING in LauCPFitModel::generateBkgndEvent : Source of embedded " << bkgndClass << " events used up, clearing the list of used events." << std::endl;
// embeddedData->clearUsedList();
//}
return genOK;
}
void LauTimeDepFitModel::setupGenNtupleBranches()
{
// Setup the required ntuple branches
this->addGenNtupleDoubleBranch("evtWeight");
this->addGenNtupleIntegerBranch("genSig");
this->addGenNtupleIntegerBranch("cpEigenvalue");
const TString& trueTagVarName { flavTag_->getTrueTagVarName() };
if ( trueTagVarName != "" ) {
this->addGenNtupleIntegerBranch(trueTagVarName);
}
if ( cpEigenValue_ == QFS ) {
const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() };
if ( decayFlvVarName != "" ) {
this->addGenNtupleIntegerBranch(decayFlvVarName);
}
}
const std::vector<TString>& tagVarNames { flavTag_->getTagVarNames() };
const std::vector<TString>& mistagVarNames { flavTag_->getMistagVarNames() };
const ULong_t nTaggers {flavTag_->getNTaggers()};
for (ULong_t position{0}; position<nTaggers; ++position){
this->addGenNtupleIntegerBranch(tagVarNames[position]);
this->addGenNtupleDoubleBranch(mistagVarNames[position]);
}
if (this->useDP() == kTRUE) {
// Let's add the decay time variables.
this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varName());
this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varErrName());
this->addGenNtupleDoubleBranch("m12");
this->addGenNtupleDoubleBranch("m23");
this->addGenNtupleDoubleBranch("m13");
this->addGenNtupleDoubleBranch("m12Sq");
this->addGenNtupleDoubleBranch("m23Sq");
this->addGenNtupleDoubleBranch("m13Sq");
this->addGenNtupleDoubleBranch("cosHel12");
this->addGenNtupleDoubleBranch("cosHel23");
this->addGenNtupleDoubleBranch("cosHel13");
if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) {
this->addGenNtupleDoubleBranch("mPrime");
this->addGenNtupleDoubleBranch("thPrime");
}
// Can add the real and imaginary parts of the B0 and B0bar total
// amplitudes seen in the generation (restrict this with a flag
// that defaults to false)
if ( storeGenAmpInfo_ ) {
this->addGenNtupleDoubleBranch("reB0Amp");
this->addGenNtupleDoubleBranch("imB0Amp");
this->addGenNtupleDoubleBranch("reB0barAmp");
this->addGenNtupleDoubleBranch("imB0barAmp");
}
}
// Let's look at the extra variables for signal in one of the tagging categories
for ( const LauAbsPdf* pdf : sigExtraPdf_ ) {
const std::vector<TString> varNames{ pdf->varNames() };
for ( const TString& varName : varNames ) {
if ( varName != "m13Sq" && varName != "m23Sq" ) {
this->addGenNtupleDoubleBranch( varName );
}
}
}
}
void LauTimeDepFitModel::setDPDtBranchValues()
{
// Store the decay time variables.
this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varName(),curEvtDecayTime_);
this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varErrName(),curEvtDecayTimeErr_);
// CONVENTION WARNING
// TODO check - for now use B0 for any tags
//LauKinematics* kinematics(0);
//if (curEvtTagFlv_[position]<0) {
LauKinematics* kinematics = kinematicsB0_;
//} else {
// kinematics = kinematicsB0bar_;
//}
// Store all the DP information
this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12());
this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23());
this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13());
this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq());
this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq());
this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq());
this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12());
this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23());
this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13());
if (kinematics->squareDP()) {
this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime());
this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime());
}
// Can add the real and imaginary parts of the B0 and B0bar total
// amplitudes seen in the generation (restrict this with a flag
// that defaults to false)
if ( storeGenAmpInfo_ ) {
if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) {
LauComplex Abar = sigModelB0bar_->getEvtDPAmp();
LauComplex A = sigModelB0_->getEvtDPAmp();
this->setGenNtupleDoubleBranchValue("reB0Amp", A.re());
this->setGenNtupleDoubleBranchValue("imB0Amp", A.im());
this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re());
this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im());
} else {
this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0);
this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0);
this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0);
this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0);
}
}
}
void LauTimeDepFitModel::generateExtraPdfValues(LauPdfList& extraPdfs, LauEmbeddedData* embeddedData)
{
// CONVENTION WARNING
LauKinematics* kinematics = kinematicsB0_;
//LauKinematics* kinematics(0);
//if (curEvtTagFlv_<0) {
// kinematics = kinematicsB0_;
//} else {
// kinematics = kinematicsB0bar_;
//}
// Generate from the extra PDFs
for (LauPdfList::iterator pdf_iter = extraPdfs.begin(); pdf_iter != extraPdfs.end(); ++pdf_iter) {
LauFitData genValues;
if (embeddedData) {
genValues = embeddedData->getValues( (*pdf_iter)->varNames() );
} else {
genValues = (*pdf_iter)->generate(kinematics);
}
for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) {
TString varName = var_iter->first;
if ( varName != "m13Sq" && varName != "m23Sq" ) {
Double_t value = var_iter->second;
this->setGenNtupleDoubleBranchValue(varName,value);
}
}
}
}
void LauTimeDepFitModel::propagateParUpdates()
{
// Update the complex mixing phase
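// i.e. phiMixComplex_ = exp(-i*phiMix) = cos(phiMix) - i*sin(phiMix),
// built either from the separate cos/sin parameters or from the angle itself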
if (useSinCos_) {
phiMixComplex_.setRealPart(cosPhiMix_.unblindValue());
phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue());
} else {
phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue()));
phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue()));
}
// Update the total normalisation for the signal likelihood
if (this->useDP() == kTRUE) {
this->updateCoeffs();
sigModelB0bar_->updateCoeffs(coeffsB0bar_);
sigModelB0_->updateCoeffs(coeffsB0_);
this->calcInterTermNorm();
}
// Update the decay time normalisation
if ( signalDecayTimePdf_ ) {
signalDecayTimePdf_->propagateParUpdates();
}
// TODO
// - maybe also need to add an update of the background decay time PDFs here
// Update the signal events from the background numbers if not doing an extended fit
// And update the tagging category fractions
this->updateSigEvents();
}
void LauTimeDepFitModel::updateSigEvents()
{
// The background parameters will have been set from Minuit.
// We need to update the signal events using these.
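// i.e. N_sig = N_total - sum of the background yields, with generous limits
// on the yield parameters so that Minuit can float them freely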
if (!this->doEMLFit()) {
Double_t nTotEvts = this->eventsPerExpt();
Double_t signalEvents = nTotEvts;
signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts);
for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) {
LauAbsRValue* nBkgndEvents = (*iter);
if ( nBkgndEvents->isLValue() ) {
LauParameter* yield = dynamic_cast<LauParameter*>( nBkgndEvents );
yield->range(-2.0*nTotEvts,2.0*nTotEvts);
}
}
// Subtract background events (if any) from signal.
if (usingBkgnd_ == kTRUE) {
for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) {
signalEvents -= (*iter)->value();
}
}
if ( ! signalEvents_->fixed() ) {
signalEvents_->value(signalEvents);
}
}
}
void LauTimeDepFitModel::cacheInputFitVars()
{
// Fill the internal data trees of the signal and background models.
// Note that we store the events of both charges in both the
// negative and the positive models. It's only later, at the stage
// when the likelihood is being calculated, that we separate them.
LauFitDataTree* inputFitData = this->fitData();
evtCPEigenVals_.clear();
const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) );
UInt_t nEvents = inputFitData->nEvents();
evtCPEigenVals_.reserve( nEvents );
LauFitData::const_iterator fitdata_iter;
for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {
const LauFitData& dataValues = inputFitData->getData(iEvt);
// If the CP eigenvalue is in the data, use that; otherwise use the default
if ( hasCPEV ) {
fitdata_iter = dataValues.find( cpevVarName_ );
const Int_t cpEV = static_cast<Int_t>( fitdata_iter->second );
if ( cpEV == 1 ) {
cpEigenValue_ = CPEven;
} else if ( cpEV == -1 ) {
cpEigenValue_ = CPOdd;
} else if ( cpEV == 0 ) {
cpEigenValue_ = QFS;
} else {
std::cerr<<"WARNING in LauTimeDepFitModel::cacheInputFitVars : Unknown value: "<<cpEV<<" for CP eigenvalue, setting to CP-even"<<std::endl;
cpEigenValue_ = CPEven;
}
}
evtCPEigenVals_.push_back( cpEigenValue_ );
}
// We'll cache the DP amplitudes at the end because we'll
// append some points that the other PDFs won't deal with.
// Flavour tagging information
- flavTag_->cacheInputFitVars(inputFitData);
+ flavTag_->cacheInputFitVars(inputFitData);
if (this->useDP() == kTRUE) {
// DecayTime and SigmaDecayTime
signalDecayTimePdf_->cacheInfo(*inputFitData);
+ // cache all the backgrounds too
+ for ( auto& bg : BkgndDecayTimePdfs_ ) {
+ bg->cacheInfo(*inputFitData);
+ }
}
// ...and then the extra PDFs
if (not sigExtraPdf_.empty()){
this->cacheInfo(sigExtraPdf_, *inputFitData);
}
if (usingBkgnd_ == kTRUE) {
for (LauBkgndPdfsList::iterator iter = BkgndPdfs_.begin(); iter != BkgndPdfs_.end(); ++iter) {
this->cacheInfo((*iter), *inputFitData);
}
}
if (this->useDP() == kTRUE) {
sigModelB0bar_->fillDataTree(*inputFitData);
sigModelB0_->fillDataTree(*inputFitData);
if (usingBkgnd_ == kTRUE) {
for (LauBkgndDPModelList::iterator iter = BkgndDPModels_.begin(); iter != BkgndDPModels_.end(); ++iter) {
(*iter)->fillDataTree(*inputFitData);
}
}
}
}
Double_t LauTimeDepFitModel::getTotEvtLikelihood(const UInt_t iEvt)
{
// Get the CP eigenvalue of the current event
cpEigenValue_ = evtCPEigenVals_[iEvt];
// Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds)
this->getEvtDPDtLikelihood(iEvt);
// Get the flavour tagging likelihood from eta PDFs (per tagging category - TODO backgrounds to come later)
sigFlavTagLike_ = 1.0;
//this->getEvtFlavTagLikelihood(iEvt);
// Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds)
this->getEvtExtraLikelihoods(iEvt);
// Construct the total likelihood for signal, qqbar and BBbar backgrounds
Double_t sigLike = sigDPLike_ * sigFlavTagLike_ * sigExtraLike_;
//std::cout << "DP like = " << sigDPLike_ << std::endl;
//std::cout << "flav tag like = " << sigFlavTagLike_ << std::endl;
//std::cout << "extra like = " << sigExtraLike_ << std::endl;
// TODO
Double_t signalEvents = signalEvents_->unblindValue();
if (this->useDP() == kFALSE) {
//signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue());
}
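// In an extended ML fit with a floating yield, the per-event likelihood is
// scaled by the (unblinded) number of signal events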
if ( ! signalEvents_->fixed() ) {
sigLike *= signalEvents;
}
return sigLike;
}
Double_t LauTimeDepFitModel::getEventSum() const
{
Double_t eventSum(0.0);
eventSum += signalEvents_->unblindValue();
return eventSum;
}
void LauTimeDepFitModel::getEvtDPDtLikelihood(const UInt_t iEvt)
{
// Function to return the signal and background likelihoods for the
// Dalitz plot for the given event evtNo.
if ( ! this->useDP() ) {
// There's always going to be a term in the likelihood for the
// signal, so we'd better not zero it.
sigDPLike_ = 1.0;
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
if (usingBkgnd_ == kTRUE) {
bkgndDPLike_[bkgndID] = 1.0;
} else {
bkgndDPLike_[bkgndID] = 0.0;
}
}
return;
}
// Calculate event quantities
// Get the DP dynamics, decay time, and flavour tagging to calculate
// everything required for the likelihood calculation
sigModelB0bar_->calcLikelihoodInfo(iEvt);
sigModelB0_->calcLikelihoodInfo(iEvt);
signalDecayTimePdf_->calcLikelihoodInfo(iEvt);
flavTag_->updateEventInfo(iEvt);
// Retrieve the amplitudes and efficiency from the dynamics
LauComplex Abar { sigModelB0bar_->getEvtDPAmp() };
LauComplex A { sigModelB0_->getEvtDPAmp() };
const Double_t dpEff { sigModelB0bar_->getEvtEff() };
// If this is a QFS decay, one of the DP amplitudes needs to be zeroed
if (cpEigenValue_ == QFS){
curEvtDecayFlv_ = flavTag_->getCurEvtDecayFlv();
if ( curEvtDecayFlv_ == +1 ) {
Abar.zero();
} else if ( curEvtDecayFlv_ == -1 ) {
A.zero();
}
}
// Next calculate the DP terms
const Double_t aSqSum { A.abs2() + Abar.abs2() };
const Double_t aSqDif { A.abs2() - Abar.abs2() };
Double_t interTermRe { 0.0 };
Double_t interTermIm { 0.0 };
if ( cpEigenValue_ != QFS ) {
const LauComplex inter { Abar * A.conj() * phiMixComplex_ };
if ( cpEigenValue_ == CPEven ) {
interTermIm = 2.0 * inter.im();
interTermRe = 2.0 * inter.re();
} else {
interTermIm = -2.0 * inter.im();
interTermRe = -2.0 * inter.re();
}
}
// First get all the decay time terms
// TODO Backgrounds
// Get the decay time acceptance
const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() };
// Get all the decay time terms
const Double_t dtCos { signalDecayTimePdf_->getCosTerm() };
const Double_t dtSin { signalDecayTimePdf_->getSinTerm() };
const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() };
const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() };
// Get the decay time error term
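// (the PDF value for the observed per-event decay-time error; including
// this conditional term should guard against biases when using per-event
// resolutions)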
const Double_t dtErrLike { signalDecayTimePdf_->getErrTerm() };
// Get flavour tagging terms
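// Combine the per-tagger capital-Omega functions multiplicatively for the
// B and Bbar hypotheses, treating the taggers as independent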
Double_t omega{1.0};
Double_t omegabar{1.0};
const ULong_t nTaggers { flavTag_->getNTaggers() };
for (ULong_t position{0}; position<nTaggers; ++position){
omega *= flavTag_->getCapitalOmega(position, LauFlavTag::Flavour::B);
omegabar *= flavTag_->getCapitalOmega(position, LauFlavTag::Flavour::Bbar);
}
const Double_t prodAsym { AProd_.unblindValue() };
const Double_t ftOmegaHyp { ((1.0 - prodAsym)*omega + (1.0 + prodAsym)*omegabar) };
const Double_t ftOmegaTrig { ((1.0 - prodAsym)*omega - (1.0 + prodAsym)*omegabar) };
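// The (1 -+ AProd) factors weight the two true-flavour hypotheses by their
// production rates: the Hyp combination multiplies the cosh/sinh terms,
// the Trig combination the cos/sin terms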
const Double_t coshTerm { ftOmegaHyp * dtCosh * aSqSum };
const Double_t sinhTerm { ftOmegaHyp * dtSinh * interTermRe };
const Double_t cosTerm { ftOmegaTrig * dtCos * aSqDif };
const Double_t sinTerm { ftOmegaTrig * dtSin * interTermIm };
// Combine all terms to get the total amplitude squared
const Double_t ASq { coshTerm + sinhTerm + cosTerm - sinTerm };
// Calculate the DP and time normalisation
const Double_t normASqSum { sigModelB0_->getDPNorm() + sigModelB0bar_->getDPNorm() };
const Double_t normASqDiff { sigModelB0_->getDPNorm() - sigModelB0bar_->getDPNorm() };
Double_t normInterTermRe { 0.0 };
Double_t normInterTermIm { 0.0 };
if ( cpEigenValue_ != QFS ) {
// TODO - double check this sign flipping here (it's presumably right but...)
normInterTermRe = ( cpEigenValue_ == CPOdd ) ? -1.0 * interTermReNorm_ : interTermReNorm_;
normInterTermIm = ( cpEigenValue_ == CPOdd ) ? -1.0 * interTermImNorm_ : interTermImNorm_;
}
const Double_t normCoshTerm { signalDecayTimePdf_->getNormTermCosh() };
const Double_t normSinhTerm { signalDecayTimePdf_->getNormTermSinh() };
const Double_t normCosTerm { signalDecayTimePdf_->getNormTermCos() };
const Double_t normSinTerm { signalDecayTimePdf_->getNormTermSin() };
const Double_t normHyp { normASqSum * normCoshTerm + normInterTermRe * normSinhTerm };
const Double_t normTrig { - prodAsym * ( normASqDiff * normCosTerm + normInterTermIm * normSinTerm ) };
// Combine all terms to get the total normalisation
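// The overall factor of 2 comes from summing the production weights over
// the two true flavours: the hyperbolic terms acquire (1-AProd)+(1+AProd) = 2,
// the trigonometric terms (1-AProd)-(1+AProd) = -2*AProd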
const Double_t norm { 2.0 * ( normHyp + normTrig ) };
// Multiply the squared-amplitude by the efficiency (DP and decay time) and decay-time error likelihood
// and normalise to obtain the signal likelihood
sigDPLike_ = ( ASq * dpEff * dtEff * dtErrLike ) / norm;
// Background part
// TODO add them into the actual Likelihood calculations
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
if (usingBkgnd_ == kTRUE) {
+ BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(iEvt);
bkgndDPLike_[bkgndID] = BkgndDPModels_[bkgndID]->getLikelihood(iEvt);
+ bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getHistTerm();
} else {
bkgndDPLike_[bkgndID] = 0.0;
}
}
}
void LauTimeDepFitModel::getEvtExtraLikelihoods(const UInt_t iEvt)
{
// Function to return the signal and background likelihoods for the
// extra variables for the given event evtNo.
sigExtraLike_ = 1.0; // There's always a likelihood term for signal, so we'd better not zero it.
// First, those independent of the tagging of the event:
// signal
if ( not sigExtraPdf_.empty() ) {
sigExtraLike_ = this->prodPdfValue( sigExtraPdf_, iEvt );
}
// Background
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
if (usingBkgnd_) {
bkgndExtraLike_[bkgndID] = this->prodPdfValue( BkgndPdfs_[bkgndID], iEvt );
} else {
bkgndExtraLike_[bkgndID] = 0.0;
}
}
}
void LauTimeDepFitModel::getEvtFlavTagLikelihood(const UInt_t iEvt)
{
// Function to return the signal and background likelihoods for the
// extra variables for the given event evtNo.
sigFlavTagLike_ = 1.0; // There's always a likelihood term for signal, so we'd better not zero it.
// Loop over taggers
const ULong_t nTaggers { flavTag_->getNTaggers() };
for (ULong_t position{0}; position<nTaggers; ++position){
if (sigFlavTagPdf_[position]) {
sigFlavTagPdf_[position]->calcLikelihoodInfo(iEvt);
// Multiply, rather than overwrite, so that all taggers contribute
sigFlavTagLike_ *= sigFlavTagPdf_[position]->getLikelihood();
}
}
if (sigFlavTagLike_ <= 0.0) {
std::cout<<"INFO in LauTimeDepFitModel::getEvtFlavTagLikelihood : Event with zero FlavTag likelihood"<<std::endl;
}
// TODO Add in the background components too
}
void LauTimeDepFitModel::updateCoeffs()
{
coeffsB0bar_.clear(); coeffsB0_.clear();
coeffsB0bar_.reserve(nSigComp_); coeffsB0_.reserve(nSigComp_);
for (UInt_t i = 0; i < nSigComp_; ++i) {
coeffsB0bar_.push_back(coeffPars_[i]->antiparticleCoeff());
coeffsB0_.push_back(coeffPars_[i]->particleCoeff());
}
}
void LauTimeDepFitModel::checkMixingPhase()
{
Double_t phase = phiMix_.value();
Double_t genPhase = phiMix_.genValue();
// Check now whether the phase lies in the right range (-pi to pi).
Bool_t withinRange(kFALSE);
while (withinRange == kFALSE) {
if (phase > -LauConstants::pi && phase < LauConstants::pi) {
withinRange = kTRUE;
} else {
// Not within the specified range
if (phase > LauConstants::pi) {
phase -= LauConstants::twoPi;
} else if (phase < -LauConstants::pi) {
phase += LauConstants::twoPi;
}
}
}
// A further problem can occur when the generated phase is close to -pi or pi.
// The phase can wrap over to the other end of the scale -
// this leads to artificially large pulls so we wrap it back.
Double_t diff = phase - genPhase;
if (diff > LauConstants::pi) {
phase -= LauConstants::twoPi;
} else if (diff < -LauConstants::pi) {
phase += LauConstants::twoPi;
}
// finally store the new value in the parameter
// and update the pull
phiMix_.value(phase);
phiMix_.updatePull();
}
void LauTimeDepFitModel::embedSignal(const TString& fileName, const TString& treeName,
Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment)
{
if (signalTree_) {
std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Already embedding signal from file."<<std::endl;
return;
}
if (!reuseEventsWithinEnsemble && reuseEventsWithinExperiment) {
std::cerr<<"WARNING in LauTimeDepFitModel::embedSignal : Conflicting options provided, will not reuse events at all."<<std::endl;
reuseEventsWithinExperiment = kFALSE;
}
signalTree_ = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment);
Bool_t dataOK = signalTree_->findBranches();
if (!dataOK) {
delete signalTree_; signalTree_ = 0;
std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Problem creating data tree for embedding."<<std::endl;
return;
}
reuseSignal_ = reuseEventsWithinEnsemble;
}
void LauTimeDepFitModel::embedBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName,
Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment)
{
if ( ! this->validBkgndClass( bkgndClass ) ) {
std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl;
std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
return;
}
UInt_t bkgndID = this->bkgndClassID( bkgndClass );
LauEmbeddedData* bkgTree = bkgndTree_[bkgndID];
if (bkgTree) {
std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Already embedding background from a file." << std::endl;
return;
}
bkgTree = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment);
Bool_t dataOK = bkgTree->findBranches();
if (!dataOK) {
delete bkgTree; bkgTree = 0;
std::cerr << "ERROR in LauTimeDepFitModel::embedBkgnd : Problem creating data tree for embedding." << std::endl;
return;
}
// Store the newly created tree so that it can be used during generation
bkgndTree_[bkgndID] = bkgTree;
reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble;
if (this->enableEmbedding() == kFALSE) {
this->enableEmbedding(kTRUE);
}
}
void LauTimeDepFitModel::setupSPlotNtupleBranches()
{
// add branches for storing the experiment number and the number of
// the event within the current experiment
this->addSPlotNtupleIntegerBranch("iExpt");
this->addSPlotNtupleIntegerBranch("iEvtWithinExpt");
// Store the efficiency of the event (for inclusive BF calculations).
if (this->storeDPEff()) {
this->addSPlotNtupleDoubleBranch("efficiency");
}
// Store the total event likelihood for each species.
this->addSPlotNtupleDoubleBranch("sigTotalLike");
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
TString name( this->bkgndClassName(iBkgnd) );
name += "TotalLike";
this->addSPlotNtupleDoubleBranch(name);
}
}
// Store the DP likelihoods
if (this->useDP()) {
this->addSPlotNtupleDoubleBranch("sigDPLike");
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
TString name( this->bkgndClassName(iBkgnd) );
name += "DPLike";
this->addSPlotNtupleDoubleBranch(name);
}
}
}
// Store the likelihoods for each extra PDF
this->addSPlotNtupleBranches(sigExtraPdf_, "sig");
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
const TString& bkgndClass = this->bkgndClassName(iBkgnd);
this->addSPlotNtupleBranches(BkgndPdfs_[iBkgnd], bkgndClass);
}
}
}
void LauTimeDepFitModel::addSPlotNtupleBranches(const LauPdfList& extraPdfs, const TString& prefix)
{
// Loop through each of the PDFs
for ( const LauAbsPdf* pdf : extraPdfs ) {
// Count the number of input variables that are not
// DP variables (used in the case where there is DP
// dependence for e.g. MVA)
UInt_t nVars{0};
const std::vector<TString> varNames { pdf->varNames() };
for ( const TString& varName : varNames ) {
if ( varName != "m13Sq" && varName != "m23Sq" ) {
++nVars;
}
}
if ( nVars == 1 ) {
// If the PDF only has one variable then
// simply add one branch for that variable
TString name{prefix};
name += pdf->varName();
name += "Like";
this->addSPlotNtupleDoubleBranch(name);
} else if ( nVars == 2 ) {
// If the PDF has two variables then we
// need a branch for them both together and
// branches for each
TString allVars{""};
for ( const TString& varName : varNames ) {
if ( varName != "m13Sq" && varName != "m23Sq" ) {
allVars += varName;
TString name{prefix};
name += varName;
name += "Like";
this->addSPlotNtupleDoubleBranch(name);
}
}
TString name{prefix};
name += allVars;
name += "Like";
this->addSPlotNtupleDoubleBranch(name);
} else {
std::cerr<<"WARNING in LauTimeDepFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<<std::endl;
}
}
}
Double_t LauTimeDepFitModel::setSPlotNtupleBranchValues(LauPdfList& extraPdfs, const TString& prefix, const UInt_t iEvt)
{
// Store the PDF value for each variable in the list
Double_t totalLike(1.0);
Double_t extraLike(0.0);
if ( extraPdfs.empty() ) {
return totalLike;
}
for ( LauAbsPdf* pdf : extraPdfs ) {
// calculate the likelihood for this event
pdf->calcLikelihoodInfo(iEvt);
extraLike = pdf->getLikelihood();
totalLike *= extraLike;
// Count the number of input variables that are not
// DP variables (used in the case where there is DP
// dependence for e.g. MVA)
UInt_t nVars{0};
const std::vector<TString> varNames { pdf->varNames() };
for ( const TString& varName : varNames ) {
if ( varName != "m13Sq" && varName != "m23Sq" ) {
++nVars;
}
}
if ( nVars == 1 ) {
// If the PDF only has one variable then
// simply store the value for that variable
TString name{prefix};
name += pdf->varName();
name += "Like";
this->setSPlotNtupleDoubleBranchValue(name, extraLike);
} else if ( nVars == 2 ) {
// If the PDF has two variables then we
// store the value for them both together
// and for each on their own
TString allVars{""};
for ( const TString& varName : varNames ) {
if ( varName != "m13Sq" && varName != "m23Sq" ) {
allVars += varName;
TString name{prefix};
name += varName;
name += "Like";
const Double_t indivLike = pdf->getLikelihood( varName );
this->setSPlotNtupleDoubleBranchValue(name, indivLike);
}
}
TString name{prefix};
name += allVars;
name += "Like";
this->setSPlotNtupleDoubleBranchValue(name, extraLike);
} else {
std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<<std::endl;
}
}
return totalLike;
}
LauSPlot::NameSet LauTimeDepFitModel::variableNames() const
{
LauSPlot::NameSet nameSet;
if (this->useDP()) {
nameSet.insert("DP");
}
for ( const LauAbsPdf* pdf : sigExtraPdf_ ) {
// Loop over the variables involved in each PDF
const std::vector<TString> varNames { pdf->varNames() };
for ( const TString& varName : varNames ) {
// If they are not DP coordinates then add them
if ( varName != "m13Sq" && varName != "m23Sq" ) {
nameSet.insert( varName );
}
}
}
return nameSet;
}
LauSPlot::NumbMap LauTimeDepFitModel::freeSpeciesNames() const
{
LauSPlot::NumbMap numbMap;
if (!signalEvents_->fixed() && this->doEMLFit()) {
numbMap["sig"] = signalEvents_->genValue();
}
if ( usingBkgnd_ ) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
const TString& bkgndClass = this->bkgndClassName(iBkgnd);
const LauAbsRValue* par = bkgndEvents_[iBkgnd];
if (!par->fixed()) {
numbMap[bkgndClass] = par->genValue();
if ( ! par->isLValue() ) {
std::cerr << "WARNING in LauTimeDepFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl;
}
}
}
}
return numbMap;
}
LauSPlot::NumbMap LauTimeDepFitModel::fixdSpeciesNames() const
{
LauSPlot::NumbMap numbMap;
if (signalEvents_->fixed() && this->doEMLFit()) {
numbMap["sig"] = signalEvents_->genValue();
}
if ( usingBkgnd_ ) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
const TString& bkgndClass = this->bkgndClassName(iBkgnd);
const LauAbsRValue* par = bkgndEvents_[iBkgnd];
if (par->fixed()) {
numbMap[bkgndClass] = par->genValue();
}
}
}
return numbMap;
}
LauSPlot::TwoDMap LauTimeDepFitModel::twodimPDFs() const
{
LauSPlot::TwoDMap twodimMap;
for ( const LauAbsPdf* pdf : sigExtraPdf_ ) {
// Count the number of input variables that are not DP variables
UInt_t nVars{0};
const std::vector<TString> varNames { pdf->varNames() };
for ( const TString& varName : varNames ) {
if ( varName != "m13Sq" && varName != "m23Sq" ) {
++nVars;
}
}
if ( nVars == 2 ) {
twodimMap.insert( std::make_pair( "sig", std::make_pair( varNames[0], varNames[1] ) ) );
}
}
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
const TString& bkgndClass = this->bkgndClassName(iBkgnd);
for ( const LauAbsPdf* pdf : BkgndPdfs_[iBkgnd] ) {
// Count the number of input variables that are not DP variables
UInt_t nVars{0};
const std::vector<TString> varNames { pdf->varNames() };
for ( const TString& varName : varNames ) {
if ( varName != "m13Sq" && varName != "m23Sq" ) {
++nVars;
}
}
if ( nVars == 2 ) {
twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) );
}
}
}
}
return twodimMap;
}
void LauTimeDepFitModel::storePerEvtLlhds()
{
std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Storing per-event likelihood values..."<<std::endl;
LauFitDataTree* inputFitData = this->fitData();
// if we've not been using the DP model then we need to cache all
// the info here so that we can get the efficiency from it
if (!this->useDP() && this->storeDPEff()) {
sigModelB0bar_->initialise(coeffsB0bar_);
sigModelB0_->initialise(coeffsB0_);
sigModelB0bar_->fillDataTree(*inputFitData);
sigModelB0_->fillDataTree(*inputFitData);
}
UInt_t evtsPerExpt(this->eventsPerExpt());
LauIsobarDynamics* sigModel(sigModelB0bar_);
for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) {
// Find out whether we have B0bar or B0
flavTag_->updateEventInfo(iEvt);
curEvtTagFlv_ = flavTag_->getCurEvtTagFlv();
curEvtMistag_ = flavTag_->getCurEvtMistag();
// the DP information
this->getEvtDPDtLikelihood(iEvt);
if (this->storeDPEff()) {
if (!this->useDP()) {
sigModel->calcLikelihoodInfo(iEvt);
}
this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff());
}
if (this->useDP()) {
sigTotalLike_ = sigDPLike_;
this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_);
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
TString name = this->bkgndClassName(iBkgnd);
name += "DPLike";
this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]);
// Initialise the per-event running product for this background class,
// otherwise the *= below would carry over values from the previous event
bkgndTotalLike_[iBkgnd] = bkgndDPLike_[iBkgnd];
}
}
} else {
sigTotalLike_ = 1.0;
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
bkgndTotalLike_[iBkgnd] = 1.0;
}
}
}
// the signal PDF values
sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigExtraPdf_, "sig", iEvt);
// the background PDF values
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
const TString& bkgndClass = this->bkgndClassName(iBkgnd);
LauPdfList& pdfs = BkgndPdfs_[iBkgnd];
bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(pdfs, bkgndClass, iEvt);
}
}
// the total likelihoods
this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_);
if (usingBkgnd_) {
const UInt_t nBkgnds = this->nBkgndClasses();
for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
TString name = this->bkgndClassName(iBkgnd);
name += "TotalLike";
this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]);
}
}
// fill the tree
this->fillSPlotNtupleBranches();
}
std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Finished storing per-event likelihood values."<<std::endl;
}
void LauTimeDepFitModel::weightEvents( const TString& /*dataFileName*/, const TString& /*dataTreeName*/ )
{
std::cerr << "ERROR in LauTimeDepFitModel::weightEvents : Method not available for this fit model." << std::endl;
return;
}
void LauTimeDepFitModel::savePDFPlots(const TString& /*label*/)
{
}
void LauTimeDepFitModel::savePDFPlotsWave(const TString& /*label*/, const Int_t& /*spin*/)
{
}
void LauTimeDepFitModel::setBkgdFlavTagPdfs(const TString name, LauAbsPdf* pdf)
{
// TODO - when backgrounds are added - implement a check that "name" really is the name of a background category
if (pdf==0) {
std::cerr<<"ERROR in LauTimeDepFitModel::setBkgdFlavTagPdfs : The PDF pointer is null."<<std::endl;
return;
}
bkgdFlavTagPdf_[name] = pdf;
}
