diff --git a/examples/CalcChiSq.cc b/examples/CalcChiSq.cc index 9fd61a3..3980936 100644 --- a/examples/CalcChiSq.cc +++ b/examples/CalcChiSq.cc @@ -1,44 +1,44 @@ /* Copyright 2013 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ #include "LauCalcChiSq.hh" #include "TString.h" int main(const int argc, const char** argv) { TString inputFile("chiSqInput.txt"); if (argc > 1) {inputFile = TString(argv[1]);} LauCalcChiSq a(inputFile); - //a.setVerbose(kTRUE); + //a.setPrintLevel(LauOutputLevel::Verbose); a.run(); return 0; } diff --git a/inc/Lau1DCubicSpline.hh b/inc/Lau1DCubicSpline.hh index 7302474..bd1f1eb 100644 --- a/inc/Lau1DCubicSpline.hh +++ b/inc/Lau1DCubicSpline.hh @@ -1,256 +1,263 @@ /* Copyright 2015 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file Lau1DCubicSpline.hh \brief File containing declaration of Lau1DCubicSpline class. */ /*! \class Lau1DCubicSpline \brief Class for defining a 1D cubic spline based on a set of knots. Class for defining a 1D cubic spline based on a set of knots. Interpolation between the knots is performed either by one of two types of cubic spline (standard or Akima) or by linear interpolation. The splines are defined by a piecewise cubic function which, between knots i and i+1, has the form f_i(x) = (1 - t)*y_i + t*y_i+1 + t*(1 - t)(a*(1 - t) + b*t) where t = (x - x_i)/(x_i+1 - x_i), a = k_i *(x_i+1 - x_i) - (y_i+1 - y_i), b = -k_i+1*(x_i+1 - x_i) + (y_i+1 - y_i) and k_i is (by construction) the first derivative at knot i. f(x) and f'(x) are continuous at the internal knots by construction. For the standard splines, f''(x) is required to be continuous at all internal knots placing n-2 constraints on the n parameters, k_i. The final two constraints are set by the boundary conditions. 
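For illustration only, the piecewise form above can be written directly as a small standalone function (a sketch with a hypothetical helper name, not part of this interface):
\code
#include <cstddef>
#include <vector>

#include "Rtypes.h"

// Sketch only: evaluate the spline between knots i and i+1 using the form
// quoted above, where k holds the first derivative at each knot
Double_t evalSegment( const std::vector<Double_t>& x, const std::vector<Double_t>& y,
                      const std::vector<Double_t>& k, const std::size_t i, const Double_t xval )
{
	const Double_t h = x[i+1] - x[i];
	const Double_t t = ( xval - x[i] ) / h;
	const Double_t a =  k[i]   * h - ( y[i+1] - y[i] );
	const Double_t b = -k[i+1] * h + ( y[i+1] - y[i] );
	return ( 1.0 - t ) * y[i] + t * y[i+1] + t * ( 1.0 - t ) * ( a * ( 1.0 - t ) + b * t );
}
\endcode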
At each boundary, the function may be: (i) Clamped : f'(x) = C at the last knot (ii) Natural : f''(x) = 0 at the last knot (iii) Not a knot : f'''(x) continuous at the second last knot The algorithms used in these splines can be found on: http://en.wikipedia.org/wiki/Spline_interpolation#Algorithm_to_find_the_interpolating_cubic_spline http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm For the Akima splines, the values of k_i are determined from the slopes of the four nearest segments (a_i-1, a_i, a_i+1 and a_i+2) as k_i = ( | a_i+2 - a_i+1 | a_i + | a_i - a_i-1 | a_i+1 ) / ( | a_i+2 - a_i+1 | + | a_i - a_i-1 | ) and as k_i = ( a_i + a_i+1 ) / 2 in the special case a_i-1 == a_i != a_i+1 == a_i+2. Boundary conditions are specified by the relations a_2 - a_1 = a_1 - a_0 = a_0 - a_-1 and a_n-1 - a_n-2 = a_n - a_n-1 = a_n+1 - a_n The algorithms used in these splines can be found in: J.ACM vol. 17 no. 4 pp 589-602 */ #ifndef LAU_1DCUBICSPLINE #define LAU_1DCUBICSPLINE #include #include #include "Rtypes.h" #include "TF1.h" #include "TFitResultPtr.h" +#include "LauPrint.hh" + class TH1; class LauAbsRValue; class LauParameter; class Lau1DCubicSpline final { public: //! Define the allowed interpolation types enum LauSplineType { StandardSpline, /*!< standard cubic splines with f''(x) continuous at all internal knots */ AkimaSpline, /*!< Akima cubic splines with f'(x) at each knot defined locally by the positions of only five knots */ LinearInterpolation /*! Linear interpolation between each pair of knots */ }; //! Define the allowed boundary condition types /*! These are only supported by standard splines */ enum LauSplineBoundaryType { Clamped, /*!< clamped boundary - f'(x) = C */ Natural, /*!< natural boundary - f''(x) = 0 */ NotAKnot /*!< 'not a knot' boundary - f'''(x) continuous at second last knot */ }; //! Constructor /*! - /param [in] xs the x-values of the knots - /param [in] ys the y-values of the knots - /param [in] leftBound the left-hand boundary condition - /param [in] rightBound the right-hand boundary condition - /param [in] dydx0 the gradient at the left-hand boundary of a clamped spline - /param [in] dydxn the gradient at the right-hand boundary of a clamped spline + \param [in] xs the x-values of the knots + \param [in] ys the y-values of the knots + \param [in] type the type of spline (Standard, Akima, Linear) + \param [in] leftBound the left-hand boundary condition + \param [in] rightBound the right-hand boundary condition + \param [in] dydx0 the gradient at the left-hand boundary of a clamped spline + \param [in] dydxn the gradient at the right-hand boundary of a clamped spline */ Lau1DCubicSpline(const std::vector& xs, const std::vector& ys, LauSplineType type = Lau1DCubicSpline::StandardSpline, LauSplineBoundaryType leftBound = Lau1DCubicSpline::NotAKnot, LauSplineBoundaryType rightBound = Lau1DCubicSpline::NotAKnot, Double_t dydx0 = 0.0, Double_t dydxn = 0.0); //! Evaluate the function at given point /*! \param [in] x the x-coordinate \return the value of the spline at x */ Double_t evaluate(Double_t x) const; //! Update the y-values of the knots /*! \param [in] ys the y-values of the knots */ void updateYValues(const std::vector& ys); //! Update the y-values of the knots /*! \param [in] ys the y-values of the knots */ void updateYValues(const std::vector& ys); //! Update the y-values of the knots /*! \param [in] ys the y-values of the knots */ void updateYValues(const std::vector& ys); //! Update the type of interpolation to perform /*! 
\param [in] type the type of interpolation */ void updateType(LauSplineType type); //! Update the boundary conditions for the spline /*! - /param [in] leftBound the left-hand boundary condition - /param [in] rightBound the right-hand boundary condition - /param [in] dydx0 the gradient at the left-hand boundary of a clamped spline - /param [in] dydxn the gradient at the right-hand boundary of a clamped spline + \param [in] leftBound the left-hand boundary condition + \param [in] rightBound the right-hand boundary condition + \param [in] dydx0 the gradient at the left-hand boundary of a clamped spline + \param [in] dydxn the gradient at the right-hand boundary of a clamped spline */ void updateBoundaryConditions(LauSplineBoundaryType leftBound, LauSplineBoundaryType rightBound, Double_t dydx0 = 0.0, Double_t dydxn = 0.0); - //! Return the number of knots + //! Get the number of knots std::size_t getnKnots() const {return nKnots_;} //! Get y values const std::vector& getYValues() const {return y_;} //! Get x values const std::vector& getXValues() const {return x_;} - //! Get the coefficients of spline section i in the form a + bx + cx^2 + dx^3 + //! Get the coefficients of a given spline segment in the form a + bx + cx^2 + dx^3 /*! - \params [in] i refers to the left-hand index of the knot (i = 0 gets the params between x_0 and x_1) + \param [in] segIndex refers to the index of the left-hand knot (segIndex = 0 gets the params between x_0 and x_1) + \param [in] normalise whether the coefficients should be normalised by the integral of the spline \return coefficients {a, b, c, d} */ - std::array getCoefficients(const std::size_t i, const bool normalise = false) const; + std::array getCoefficients(const std::size_t segIndex, const bool normalise = false) const; //! Get the integral over all the spline segments Double_t integral() const; //! Make a TF1 showing the spline with its current knot values /*! - \params [in] normalise whether or not you want the spline normalised + \param [in] normalise whether or not you want the spline normalised \return 1D function object */ TF1* makeTF1(const bool normalise = false) const; //! Fit the the normalisation of the spline to a TH1 //! So they look as good as possible when plotted on top of one another //! Useful when a sample has been generated with a hist and fitted with a spline to compare them /*! - \params [in] The histogram to be fit to - \return a TF1 fit to `hist` + \param [in] hist the histogram to be fitted + \param [in] printLevel the level of printout desired from fit + \return a TF1 fit to the histogram */ - TF1* normaliseToTH1(TH1* hist) const; + TF1* normaliseToTH1(TH1* hist, LauOutputLevel printLevel = LauOutputLevel::Standard) const; //! Fit the spline to a TH1 //! Useful as a starting-point for fitting a spline to data (maybe the hist is taken from MC) /*! - \params [in] The histogram to be fit to + \param [in] hist the histogram to be fit to + \param [in] printLevel the level of printout desired from fit + \return a shared-ownership smart pointer to the fit results */ - TFitResultPtr fitToTH1(TH1* hist); + TFitResultPtr fitToTH1(TH1* hist, LauOutputLevel printLevel = LauOutputLevel::Standard); private: //! Initialise the class void init(); //! Calculate the first derivative at each knot void calcDerivatives(); //! Calculate the first derivatives according to the standard method void calcDerivativesStandard(); //! Calculate the first derivatives according to the Akima method void calcDerivativesAkima(); //! 
The number of knots in the spline const std::size_t nKnots_; //! The x-value at each knot std::vector x_; //! The y-value at each knot std::vector y_; //! The first derivative at each knot std::vector dydx_; //! The 'a' coefficients used to determine the derivatives std::vector a_; //! The 'b' coefficients used to determine the derivatives std::vector b_; //! The 'c' coefficients used to determine the derivatives std::vector c_; //! The 'd' coefficients used to determine the derivatives std::vector d_; //! The type of interpolation to be performed LauSplineType type_; //! The left-hand boundary condition on the spline LauSplineBoundaryType leftBound_; //! The right-hand boundary condition on the spline LauSplineBoundaryType rightBound_; //! The gradient at the left boundary for a clamped spline Double_t dydx0_; //! The gradient at the right boundary for a clamped spline Double_t dydxn_; ClassDef(Lau1DCubicSpline, 0); // Class for defining a 1D cubic spline }; #endif diff --git a/inc/LauAbsFitModel.hh b/inc/LauAbsFitModel.hh index 90d1ed7..7730927 100644 --- a/inc/LauAbsFitModel.hh +++ b/inc/LauAbsFitModel.hh @@ -1,878 +1,878 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauAbsFitModel.hh \brief File containing declaration of LauAbsFitModel class. */ /*! \class LauAbsFitModel \brief Abstract interface to the fitting and toy MC model Abstract interface to the fitting and toy MC model Any class inheriting from this must implement the following functions: - cacheInputFitVars() - checkInitFitParams() - finaliseFitResults() - fixdSpeciesNames() - freeSpeciesNames() - genExpt() - getEventSum() - getTotEvtLikelihood() - initialise() - initialiseDPModels() - propagateParUpdates() - recalculateNormalisation() - scfDPSmear() - setAmpCoeffSet() - setNBkgndEvents() - setNSigEvents() - setupBkgndVectors() - setupGenNtupleBranches() - setupSPlotNtupleBranches() - splitSignal() - storePerEvtLlhds() - twodimPDFs() - updateCoeffs() - variableNames() - weightEvents() - writeOutTable() */ #ifndef LAU_ABS_FIT_MODEL #define LAU_ABS_FIT_MODEL #include "TMatrixDfwd.h" #include "TString.h" #include "TStopwatch.h" #include #include #include #include "LauFitObject.hh" #include "LauFormulaPar.hh" #include "LauSimFitTask.hh" // LauSPlot included to get LauSPlot::NameSet typedef #include "LauSPlot.hh" class LauAbsCoeffSet; class LauAbsPdf; class LauFitDataTree; class LauGenNtuple; class LauAbsRValue; class LauParameter; class LauAbsFitModel : public LauSimFitTask { public: //! Constructor LauAbsFitModel(); //! Destructor virtual ~LauAbsFitModel(); //! Is the Dalitz plot term in the likelihood Bool_t useDP() const { return usingDP_; } //! Switch on/off the Dalitz plot term in the Likelihood (allows fits to other quantities, e.g. B mass) /*! \param [in] usingDP the boolean flag */ void useDP(Bool_t usingDP) { usingDP_ = usingDP; } //! 
Return the flag to store the status of using an sFit or not Bool_t doSFit() const { return doSFit_; } //! Do an sFit (use sWeights to isolate signal decays rather than using background histograms) /*! \param [in] sWeightBranchName name of the branch of the tree containing the sWeights \param [in] scaleFactor scaling factor to get the uncertainties correct */ void doSFit( const TString& sWeightBranchName, Double_t scaleFactor = 1.0 ); //! Determine whether an extended maximum likelihood fit is being performed Bool_t doEMLFit() const {return emlFit_;} //! Choice to perform an extended maximum likelihood fit /*! \param [in] emlFit boolean specifying whether or not to perform the EML */ void doEMLFit(Bool_t emlFit) {emlFit_ = emlFit;} //! Determine whether Poisson smearing is enabled for the toy MC generation Bool_t doPoissonSmearing() const {return poissonSmear_;} //! Turn Poisson smearing (for the toy MC generation) on or off /*! \param [in] poissonSmear boolean specifying whether or not to do Poisson smearing */ void doPoissonSmearing(Bool_t poissonSmear) {poissonSmear_ = poissonSmear;} //! Determine whether embedding of events is enabled in the generation Bool_t enableEmbedding() const {return enableEmbedding_;} //! Turn on or off embedding of events in the generation /*! \param [in] enable boolean specifying whether to embed events */ void enableEmbedding(Bool_t enable) {enableEmbedding_ = enable;} //! Determine whether writing out of the latex table is enabled Bool_t writeLatexTable() const {return writeLatexTable_;} //! Turn on or off the writing out of the latex table /*! \param [in] writeTable boolean specifying whether or not the latex table should be written out */ void writeLatexTable(Bool_t writeTable) {writeLatexTable_ = writeTable;} //! Determine whether to save files containing graphs of the resonance's PDFs Bool_t saveFilePDF() const {return savePDF_;} //! Turn on or off the saving of files containing graphs of the resonance's PDFs /*! \param [in] savePDF boolean specifying whether or not to save files containing graphs of the resonance's PDFs */ void saveFilePDF(Bool_t savePDF) {savePDF_ = savePDF;} //! Set up the sPlot ntuple /*! \param [in] fileName the sPlot file name \param [in] treeName the sPlot tree name \param [in] storeDPEfficiency whether or not to store the efficiency information too - \param [in] verbosity define the level of output + \param [in] verbosity define the level of printout \see LauSPlot::runCalculations */ - void writeSPlotData(const TString& fileName, const TString& treeName, Bool_t storeDPEfficiency, const TString& verbosity = "q"); + void writeSPlotData(const TString& fileName, const TString& treeName, Bool_t storeDPEfficiency, const LauOutputLevel verbosity = LauOutputLevel::Quiet); //! Determine whether the sPlot data is to be written out Bool_t writeSPlotData() const {return writeSPlotData_;} //! Determine whether the efficiency information should be stored in the sPlot ntuple Bool_t storeDPEff() const {return storeDPEff_;} //! Determine whether the initial values of the fit parameters, in particular the isobar coefficient parameters, are to be randomised Bool_t useRandomInitFitPars() const {return randomFit_;} //! Randomise the initial values of the fit parameters, in particular the isobar coefficient parameters void useRandomInitFitPars(Bool_t boolean) {randomFit_ = boolean;} //! Setup the background class names /*! \param [in] names a vector of all the background names */ virtual void setBkgndClassNames( const std::vector& names ); //!
Returns the number of background classes inline UInt_t nBkgndClasses() const {return bkgndClassNames_.size();} //! Set the number of signal events /*! \param [in] nSigEvents contains the signal yield and option to fix it */ virtual void setNSigEvents(LauParameter* nSigEvents) = 0; //! Set the number of background events /*! The name of the parameter must be that of the corresponding background category (so that it can be correctly assigned) \param [in] nBkgndEvents contains the name, yield and option to fix the yield of the background */ virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents) = 0; //! Set the DP amplitude coefficients /*! The name of the coeffSet must match the name of one of the resonances in the DP model. The supplied order of coefficients will be rearranged to match the order in which the resonances are stored in the dynamics, see LauIsobarDynamics::addResonance. \param [in] coeffSet the set of coefficients */ virtual void setAmpCoeffSet(LauAbsCoeffSet* coeffSet) = 0; //! Specify that a toy MC sample should be created for a successful fit to an experiment /*! Generation uses the fitted parameters so that the user can compare the fit to the data \param [in] toyMCScale the scale factor to get the number of events to generate \param [in] mcFileName the file name where the toy sample will be stored \param [in] tableFileName name of the output tex file \param [in] poissonSmearing turn smearing on or off */ void compareFitData(UInt_t toyMCScale = 10, const TString& mcFileName = "fitToyMC.root", const TString& tableFileName = "fitToyMCTable.tex", Bool_t poissonSmearing = kTRUE); //! Start the toy generation / fitting /*! \param [in] applicationCode specifies what to do, perform a fit ("fit") or generate toy MC ("gen") \param [in] dataFileName the name of the input data file \param [in] dataTreeName the name of the tree containing the data \param [in] histFileName the file name for the output histograms \param [in] tableFileName the file name for the latex output file */ void run(const TString& applicationCode, const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileName = ""); //! This function sets the parameter values from Minuit /*! This function has to be public since it is called from the global FCN. It should not be called otherwise! \param [in] par an array storing the various parameter values \param [in] npar the number of free parameters */ virtual void setParsFromMinuit(Double_t* par, Int_t npar); //! Calculates the total negative log-likelihood /*! This function has to be public since it is called from the global FCN. It should not be called otherwise! */ virtual Double_t getTotNegLogLikelihood(); //! Set model parameters from a file /*! \param [in] fileName the name of the file with parameters to set \param [in] treeName the name of the tree in the file corresponding to the parameters to set \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setParametersFromFile(const TString& fileName, const TString& treeName, const Bool_t fix); //! Set model parameters from a given std::map /*! Only parameters named in the map are imported, all others are set to their values specified in the model configuration. \param [in] parameters map from parameter name to imported value \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setParametersFromMap(const std::map& parameters, const Bool_t fix); //! 
Set named model parameters from a file /*! Identical to setParametersFromFile, but only import parameters named from parameters set. All others are set to their values specified in the model configuration. \param [in] fileName the name of the file with parameters to set \param [in] treeName the name of the tree in the file corresponding to the parameters to set \param [in] parameters the set of parameters to import from the file \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setNamedParameters(const TString& fileName, const TString& treeName, const std::set& parameters, const Bool_t fix); //! Set named model parameters from a given std::map, with fallback to those from a file /*! Parameters named in the map are imported with their specified values. All other parameters are set to the values corresponding to the value in the given file. \param [in] fileName the name of the file with parameters to set \param [in] treeName the name of the tree in the file corresponding to the parameters to set \param [in] parameters map from parameter name to imported value (override parameters form the file) \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setParametersFileFallback(const TString& fileName, const TString& treeName, const std::map& parameters, const Bool_t fix); protected: // Some typedefs //! List of Pdfs typedef std::vector LauPdfPList; //! List of parameter pointers typedef std::vector LauParameterPList; //! List of parameter pointers typedef std::vector LauAbsRValuePList; //! Set of parameter pointers typedef std::set LauParameterPSet; //! List of parameters typedef std::vector LauParameterList; //! A type to store background classes typedef std::map LauBkgndClassMap; //! Clear the vectors containing fit parameters void clearFitParVectors(); //! Clear the vectors containing extra ntuple variables void clearExtraVarVectors(); //! Weighting - allows e.g. MC events to be weighted by the DP model /*! \param [in] dataFileName the name of the data file \param [in] dataTreeName the name of the tree containing the data */ virtual void weightEvents( const TString& dataFileName, const TString& dataTreeName ) = 0; //! Generate toy MC /*! \param [in] dataFileName the name of the file where the generated events are stored \param [in] dataTreeName the name of the tree used to store the variables \param [in] histFileName the name of the histogram output file (currently not used) \param [in] tableFileNameBase the name the latex output file */ virtual void generate(const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileNameBase); //! The method that actually generates the toy MC events for the given experiment /*! \return the success/failure flag of the generation procedure */ virtual Bool_t genExpt() = 0; //! Perform the total fit /*! \param [in] dataFileName the name of the data file \param [in] dataTreeName the name of the tree containing the data \param [in] histFileName the name of the histogram output file \param [in] tableFileNameBase the name the of latex output file */ void fit(const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileNameBase); //! Routine to perform the actual fit for a given experiment void fitExpt(); //! Routine to perform the minimisation /*! \return the success/failure flag of the fit */ Bool_t runMinimisation(); //! 
Create a toy MC sample from the fitted parameters /*! \param [in] mcFileName the file name where the toy sample will be stored \param [in] tableFileName name of the output tex file */ void createFitToyMC(const TString& mcFileName, const TString& tableFileName); //! Read in the data for the current experiment /*! \return the number of events read in */ virtual UInt_t readExperimentData(); //! Open the input file and verify that all required variables are present /*! \param [in] dataFileName the name of the input file \param [in] dataTreeName the name of the input tree */ virtual Bool_t verifyFitData(const TString& dataFileName, const TString& dataTreeName); //! Cache the input data values to calculate the likelihood during the fit virtual void cacheInputFitVars() = 0; //! Cache the value of the sWeights to be used in the sFit virtual void cacheInputSWeights(); //! Initialise the fit par vectors /*! Each class that inherits from this one must implement this sensibly for all vectors specified in clearFitParVectors, i.e. specify parameter names, initial, min, max and fixed values */ virtual void initialise() = 0; //! Recalculate normalisation the signal DP model(s) virtual void recalculateNormalisation() = 0; //! Initialise the DP models virtual void initialiseDPModels() = 0; /*! For each amp in the fit this function takes its particular parameters and from them calculates the single complex number that is its coefficient. The vector of these coeffs can then be passed to the signal dynamics. */ virtual void updateCoeffs() = 0; //! This function (specific to each model) calculates anything that depends on the fit parameter values virtual void propagateParUpdates() = 0; //! Calculate the sum of the log-likelihood over the specified events /*! \param [in] iStart the event number of the first event to be considered \param [in] iEnd the event number of the final event to be considered */ Double_t getLogLikelihood( UInt_t iStart, UInt_t iEnd ); //! Calculate the penalty terms to the log likelihood from Gaussian constraints Double_t getLogLikelihoodPenalty(); //! Calculates the likelihood for a given event /*! \param [in] iEvt the event number */ virtual Double_t getTotEvtLikelihood(UInt_t iEvt) = 0; //! Returns the sum of the expected events over all hypotheses; used in the EML fit scenario virtual Double_t getEventSum() const = 0; //! Prints the values of all the fit variables for the specified event - useful for diagnostics /*! \param [in] iEvt the event number */ virtual void printEventInfo(UInt_t iEvt) const; //! Same as printEventInfo, but printing out the values of the variables in the fit virtual void printVarsInfo() const; //! Update initial fit parameters if required virtual void checkInitFitParams() = 0; //! Setup saving of fit results to ntuple/LaTeX table etc. /*! \param [in] histFileName the file name for the output histograms \param [in] tableFileName the file name for the latex output file */ virtual void setupResultsOutputs( const TString& histFileName, const TString& tableFileName ); //! Package the initial fit parameters for transmission to the coordinator /*! \param [out] array the array to be filled with the LauParameter objects */ virtual void prepareInitialParArray( TObjArray& array ); //! Perform all finalisation actions /*! - Receive the results of the fit from the coordinator - Perform any finalisation routines - Package the finalised fit parameters for transmission back to the coordinator \param [in] fitStat the status of the fit, e.g. 
status code, EDM, NLL \param [in] parsFromCoordinator the parameters at the fit minimum \param [in] covMat the fit covariance matrix \param [out] parsToCoordinator the array to be filled with the finalised LauParameter objects */ virtual void finaliseExperiment( const LauAbsFitter::FitStatus& fitStat, const TObjArray* parsFromCoordinator, const TMatrixD* covMat, TObjArray& parsToCoordinator ); //! Write the results of the fit into the ntuple /*! \param [in] tableFileName the name of the output table file */ virtual void finaliseFitResults(const TString& tableFileName) = 0; //! Save the PDF plots for all the resonances of experiment number fitExp /*! \param [in] label prefix for the file name to be saved */ virtual void savePDFPlots(const TString& label) = 0; //! Save the PDF plots for the sum of resonances corresponding to a given spin, for experiment number fitExp /*! \param [in] label prefix for the file name to be saved \param [in] spin spin of the wave to be saved */ virtual void savePDFPlotsWave(const TString& label, const Int_t& spin) = 0; //! Write the latex table /*! \param [in] outputFile the name of the output file */ virtual void writeOutTable(const TString& outputFile) = 0; //! Store the per-event likelihood values virtual void storePerEvtLlhds() = 0; //! Calculate the sPlot data virtual void calculateSPlotData(); //! Make sure all parameters hold their genValue as the current value void setGenValues(); //! Method to set up the storage for background-related quantities called by setBkgndClassNames virtual void setupBkgndVectors() = 0; //! Check if the given background class is in the list /*! \param [in] className the name of the class to check \return true or false */ Bool_t validBkgndClass( const TString& className ) const; //! The number assigned to a background class /*! \param [in] className the name of the class to check \return the background class ID number */ UInt_t bkgndClassID( const TString& className ) const; //! Get the name of a background class from the number /*! \param [in] classID the ID number of the background class \return the class name */ const TString& bkgndClassName( UInt_t classID ) const; //! Setup the generation ntuple branches virtual void setupGenNtupleBranches() = 0; //! Add a branch to the gen tree for storing an integer /*! \param [in] name the name of the branch */ virtual void addGenNtupleIntegerBranch(const TString& name); //! Add a branch to the gen tree for storing a double /*! \param [in] name the name of the branch */ virtual void addGenNtupleDoubleBranch(const TString& name); //! Set the value of an integer branch in the gen tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setGenNtupleIntegerBranchValue(const TString& name, Int_t value); //! Set the value of a double branch in the gen tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setGenNtupleDoubleBranchValue(const TString& name, Double_t value); //! Get the value of an integer branch in the gen tree /*! \param [in] name the name of the branch \return the value of the parameter */ virtual Int_t getGenNtupleIntegerBranchValue(const TString& name) const; //! Get the value of a double branch in the gen tree /*! \param [in] name the name of the branch \return the value of the parameter */ virtual Double_t getGenNtupleDoubleBranchValue(const TString& name) const; //! Fill the gen tuple branches virtual void fillGenNtupleBranches(); //!
Setup the branches of the sPlot tuple virtual void setupSPlotNtupleBranches() = 0; //! Add a branch to the sPlot tree for storing an integer /*! \param [in] name the name of the branch */ virtual void addSPlotNtupleIntegerBranch(const TString& name); //! Add a branch to the sPlot tree for storing a double /*! \param [in] name the name of the branch */ virtual void addSPlotNtupleDoubleBranch(const TString& name); //! Set the value of an integer branch in the sPlot tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setSPlotNtupleIntegerBranchValue(const TString& name, Int_t value); //! Set the value of a double branch in the sPlot tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setSPlotNtupleDoubleBranchValue(const TString& name, Double_t value); //! Fill the sPlot tuple virtual void fillSPlotNtupleBranches(); //! Returns the names of all variables in the fit virtual LauSPlot::NameSet variableNames() const = 0; //! Returns the names and yields of species that are free in the fit virtual LauSPlot::NumbMap freeSpeciesNames() const = 0; //! Returns the names and yields of species that are fixed in the fit virtual LauSPlot::NumbMap fixdSpeciesNames() const = 0; //! Returns the species and variables for all 2D PDFs in the fit virtual LauSPlot::TwoDMap twodimPDFs() const = 0; //! Check if the signal is split into well-reconstructed and mis-reconstructed types virtual Bool_t splitSignal() const = 0; //! Check if the mis-reconstructed signal is to be smeared in the DP virtual Bool_t scfDPSmear() const = 0; //! Add the given parameter to the list of all fit parameters /*! \param [in] param the parameter \param [in] addFixed if true add the parameter even if it is not floating \return the number of parameters added */ UInt_t addFitParameters(LauParameter* param, const Bool_t addFixed = kFALSE); //! Add the given parameter(s) to the list of all fit parameters /*! \param [in] param the parameter (which may depend on other parameters) \param [in] addFixed if true add the parameter even if it is not floating \return the number of parameters added */ UInt_t addFitParameters(LauAbsRValue* param, const Bool_t addFixed = kFALSE); //! Add the given parameter(s) to the list of all fit parameters /*! \param [in] paramList a list of parameters \param [in] addFixed if true add the parameter even if it is not floating \return the number of parameters added */ UInt_t addFitParameters(LauParameterPList& paramList, const Bool_t addFixed = kFALSE); //! Add the given parameter(s) to the list of all fit parameters /*! \param [in] paramList a list of parameters \param [in] addFixed if true add the parameter even if it is not floating \return the number of parameters added */ UInt_t addFitParameters(LauAbsRValuePList& paramList, const Bool_t addFixed = kFALSE); //! Add parameters of the PDFs in the list to the list of all fit parameters /*! \param [in] pdfList a list of Pdfs \param [in] addFixed if true add the parameter even if it is not floating \return the number of parameters added */ UInt_t addFitParameters(LauPdfPList& pdfList, const Bool_t addFixed = kFALSE); //! Add the given parameter to the list of resonance parameters and the list of all fit parameters /*! \param [in] param a resonance parameter \return the number of parameters added */ UInt_t addResonanceParameters(LauParameter* param); //! Add the given parameter(s) to the list of resonance parameters and the list of all fit parameters /*! 
\param [in] parList a list of resonance parameters \return the number of parameters added */ UInt_t addResonanceParameters(LauParameterPList& paramList); //! Add parameters to the list of Gaussian constrained parameters void addConParameters(); //! Print the fit parameters for all PDFs in the list /*! \param [in] pdfList a list of Pdfs \param [in] fout the output stream to write to */ void printFitParameters(const LauPdfPList& pdfList, std::ostream& fout) const; //! Update the fit parameters for all PDFs in the list /*! \param [in] pdfList a list of Pdfs */ void updateFitParameters(LauPdfPList& pdfList); //! Have all PDFs in the list cache the data /*! \param [in] pdfList the list of pdfs \param [in] theData the data from the fit */ void cacheInfo(LauPdfPList& pdfList, const LauFitDataTree& theData); //! Calculate the product of the per-event likelihoods of the PDFs in the list /*! \param [in] pdfList the list of pdfs \param [in] iEvt the event number */ Double_t prodPdfValue(LauPdfPList& pdfList, UInt_t iEvt); //! Do any of the PDFs have a dependence on the DP? /*! \return the flag to indicated if there is a DP dependence */ Bool_t pdfsDependOnDP() const {return pdfsDependOnDP_;} //! Do any of the PDFs have a dependence on the DP? /*! \param [in] dependOnDP the flag to indicated if there is a DP dependence */ void pdfsDependOnDP(Bool_t dependOnDP) { pdfsDependOnDP_ = dependOnDP; } //! Const access to the fit variables const LauParameterPList& fitPars() const {return fitVars_;} //! Const access the fit variables which affect the DP normalisation const LauParameterPSet& resPars() const {return resVars_;} //! Const access the extra variables const LauParameterList& extraPars() const {return extraVars_;} //! Non-const access the extra variables LauParameterList& extraPars() {return extraVars_;} //! Const access the Gaussian constrained variables const LauAbsRValuePList& conPars() const {return conVars_;} //! Const access the gen ntuple const LauGenNtuple* genNtuple() const {return genNtuple_;} //! Access the gen ntuple LauGenNtuple* genNtuple() {return genNtuple_;} //! Const access the sPlot ntuple const LauGenNtuple* sPlotNtuple() const {return sPlotNtuple_;} //! Access the sPlot ntuple LauGenNtuple* sPlotNtuple() {return sPlotNtuple_;} //! Const access the data store const LauFitDataTree* fitData() const {return inputFitData_;} //! Access the data store LauFitDataTree* fitData() {return inputFitData_;} //! Imported parameters file name TString fixParamFileName_; //! Imported parameters tree name TString fixParamTreeName_; //! Map from imported parameter name to value std::map fixParamMap_; //! Imported parameter names std::set fixParamNames_; //! Whether to fix the loaded parameters (kTRUE) or leave them floating (kFALSE) Bool_t fixParams_; //! The set of parameters that are imported (either from a file or by value) and not // set to be fixed in the fit. In addition to those from fixParamNames_, these // include those imported from a file. std::set allImportedFreeParams_; private: //! Copy constructor (not implemented) LauAbsFitModel(const LauAbsFitModel& rhs); //! Copy assignment operator (not implemented) LauAbsFitModel& operator=(const LauAbsFitModel& rhs); // Various control booleans //! Option to make toy from 1st successful experiment Bool_t compareFitData_; //! Option to output a .C file of PDF's Bool_t savePDF_; //! Option to output a Latex format table Bool_t writeLatexTable_; //! Option to write sPlot data Bool_t writeSPlotData_; //! 
Option to store DP efficiencies in the sPlot ntuple Bool_t storeDPEff_; //! Option to randomise the initial values of the fit parameters Bool_t randomFit_; //! Option to perform an extended ML fit Bool_t emlFit_; //! Option to perform Poisson smearing Bool_t poissonSmear_; //! Option to enable embedding Bool_t enableEmbedding_; //! Option to include the DP as part of the fit Bool_t usingDP_; //! Option to state if pdfs depend on DP position Bool_t pdfsDependOnDP_; // Info on number of experiments and number of events //! Internal vector of fit parameters LauParameterPList fitVars_; //! Internal set of the same fit parameters (used to check uniqueness) LauParameterPSet fitVarsSet_; //! Internal set of fit parameters upon which the DP normalisation depends LauParameterPSet resVars_; //! Extra variables that aren't in the fit but are stored in the ntuple LauParameterList extraVars_; //! Internal vectors of Gaussian parameters LauAbsRValuePList conVars_; // Input data and output ntuple //! The input data LauFitDataTree* inputFitData_; //! The generated ntuple LauGenNtuple* genNtuple_; //! The sPlot ntuple LauGenNtuple* sPlotNtuple_; // Background class names //! The background class names LauBkgndClassMap bkgndClassNames_; //! An empty string const TString nullString_; // sFit related variables //! Option to perfom the sFit Bool_t doSFit_; //! The name of the sWeight branch TString sWeightBranchName_; //! The vector of sWeights std::vector sWeights_; //! The sWeight scaling factor Double_t sWeightScaleFactor_; // Fit timers //! The fit timer TStopwatch timer_; //! The total fit timer TStopwatch cumulTimer_; //! The output table name TString outputTableName_; // Comparison toy MC related variables //! The output file name for Toy MC TString fitToyMCFileName_; //! The output table name for Toy MC TString fitToyMCTableName_; //! The scaling factor (toy vs data statistics) UInt_t fitToyMCScale_; //! Option to perform Poisson smearing Bool_t fitToyMCPoissonSmear_; // sPlot related variables //! The name of the sPlot file TString sPlotFileName_; //! The name of the sPlot tree TString sPlotTreeName_; - //! Control the verbosity of the sFit - TString sPlotVerbosity_; + //! Control the verbosity of the sPlot calculations + LauOutputLevel sPlotVerbosity_; ClassDef(LauAbsFitModel,0) // Abstract interface to fit/toyMC model }; #endif diff --git a/inc/LauCalcChiSq.hh b/inc/LauCalcChiSq.hh index 7fec5cd..3a216c7 100644 --- a/inc/LauCalcChiSq.hh +++ b/inc/LauCalcChiSq.hh @@ -1,163 +1,165 @@ /* Copyright 2008 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ #include "TH2Poly.h" #include "TString.h" +#include "LauPrint.hh" + #include /*! \file LauCalcChiSq.hh \brief File containing declaration of LauCalcChiSq class. */ /*! \class LauCalcChiSq \brief Utility class to allow the calculation of the chisq of the fit to the Dalitz plot A utility class to allow the calculation of the chisq of the fit to the Dalitz plot. 
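A minimal usage sketch, mirroring the updated examples/CalcChiSq.cc and assuming the default config file name:
\code
#include "LauCalcChiSq.hh"

int main()
{
	// Construct the utility from its config file, optionally raise the print
	// level via the LauOutputLevel enum, then run the calculation
	LauCalcChiSq chiSq( "chiSqInput.txt" );
	chiSq.setPrintLevel( LauOutputLevel::Verbose ); // replaces the old setVerbose(kTRUE) call
	chiSq.run();
	return 0;
}
\endcode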
A text config file is provided that gives the datasets for the data and toy MC generated from the fit results. These can be in the traditional DP or the square DP. A sample config file is provided. The fields are: */ class LauCalcChiSq { public: //! Constructor /*! \param [in] inputFileName name of the config file */ LauCalcChiSq(const TString& inputFileName = "chiSqInput.txt"); //! Destructor virtual ~LauCalcChiSq(); - //! Toggle verbose printout + //! Control verbosity of printout /*! - \param [in] flag true to enable verbose printout, false to disable + \param [in] printLevel level of printout */ - inline void setVerbose(const Bool_t flag) {verbose_ = flag;} + inline void setPrintLevel(const LauOutputLevel printLevel) {printLevel_ = printLevel;} //! Run the calculations void run(); private: //! Read the config file, read the data and create histograms void initialiseHistos(); //! Choose the binning scheme /*! \param [in] xs x coordinates of low statistics sample \param [in] ys y coordinates of low statistics sample \param [in] nEntries number of entries in low statistics sample \param [out] divisions resulting binning scheme */ void pickBinning(const Double_t* xs, const Double_t* ys, const Int_t nEntries, std::vector& divisions); //! Create the template histogram based on the binning scheme /*! This function is called recursively to perform subdivisions \param [in] xMin the minimum x coordinate of the region to be (sub)divided \param [in] xMax the maximum x coordinate of the region to be (sub)divided \param [in] yMin the minimum y coordinate of the region to be (sub)divided \param [in] yMax the maximum y coordinate of the region to be (sub)divided \param [in] xs x coordinates of low statistics sample \param [in] ys y coordinates of low statistics sample \param [in] nEntries number of entries in low statistics sample \param [in] divisions the binning scheme \param [in] iter indicates depth of the subdivisions */ void getHisto(const Double_t xMin, const Double_t xMax, const Double_t yMin, const Double_t yMax, const Double_t* xs, const Double_t* ys, const Int_t nEntries, const std::vector& divisions, const UInt_t iter=0); //! Calculate the chisq from the data histograms void calculateChiSq(); //! Create plots void makePlots(); //! Name of the config file TString inputFileName_; //! Name of the low stats data file TString fileName1_; //! Name of the high stats data file TString fileName2_; //! Name of the low stats data tree TString treeName1_; //! Name of the high stats data tree TString treeName2_; //! Name of the x-coordinate branch in tree 1 TString xName1_; //! Name of the x-coordinate branch in tree 2 TString xName2_; //! Name of the y-coordinate branch in tree 1 TString yName1_; //! Name of the y-coordinate branch in tree 2 TString yName2_; //! The minimum bin content Float_t minContent_; //! Template histogram constructed from the binning scheme TH2Poly* theHisto_; //! Histogram (constructed from template) filled from tree 1 TH2Poly* histo1_; //! Histogram (constructed from template) filled from tree 2 TH2Poly* histo2_; //! Histogram (constructed from template) filled with pulls of tree1 vs tree2 TH2Poly* pullHisto_; //! Histogram (constructed from template) filled with chisq of tree1 vs tree2 TH2Poly* chiSqHisto_; //! Histogram (constructed from template) filled with signed chisq of tree1 vs tree2 TH2Poly* chiSqSignedHisto_; //! Minimum x coordinate of histograms Float_t xMin_; //! Maximum x coordinate of histograms Float_t xMax_; //! 
Minimum y coordinate of histograms Float_t yMin_; //! Maximum y coordinate of histograms Float_t yMax_; //! Number of free parameters in fit (used for calculating the ndof) Int_t nParams_; //! Scalefactor between low and high stats data samples Float_t scaleFactor_; - //! Verbose flag - Bool_t verbose_; + //! Verbosity flag + LauOutputLevel printLevel_; ClassDef(LauCalcChiSq,0) }; diff --git a/inc/LauPrint.hh b/inc/LauPrint.hh index 15e5013..bcb709a 100644 --- a/inc/LauPrint.hh +++ b/inc/LauPrint.hh @@ -1,70 +1,80 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauPrint.hh - \brief File containing declaration of LauPrint class. -*/ - -/*! \class LauPrint - \brief Class to define various output print commands. - - Class to define various output print commands (for output tables etc..) + \brief File containing declaration of LauPrint class and LauOutputLevel enum. */ #ifndef LAU_PRINT #define LAU_PRINT #include "Rtypes.h" #include +/*! \enum LauOutputLevel + \brief Enumeration to define verbosity level for various printouts +*/ + +enum class LauOutputLevel { + Quiet, //!< Reduced or zero printout + Standard, //!< Normal level of printout + Verbose //!< Verbose printout +}; + +/*! \class LauPrint + \brief Class to define various output print commands. + + Class to define various output print commands (for output tables etc..) +*/ + class LauPrint { public: //! Constructor LauPrint(); //! Destructor virtual ~LauPrint(); //! Method to choose the printing format to a specified level of precision /*! \param [in,out] stream the output stream to be printed to \param [in] value the double value to be printed */ void printFormat(std::ostream& stream, Double_t value) const; private: //! Copy constructor (not implemented) LauPrint(const LauPrint& rhs); //! Copy assignment operator (not implemented) LauPrint& operator=(const LauPrint& rhs); ClassDef(LauPrint,0) }; #endif diff --git a/inc/LauSPlot.hh b/inc/LauSPlot.hh index 3f90555..f6780f8 100644 --- a/inc/LauSPlot.hh +++ b/inc/LauSPlot.hh @@ -1,374 +1,376 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauSPlot.hh \brief File containing declaration of LauSPlot class. */ /*! \class LauSPlot \brief Class for defining the SPlot technique Class for defining the SPlot technique based on TSplot from ROOT by the following authors: Muriel Pivk, Anna Kreshuk (10/2005). 
(Original copyright notice below) Code extended to deal with the following two extra scenarios: - Extended sPlots (see appendix B of sPlot paper) - 2D PDFs, i.e. one PDF for 2 variables When performing a multidimensional fit, the sWeights are calculated excluding each dimension in turn and excluding none of the variables. This allows sPlots of the fit variables to be made by using the weight calculated when that variable is excluded, while variables not in the fit can be plotted from the complete information. */ /********************************************************************** * * * Copyright (c) 2005 ROOT Foundation, CERN/PH-SFT * * * **********************************************************************/ #ifndef LAU_SPLOT #define LAU_SPLOT #include #include #include #include "TMatrixD.h" #include "TString.h" +#include "LauPrint.hh" + class TEventList; class TLeaf; class TFile; class TTree; class LauSPlot : public TObject { public: //! Type to store names, e.g. of the discriminating/control variables typedef std::set NameSet; //! Type to associate a category name with a double precision number, e.g. a yield or PDF value for a given species typedef std::map NumbMap; //! Type to associate a variable name with the leaf of the input tree typedef std::map LeafMap; //! Type to associate the name of the species that have 2D PDFs with the names of the two variables involved in each such PDF typedef std::multimap< TString, std::pair > TwoDMap; //! Constructor /*! \param [in] fileName the name of the data file (containing the input tree) \param [in] treeName the name of the input tree (containing the per-event llhds) \param [in] firstExpt the first experiment \param [in] nExpt the number of experiments \param [in] variableNames the names of the discriminating variables \param [in] freeSpecies the species that are free to float in the fit \param [in] fixdSpecies the species that are fixed in the fit \param [in] twodimPDFs the species that have 2D PDFs and the name of the variables involved in each such PDF \param [in] sigSplit boolean flag to check whether the signal is split into Truth Matched and Self Cross Feed \param [in] scfDPSmeared boolean flag to check whether the SCF is smeared in the DP */ LauSPlot(const TString& fileName, const TString& treeName, Int_t firstExpt, Int_t nExpt, const NameSet& variableNames, const LauSPlot::NumbMap& freeSpecies, const LauSPlot::NumbMap& fixdSpecies, const TwoDMap& twodimPDFs, Bool_t sigSplit = kFALSE, Bool_t scfDPSmeared = kFALSE); //! Destructor virtual ~LauSPlot(); //! Method to calculate the sWeights and cN coeffs. /*! - \param [in] option control print level (default set to no print out)\n - "Q" - no print out (default)\n - "V" - prints the estimated # of events in species\n - "VV" - as "V" + the MINUIT printing + sums of weights for control + \param [in] printLevel control print level (default set to no print out)\n + LauOutputLevel::Quiet - no print out (default)\n + LauOutputLevel::Standard - prints the estimated # of events in species\n + LauOutputLevel::Verbose - as Standard + the MINUIT printing + sums of weights for control */ - void runCalculations(const TString& option = "q"); + void runCalculations(const LauOutputLevel printLevel = LauOutputLevel::Quiet); //! Save the sWeight results as a friend tree to the input tree (in the same file) void writeOutResults(); //! Access the per-event total PDF values for each species /*! \return the per-event total PDF values */ const std::vector& totalPdf() const {return pdfTot_;} protected: //! 
Check whether the input tree has been successfully read /*! \return true/false whether the task has been successfully performed */ Bool_t readInput() const {return readInput_;} //! Set that the input tree has been successfully read /*! \param [in] ok set that the input tree has been successfully read */ void readInput(Bool_t ok) {readInput_ = ok;} //! Check whether the signal is split into Truth Matched and Self Cross Feed /*! \return true/false whether the signal is split into TM and SCF */ Bool_t signalSplit() const {return signalSplit_;} //! Check whether the Self Cross Feed is smeared in the DP /*! \return true/false whether the Self Cross Feed is smeared in the DP */ Bool_t scfDPSmear() const {return scfDPSmear_;} //! Check whether the cN branches have been already created /*! \return true/false whether cN branches have been already created */ Bool_t definedCNBranches() const {return definedCNBranches_;} //! Set that the cN branches have been already defined /*! \param [in] defined set that the cN branches have been already defined */ void definedCNBranches(Bool_t defined) {definedCNBranches_ = defined;} //! Check whether the sWeights branches have been already created /*! \return true/false whether sWeights branches have been already created */ Bool_t definedSWeightBranches() const {return definedSWeightBranches_;} //! Set that the sWeights branches have been already defined /*! \param [in] defined set that the sWeights branches have been already defined */ void definedSWeightBranches(Bool_t defined) {definedSWeightBranches_ = defined;} //! Method to open the file in "update" mode and grab the input tree for reading void openInputFileAndTree(); //! Read the leaf structure from the tree and check the status of the read (calls LauSPlot::readInputLeaves and LauSPlot::checkLeaves) void readInputInfo(); //! Read the leaf structure from the tree and setup the leaf map Bool_t readInputLeaves(); //! Check whether the leaf structure makes sense given the PDFs we are expecting Bool_t checkLeaves() const; //! Create (if not already done) the tree for storing the cN coeffs void createCNTree(); //! Create the branches for each cN coefficient void defineCNBranches(); //! Create (if not already done) the tree for storing the sWeights void createSWeightTree(); //! Create the branches to store the sWeights void defineSWeightBranches(); //! Set the event list to contain only events from the given experiment /*! \param [in] iExpt the required experiment number */ void setExperiment(Int_t iExpt); //! Reads the values of each PDF likelihood for every event in the experiment void readExpt(); //! Make sure that we're using Minuit void checkFitter() const; //! Initialise Minuit, set the verbosity /*! - \param [in] opt option to set the print level (opt = "Q", "V" or "W" == "VV") + \param [in] printLevel control print level \see runCalculations */ - void initialiseFitter(const TString& opt); + void initialiseFitter(const LauOutputLevel printLevel); //! Add the species yields as fit parameters and fix them as appropriate void setFitParameters() const; //! Perform the minimisation wrt the yields alone void runFit(); - //! Update the yields with the newly fitted values and print them (unless print option is "Q"). + //! Update the yields with the newly fitted values and (optionally) print them /*!
- \param [in] opt option to set the print level (opt = "Q", "V" or "W") + \param [in] printLevel control print level \see runCalculations */ - void retrieveFittedParameters(const TString& opt); + void retrieveFittedParameters(const LauOutputLevel printLevel); //! Print the supplied covariance matrix or, if pointer is null, the one previously calculated /*! \param [in] covmat 2D array containing the covariance matrix elements */ void printCovMatrixElements(const Double_t * covmat = 0) const; //! Print the sum of sWeights for all species /*! \param [in] exclName the name of the excluded variable (or "none") */ void printSumOfWeights(const TString& exclName) const; //! Calculate the covariance matrix from the various PDFs /*! \return true if calculation succeeded or false in case the covariance matrix can not be inverted */ Bool_t calcCovMatrix(); //! Calculate the total likelihood for each species by multiplying together all the PDFs for that species /*! \param [in] exclName the name of the excluded variable (or "none") */ void calcTotPDFValues(const TString& exclName); //! Computes the cN for the extended sPlots from the covariance matrix /*! \param [in] exclName the name of the excluded variable (or "none") \param [in] covmat 2D array containing the covariance matrix elements */ void calcCNCoeffs(const TString& exclName, const Double_t * covmat = 0); //! Computes the sWeights from the PDFs and covariance matrix /*! \param [in] exclName the name of the excluded variable (or "none") \param [in] covmat 2D array containing the covariance matrix elements */ void calcSWeights(const TString& exclName, Double_t * covmat = 0); //! Copy the sWeight of a given event into LauSPlot::sWeightsCurrent_, from which they can be stored in the output tree /*! \param [in] iEvent the requested event */ void copyEventWeights(Int_t iEvent); //! Fill the cN branches void fillCNBranches(); //! Fill the sWeights branches void fillSWeightBranches(); //! Add the sWeightTree as a friend tree of the input tree void addFriendTree(); private: //! Copy constructor (not implemented) LauSPlot(const LauSPlot& rhs); //! Copy assignment operator (not implemented) LauSPlot& operator=(const LauSPlot& rhs); //! The name of the data file TString fileName_; //! The name of the input tree (containing the per-event llhds) TString inputTreeName_; //! The name of the cn tree (containing the cN coefficients) TString cnTreeName_; //! The name of the sweight tree (containing the sWeights) TString sweightTreeName_; //! Pointer to the data file object TFile* file_; //! Pointer to the input tree TTree* inputTree_; //! Pointer to the output tree containing the cN coefficients TTree* cnTree_; //! Pointer to the output tree containing the sWeights TTree* sweightTree_; //! Pointer to an event list that is used to loop through the experiments TEventList* eventList_; //! Collection to hold pointers to the leaves of the input tree LeafMap leaves_; //! The names of the discriminating variables NameSet variableNames_; //! The names and estimated yields of the free species NumbMap freeSpecies_; //! The names and estimated yields of the fixed species NumbMap fixdSpecies_; //! The names and estimated yields of the free species - need to keep the original values NumbMap origFreeSpecies_; //! The names and estimated yields of the fixed species - need to keep the original values NumbMap origFixdSpecies_; //! The names of the species that have 2D PDFs and the names of the variables involved TwoDMap twodimPDFs_; //! Is the signal split into TM and SCF?
const Bool_t signalSplit_; //! If so then is the SCF smeared in the DP? const Bool_t scfDPSmear_; //! Flag whether the input tree has been successfully read Bool_t readInput_; //! Flag whether the cN branches have already been created Bool_t definedCNBranches_; //! Flag whether the sWeights branches have already been created Bool_t definedSWeightBranches_; //! First experiment Int_t firstExpt_; //! Number of experiments Int_t nExpt_; //! The current experiment Int_t iExpt_; //! Number of events in current experiment Int_t nEvents_; //! Number of discriminating variables Int_t nDiscVars_; //! Number of species free to float in the fit Int_t nFreeSpecies_; //! Number of species fixed in the fit Int_t nFixdSpecies_; //! Total number of species (free + fixed) Int_t nSpecies_; //! The per-event values of the total PDF for each species std::vector pdfTot_; //! The per-event values of the PDFs for each species for each disc variable std::vector > discPdf_; //! The per-event values of the SCF fraction std::vector scfFrac_; //! The calculated covariance matrix TMatrixD covMat_; //! The per-event values of the computed sWeights (for each species and for each combination of excluded vars) std::vector< std::map > sWeights_; //! The current-event values of the computed sWeights std::map sWeightsCurrent_; //! The extended sPlot coefficients (for each species and for each combination of excluded vars) std::map cN_; ClassDef(LauSPlot, 0) }; #endif diff --git a/inc/LauSplineDecayTimeEfficiency.hh b/inc/LauSplineDecayTimeEfficiency.hh index bcb8625..47be645 100644 --- a/inc/LauSplineDecayTimeEfficiency.hh +++ b/inc/LauSplineDecayTimeEfficiency.hh @@ -1,406 +1,418 @@ /* Copyright 2021 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauSplineDecayTimeEfficiency.hh \brief File containing declaration of LauSplineDecayTimeEfficiency class. */ /*! \class LauSplineDecayTimeEfficiency \brief Class for defining a spline-interpolated model for decay time efficiency */ #ifndef LAU_SPLINE_DECAYTIME_EFFICIENCY #define LAU_SPLINE_DECAYTIME_EFFICIENCY #include #include #include #include "Rtypes.h" #include "TFitResult.h" +#include "TMath.h" #include "TSystem.h" #include "Lau1DCubicSpline.hh" #include "LauAbsDecayTimeEfficiency.hh" #include "LauParameter.hh" class TH1; //! Defines the allowed orders of spline polynomials enum class LauSplineOrder : std::size_t { Cubic = 3, //!< 3rd order Sixth = 6 //!< 6th order }; template class LauSplineDecayTimeEfficiency : public LauAbsDecayTimeEfficiency { public: //! Constructor for a Cubic spline /*! \param [in] prefix the prefix to use when forming the names of the knot parameters \param [in] effSpline the cubic spline to be used to represent the efficiency variation */ template = true> LauSplineDecayTimeEfficiency( const TString& prefix, std::unique_ptr effSpline ) : prefix_{prefix}, effSpline_{std::move(effSpline)} { this->initialSetup(); } //! Constructor for a Sixth order spline /*! 
\param [in] prefix the prefix to use when forming the names of the knot parameters \param [in] effSpline the cubic spline being used to represent the efficiency variation in another channel \param [in] correctionSpline the cubic spline to be used to represent the correction to effSpline to obtain the efficiency variation for this channel */ template = true> LauSplineDecayTimeEfficiency( const TString& prefix, const LauSplineDecayTimeEfficiency& effSpline, std::unique_ptr correctionSpline ) : prefix_{prefix}, effSpline_{std::move(correctionSpline)}, otherSpline_{std::make_unique>(effSpline)} { this->initialSetup(); } //! Destructor ~LauSplineDecayTimeEfficiency() = default; //! Move constructor (default) LauSplineDecayTimeEfficiency(LauSplineDecayTimeEfficiency&& other) = default; //! Move assignment operator (default) LauSplineDecayTimeEfficiency& operator=(LauSplineDecayTimeEfficiency&& other) = default; //! Copy constructor (custom) LauSplineDecayTimeEfficiency(const LauSplineDecayTimeEfficiency& other); //! Copy assignment operator (deleted) LauSplineDecayTimeEfficiency& operator=(const LauSplineDecayTimeEfficiency& other) = delete; //! The number of coefficients of each spline segment static constexpr std::size_t nCoeffs { static_cast>(Order) + 1 }; //! Retrieve the efficiency for a given value of the decay time /*! \param abscissa the value of the decay time \return the efficiency */ Double_t getEfficiency( const Double_t abscissa ) const override; //! Retrieve the parameters of the efficiency model so that they can be loaded into a fit /*! \return the parameters of the efficiency model */ std::vector getParameters() override { return params_; } //! Initialise the parameter cache /*! Must be called prior to starting fitting or generation */ void initialise() override; //! Propagate any updates to parameters and recalculate information as neeeded /*! Should be called at each fit iteration */ void propagateParUpdates() override; //! Retrieve the number of segments in the spline std::size_t nSegments() const { return effSpline_->getnKnots() - 1; } //! Retrieve the positions of the spline knots const std::vector& knotPositions() const { return effSpline_->getXValues(); } //! Fix all knots void fixKnots(); //! Float all knots void floatKnots(); //! Fix or float a specific knot /*! \param knotIndex the index of the knot to fix/float \param fixed true if knot to be fixed, false if knot to be floated */ void fixKnot( const std::size_t knotIndex, const bool fixed ); //! Retrieve the polynomial coefficients for the given spline segment /*! \param segmentIndex the index of the spline segment \return the polynomial coefficients */ std::array getCoefficients( const std::size_t segmentIndex ) const; //! Retrieve whether any of the parameters are floating in the fit bool anythingFloating() const { return anyKnotFloating_; } //! Retrieve whether any of the parameters have changed in the latest fit iteration const bool& anythingChanged() const override { return anyKnotChanged_; } //! Fit our spline to a TH1 /*! Useful to obtain reasonable starting values and uncertainties for the knot parameters - \params [in] The histogram to be fit to + \param [in] hist the histogram to be fit to + \param [in] printLevel the level of printout desired from fit */ - void fitToTH1( TH1* hist ); + void fitToTH1( TH1* hist, const LauOutputLevel printLevel = LauOutputLevel::Quiet ); private: //! 
Perform the initial setup - shared by the various constructors void initialSetup( const LauSplineDecayTimeEfficiency* other = nullptr ); //! Update the cached parameter values void updateParameterCache(); //! The prefix for the parameter names const TString prefix_; //! The spline std::unique_ptr effSpline_; //! The other spline std::unique_ptr> otherSpline_; //! The spline parameters std::vector> ownedParams_; //! The spline parameters (for giving access) std::vector params_; //! The spline values std::vector values_; //! Are any of our knot parameters floating in the fit? bool ourKnotsFloating_{false}; //! Are any of the other spline's knot parameters floating in the fit? bool otherKnotsFloating_{false}; //! Are any of the knot parameters floating in the fit? bool anyKnotFloating_{false}; //! Have any of our knot parameters changed in the latest fit iteration? bool ourKnotsChanged_{false}; //! Have any of the other spline's knot parameters changed in the latest fit iteration? bool otherKnotsChanged_{false}; //! Have any of the knot parameters changed in the lastest fit iteration? bool anyKnotChanged_{false}; ClassDefOverride(LauSplineDecayTimeEfficiency, 0) }; templateClassImp(LauSplineDecayTimeEfficiency); template void LauSplineDecayTimeEfficiency::initialSetup( const LauSplineDecayTimeEfficiency* other ) { if ( not effSpline_ ) { std::cerr<<"ERROR in LauSplineDecayTimeEfficiency::initialSetup : supplied efficiency spline pointer is null"<Exit(EXIT_FAILURE); } if constexpr ( Order == LauSplineOrder::Sixth ) { std::vector ourKnots { effSpline_->getXValues() }; std::vector otherKnots { otherSpline_->knotPositions() }; if ( ourKnots != otherKnots ) { std::cerr<<"ERROR in LauSplineDecayTimeEfficiency::initialSetup : mismatch in knot positions"<Exit(EXIT_FAILURE); } // TODO - any other checks we need to do? } values_ = effSpline_->getYValues(); const std::size_t nPars { values_.size() }; ownedParams_.reserve( nPars ); if ( other ) { for ( auto& ptr : other->ownedParams_ ) { // TODO - maybe need a way to give these a unique prefix? ownedParams_.emplace_back( ptr->createClone() ); } } else { for( std::size_t index{0}; index < nPars; ++index ) { constexpr Double_t maxVal { (Order == LauSplineOrder::Cubic) ? 1.0 : 2.0 }; ownedParams_.emplace_back( std::make_unique( Form( "%s_knot_%lu", prefix_.Data(), index ), values_[index], 0.0, maxVal, kTRUE ) ); } } if constexpr ( Order == LauSplineOrder::Sixth ) { params_ = otherSpline_->getParameters(); params_.reserve( 2*nPars ); } else { params_.reserve( nPars ); } for ( auto& ptr : ownedParams_ ) { params_.push_back( ptr.get() ); } } template LauSplineDecayTimeEfficiency::LauSplineDecayTimeEfficiency(const LauSplineDecayTimeEfficiency& other) : prefix_{ other.prefix_ }, effSpline_{ std::make_unique( * other.effSpline_ ) }, otherSpline_{ other.otherSpline_ ? 
std::make_unique>( *other.otherSpline_ ) : nullptr } { this->initialSetup( &other ); } template void LauSplineDecayTimeEfficiency::fixKnots() { for ( auto& ptr : ownedParams_ ) { ptr->fixed(true); } } template void LauSplineDecayTimeEfficiency::floatKnots() { for ( auto& ptr : ownedParams_ ) { ptr->fixed(false); } } template void LauSplineDecayTimeEfficiency::fixKnot( const std::size_t knotIndex, const bool fixed ) { if ( knotIndex >= ownedParams_.size() ) { std::cerr<<"ERROR in LauSplineDecayTimeEfficiency::fixKnot : supplied knot index is out of range"<fixed(fixed); } template Double_t LauSplineDecayTimeEfficiency::getEfficiency( const Double_t abscissa ) const { Double_t eff { effSpline_->evaluate( abscissa ) }; if constexpr ( Order == LauSplineOrder::Sixth ) { eff *= otherSpline_->getEfficiency( abscissa ); } return eff; } template std::array::nCoeffs> LauSplineDecayTimeEfficiency::getCoefficients( const std::size_t segmentIndex ) const { if constexpr ( Order == LauSplineOrder::Cubic ) { return effSpline_->getCoefficients(segmentIndex); } else { std::array ourCoeffs { effSpline_->getCoefficients(segmentIndex) }; std::array otherCoeffs { otherSpline_->getCoefficients(segmentIndex) }; std::array coeffs; coeffs[0] = ourCoeffs[0] * otherCoeffs[0]; coeffs[1] = ourCoeffs[0] * otherCoeffs[1] + ourCoeffs[1] * otherCoeffs[0]; coeffs[2] = ourCoeffs[0] * otherCoeffs[2] + ourCoeffs[1] * otherCoeffs[1] + ourCoeffs[2] * otherCoeffs[0]; coeffs[3] = ourCoeffs[0] * otherCoeffs[3] + ourCoeffs[1] * otherCoeffs[2] + ourCoeffs[2] * otherCoeffs[1] + ourCoeffs[3] * otherCoeffs[0]; coeffs[4] = ourCoeffs[1] * otherCoeffs[3] + ourCoeffs[2] * otherCoeffs[2] + ourCoeffs[3] * otherCoeffs[1]; coeffs[5] = ourCoeffs[2] * otherCoeffs[3] + ourCoeffs[3] * otherCoeffs[2]; coeffs[6] = ourCoeffs[3] * otherCoeffs[3]; return coeffs; } } template void LauSplineDecayTimeEfficiency::initialise() { static auto isFixed = []( const std::unique_ptr& par ){ return par->fixed(); }; anyKnotFloating_ = ourKnotsFloating_ = not std::all_of( ownedParams_.begin(), ownedParams_.end(), isFixed ); if constexpr ( Order == LauSplineOrder::Sixth ) { otherSpline_->initialise(); otherKnotsFloating_ = otherSpline_->anythingFloating(); anyKnotFloating_ |= otherKnotsFloating_; } this->updateParameterCache(); } template void LauSplineDecayTimeEfficiency::updateParameterCache() { static auto assignValue = []( const std::unique_ptr& par ){ return par->unblindValue(); }; // Update our local cache std::transform( ownedParams_.begin(), ownedParams_.end(), values_.begin(), assignValue ); // Update the spline itself effSpline_->updateYValues( values_ ); } template void LauSplineDecayTimeEfficiency::propagateParUpdates() { // If none of the parameters are floating there's nothing to do if ( not anyKnotFloating_ ) { return; } // Otherwise, determine which of the floating parameters have changed (if any) and act accordingly if ( ourKnotsFloating_ ) { static auto checkEquality = []( const std::unique_ptr& par, const Double_t cacheVal ){ return par->unblindValue() == cacheVal; }; anyKnotChanged_ = ourKnotsChanged_ = not std::equal( ownedParams_.begin(), ownedParams_.end(), values_.begin(), checkEquality); // Update the acceptance spline if any of the knot values have changed if ( ourKnotsChanged_ ) { this->updateParameterCache(); } } if constexpr ( Order == LauSplineOrder::Sixth ) { if ( otherKnotsFloating_ ) { otherSpline_->propagateParUpdates(); otherKnotsChanged_ = otherSpline_->anythingChanged(); anyKnotChanged_ |= otherKnotsChanged_; } } } template -void 
LauSplineDecayTimeEfficiency::fitToTH1( TH1* hist ) +void LauSplineDecayTimeEfficiency::fitToTH1( TH1* hist, const LauOutputLevel printLevel ) { - TFitResultPtr fitResult { effSpline_->fitToTH1( hist ) }; + std::cout << "INFO in LauSplineDecayTimeEfficiency::fitToTH1 : fitting " << prefix_ << " spline to supplied histogram \"" << hist->GetName() << "\"\n"; + std::cout << " : to obtain initial estimates of parameter values and errors" << std::endl; + + TFitResultPtr fitResult { effSpline_->fitToTH1( hist, printLevel ) }; + if ( fitResult->Status() != 0 || fitResult->CovMatrixStatus() != 3 ) { + std::cout << "WARNING in LauSplineDecayTimeEfficiency::fitToTH1 : fit failed with status " << fitResult->Status() << " and covariance matrix status " << fitResult->CovMatrixStatus() << "\n"; + std::cout << " : will not update spline parameters based on this fit result" << std::endl; + return; + } const std::size_t nKnots { ownedParams_.size() }; for ( std::size_t knot{0}; knot < nKnots; ++knot ) { const Double_t knotVal { fitResult->Value(knot) }; const Double_t knotErr { fitResult->Error(knot) }; + std::cout << " : Setting parameter " << ownedParams_[knot]->name() << " to " << knotVal << " +/- " << knotErr << std::endl; + ownedParams_[knot]->value( knotVal ); ownedParams_[knot]->genValue( knotVal ); ownedParams_[knot]->initValue( knotVal ); ownedParams_[knot]->error( knotErr ); } this->updateParameterCache(); } #endif diff --git a/src/Lau1DCubicSpline.cc b/src/Lau1DCubicSpline.cc index 251d057..0cf449c 100644 --- a/src/Lau1DCubicSpline.cc +++ b/src/Lau1DCubicSpline.cc @@ -1,532 +1,565 @@ /* Copyright 2015 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file Lau1DCubicSpline.cc \brief File containing implementation of Lau1DCubicSpline class. 
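    A minimal usage sketch (illustrative knot values only; assumes the knot
    containers are std::vector<Double_t>, as used throughout this file):

    \code
    std::vector<Double_t> xs { 0.0, 1.0, 2.0, 3.0 };
    std::vector<Double_t> ys { 0.1, 0.4, 0.3, 0.2 };

    Lau1DCubicSpline spline( xs, ys,
                             Lau1DCubicSpline::StandardSpline,
                             Lau1DCubicSpline::NotAKnot,
                             Lau1DCubicSpline::NotAKnot );

    const Double_t val { spline.evaluate( 1.5 ) };
    \endcode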
*/ #include #include #include #include #include #include "Lau1DCubicSpline.hh" #include "LauAbsRValue.hh" #include "LauParameter.hh" ClassImp(Lau1DCubicSpline) Lau1DCubicSpline::Lau1DCubicSpline(const std::vector& xs, const std::vector& ys, LauSplineType type, LauSplineBoundaryType leftBound, LauSplineBoundaryType rightBound, Double_t dydx0, Double_t dydxn) : nKnots_(xs.size()), x_(xs), y_(ys), type_(type), leftBound_(leftBound), rightBound_(rightBound), dydx0_(dydx0), dydxn_(dydxn) { init(); } Double_t Lau1DCubicSpline::evaluate(Double_t x) const { // do not attempt to extrapolate the spline if( xx_[nKnots_-1] ) { std::cerr << "WARNING in Lau1DCubicSpline::evaluate : function is only defined between " << x_[0] << " and " << x_[nKnots_-1] << std::endl; std::cerr << " value at " << x << " returned as 0" << std::endl; return 0.; } // first determine which 'cell' of the spline x is in // cell i runs from knot i to knot i+1 Int_t cell(0); while( x > x_[cell+1] ) { ++cell; } // obtain x- and y-values of the neighbouring knots Double_t xLow = x_[cell]; Double_t xHigh = x_[cell+1]; Double_t yLow = y_[cell]; Double_t yHigh = y_[cell+1]; if(type_ == Lau1DCubicSpline::LinearInterpolation) { return yHigh*(x-xLow)/(xHigh-xLow) + yLow*(xHigh-x)/(xHigh-xLow); } // obtain t, the normalised x-coordinate within the cell, // and the coefficients a and b, which are defined in cell i as: // // a_i = k_i *(x_i+1 - x_i) - (y_i+1 - y_i), // b_i = -k_i+1*(x_i+1 - x_i) + (y_i+1 - y_i) // // where k_i is (by construction) the first derivative at knot i Double_t t = (x - xLow) / (xHigh - xLow); Double_t a = dydx_[cell] * (xHigh - xLow) - (yHigh - yLow); Double_t b = -1.*dydx_[cell+1] * (xHigh - xLow) + (yHigh - yLow); Double_t retVal = (1 - t) * yLow + t * yHigh + t * (1 - t) * ( a * (1 - t) + b * t ); return retVal; } void Lau1DCubicSpline::updateYValues(const std::vector& ys) { y_ = ys; this->calcDerivatives(); } void Lau1DCubicSpline::updateYValues(const std::vector& ys) { for (std::size_t i=0; iunblindValue(); } this->calcDerivatives(); } void Lau1DCubicSpline::updateYValues(const std::vector& ys) { for (std::size_t i=0; iunblindValue(); } this->calcDerivatives(); } void Lau1DCubicSpline::updateType(LauSplineType type) { if(type_ != type) { type_ = type; this->calcDerivatives(); } } void Lau1DCubicSpline::updateBoundaryConditions(LauSplineBoundaryType leftBound, LauSplineBoundaryType rightBound, Double_t dydx0, Double_t dydxn) { bool updateDerivatives{false}; if(leftBound_ != leftBound || rightBound_ != rightBound) { leftBound_ = leftBound; rightBound_ = rightBound; updateDerivatives = true; } if(dydx0_ != dydx0) { dydx0_ = dydx0; if(leftBound_ == Lau1DCubicSpline::Clamped) updateDerivatives = true; } if(dydxn_ != dydxn) { dydxn_ = dydxn; if(rightBound_ == Lau1DCubicSpline::Clamped) updateDerivatives = true; } if(updateDerivatives) { this->calcDerivatives(); } } -std::array Lau1DCubicSpline::getCoefficients(const std::size_t i, const bool normalise) const +std::array Lau1DCubicSpline::getCoefficients(const std::size_t segIndex, const bool normalise) const { std::array result = {0.,0.,0.,0.}; - if(i >= nKnots_-1) + if(segIndex >= nKnots_-1) { std::cerr << "ERROR in Lau1DCubicSpline::getCoefficients requested for too high a knot value" << std::endl; return result; } - Double_t xL = x_[i] , xH = x_[i+1]; - Double_t yL = y_[i] , yH = y_[i+1]; + Double_t xL = x_[segIndex] , xH = x_[segIndex+1]; + Double_t yL = y_[segIndex] , yH = y_[segIndex+1]; Double_t h = xH-xL; //This number comes up a lot switch(type_) { case 
Lau1DCubicSpline::StandardSpline: case Lau1DCubicSpline::AkimaSpline: { - Double_t kL = dydx_[i], kH = dydx_[i+1]; + Double_t kL = dydx_[segIndex], kH = dydx_[segIndex+1]; //a and b based on definitions from https://en.wikipedia.org/wiki/Spline_interpolation#Algorithm_to_find_the_interpolating_cubic_spline Double_t a = kL*h-(yH-yL); Double_t b =-kH*h+(yH-yL); Double_t denom = -h*h*h;//The terms have a common demoninator result[0] = -b*xL*xL*xH + a*xL*xH*xH + h*h*(xL*yH - xH*yL); result[1] = -a*xH*(2*xL+xH) + b*xL*(xL+2*xH) + h*h*(yL-yH); result[2] = -b*(2*xL+xH) + a*(xL+2*xH); result[3] = -a+b; for(auto& res : result){res /= denom;} break; } /* case Lau1DCubicSpline::AkimaSpline: // Double check the Akima description of splines (in evaluate) right now they're the same except for the first derivatives { //using fomulae from https://asmquantmacro.com/2015/09/01/akima-spline-interpolation-in-excel/ std::function m = [&](Int_t j) //formula to get the straight line gradient { if(j < 0){return 2*m(j+1)-m(j+2);} if(j >= nKnots_){return 2*m(j-1)-m(j-2);} return (y_[j+1]-y_[j]) / (x_[j+1]-x_[j]); }; auto t = [&](Int_t j) { Double_t res = 0.; //originally res was called 't' but that produced a shadow warning Double_t denom = TMath::Abs(m(j+1)-m(j)) + TMath::Abs(m(j-1)-m(j-2)); if(denom == 0){res = (m(j)-m(j-1))/2;} //Special case for when denom = 0 else { res = TMath::Abs(m(j+1)-m(j))*m(j-1) + TMath::Abs(m(j-1)-m(j-2))*m(j); res /= denom; } return res; }; //These are the p's to get the spline in the form p_k(x-xL)^k - Double_t pDenom = x_[i+1]/x_[i]; //a denominator used for p[2] and p[3] - std::array p = {y_[i],t(i),0.,0.}; //we'll do the last 2 below + Double_t pDenom = x_[segIndex+1]/x_[segIndex]; //a denominator used for p[2] and p[3] + std::array p = {y_[segIndex],t(segIndex),0.,0.}; //we'll do the last 2 below - p[2] = 3*m(i)-2*t(i)-t(i+1); + p[2] = 3*m(segIndex)-2*t(segIndex)-t(segIndex+1); p[2]/= pDenom; - p[3] = t(i)+t(i+1)-2*m(i); + p[3] = t(segIndex)+t(segIndex+1)-2*m(segIndex); p[3]/= pDenom*pDenom; //Now finally rearranging the p's into the desired results result[0] = p[0]-p[1]*xL+p[2]*xL*xL-p[3]*xL*xL*xL; result[1] = p[1]-2*p[2]*xL+3*p[3]*xL*xL; result[2] = p[2]-3*p[3]*xL; result[3] = p[3]; break; }*/ case Lau1DCubicSpline::LinearInterpolation: { result[0] = xH*yL-xL*yH; result[1] = yH-yL; for(auto& res : result){res /= h;} break; } } if(normalise) { Double_t integral = this->integral(); for(auto& res : result){res /= integral;} } return result; } Double_t Lau1DCubicSpline::integral() const { Double_t integral{0.0}; for(std::size_t iKnot{0}; iKnot < nKnots_ -1; ++iKnot) { const Double_t minAbs { x_[iKnot] }; const Double_t maxAbs { x_[iKnot+1] }; const std::array coeffs { this->getCoefficients(iKnot, false) }; auto integralFunc = [&coeffs](Double_t x){return coeffs[0]*x + coeffs[1]*x*x/2.0 + coeffs[2]*x*x*x/3.0 + coeffs[3]*x*x*x*x/4.0;}; integral += integralFunc(maxAbs); integral -= integralFunc(minAbs); } return integral; } TF1* Lau1DCubicSpline::makeTF1(const bool normalise) const { TString functionString{""}; //make a long piecewise construction of all the spline pieces - for(std::size_t i{0}; i < nKnots_-1; ++i) + for(std::size_t segIndex{0}; segIndex < nKnots_-1; ++segIndex) { - const std::array coeffs { this->getCoefficients(i,normalise) }; - functionString += Form("(x>%f && x<= %f)*",x_[i],x_[i+1]);//get the bounds of this piece + const std::array coeffs { this->getCoefficients(segIndex,normalise) }; + functionString += Form("(x>%f && x<= %f)*",x_[segIndex],x_[segIndex+1]);//get 
the bounds of this piece functionString += Form("(%f + %f*x + %f*x^2 + %f*x^3)",coeffs[0],coeffs[1],coeffs[2],coeffs[3]); - if(i < nKnots_ -2){functionString += " + \n";}//add to all lines except the last + if(segIndex < nKnots_ -2){functionString += " + \n";}//add to all lines except the last } TF1* func = new TF1("SplineFunction", functionString, x_.front(), x_.back()); return func; } -TF1* Lau1DCubicSpline::normaliseToTH1(TH1* hist) const +TF1* Lau1DCubicSpline::normaliseToTH1(TH1* hist, LauOutputLevel printLevel) const { //first define the fit function auto fitf = [this](Double_t* x, Double_t* par) {//there is only 1 x (the abscissa) and 1 par (a scaling of the entire thing) return this->evaluate( x[0] ) * par[0]; }; //Make the function TF1* FittedFunc = new TF1("FittedFunc",fitf,x_.front(),x_.back(),1); //Set the parameter name FittedFunc -> SetParNames("Constant"); //Set the parameter limits and default value FittedFunc -> SetParLimits(0,0.,10.); FittedFunc -> SetParameter(0,1.); - hist->Fit("FittedFunc","N O V"); + // Options to: + // - N = do not store the graphics function + // - 0 = do not plot the fit result + TString fitOptions {"N 0"}; + + switch ( printLevel ) { + case LauOutputLevel::Verbose : + fitOptions += " V"; + break; + case LauOutputLevel::Quiet : + fitOptions += " Q"; + break; + case LauOutputLevel::Standard : + break; + } + + hist->Fit( "FittedFunc", fitOptions ); return FittedFunc; } -TFitResultPtr Lau1DCubicSpline::fitToTH1(TH1* hist) +TFitResultPtr Lau1DCubicSpline::fitToTH1(TH1* hist, LauOutputLevel printLevel) { auto fitf = [this](Double_t* x, Double_t* par) { this -> updateYValues( std::vector(par, par + nKnots_) ); return this -> evaluate( x[0] ); }; TF1 FittedFunc("FittedFunc",fitf,x_.front(),x_.back(),nKnots_); const Double_t knotMax { hist->GetMaximum() * 1.5 }; for(std::size_t knot{0}; knot <= nKnots_ ; ++knot) { FittedFunc.SetParName(knot, Form("Knot%lu",knot)); FittedFunc.SetParLimits(knot, 0., knotMax); FittedFunc.SetParameter(knot, y_[knot]); } - return hist->Fit("FittedFunc","N O V S"); + // Options to: + // - N = do not store the graphics function + // - 0 = do not plot the fit result + // - S = save the result of the fit + TString fitOptions {"N 0 S"}; + + switch ( printLevel ) { + case LauOutputLevel::Verbose : + fitOptions += " V"; + break; + case LauOutputLevel::Quiet : + fitOptions += " Q"; + break; + case LauOutputLevel::Standard : + break; + } + + return hist->Fit( "FittedFunc", fitOptions ); } void Lau1DCubicSpline::init() { if( y_.size() != x_.size()) { std::cerr << "ERROR in Lau1DCubicSpline::init : The number of y-values given does not match the number of x-values" << std::endl; std::cerr << " Found " << y_.size() << ", expected " << x_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } if( nKnots_ < 3 ) { std::cerr << "ERROR in Lau1DCubicSpline::init : The number of knots is too small" << std::endl; std::cerr << " Found " << nKnots_ << ", expected at least 3 (to have at least 1 internal knot)" << std::endl; gSystem->Exit(EXIT_FAILURE); } dydx_.assign(nKnots_,0.0); a_.assign(nKnots_,0.0); b_.assign(nKnots_,0.0); c_.assign(nKnots_,0.0); d_.assign(nKnots_,0.0); this->calcDerivatives(); } void Lau1DCubicSpline::calcDerivatives() { switch ( type_ ) { case Lau1DCubicSpline::StandardSpline : this->calcDerivativesStandard(); break; case Lau1DCubicSpline::AkimaSpline : this->calcDerivativesAkima(); break; case Lau1DCubicSpline::LinearInterpolation : //derivatives not needed for linear interpolation break; } } void 
Lau1DCubicSpline::calcDerivativesStandard() { // derivatives are determined such that the second derivative is continuous at internal knots // derivatives, k_i, are the solutions to a set of linear equations of the form: // a_i * k_i-1 + b_i * k+i + c_i * k_i+1 = d_i with a_0 = 0, c_n-1 = 0 // this is solved using the tridiagonal matrix algorithm as on en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm // first and last equations give boundary conditions // - for natural boundary, require f''(x) = 0 at end knot // - for 'not a knot' boundary, require f'''(x) continuous at second knot // - for clamped boundary, require predefined value of f'(x) at end knot // non-zero values of a_0 and c_n-1 would give cyclic boundary conditions a_[0] = 0.; c_[nKnots_-1] = 0.; // set left boundary condition if(leftBound_ == Lau1DCubicSpline::Natural) { b_[0] = 2./(x_[1]-x_[0]); c_[0] = 1./(x_[1]-x_[0]); d_[0] = 3.*(y_[1]-y_[0])/((x_[1]-x_[0])*(x_[1]-x_[0])); } else if(leftBound_ == Lau1DCubicSpline::NotAKnot) { // define the width, h, and the 'slope', delta, of the first cell Double_t h1(x_[1]-x_[0]), h2(x_[2]-x_[0]); Double_t delta1((y_[1]-y_[0])/h1), delta2((y_[2]-y_[1])/h2); // these coefficients can be determined by requiring f'''_0(x_1) = f'''_1(x_1) // the requirement f''_0(x_1) = f''_1(x_1) has been used to remove the dependence on k_2 b_[0] = h2; c_[0] = h1+h2; d_[0] = delta1*(2.*h2*h2 + 3.*h1*h2)/(h1+h2) + delta2*5.*h1*h1/(h1+h2); } else { //Clamped b_[0] = 1.; c_[0] = 0.; d_[0] = dydx0_; } // set right boundary condition if(rightBound_ == Lau1DCubicSpline::Natural) { a_[nKnots_-1] = 1./(x_[nKnots_-1]-x_[nKnots_-2]); b_[nKnots_-1] = 2./(x_[nKnots_-1]-x_[nKnots_-2]); d_[nKnots_-1] = 3.*(y_[nKnots_-1]-y_[nKnots_-2])/((x_[nKnots_-1]-x_[nKnots_-2])*(x_[nKnots_-1]-x_[nKnots_-2])); } else if(rightBound_ == Lau1DCubicSpline::NotAKnot) { // define the width, h, and the 'slope', delta, of the last cell Double_t hnm1(x_[nKnots_-1]-x_[nKnots_-2]), hnm2(x_[nKnots_-2]-x_[nKnots_-3]); Double_t deltanm1((y_[nKnots_-1]-y_[nKnots_-2])/hnm1), deltanm2((y_[nKnots_-2]-y_[nKnots_-3])/hnm2); // these coefficients can be determined by requiring f'''_n-3(x_n-2) = f'''_n-2(x_n-2) // the requirement f''_n-3(x_n-2) = f''_n-2(x_n-2) has been used to remove // the dependence on k_n-3 a_[nKnots_-1] = hnm2 + hnm1; b_[nKnots_-1] = hnm1; d_[nKnots_-1] = deltanm2*hnm1*hnm1/(hnm2+hnm1) + deltanm1*(2.*hnm2*hnm2 + 3.*hnm2*hnm1)/(hnm2+hnm1); } else { //Clamped a_[nKnots_-1] = 0.; b_[nKnots_-1] = 1.; d_[nKnots_-1] = dydxn_; } // the remaining equations ensure that f_i-1''(x_i) = f''_i(x_i) for all internal knots for(std::size_t i{1}; i(nKnots_-2)}; i>=0; --i) { dydx_[i] = d_[i] - c_[i]*dydx_[i+1]; } } void Lau1DCubicSpline::calcDerivativesAkima() { //derivatives are calculated according to the Akima method // J.ACM vol. 17 no. 4 pp 589-602 Double_t am1(0.), an(0.), anp1(0.); // a[i] is the slope of the segment from point i-1 to point i // // n.b. 
segment 0 is before point 0 and segment n is after point n-1 // internal segments are numbered 1 - n-1 for(std::size_t i{1}; i #include #include #include "TMessage.h" #include "TMonitor.h" #include "TServerSocket.h" #include "TSocket.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsFitModel.hh" #include "LauAbsFitter.hh" #include "LauAbsPdf.hh" #include "LauComplex.hh" #include "LauFitter.hh" #include "LauFitDataTree.hh" #include "LauGenNtuple.hh" #include "LauParameter.hh" #include "LauParamFixed.hh" #include "LauPrint.hh" #include "LauSPlot.hh" ClassImp(LauAbsFitModel) LauAbsFitModel::LauAbsFitModel() : fixParams_(kFALSE), compareFitData_(kFALSE), savePDF_(kFALSE), writeLatexTable_(kFALSE), writeSPlotData_(kFALSE), storeDPEff_(kFALSE), randomFit_(kFALSE), emlFit_(kFALSE), poissonSmear_(kFALSE), enableEmbedding_(kFALSE), usingDP_(kTRUE), pdfsDependOnDP_(kFALSE), inputFitData_(0), genNtuple_(0), sPlotNtuple_(0), nullString_(""), doSFit_(kFALSE), sWeightBranchName_(""), sWeightScaleFactor_(1.0), outputTableName_(""), fitToyMCFileName_("fitToyMC.root"), fitToyMCTableName_("fitToyMCTable"), fitToyMCScale_(10), fitToyMCPoissonSmear_(kFALSE), sPlotFileName_(""), sPlotTreeName_(""), - sPlotVerbosity_("") + sPlotVerbosity_(LauOutputLevel::Quiet) { } LauAbsFitModel::~LauAbsFitModel() { delete inputFitData_; inputFitData_ = 0; delete genNtuple_; genNtuple_ = 0; delete sPlotNtuple_; sPlotNtuple_ = 0; // Remove the components created to apply constraints to fit parameters for ( auto par : conVars_ ) { if ( ! par->isLValue() ){ delete par; } } } void LauAbsFitModel::run(const TString& applicationCode, const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileName) { // Chose whether you want to generate or fit events in the Dalitz plot. // To generate events choose applicationCode = "gen", to fit events choose // applicationCode = "fit". TString runCode(applicationCode); runCode.ToLower(); TString histFileNameCopy(histFileName); TString tableFileNameCopy(tableFileName); TString dataFileNameCopy(dataFileName); TString dataTreeNameCopy(dataTreeName); // Initialise the fit par vectors. Each class that inherits from this one // must implement this sensibly for all vectors specified in clearFitParVectors, // i.e. specify parameter names, initial, min, max and fixed values this->initialise(); // Add variables to Gaussian constrain to a list this->addConParameters(); if (dataFileNameCopy == "") {dataFileNameCopy = "data.root";} if (dataTreeNameCopy == "") {dataTreeNameCopy = "genResults";} if (runCode.Contains("gen")) { if (histFileNameCopy == "") {histFileNameCopy = "parInfo.root";} if (tableFileNameCopy == "") {tableFileNameCopy = "genResults";} this->setGenValues(); this->generate(dataFileNameCopy, dataTreeNameCopy, histFileNameCopy, tableFileNameCopy); } else if (runCode.Contains("fit")) { if (histFileNameCopy == "") {histFileNameCopy = "parInfo.root";} if (tableFileNameCopy == "") {tableFileNameCopy = "fitResults";} this->fit(dataFileNameCopy, dataTreeNameCopy, histFileNameCopy, tableFileNameCopy); } else if (runCode.Contains("plot")) { this->savePDFPlots("plot"); } else if (runCode.Contains("weight")) { this->weightEvents(dataFileNameCopy, dataTreeNameCopy); } } void LauAbsFitModel::doSFit( const TString& sWeightBranchName, Double_t scaleFactor ) { if ( sWeightBranchName == "" ) { std::cerr << "WARNING in LauAbsFitModel::doSFit : sWeight branch name is empty string, not setting-up sFit." 
<< std::endl; return; } doSFit_ = kTRUE; sWeightBranchName_ = sWeightBranchName; sWeightScaleFactor_ = scaleFactor; } void LauAbsFitModel::setBkgndClassNames( const std::vector& names ) { if ( !bkgndClassNames_.empty() ) { std::cerr << "WARNING in LauAbsFitModel::setBkgndClassNames : Names already stored, not changing them." << std::endl; return; } UInt_t nBkgnds = names.size(); for ( UInt_t i(0); i < nBkgnds; ++i ) { bkgndClassNames_.insert( std::make_pair( i, names[i] ) ); } this->setupBkgndVectors(); } Bool_t LauAbsFitModel::validBkgndClass( const TString& className ) const { if ( bkgndClassNames_.empty() ) { return kFALSE; } Bool_t found(kFALSE); for ( LauBkgndClassMap::const_iterator iter = bkgndClassNames_.begin(); iter != bkgndClassNames_.end(); ++iter ) { if ( iter->second == className ) { found = kTRUE; break; } } return found; } UInt_t LauAbsFitModel::bkgndClassID( const TString& className ) const { if ( ! this->validBkgndClass( className ) ) { std::cerr << "ERROR in LauAbsFitModel::bkgndClassID : Request for ID for invalid background class \"" << className << "\"." << std::endl; return (bkgndClassNames_.size() + 1); } UInt_t bgID(0); for ( LauBkgndClassMap::const_iterator iter = bkgndClassNames_.begin(); iter != bkgndClassNames_.end(); ++iter ) { if ( iter->second == className ) { bgID = iter->first; break; } } return bgID; } const TString& LauAbsFitModel::bkgndClassName( UInt_t classID ) const { LauBkgndClassMap::const_iterator iter = bkgndClassNames_.find( classID ); if ( iter == bkgndClassNames_.end() ) { std::cerr << "ERROR in LauAbsFitModel::bkgndClassName : Request for name of invalid background class ID " << classID << "." << std::endl; return nullString_; } return iter->second; } void LauAbsFitModel::clearFitParVectors() { std::cout << "INFO in LauAbsFitModel::clearFitParVectors : Clearing fit variable vectors" << std::endl; // Remove the components created to apply constraints to fit parameters for ( auto par : conVars_ ) { if ( ! par->isLValue() ){ delete par; } } conVars_.clear(); resVars_.clear(); fitVars_.clear(); } void LauAbsFitModel::clearExtraVarVectors() { std::cout << "INFO in LauAbsFitModel::clearExtraVarVectors : Clearing extra ntuple variable vectors" << std::endl; extraVars_.clear(); } void LauAbsFitModel::setGenValues() { // makes sure each parameter holds its genValue as its current value for (LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter) { (*iter)->value((*iter)->genValue()); } this->propagateParUpdates(); } -void LauAbsFitModel::writeSPlotData(const TString& fileName, const TString& treeName, Bool_t storeDPEfficiency, const TString& verbosity) +void LauAbsFitModel::writeSPlotData(const TString& fileName, const TString& treeName, Bool_t storeDPEfficiency, const LauOutputLevel verbosity) { if (this->writeSPlotData()) { std::cerr << "ERROR in LauAbsFitModel::writeSPlotData : Already have an sPlot ntuple setup, not doing it again." << std::endl; return; } writeSPlotData_ = kTRUE; sPlotFileName_ = fileName; sPlotTreeName_ = treeName; sPlotVerbosity_ = verbosity; storeDPEff_ = storeDPEfficiency; } // TODO : histFileName isn't used here at the moment but could be used for // storing the values of the parameters used in the generation. // These could then be read and used for setting the "true" values // in a subsequent fit. 
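// --------------------------------------------------------------------------
// Illustrative driver sketch (not part of this file): LauAbsFitModel is an
// abstract base, so user code drives a concrete model class. "MyFitModel"
// below is a hypothetical stand-in for such a subclass; the file, tree and
// table names are simply the defaults applied inside run() above, and run()
// dispatches on the application code ("gen" or "fit").

#include "MyFitModel.hh" // hypothetical concrete model derived from LauAbsFitModel

int main()
{
	MyFitModel model;

	// generate a toy sample into data.root / genResults ...
	model.run( "gen", "data.root", "genResults", "parInfo.root", "genResults" );

	// ... then fit the same sample, writing the fit results ntuple
	model.run( "fit", "data.root", "genResults", "parInfo.root", "fitResults" );

	return 0;
}
// --------------------------------------------------------------------------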
void LauAbsFitModel::generate(const TString& dataFileName, const TString& dataTreeName, const TString& /*histFileName*/, const TString& tableFileNameBase) { // Create the ntuple for storing the results std::cout << "INFO in LauAbsFitModel::generate : Creating generation ntuple." << std::endl; if (genNtuple_ != 0) {delete genNtuple_; genNtuple_ = 0;} genNtuple_ = new LauGenNtuple(dataFileName,dataTreeName); // add branches for storing the experiment number and the number of // the event within the current experiment this->addGenNtupleIntegerBranch("iExpt"); this->addGenNtupleIntegerBranch("iEvtWithinExpt"); this->setupGenNtupleBranches(); // Start the cumulative timer cumulTimer_.Start(); const UInt_t firstExp = this->firstExpt(); const UInt_t nExp = this->nExpt(); Bool_t genOK(kTRUE); do { // Loop over the number of experiments for (UInt_t iExp = firstExp; iExp < (firstExp+nExp); ++iExp) { // Start the timer to see how long each experiment takes to generate timer_.Start(); // Store the experiment number in the ntuple this->setGenNtupleIntegerBranchValue("iExpt",iExp); // Do the generation for this experiment std::cout << "INFO in LauAbsFitModel::generate : Generating experiment number " << iExp << std::endl; genOK = this->genExpt(); // Stop the timer and see how long the program took so far timer_.Stop(); timer_.Print(); if (!genOK) { // delete and recreate an empty tree genNtuple_->deleteAndRecreateTree(); // then break out of the experiment loop std::cerr << "WARNING in LauAbsFitModel::generate : Problem in toy MC generation. Starting again with updated parameters..." << std::endl; break; } if (this->writeLatexTable()) { TString tableFileName(tableFileNameBase); tableFileName += "_"; tableFileName += iExp; tableFileName += ".tex"; this->writeOutTable(tableFileName); } } // Loop over number of experiments } while (!genOK); // Print out total timing info. cumulTimer_.Stop(); std::cout << "INFO in LauAbsFitModel::generate : Finished generating all experiments." << std::endl; std::cout << "INFO in LauAbsFitModel::generate : Cumulative timing:" << std::endl; cumulTimer_.Print(); // Build the event index std::cout << "INFO in LauAbsFitModel::generate : Building experiment:event index." << std::endl; // TODO - can test this return value? //Int_t nIndexEntries = genNtuple_->buildIndex("iExpt","iEvtWithinExpt"); // Write out toy MC ntuple std::cout << "INFO in LauAbsFitModel::generate : Writing data to file " << dataFileName << "." 
<< std::endl; genNtuple_->writeOutGenResults(); } void LauAbsFitModel::addGenNtupleIntegerBranch(const TString& name) { genNtuple_->addIntegerBranch(name); } void LauAbsFitModel::addGenNtupleDoubleBranch(const TString& name) { genNtuple_->addDoubleBranch(name); } void LauAbsFitModel::setGenNtupleIntegerBranchValue(const TString& name, Int_t value) { genNtuple_->setIntegerBranchValue(name,value); } void LauAbsFitModel::setGenNtupleDoubleBranchValue(const TString& name, Double_t value) { genNtuple_->setDoubleBranchValue(name,value); } Int_t LauAbsFitModel::getGenNtupleIntegerBranchValue(const TString& name) const { return genNtuple_->getIntegerBranchValue(name); } Double_t LauAbsFitModel::getGenNtupleDoubleBranchValue(const TString& name) const { return genNtuple_->getDoubleBranchValue(name); } void LauAbsFitModel::fillGenNtupleBranches() { genNtuple_->fillBranches(); } void LauAbsFitModel::addSPlotNtupleIntegerBranch(const TString& name) { sPlotNtuple_->addIntegerBranch(name); } void LauAbsFitModel::addSPlotNtupleDoubleBranch(const TString& name) { sPlotNtuple_->addDoubleBranch(name); } void LauAbsFitModel::setSPlotNtupleIntegerBranchValue(const TString& name, Int_t value) { sPlotNtuple_->setIntegerBranchValue(name,value); } void LauAbsFitModel::setSPlotNtupleDoubleBranchValue(const TString& name, Double_t value) { sPlotNtuple_->setDoubleBranchValue(name,value); } void LauAbsFitModel::fillSPlotNtupleBranches() { sPlotNtuple_->fillBranches(); } void LauAbsFitModel::fit(const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileNameBase) { // Routine to perform the total fit. const UInt_t firstExp = this->firstExpt(); const UInt_t nExp = this->nExpt(); std::cout << "INFO in LauAbsFitModel::fit : First experiment = " << firstExp << std::endl; std::cout << "INFO in LauAbsFitModel::fit : Number of experiments = " << nExp << std::endl; // Start the cumulative timer cumulTimer_.Start(); this->resetFitCounters(); // Create and setup the fit results ntuple this->setupResultsOutputs( histFileName, tableFileNameBase ); // Create and setup the sPlot ntuple if (this->writeSPlotData()) { std::cout << "INFO in LauAbsFitModel::fit : Creating sPlot ntuple." << std::endl; if (sPlotNtuple_ != 0) {delete sPlotNtuple_; sPlotNtuple_ = 0;} sPlotNtuple_ = new LauGenNtuple(sPlotFileName_,sPlotTreeName_); this->setupSPlotNtupleBranches(); } // This reads in the given dataFile and creates an input // fit data tree that stores them for all events and experiments. Bool_t dataOK = this->verifyFitData(dataFileName,dataTreeName); if (!dataOK) { std::cerr << "ERROR in LauAbsFitModel::fit : Problem caching the fit data." << std::endl; gSystem->Exit(EXIT_FAILURE); } // Loop over the number of experiments for (UInt_t iExp = firstExp; iExp < (firstExp+nExp); ++iExp) { // Start the timer to see how long each fit takes timer_.Start(); this->setCurrentExperiment( iExp ); UInt_t nEvents = this->readExperimentData(); if (nEvents < 1) { std::cerr << "WARNING in LauAbsFitModel::fit : Zero events in experiment " << iExp << ", skipping..." << std::endl; timer_.Stop(); continue; } // Now the sub-classes must implement whatever they need to do // to cache any more input fit data they need in order to // calculate the likelihoods during the fit. // They need to use the inputFitData_ tree as input. For example, // inputFitData_ contains m13Sq and m23Sq. 
The appropriate fit model // then caches the resonance dynamics for the signal model, as // well as the background likelihood values in the Dalitz plot this->cacheInputFitVars(); if ( this->doSFit() ) { this->cacheInputSWeights(); } // Do the fit for this experiment this->fitExpt(); // Write the results into the ntuple this->finaliseFitResults( outputTableName_ ); // Stop the timer and see how long the program took so far timer_.Stop(); timer_.Print(); // Store the per-event likelihood values if ( this->writeSPlotData() ) { this->storePerEvtLlhds(); } // Create a toy MC sample using the fitted parameters so that // the user can compare the fit to the data. if (compareFitData_ == kTRUE && this->statusCode() == 3) { this->createFitToyMC(fitToyMCFileName_, fitToyMCTableName_); } } // Loop over number of experiments // Print out total timing info. cumulTimer_.Stop(); std::cout << "INFO in LauAbsFitModel::fit : Cumulative timing:" << std::endl; cumulTimer_.Print(); // Print out stats on OK fits. const UInt_t nOKFits = this->numberOKFits(); const UInt_t nBadFits = this->numberBadFits(); std::cout << "INFO in LauAbsFitModel::fit : Number of OK Fits = " << nOKFits << std::endl; std::cout << "INFO in LauAbsFitModel::fit : Number of Failed Fits = " << nBadFits << std::endl; Double_t fitEff(0.0); if (nExp != 0) {fitEff = nOKFits/(1.0*nExp);} std::cout << "INFO in LauAbsFitModel::fit : Fit efficiency = " << fitEff*100.0 << "%." << std::endl; // Write out any fit results (ntuples etc...). this->writeOutAllFitResults(); if ( this->writeSPlotData() ) { this->calculateSPlotData(); } } void LauAbsFitModel::setupResultsOutputs( const TString& histFileName, const TString& tableFileName ) { this->LauSimFitTask::setupResultsOutputs( histFileName, tableFileName ); outputTableName_ = tableFileName; } Bool_t LauAbsFitModel::verifyFitData(const TString& dataFileName, const TString& dataTreeName) { // From the input data stream, store the variables into the // internal tree inputFitData_ that can be used by the sub-classes // in calculating their likelihood functions for the fit delete inputFitData_; inputFitData_ = new LauFitDataTree(dataFileName,dataTreeName); Bool_t dataOK = inputFitData_->findBranches(); if (!dataOK) { delete inputFitData_; inputFitData_ = 0; } return dataOK; } void LauAbsFitModel::cacheInputSWeights() { Bool_t hasBranch = inputFitData_->haveBranch( sWeightBranchName_ ); if ( ! hasBranch ) { std::cerr << "ERROR in LauAbsFitModel::cacheInputSWeights : Input data does not contain variable \"" << sWeightBranchName_ << "\".\n"; std::cerr << " : Turning off sFit!" << std::endl; doSFit_ = kFALSE; return; } UInt_t nEvents = this->eventsPerExpt(); sWeights_.clear(); sWeights_.reserve( nEvents ); for (UInt_t iEvt = 0; iEvt < nEvents; ++iEvt) { const LauFitData& dataValues = inputFitData_->getData(iEvt); LauFitData::const_iterator iter = dataValues.find( sWeightBranchName_ ); sWeights_.push_back( iter->second * sWeightScaleFactor_ ); } } void LauAbsFitModel::fitExpt() { // Routine to perform the actual fit for the given experiment // Update initial fit parameters if required (e.g. if using random numbers). 
this->checkInitFitParams(); // Initialise the fitter LauFitter::fitter()->useAsymmFitErrors( this->useAsymmFitErrors() ); LauFitter::fitter()->twoStageFit( this->twoStageFit() ); LauFitter::fitter()->initialise( this, fitVars_ ); this->startNewFit( LauFitter::fitter()->nParameters(), LauFitter::fitter()->nFreeParameters() ); // Now ready for minimisation step std::cout << "\nINFO in LauAbsFitModel::fitExpt : Start minimisation...\n"; LauAbsFitter::FitStatus fitResult = LauFitter::fitter()->minimise(); // If we're doing a two stage fit we can now release (i.e. float) // the 2nd stage parameters and re-fit if (this->twoStageFit()) { if ( fitResult.status != 3 ) { std::cerr << "WARNING in LauAbsFitModel:fitExpt : Not running second stage fit since first stage failed." << std::endl; LauFitter::fitter()->releaseSecondStageParameters(); } else { LauFitter::fitter()->releaseSecondStageParameters(); this->startNewFit( LauFitter::fitter()->nParameters(), LauFitter::fitter()->nFreeParameters() ); fitResult = LauFitter::fitter()->minimise(); } } const TMatrixD& covMat = LauFitter::fitter()->covarianceMatrix(); this->storeFitStatus( fitResult, covMat ); // Store the final fit results and errors into protected internal vectors that // all sub-classes can use within their own finalFitResults implementation // used below (e.g. putting them into an ntuple in a root file) LauFitter::fitter()->updateParameters(); } void LauAbsFitModel::calculateSPlotData() { if (sPlotNtuple_ != 0) { sPlotNtuple_->addFriendTree(inputFitData_->fileName(), inputFitData_->treeName()); sPlotNtuple_->writeOutGenResults(); LauSPlot splot(sPlotNtuple_->fileName(), sPlotNtuple_->treeName(), this->firstExpt(), this->nExpt(), this->variableNames(), this->freeSpeciesNames(), this->fixdSpeciesNames(), this->twodimPDFs(), this->splitSignal(), this->scfDPSmear()); splot.runCalculations(sPlotVerbosity_); splot.writeOutResults(); } } void LauAbsFitModel::compareFitData(UInt_t toyMCScale, const TString& mcFileName, const TString& tableFileName, Bool_t poissonSmearing) { compareFitData_ = kTRUE; fitToyMCScale_ = toyMCScale; fitToyMCFileName_ = mcFileName; fitToyMCTableName_ = tableFileName; fitToyMCPoissonSmear_ = poissonSmearing; } void LauAbsFitModel::createFitToyMC(const TString& mcFileName, const TString& tableFileName) { // Create a toy MC sample so that the user can compare the fitted // result with the data. 
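	// Illustrative usage note: this routine is invoked only after a successful
	// fit and only if the comparison has been requested beforehand via the
	// compareFitData() setter above, e.g. (the model object name is hypothetical)
	//
	//     model.compareFitData( 10, "fitToyMC.root", "fitToyMCTable", kTRUE );
	//
	// which sets the fitToyMCScale_, fitToyMCFileName_, fitToyMCTableName_ and
	// fitToyMCPoissonSmear_ values used below.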
// Generate more toy MC to reduce statistical fluctuations: // - use the rescaling value fitToyMCScale_ // Store the info on the number of experiments, first expt and current expt const UInt_t oldNExpt(this->nExpt()); const UInt_t oldFirstExpt(this->firstExpt()); const UInt_t oldIExpt(this->iExpt()); // Turn off Poisson smearing if required const Bool_t poissonSmearing(this->doPoissonSmearing()); this->doPoissonSmearing(fitToyMCPoissonSmear_); // Turn off embedding, since we need toy MC, not reco'ed events const Bool_t enableEmbeddingOrig(this->enableEmbedding()); this->enableEmbedding(kFALSE); // Need to make sure that the generation of the DP co-ordinates is // switched on if any of our PDFs depend on it const Bool_t origUseDP = this->useDP(); if ( !origUseDP && this->pdfsDependOnDP() ) { this->useDP( kTRUE ); this->initialiseDPModels(); } // Construct a unique filename for this experiment TString exptString("_expt"); exptString += oldIExpt; TString fileName( mcFileName ); fileName.Insert( fileName.Last('.'), exptString ); // Generate the toy MC std::cout << "INFO in LauAbsFitModel::createFitToyMC : Generating toy MC in " << fileName << " to compare fit with data...\n"; std::cout << " : Number of experiments to generate = " << fitToyMCScale_ << "\n"; std::cout << " : This is to allow the toy MC to be made with reduced statistical fluctuations\n"; std::cout << " : Number of events in each experiment will be " << (fitToyMCPoissonSmear_ ? "fluctuated according to a Poisson distribution" : "exactly the same") << std::endl; // Set the genValue of each parameter to its current (fitted) value // but first store the original genValues for restoring later std::vector origGenValues; origGenValues.reserve(this->nTotParams()); Bool_t blind(kFALSE); for (LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter) { origGenValues.push_back((*iter)->genValue()); (*iter)->genValue((*iter)->unblindValue()); if ( (*iter)->blind() ) { blind = kTRUE; } } if ( blind ) { std::cerr << "WARNING in LauAbsFitModel::createFitToyMC : One or more parameters are blind but the toy will be created using the unblind values - use with caution!!" << std::endl; } // If we're asked to generate more than 100 experiments then split it // up into multiple files since otherwise can run into memory issues // when building the index // TODO - this obviously depends on the number of events per experiment as well, so should do this properly UInt_t totalExpts = fitToyMCScale_; if ( totalExpts > 100 ) { UInt_t nFiles = totalExpts/100; if ( totalExpts%100 ) { nFiles += 1; } TString fileNameBase {fileName}; for ( UInt_t iFile(0); iFile < nFiles; ++iFile ) { UInt_t firstExp( iFile*100 ); // Set number of experiments and first experiment to generate UInt_t nExp = ((firstExp + 100)>totalExpts) ? 
totalExpts-firstExp : 100; this->setNExpts(nExp, firstExp); // Create a unique filename and generate the events fileName = fileNameBase; TString extraname = "_file"; extraname += iFile; fileName.Insert( fileName.Last('.'), extraname ); this->generate(fileName, "genResults", "dummy.root", tableFileName); } } else { // Set number of experiments to new value this->setNExpts(fitToyMCScale_, 0); // Generate the toy this->generate(fileName, "genResults", "dummy.root", tableFileName); } // Reset number of experiments to original value this->setNExpts(oldNExpt, oldFirstExpt); this->setCurrentExperiment(oldIExpt); // Restore the Poisson smearing to its former value this->doPoissonSmearing(poissonSmearing); // Restore the embedding status this->enableEmbedding(enableEmbeddingOrig); // Restore "useDP" to its former status this->useDP( origUseDP ); // Restore the original genValue to each parameter for (UInt_t i(0); inTotParams(); ++i) { fitVars_[i]->genValue(origGenValues[i]); } std::cout << "INFO in LauAbsFitModel::createFitToyMC : Finished in createFitToyMC." << std::endl; } Double_t LauAbsFitModel::getTotNegLogLikelihood() { // Calculate the total negative log-likelihood over all events. // This function assumes that the fit parameters and data tree have // already been set-up correctly. // Loop over the data points to calculate the log likelihood Double_t logLike = this->getLogLikelihood( 0, this->eventsPerExpt() ); // Include the Poisson term in the extended likelihood if required if (this->doEMLFit()) { logLike -= this->getEventSum(); } // Calculate any penalty terms from Gaussian constrained variables if ( ! conVars_.empty() ){ logLike -= this->getLogLikelihoodPenalty(); } Double_t totNegLogLike = -logLike; return totNegLogLike; } Double_t LauAbsFitModel::getLogLikelihoodPenalty() { Double_t penalty(0.0); for ( LauAbsRValuePList::const_iterator iter = conVars_.begin(); iter != conVars_.end(); ++iter ) { Double_t val = (*iter)->unblindValue(); Double_t mean = (*iter)->constraintMean(); Double_t width = (*iter)->constraintWidth(); Double_t term = ( val - mean )*( val - mean ); penalty += term/( 2*width*width ); } return penalty; } Double_t LauAbsFitModel::getLogLikelihood( UInt_t iStart, UInt_t iEnd ) { // Calculate the total negative log-likelihood over all events. // This function assumes that the fit parameters and data tree have // already been set-up correctly. // Loop over the data points to calculate the log likelihood Double_t logLike(0.0); const Double_t worstLL = this->worstLogLike(); // Loop over the number of events in this experiment Bool_t ok(kTRUE); for (UInt_t iEvt = iStart; iEvt < iEnd; ++iEvt) { Double_t likelihood = this->getTotEvtLikelihood(iEvt); if (likelihood > std::numeric_limits::min()) { // Is the likelihood zero? Double_t evtLogLike = TMath::Log(likelihood); if ( doSFit_ ) { evtLogLike *= sWeights_[iEvt]; } logLike += evtLogLike; } else { ok = kFALSE; std::cerr << "WARNING in LauAbsFitModel::getLogLikelihood : Strange likelihood value for event " << iEvt << ": " << likelihood << "\n"; this->printEventInfo(iEvt); this->printVarsInfo(); //Write the values of the floated variables for which the likelihood is zero break; } } if (!ok) { std::cerr << " : Returning worst NLL found so far to force MINUIT out of this region." 
<< std::endl; logLike = worstLL; } else if (logLike < worstLL) { this->worstLogLike( logLike ); } return logLike; } void LauAbsFitModel::setParsFromMinuit(Double_t* par, Int_t npar) { // This function sets the internal parameters based on the values // that Minuit is using when trying to minimise the total likelihood function. // MINOS reports different numbers of free parameters depending on the // situation, so disable this check if ( ! this->withinAsymErrorCalc() ) { const UInt_t nFreePars = this->nFreeParams(); if (static_cast(npar) != nFreePars) { std::cerr << "ERROR in LauAbsFitModel::setParsFromMinuit : Unexpected number of free parameters: " << npar << ".\n"; std::cerr << " Expected: " << nFreePars << ".\n" << std::endl; gSystem->Exit(EXIT_FAILURE); } } // Despite npar being the number of free parameters // the par array actually contains all the parameters, // free and floating... // Update all the floating ones with their new values // Also check if we have any parameters on which the DP integrals depend // and whether they have changed since the last iteration Bool_t recalcNorm(kFALSE); const LauParameterPSet::const_iterator resVarsEnd = resVars_.end(); for (UInt_t i(0); inTotParams(); ++i) { if (!fitVars_[i]->fixed()) { if ( resVars_.find( fitVars_[i] ) != resVarsEnd ) { if ( fitVars_[i]->value() != par[i] ) { recalcNorm = kTRUE; } } fitVars_[i]->value(par[i]); } } // If so, then recalculate the normalisation if (recalcNorm) { this->recalculateNormalisation(); } this->propagateParUpdates(); } UInt_t LauAbsFitModel::addFitParameters(LauParameter* param, const Bool_t addFixed) { UInt_t nParsAdded{0}; // if we're dealing with a clone, // we should instead deal with its parent if ( param->clone() ) { param = param->parent(); } // we need to include the parameter if it is either: // - floating // - currently fixed but will float in the second stage of a two-stage fit if ( addFixed || ! 
param->fixed() || ( this->twoStageFit() && param->secondStage() ) ) { // check whether we already have this parameter stored auto [ _, addedOK ] { fitVarsSet_.insert( param ) }; if ( addedOK ) { // if not, add it to the list fitVars_.push_back( param ); ++nParsAdded; } } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauParameterPList& paramList, const Bool_t addFixed) { UInt_t nParsAdded{0}; for ( auto param : paramList ) { nParsAdded += this->addFitParameters( param, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauAbsRValue* param, const Bool_t addFixed) { UInt_t nParsAdded{0}; LauParameterPList pars { param->getPars() }; for ( auto par : pars ) { nParsAdded += this->addFitParameters( par, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauAbsRValuePList& paramList, const Bool_t addFixed) { UInt_t nParsAdded{0}; for ( auto param : paramList ) { nParsAdded += this->addFitParameters( param, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauPdfPList& pdfList, const Bool_t addFixed) { UInt_t nParsAdded{0}; for ( auto pdf : pdfList ) { if ( pdf->isDPDependent() ) { this->pdfsDependOnDP( kTRUE ); } LauAbsRValuePList& pars = pdf->getParameters(); nParsAdded += this->addFitParameters( pars, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addResonanceParameters(LauParameter* param) { UInt_t nParsAdded{0}; // first check if we have this parameter in the set of resonance parameters auto [ _, addedOK ] { resVars_.insert( param ) }; if ( addedOK ) { nParsAdded += this->addFitParameters( param ); } return nParsAdded; } UInt_t LauAbsFitModel::addResonanceParameters(LauParameterPList& paramList) { UInt_t nParsAdded{0}; for ( auto param : paramList ) { nParsAdded += this->addResonanceParameters( param ); } return nParsAdded; } void LauAbsFitModel::addConParameters() { for ( LauParameterPList::const_iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter ) { if ( (*iter)->gaussConstraint() ) { conVars_.push_back( *iter ); std::cout << "INFO in LauAbsFitModel::addConParameters : Added Gaussian constraint to parameter "<< (*iter)->name() << std::endl; } } // Add penalties from the constraints to fit parameters const std::vector& storeCon = this->constraintsStore(); for ( std::vector::const_iterator iter = storeCon.begin(); iter != storeCon.end(); ++iter ) { const std::vector& names = (*iter).conPars_; std::vector params; for ( std::vector::const_iterator iternames = names.begin(); iternames != names.end(); ++iternames ) { for ( LauParameterPList::const_iterator iterfit = fitVars_.begin(); iterfit != fitVars_.end(); ++iterfit ) { if ( (*iternames) == (*iterfit)->name() ){ params.push_back(*iterfit); } } } // If the parameters are not found, skip it if ( params.size() != (*iter).conPars_.size() ) { std::cerr << "WARNING in LauAbsFitModel::addConParameters: Could not find parameters to constrain in the formula... 
skipping" << std::endl; continue; } LauFormulaPar* formPar = new LauFormulaPar( (*iter).formula_, (*iter).formula_, params ); formPar->addGaussianConstraint( (*iter).mean_, (*iter).width_ ); conVars_.push_back(formPar); std::cout << "INFO in LauAbsFitModel::addConParameters : Added Gaussian constraint to formula\n"; std::cout << " : Formula: " << (*iter).formula_ << std::endl; for ( std::vector::iterator iterparam = params.begin(); iterparam != params.end(); ++iterparam ) { std::cout << " : Parameter: " << (*iterparam)->name() << std::endl; } } } void LauAbsFitModel::updateFitParameters(LauPdfPList& pdfList) { for (LauPdfPList::iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { (*pdf_iter)->updatePulls(); } } void LauAbsFitModel::printFitParameters(const LauPdfPList& pdfList, std::ostream& fout) const { LauPrint print; for (LauPdfPList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { const LauAbsRValuePList& pars = (*pdf_iter)->getParameters(); for (LauAbsRValuePList::const_iterator pars_iter = pars.begin(); pars_iter != pars.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { if (!(*params_iter)->clone()) { fout << (*params_iter)->name() << " & $"; print.printFormat(fout, (*params_iter)->value()); if ((*params_iter)->fixed() == kTRUE) { fout << "$ (fixed) \\\\"; } else { fout << " \\pm "; print.printFormat(fout, (*params_iter)->error()); fout << "$ \\\\" << std::endl; } } } } } } void LauAbsFitModel::cacheInfo(LauPdfPList& pdfList, const LauFitDataTree& theData) { for (LauPdfPList::iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { (*pdf_iter)->cacheInfo(theData); } } Double_t LauAbsFitModel::prodPdfValue(LauPdfPList& pdfList, UInt_t iEvt) { Double_t pdfVal = 1.0; for (LauPdfPList::iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { (*pdf_iter)->calcLikelihoodInfo(iEvt); pdfVal *= (*pdf_iter)->getLikelihood(); } return pdfVal; } void LauAbsFitModel::printEventInfo(UInt_t iEvt) const { const LauFitData& data = inputFitData_->getData(iEvt); std::cerr << " : Input data values for this event:" << std::endl; for (LauFitData::const_iterator iter = data.begin(); iter != data.end(); ++iter) { std::cerr << " " << iter->first << " = " << iter->second << std::endl; } } void LauAbsFitModel::printVarsInfo() const { std::cerr << " : Current values of fit parameters:" << std::endl; for (UInt_t i(0); inTotParams(); ++i) { std::cerr << " " << (fitVars_[i]->name()).Data() << " = " << fitVars_[i]->value() << std::endl; } } void LauAbsFitModel::prepareInitialParArray( TObjArray& array ) { // Update initial fit parameters if required (e.g. if using random numbers). this->checkInitFitParams(); // Store the total number of parameters and the number of free parameters UInt_t nPars = fitVars_.size(); UInt_t nFreePars = 0; // Send the fit parameters for ( LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter ) { if ( ! 
(*iter)->fixed() ) { ++nFreePars; } array.Add( *iter ); } this->startNewFit( nPars, nFreePars ); } void LauAbsFitModel::finaliseExperiment( const LauAbsFitter::FitStatus& fitStat, const TObjArray* parsFromCoordinator, const TMatrixD* covMat, TObjArray& parsToCoordinator ) { // Copy the fit status information this->storeFitStatus( fitStat, *covMat ); // Now process the parameters const UInt_t nPars = this->nTotParams(); UInt_t nParsFromCoordinator = parsFromCoordinator->GetEntries(); if ( nParsFromCoordinator != nPars ) { std::cerr << "ERROR in LauAbsFitModel::finaliseExperiment : Unexpected number of parameters received from coordinator" << std::endl; std::cerr << " : Received " << nParsFromCoordinator << " when expecting " << nPars << std::endl; gSystem->Exit( EXIT_FAILURE ); } for ( UInt_t iPar(0); iPar < nParsFromCoordinator; ++iPar ) { LauParameter* parameter = dynamic_cast( (*parsFromCoordinator)[iPar] ); if ( ! parameter ) { std::cerr << "ERROR in LauAbsFitModel::finaliseExperiment : Error reading parameter from coordinator" << std::endl; gSystem->Exit( EXIT_FAILURE ); } if ( parameter->name() != fitVars_[iPar]->name() ) { std::cerr << "ERROR in LauAbsFitModel::finaliseExperiment : Error reading parameter from coordinator" << std::endl; gSystem->Exit( EXIT_FAILURE ); } *(fitVars_[iPar]) = *parameter; } // Write the results into the ntuple this->finaliseFitResults( outputTableName_ ); // Store the per-event likelihood values if ( this->writeSPlotData() ) { this->storePerEvtLlhds(); } // Create a toy MC sample using the fitted parameters so that // the user can compare the fit to the data. if (compareFitData_ == kTRUE && fitStat.status == 3) { this->createFitToyMC(fitToyMCFileName_, fitToyMCTableName_); } // Send the finalised fit parameters for ( LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter ) { parsToCoordinator.Add( *iter ); } } UInt_t LauAbsFitModel::readExperimentData() { // retrieve the data and find out how many events have been read const UInt_t exptIndex = this->iExpt(); inputFitData_->readExperimentData( exptIndex ); const UInt_t nEvent = inputFitData_->nEvents(); this->eventsPerExpt( nEvent ); return nEvent; } void LauAbsFitModel::setParametersFromFile(const TString& fileName, const TString& treeName, const Bool_t fix) { fixParamFileName_ = fileName; fixParamTreeName_ = treeName; fixParams_ = fix; } void LauAbsFitModel::setParametersFromMap(const std::map& parameters, const Bool_t fix) { fixParamMap_ = parameters; fixParams_ = fix; } void LauAbsFitModel::setNamedParameters(const TString& fileName, const TString& treeName, const std::set& parameters, const Bool_t fix) { fixParamFileName_ = fileName; fixParamTreeName_ = treeName; fixParamNames_ = parameters; fixParams_ = fix; } void LauAbsFitModel::setParametersFileFallback(const TString& fileName, const TString& treeName, const std::map& parameters, const Bool_t fix) { fixParamFileName_ = fileName; fixParamTreeName_ = treeName; fixParamMap_ = parameters; fixParams_ = fix; } diff --git a/src/LauCalcChiSq.cc b/src/LauCalcChiSq.cc index b687c25..c720f4a 100644 --- a/src/LauCalcChiSq.cc +++ b/src/LauCalcChiSq.cc @@ -1,543 +1,543 @@ /* Copyright 2008 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauCalcChiSq.cc \brief File containing implementation of LauCalcChiSq class. Code to produce an adaptive binning scheme and calculate the 2D chi-square between two datasets (e.g. low-stat data and high-stat toyMC). Note that the low and high stat histograms must have the same bin axes ranges and number of bins. It works by using the low stat (first) histogram to find a binning scheme such that the total number of entries in each bin is >= Min_bin_content. The number of entries in the histogram is divided by the desired minimum bin content to give a target number of bins. The largest number of bins that can be expressed as a product of powers of 4, 9, 25, 49 and 121 that does not exceed the target value is chosen. The histogram is the recursively subdivided in 2x2, 3x3, 5x5, 7x7 or 11x11 bins. For each stage of the subdivision, each bin is first divided into equally populated bins in x then each of these is further divded into equally popiulated bins in y. The (Pearson) chi-squared is then the sum of the chi-squared contributions of all bins: (low_stat_number - high_stat_number)^2/(high_stat_number) The nDof = number of bins - number of free params - 1 */ #include "LauCalcChiSq.hh" #include "TAxis.h" #include "TFile.h" #include "TMath.h" #include "TSystem.h" #include "TTree.h" #include "TCanvas.h" #include "TColor.h" #include "TStyle.h" #include #include #include #include ClassImp(LauCalcChiSq) LauCalcChiSq::LauCalcChiSq(const TString& inputFileName) : inputFileName_(inputFileName), fileName1_(""), fileName2_(""), treeName1_(""), treeName2_(""), xName1_(""), xName2_(""), yName1_(""), yName2_(""), minContent_(10.0), histo1_(0), histo2_(0), chiSqHisto_(0), chiSqSignedHisto_(0), xMin_(0.0), xMax_(0.0), yMin_(0.0), yMax_(0.0), nParams_(0), scaleFactor_(1.0), - verbose_(kFALSE) + printLevel_(LauOutputLevel::Standard) { } LauCalcChiSq::~LauCalcChiSq() { } void LauCalcChiSq::run() { std::cout<<"Running chi-squared algorithm"<initialiseHistos(); std::cout<<"Calculating chi-squared"<calculateChiSq(); //make plots this->makePlots(); // Output the various histograms std::cout<<"Writing out histogram output"<SetDirectory(outFile); histo2_->SetDirectory(outFile); pullHisto_->SetDirectory(outFile); chiSqHisto_->SetDirectory(outFile); chiSqSignedHisto_->SetDirectory(outFile); outFile->Write(); outFile->Close(); std::cout<<"Done"<GetBinContent(1); for(Int_t i=1; i<=histo1_->GetNumberOfBins(); ++i) { //keep track of actual minimum if(histo1_->GetBinContent(i)GetBinContent(i); } // Calculate Pearson chi-square for this bin, using the // second histogram for the expected distribution chiSq = 0.; toyVal = histo2_->GetBinContent(i); dataVal = histo1_->GetBinContent(i); diff = dataVal-toyVal; if(toyVal>0) chiSq = (diff*diff)/toyVal; totalChiSq += chiSq; chiSqHisto_->SetBinContent(i, chiSq); if(diff>0) { chiSqSignedHisto_->SetBinContent(i, chiSq); pullHisto_->SetBinContent(i, sqrt(chiSq)); } else { chiSqSignedHisto_->SetBinContent(i, -chiSq); pullHisto_->SetBinContent(i, -sqrt(chiSq)); } } ndof = histo1_->GetNumberOfBins()-nParams_-1; 
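The loop above implements the figure of merit described in the file-level comment: a Pearson chi-square, (low_stat - high_stat)^2 / high_stat summed over the adaptive bins, with nDof = number of bins - number of free parameters - 1. As a minimal, self-contained sketch of that calculation (not Laura++ code; the function name pearsonChiSq, its vector arguments and the example values in main are invented for illustration, whereas the class itself works directly with TH2Poly bin contents), the same quantity and its p-value can be obtained with ROOT's TMath::Prob as follows:

#include <cstddef>
#include <iostream>
#include <vector>

#include "TMath.h"

// Sketch only: Pearson chi-square between "data" (low-stat) and "toy"
// (high-stat, already scaled) bin contents, following the prescription
// in the LauCalcChiSq documentation.
double pearsonChiSq( const std::vector<double>& data,
                     const std::vector<double>& toy,
                     const int nFreeParams )
{
	double totalChiSq{0.0};
	for ( std::size_t i{0}; i < data.size(); ++i ) {
		const double diff { data[i] - toy[i] };
		// bins with zero expected content contribute nothing,
		// mirroring the if(toyVal>0) guard in calculateChiSq
		if ( toy[i] > 0.0 ) {
			totalChiSq += diff*diff/toy[i];
		}
	}

	// nDof = number of bins - number of free parameters - 1
	const int nDof { static_cast<int>( data.size() ) - nFreeParams - 1 };

	std::cout << "ChiSq/nDof = " << totalChiSq << "/" << nDof
	          << ", p-value = " << TMath::Prob( totalChiSq, nDof ) << std::endl;

	return totalChiSq;
}

int main()
{
	// purely illustrative bin contents
	const std::vector<double> data { 12.0,  9.0, 15.0, 11.0 };
	const std::vector<double> toy  { 10.5, 10.2, 13.8, 11.9 };
	pearsonChiSq( data, toy, 0 );
	return 0;
}

Here the second (high-statistics) histogram plays the role of the expectation, so the p-value from TMath::Prob is only meaningful when its bin contents are large enough for the Pearson approximation to hold.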
std::cout<<"Total ChiSq/nDof = "<SetPalette(1,0); const Int_t NRGBs = 5; const Int_t NCont = 255; Double_t stops[NRGBs] = { 0.00, 0.34, 0.61, 0.84, 1.00}; Double_t red[NRGBs] = { 0.00, 0.00, 0.87, 1.00, 0.51}; Double_t green[NRGBs] = { 0.00, 0.81, 1.00, 0.20, 0.00}; Double_t blue[NRGBs] = { 0.51, 1.00, 0.12, 0.00, 0.00}; TColor::CreateGradientColorTable(NRGBs, stops, red, green, blue, NCont); gStyle->SetNumberContours(NCont); gStyle->SetOptStat(0000); TCanvas can; can.SetTopMargin(0.05); can.SetRightMargin(0.17); can.SetBottomMargin(0.16); can.SetLeftMargin(0.14); histo1_->SetLabelFont(62,"x"); histo1_->SetLabelFont(62,"y"); histo1_->SetTitleFont(62,"x"); histo1_->SetTitleFont(62,"y"); histo1_->SetTitleSize(0.06,"x"); histo1_->SetTitleSize(0.06,"y"); histo1_->SetLabelSize(0.05,"x"); histo1_->SetLabelSize(0.05,"y"); histo1_->SetXTitle(xName1_); histo1_->SetYTitle(yName1_); histo1_->Draw("colz"); can.SaveAs("data.pdf"); histo2_->SetLabelFont(62,"x"); histo2_->SetLabelFont(62,"y"); histo2_->SetTitleFont(62,"x"); histo2_->SetTitleFont(62,"y"); histo2_->SetTitleSize(0.06,"x"); histo2_->SetTitleSize(0.06,"y"); histo2_->SetLabelSize(0.05,"x"); histo2_->SetLabelSize(0.05,"y"); histo2_->SetXTitle(xName1_); histo2_->SetYTitle(yName1_); histo2_->Draw("colz"); can.SaveAs("toy.pdf"); if(-1.*pullHisto_->GetMinimum() > pullHisto_->GetMaximum()) pullHisto_->SetMaximum(-1.*pullHisto_->GetMinimum()); else pullHisto_->SetMinimum(-1.*pullHisto_->GetMaximum()); pullHisto_->SetLabelFont(62,"x"); pullHisto_->SetLabelFont(62,"y"); pullHisto_->SetTitleFont(62,"x"); pullHisto_->SetTitleFont(62,"y"); pullHisto_->SetTitleSize(0.06,"x"); pullHisto_->SetTitleSize(0.06,"y"); pullHisto_->SetLabelSize(0.05,"x"); pullHisto_->SetLabelSize(0.05,"y"); pullHisto_->SetXTitle(xName1_); pullHisto_->SetYTitle(yName1_); pullHisto_->Draw("colz"); can.SaveAs("pull.pdf"); chiSqHisto_->SetLabelFont(62,"x"); chiSqHisto_->SetLabelFont(62,"y"); chiSqHisto_->SetTitleFont(62,"x"); chiSqHisto_->SetTitleFont(62,"y"); chiSqHisto_->SetTitleSize(0.06,"x"); chiSqHisto_->SetTitleSize(0.06,"y"); chiSqHisto_->SetLabelSize(0.05,"x"); chiSqHisto_->SetLabelSize(0.05,"y"); chiSqHisto_->SetXTitle(xName1_); chiSqHisto_->SetYTitle(yName1_); chiSqHisto_->Draw("colz"); can.SaveAs("chiSq.pdf"); chiSqSignedHisto_->SetLabelFont(62,"x"); chiSqSignedHisto_->SetLabelFont(62,"y"); chiSqSignedHisto_->SetTitleFont(62,"x"); chiSqSignedHisto_->SetTitleFont(62,"y"); chiSqSignedHisto_->SetTitleSize(0.06,"x"); chiSqSignedHisto_->SetTitleSize(0.06,"y"); chiSqSignedHisto_->SetLabelSize(0.05,"x"); chiSqSignedHisto_->SetLabelSize(0.05,"y"); chiSqSignedHisto_->SetXTitle(xName1_); chiSqSignedHisto_->SetYTitle(yName1_); chiSqSignedHisto_->Draw("colz"); can.SaveAs("chiSqSigned.pdf"); } void LauCalcChiSq::initialiseHistos() { // Open the input control file: // Low_stat_file_name Low_stat_tree_name Low_stat_x_axis_name Low_stat_y_axis_name // High_stat_file_name High_stat_tree_name High_stat_x_axis_name High_stat_y_axis_name // Min_bin_content N_free_params Low/high_stat_histo_ratio xMin xMax yMin yMax std::ifstream getData(inputFileName_.Data()); // get the info on the low stat histo getData >> fileName1_ >> treeName1_ >> xName1_ >> yName1_; if (!getData.good()) { std::cerr<<"Error. 
Could not read first line of the input file "<Exit(EXIT_FAILURE); } // open the file that contains the low stat histogram TFile * file1 = TFile::Open(fileName1_.Data(), "read"); if (file1 == 0) {gSystem->Exit(EXIT_FAILURE);} // retrieve the low stat histogram TTree* tree1 = dynamic_cast(file1->Get(treeName1_.Data())); if (tree1 == 0) { std::cerr<<"Error. Could not find the tree "<Exit(EXIT_FAILURE); } // get the info on the high stat histogram getData >> fileName2_ >> treeName2_ >> xName2_ >> yName2_; if (!getData.good()) { std::cerr<<"Error. Could not read the second line of the input file "<Exit(EXIT_FAILURE); } // if both histograms are in the same file then just retrieve the // high stat histogram from the file we already have open TFile * file2(0); TTree* tree2(0); if ( fileName2_ == fileName1_ ) { tree2 = dynamic_cast(file1->Get(treeName2_.Data())); } // otherwise open the other file and retrieve the high stat histogram from there else { file2 = TFile::Open(fileName2_.Data(), "read"); if (file2 == 0) {gSystem->Exit(EXIT_FAILURE);} tree2 = dynamic_cast(file2->Get(treeName2_.Data())); } if (tree2 == 0) { std::cerr<<"Error. Could not find the tree "<Exit(EXIT_FAILURE); } // get the info on the minimum content, number of parameters and scalefactor Int_t nParameters(0); Float_t minContent(0.0), scaleFactor(1.0), xMin(0.0), xMax(1.0), yMin(0.0), yMax(1.0); getData >> minContent >> nParameters >> scaleFactor >> xMin >> xMax >> yMin >> yMax; if (getData.good()) { minContent_ = minContent; nParams_ = nParameters; scaleFactor_ = scaleFactor; xMin_ = xMin; xMax_ = xMax; yMin_ = yMin; yMax_ = yMax; } // close the text file getData.close(); std::cout<<"Using the files and trees: "<SetBranchAddress(xName1_.Data(),&x); tree1->SetBranchAddress(yName1_.Data(),&y); Int_t nEntries = tree1->GetEntries(); Double_t* xs = new Double_t[nEntries]; Double_t* ys = new Double_t[nEntries]; for ( Int_t i=0; i < nEntries; ++i ) { tree1->GetEntry( i ); xs[i] = x; ys[i] = y; } theHisto_ = new TH2Poly("theHisto_", "", xMin_, xMax_, yMin_, yMax_); //select the number of divisions to get us closest to minContent entries per bin std::vector divisions; this->pickBinning(xs,ys,nEntries,divisions); //perform the adaptive bining based on histo1_ this->getHisto(xMin_, xMax_, yMin_, yMax_, xs, ys, nEntries, divisions); histo1_ = dynamic_cast(theHisto_->Clone("histo1_")); histo2_ = dynamic_cast(theHisto_->Clone("histo2_")); pullHisto_ = dynamic_cast(theHisto_->Clone("pullHisto_")); chiSqHisto_ = dynamic_cast(theHisto_->Clone("chiSqHisto_")); chiSqSignedHisto_ = dynamic_cast(theHisto_->Clone("chiSqSignedHisto_")); delete[] xs; delete[] ys; //fill the two histograms from the trees TString drawString1, drawString2, weightString2; drawString1 += yName1_; drawString1 += ":"; drawString1 += xName1_; drawString1 += ">>histo1_"; drawString2 += yName2_; drawString2 += ":"; drawString2 += xName2_; drawString2 += ">>histo2_"; weightString2 += scaleFactor_; tree1->Draw(drawString1); tree2->Draw(drawString2,weightString2); histo1_->SetDirectory(0); histo2_->SetDirectory(0); pullHisto_->SetDirectory(0); chiSqHisto_->SetDirectory(0); chiSqSignedHisto_->SetDirectory(0); // close the file(s) containing the trees if (file1 != 0) {file1->Close();} delete file1; if (file2 != 0) {file2->Close();} delete file2; } void LauCalcChiSq::pickBinning(const Double_t* xs, const Double_t* ys, const Int_t nEntries, std::vector& divisions) { //first check how many events we have within the histogram limits Int_t nIn(0); for(Int_t i=0; i= xMin_ && ys[i]= yMin_) 
{ ++nIn; } } //aim to have exactly minContent events in each bin Int_t nBinsTarget = nIn / minContent_; std::cout << "Target is " << minContent_ << " entries per bin" << std::endl; std::cout << "Aiming to divide " << nIn << " entries between " << nBinsTarget << " bins" << std::endl; //we will iteratively sub-divide histogram bins into either 4, 9, 25, 49 or 121 //here we figure out how many 4s, 9s, 25s, 49s and 121s to use to best match our target without exceeding it Int_t nDivisions(0), nNines(0), nTwentyFives(0), nFortyNines(0), nEleventyElevens(0), nBins(1); Int_t nDivisionsBest(0), nNinesBest(0), nTwentyFivesBest(0), nFortyNinesBest(0), nEleventyElevensBest(0), nBinsBest(1); do { ++nDivisions; for(nNines=0; nNines<=nDivisions; ++nNines) { for(nTwentyFives=0; nTwentyFives<=nDivisions-nNines; ++nTwentyFives) { for(nFortyNines=0; nFortyNines<=nDivisions-nNines-nTwentyFives; ++nFortyNines) { for(nEleventyElevens=0; nEleventyElevens<=nDivisions-nNines-nTwentyFives-nFortyNines; ++nEleventyElevens) { nBins = TMath::Power(4,nDivisions-nNines-nTwentyFives-nFortyNines-nEleventyElevens) *TMath::Power(9,nNines)*TMath::Power(25,nTwentyFives) *TMath::Power(49,nFortyNines)*TMath::Power(121,nEleventyElevens); if(nBins < nBinsTarget && nBins > nBinsBest) { //keep track of the best number of bins so far nBinsBest = nBins; nDivisionsBest = nDivisions; nNinesBest = nNines; nTwentyFivesBest = nTwentyFives; nFortyNinesBest = nFortyNines; nEleventyElevensBest = nEleventyElevens; } } } } } } while(TMath::Power(4,nDivisions+1) < nBinsTarget);//if 4^n > target then we've gone far enough std::cout << "Using " << nBinsBest << " bins" << std::endl; //fill the vector with the divisions that we want to make for(Int_t i=0; i& divisions, const UInt_t iter) { //If it's the last iteration create the bin and return if(iter == divisions.size()) { Double_t * x_new = new Double_t[5]; Double_t * y_new = new Double_t[5]; x_new[0] = xMin; x_new[1] = xMin; x_new[2] = xMax; x_new[3] = xMax; x_new[4] = xMin; y_new[0] = yMin; y_new[1] = yMax; y_new[2] = yMax; y_new[3] = yMin; y_new[4] = yMin; theHisto_->AddBin(5, x_new, y_new); - if(verbose_) std::cout << "Adding bin from (" << xMin << "," << yMin << ") to (" << xMax << "," << yMax << ")" << std::endl; + if(printLevel_ == LauOutputLevel::Verbose) std::cout << "Adding bin from (" << xMin << "," << yMin << ") to (" << xMax << "," << yMax << ")" << std::endl; return; } //If not the last iteration then divide the bin Int_t n_divx=divisions[iter]; Int_t n_divy=divisions[iter]; - if(verbose_) std::cout << "Dividing bin from (" << xMin << "," << yMin << ") to (" << xMax << "," << yMax << ") into " << n_divx << " by " << n_divy << " subbins" << std::endl; + if(printLevel_ == LauOutputLevel::Verbose) std::cout << "Dividing bin from (" << xMin << "," << yMin << ") to (" << xMax << "," << yMax << ") into " << n_divx << " by " << n_divy << " subbins" << std::endl; Double_t *xIn = new Double_t[nEntries]; Double_t *yIn = new Double_t[nEntries]; Int_t *xIndex = new Int_t [nEntries+2]; Int_t *yIndex = new Int_t [nEntries+2]; Int_t xCountIn = 0; for(Int_t i = 0; ixMax)||(ys[i]yMax)) continue; xIn[xCountIn] = xs[i]; ++xCountIn; } //find the delimitting x and y values for the sub bins Double_t xLimits[n_divx + 1]; Double_t yLimits[n_divx][n_divy + 1]; //first sort entries in x and divide bin into equally populated bins in x TMath::Sort(xCountIn, xIn, xIndex, false); xLimits[0] = xMin; xLimits[n_divx] = xMax; for (Int_t nDivx = 0; nDivx < n_divx; ++nDivx){ if (nDivx < (n_divx-1)){ xLimits[nDivx+1] = 
xIn[xIndex[xCountIn*(nDivx+1)/n_divx]]; } //for each bin in x divide into equally populated bins in y yLimits[nDivx][0] = yMin; yLimits[nDivx][n_divy] = yMax; Int_t yCountIn = 0; for(Int_t i = 0; ixMax)||(ys[i]yMax)) continue; if ((xs[i]=xLimits[nDivx+1])||(ys[i]yMax)) continue; yIn[yCountIn] = ys[i]; ++yCountIn; } TMath::Sort(yCountIn, yIn, yIndex, false); for (Int_t nDivy = 1; nDivy < n_divy; ++nDivy){ yLimits[nDivx][nDivy] = yIn[yIndex[yCountIn*nDivy/n_divy]]; } } delete[] xIn; delete[] yIn; delete[] xIndex; delete[] yIndex; //call for each sub bin for (Int_t nDivx = 0; nDivx < n_divx; ++nDivx){ for (Int_t nDivy = 0; nDivy < n_divy; ++nDivy){ this->getHisto(xLimits[nDivx], xLimits[nDivx + 1], yLimits[nDivx][nDivy], yLimits[nDivx][nDivy + 1], xs, ys, nEntries, divisions,iter+1); } } } diff --git a/src/LauSPlot.cc b/src/LauSPlot.cc index 993a543..81c61df 100644 --- a/src/LauSPlot.cc +++ b/src/LauSPlot.cc @@ -1,1303 +1,1295 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauSPlot.cc \brief File containing implementation of LauSPlot class. Class for defining the SPlot technique based on TSplot from ROOT by the following authors: Muriel Pivk, Anna Kreshuk (10/2005). (Original copyright notice below) */ /********************************************************************** * * * Copyright (c) 2005 ROOT Foundation, CERN/PH-SFT * * * **********************************************************************/ #include #include #include -using std::cout; -using std::cerr; -using std::endl; -using std::vector; #include "TEventList.h" #include "TFile.h" #include "TLeaf.h" #include "TMath.h" #include "TObjArray.h" #include "TSystem.h" #include "TTree.h" #include "TVirtualFitter.h" #include "LauSPlot.hh" extern void Yields(Int_t &, Double_t *, Double_t &f, Double_t *x, Int_t iflag); ClassImp(LauSPlot) LauSPlot::LauSPlot(const TString& fileName, const TString& treeName, Int_t firstExpt, Int_t nExpt, const NameSet& variableNames, const NumbMap& freeSpecies, const NumbMap& fixdSpecies, const TwoDMap& twodimPDFs, Bool_t sigSplit, Bool_t scfDPSmeared) : fileName_(fileName), inputTreeName_(treeName), cnTreeName_(""), sweightTreeName_(""), file_(0), inputTree_(0), cnTree_(0), sweightTree_(0), eventList_(0), variableNames_(variableNames), freeSpecies_(freeSpecies), fixdSpecies_(fixdSpecies), origFreeSpecies_(freeSpecies), origFixdSpecies_(fixdSpecies), twodimPDFs_(twodimPDFs), signalSplit_(sigSplit), scfDPSmear_(scfDPSmeared), readInput_(kFALSE), definedCNBranches_(kFALSE), definedSWeightBranches_(kFALSE), firstExpt_(firstExpt), nExpt_(nExpt), iExpt_(0), nEvents_(0), nDiscVars_(variableNames.size()), nFreeSpecies_(freeSpecies.size()), nFixdSpecies_(fixdSpecies.size()), nSpecies_(freeSpecies.size()+fixdSpecies.size()) { this->openInputFileAndTree(); this->readInputInfo(); this->createSWeightTree(); if (nFixdSpecies_>0) { this->createCNTree(); } } LauSPlot::~LauSPlot() { // seems that closing the file deletes the tree // so only 
delete if the file is still open if (file_ && file_->IsOpen()) { delete inputTree_; inputTree_ = 0; delete sweightTree_; sweightTree_ = 0; delete cnTree_; cnTree_ = 0; } delete file_; file_ = 0; } void LauSPlot::openInputFileAndTree() { // first check whether we've already opened up the file or not if (!file_) { // if not, first check the filename and if all ok create the file if (fileName_ == "") { - cerr<<"ERROR in LauSPlot::createFileAndTree : Bad filename supplied, not creating file or tree."<IsZombie() || !file_->IsWritable()) { - cerr<<"ERROR in LauSPlot::createFileAndTree : Problem opening file \""<cd(); inputTree_ = dynamic_cast(file_->Get(inputTreeName_)); inputTree_->SetDirectory(file_); } } void LauSPlot::readInputInfo() { // Read the tree and then setup the maps so we know which leaves to // read from the tree to get the various PDF values Bool_t inputOK = this->readInputLeaves(); if (!inputOK) { this->readInput(inputOK); return; } inputOK = this->checkLeaves(); this->readInput(inputOK); return; } Bool_t LauSPlot::readInputLeaves() { // Read all the leaves in the tree and store them in the leaves map if (!inputTree_) { - cerr<<"ERROR in LauSPlot::readInputInfo : Invalid pointer to data tree."<(inputTree_->GetNbranches()) : 0; TObjArray* pLeaves = inputTree_->GetListOfLeaves(); if (!pLeaves) { - cerr<<"ERROR in LauSPlot::readInputInfo : Problem retrieving leaves from the tree."< leaves.GetSize()) { - cerr<<"ERROR in LauSPlot::readInputInfo : List of leaves is smaller than number of branches - this is strange!"<(leaves[iLeaf]); // we can't deal with arrays Int_t size = leaf->GetNdata(); if (size != 1) { - cerr<<"ERROR in LauSPlot::readInputInfo : Tree has array branches, can't deal with those."<GetName(); // initialise an entry in the maps to hold the value leaves_[name] = leaf; } return kTRUE; } Bool_t LauSPlot::checkLeaves() const { // Analyse the names of the leaves to check that we have a leaf for // all bits of information we require, i.e. 
a likelihood value for // each combination of variable and species // If we have 2D PDFs then we have to look for some extra leaves for ( TwoDMap::const_iterator twodim_iter = twodimPDFs_.begin(); twodim_iter != twodimPDFs_.end(); ++twodim_iter ) { const TString& specName = twodim_iter->first; const TString& firstVarName = twodim_iter->second.first; const TString& secondVarName = twodim_iter->second.second; TString expectedName(specName); expectedName += firstVarName; expectedName += secondVarName; expectedName += "Like"; Bool_t found(kFALSE); for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { found = kTRUE; break; } } if (!found) { - cerr<<"ERROR in LauSPlot::checkLeaves : Could not find expected leaf \""<Like for (NumbMap::const_iterator fixd_iter = fixdSpecies_.begin(); fixd_iter != fixdSpecies_.end(); ++fixd_iter) { const TString& specName = fixd_iter->first; // if the signal is split we need to do a dedicated search // for sigTM and sigSCF, so skip the signal here if ( specName == "sig" && this->signalSplit() ) { continue; } for (NameSet::const_iterator vars_iter = variableNames_.begin(); vars_iter != variableNames_.end(); ++vars_iter) { const TString& varName = (*vars_iter); TString expectedName(specName); expectedName += varName; expectedName += "Like"; Bool_t found(kFALSE); for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { found = kTRUE; break; } } if (!found) { - cerr<<"ERROR in LauSPlot::checkLeaves : Could not find expected leaf \""<first; // if the signal is split we need to do a dedicated search // for sigTM and sigSCF, so skip the signal here if ( specName == "sig" && this->signalSplit() ) { continue; } for (NameSet::const_iterator vars_iter = variableNames_.begin(); vars_iter != variableNames_.end(); ++vars_iter) { const TString& varName = (*vars_iter); TString expectedName(specName); expectedName += varName; expectedName += "Like"; Bool_t found(kFALSE); for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { found = kTRUE; break; } } if (!found) { - cerr<<"ERROR in LauSPlot::checkLeaves : Could not find expected leaf \""<signalSplit() ) { // now need to search for the sigTM and sigSCF leaves for (NameSet::const_iterator vars_iter = variableNames_.begin(); vars_iter != variableNames_.end(); ++vars_iter) { const TString& varName = (*vars_iter); TString expectedName("sigTM"); expectedName += varName; expectedName += "Like"; Bool_t found(kFALSE); for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { found = kTRUE; break; } } if (!found) { - cerr<<"ERROR in LauSPlot::checkLeaves : Could not find expected leaf \""<first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { found = kTRUE; break; } } if (!found) { - cerr<<"ERROR in LauSPlot::checkLeaves : Could not find expected leaf \""<first; const TLeaf* leaf = leaf_iter->second; if 
((leafName == expectedName) && (leaf != 0)) { found = kTRUE; break; } } if (!found) { - cerr<<"ERROR in LauSPlot::checkLeaves : Could not find expected leaf \""<cd(); cnTree_ = new TTree(cnTreeName_, cnTreeName_); cnTree_->SetDirectory(file_); this->definedCNBranches(kFALSE); } } void LauSPlot::createSWeightTree() { // check whether we've already created the tree if (!sweightTree_) { // if not change to the file's directory and create the tree sweightTreeName_ = inputTreeName_; sweightTreeName_ += "_sWeights"; file_->cd(); sweightTree_ = new TTree(sweightTreeName_, sweightTreeName_); sweightTree_->SetDirectory(file_); this->definedSWeightBranches(kFALSE); } } void LauSPlot::defineCNBranches() { if (this->definedCNBranches()) { - cerr<<"ERROR in LauSPlot::defineCNBranches : Already defined branches, not doing it again."<Branch("iExpt", &iExpt_, "iExpt/I"); for (std::map::iterator excl_iter = cN_.begin(); excl_iter != cN_.end(); ++excl_iter) { const TString& exclName = excl_iter->first; NumbMap& species = excl_iter->second; for (NumbMap::iterator spec_iter = species.begin(); spec_iter != species.end(); ++spec_iter) { const TString& specName = spec_iter->first; Double_t * pointer = &(spec_iter->second); TString name(specName); name += "_cN"; if (exclName == "none") { name += "_all"; } else { name += "_no"; name += exclName; } TString thirdPart(name); thirdPart += "/D"; cnTree_->Branch(name, pointer, thirdPart); } } this->definedCNBranches(kTRUE); } void LauSPlot::defineSWeightBranches() { if (this->definedSWeightBranches()) { - cerr<<"ERROR in LauSPlot::defineSWeightBranches : Already defined branches, not doing it again."<::iterator excl_iter = sWeightsCurrent_.begin(); excl_iter != sWeightsCurrent_.end(); ++excl_iter) { const TString& exclName = excl_iter->first; NumbMap& species = excl_iter->second; for (NumbMap::iterator spec_iter = species.begin(); spec_iter != species.end(); ++spec_iter) { const TString& specName = spec_iter->first; Double_t * pointer = &(spec_iter->second); TString name(specName); name += "_sWeight"; if (exclName == "none") { name += "_all"; } else { name += "_no"; name += exclName; } TString thirdPart(name); thirdPart += "/D"; sweightTree_->Branch(name, pointer, thirdPart); } } this->definedSWeightBranches(kTRUE); } void LauSPlot::setExperiment(Int_t iExpt) { if (!inputTree_) { - cerr<<"ERROR in LauSPlot::setExperiment : Invalid pointer to data tree."<SetDirectory(file_); } // fill the event list with this experiment's events TString listName(eventList_->GetName()); listName.Prepend(">>"); TString selection("iExpt=="); selection += iExpt; - cout<<"LauSPlot::setExperiment : Setting tree to experiment number "<Draw(listName,selection); // find how many events there are nEvents_ = eventList_->GetN(); - cout<<" Found "<readExpt(); } void LauSPlot::readExpt() { for (Int_t iEvt(0); iEvt < nEvents_; ++iEvt) { // Find which entry from the full tree contains the requested event Long64_t iEntry = eventList_ ? eventList_->GetEntry(iEvt) : iEvt; if (iEntry<0) { // this shouldn't happen, but just in case... 
- cerr<<"ERROR in LauSPlot::readExpt : Problem retrieving event."<Exit(EXIT_FAILURE); } // Then retrieve that entry from the tree inputTree_->GetEntry(iEntry); // If needed retrieve the SCF fraction values if ( this->signalSplit() ) { for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == "sigSCFFrac") && (leaf != 0)) { scfFrac_[iEvt] = leaf->GetValue(); break; } } } // Copy the leaf values into discPdf_ for ( TwoDMap::const_iterator twodim_iter = twodimPDFs_.begin(); twodim_iter != twodimPDFs_.end(); ++twodim_iter ) { const TString& specName = twodim_iter->first; const TString& firstVarName = twodim_iter->second.first; const TString& secondVarName = twodim_iter->second.second; TString varName = firstVarName + secondVarName; TString expectedName(specName); expectedName += varName; expectedName += "Like"; for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { discPdf_[iEvt][specName][varName] = leaf->GetValue(); break; } } } Bool_t needSignalSearch(kFALSE); for (NumbMap::const_iterator fixd_iter = fixdSpecies_.begin(); fixd_iter != fixdSpecies_.end(); ++fixd_iter) { const TString& specName = fixd_iter->first; // if the signal is split we need to do a dedicated search // for sigTM and sigSCF, so skip the signal here if ( specName == "sig" && this->signalSplit() ) { needSignalSearch = kTRUE; continue; } for (NameSet::const_iterator vars_iter = variableNames_.begin(); vars_iter != variableNames_.end(); ++vars_iter) { const TString& varName = (*vars_iter); TString expectedName(specName); expectedName += varName; expectedName += "Like"; for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { discPdf_[iEvt][specName][varName] = leaf->GetValue(); break; } } } } for (NumbMap::const_iterator free_iter = freeSpecies_.begin(); free_iter != freeSpecies_.end(); ++free_iter) { const TString& specName = free_iter->first; // if the signal is split we need to do a dedicated search // for sigTM and sigSCF, so skip the signal here if ( specName == "sig" && this->signalSplit() ) { needSignalSearch = kTRUE; continue; } for (NameSet::const_iterator vars_iter = variableNames_.begin(); vars_iter != variableNames_.end(); ++vars_iter) { const TString& varName = (*vars_iter); TString expectedName(specName); expectedName += varName; expectedName += "Like"; for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { discPdf_[iEvt][specName][varName] = leaf->GetValue(); break; } } } } if ( needSignalSearch ) { for (NameSet::const_iterator vars_iter = variableNames_.begin(); vars_iter != variableNames_.end(); ++vars_iter) { const TString& varName = (*vars_iter); TString specName("sigTM"); TString expectedName(specName); expectedName += varName; expectedName += "Like"; for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if 
((leafName == expectedName) && (leaf != 0)) { discPdf_[iEvt][specName][varName] = leaf->GetValue(); break; } } specName = "sigSCF"; expectedName = specName; expectedName += varName; expectedName += "Like"; for (LeafMap::const_iterator leaf_iter = leaves_.begin(); leaf_iter != leaves_.end(); ++leaf_iter) { const TString& leafName = leaf_iter->first; const TLeaf* leaf = leaf_iter->second; if ((leafName == expectedName) && (leaf != 0)) { discPdf_[iEvt][specName][varName] = leaf->GetValue(); break; } } } } } } -void LauSPlot::runCalculations(const TString& option) +void LauSPlot::runCalculations(const LauOutputLevel printLevel) { // Calculates the sWeights and cN coeffs - // The option controls the print level: - // "Q" - no print out (default) - // "V" - prints the estimated # of events in species - // "VV" - as "V" + the MINUIT printing + sums of weights for control if (!this->readInput()) { - cerr<<"ERROR in LauSPlot::runCalculations : The input ntuple has not been successfully read, can't calculate anything."<checkFitter(); // Loop over experiments for (iExpt_ = firstExpt_; iExpt_ < (firstExpt_+nExpt_); ++iExpt_) { this->setExperiment(iExpt_); if (nEvents_ < 1) { - cerr<<"ERROR in LauSPlot::runCalculations : Zero events in experiment "<calcTotPDFValues(exclName); // Reset the fitter - this->initialiseFitter(opt); + this->initialiseFitter(printLevel); // Set the parameters to their initial values this->setFitParameters(); // Run the fit this->runFit(); // Get the fitted parameter values back - this->retrieveFittedParameters(opt); + this->retrieveFittedParameters(printLevel); // Get the covariance matrix Bool_t covMatOK = this->calcCovMatrix(); Double_t * covmat(0); if (!covMatOK) { TVirtualFitter * fitter = TVirtualFitter::GetFitter(); covmat = fitter->GetCovarianceMatrix(); } - if (opt.Contains("W")) { + if (printLevel == LauOutputLevel::Verbose) { this->printCovMatrixElements(covmat); } // calculate the cN and sWeights from the covariance matrix if (nFixdSpecies_ > 0) { this->calcCNCoeffs(exclName, covmat); } this->calcSWeights(exclName, covmat); // print verbose info if required - if (opt.Contains("W")) { + if (printLevel == LauOutputLevel::Verbose) { this->printSumOfWeights(exclName); } } // Finally fill all the branches for this experiment if (nFixdSpecies_ > 0) { this->fillCNBranches(); } this->fillSWeightBranches(); } } void LauSPlot::checkFitter() const { TString minuitName("TFitter"); TVirtualFitter * fitter = TVirtualFitter::GetFitter(); if (fitter) { TString fitterName(fitter->IsA()->GetName()); if (fitterName != minuitName) { delete fitter; fitter = 0; } } fitter = TVirtualFitter::Fitter(0); } -void LauSPlot::initialiseFitter(const TString& opt) +void LauSPlot::initialiseFitter(const LauOutputLevel printLevel) { TVirtualFitter * fitter = TVirtualFitter::GetFitter(); fitter->Clear(); fitter->SetFCN(Yields); fitter->SetObjectFit(this); // Set the print level Double_t arglist[10]; - if (opt.Contains("Q")) { - arglist[0]=-1; - } - if (opt.Contains("V")) { - arglist[0]=0; - } - if (opt.Contains("W")) { - arglist[0]=1; + switch ( printLevel ) { + case LauOutputLevel::Quiet : + arglist[0]=-1; + break; + case LauOutputLevel::Standard : + arglist[0]=0; + break; + case LauOutputLevel::Verbose : + arglist[0]=1; + break; } fitter->ExecuteCommand("SET PRINT", arglist, 1); // Need to set the "SET ERR" command to +0.5 for +/-1 sigma errors // for maximum likelihood fit. 
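For orientation, the value passed to "SET ERR" in the next statement reflects the standard MINUIT error definition for a negative log-likelihood FCN rather than anything specific to LauSPlot: the 1-sigma parameter uncertainty corresponds to

\[
  \Delta\!\left(-\ln\mathcal{L}\right) = 0.5
  \quad\Longleftrightarrow\quad
  \Delta\chi^{2} = 1 ,
\]

so 0.5 is used here, whereas a chi-square FCN would use the value 1.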
arglist[0] = 0.5; fitter->ExecuteCommand("SET ERR", arglist, 1); } void LauSPlot::setFitParameters() const { TVirtualFitter * fitter = TVirtualFitter::GetFitter(); // must add the parameters in the same order as they are stored in pdfTot_ Int_t ispecies(0); const NumbMap& species = pdfTot_.front(); for (NumbMap::const_iterator spec_iter = species.begin(); spec_iter != species.end(); ++spec_iter) { const TString& name(spec_iter->first); // starting parameters should be the original values, // not those that came out of the last fit NumbMap::const_iterator free_iter = origFreeSpecies_.find(name); NumbMap::const_iterator fixd_iter = origFixdSpecies_.find(name); Bool_t fixed = fixd_iter != origFixdSpecies_.end(); Double_t value(0.0); if (fixed) { value = fixd_iter->second; } else { value = free_iter->second; } fitter->SetParameter(ispecies, name, value, 1, 0, 0); if (fixed) { fitter->FixParameter(ispecies); } ++ispecies; } } void LauSPlot::runFit() { TVirtualFitter * fitter = TVirtualFitter::GetFitter(); Double_t arglist[10]; arglist[0] = 1000*nFreeSpecies_; // maximum iterations arglist[1] = 0.05; // tolerance : min EDM = 0.001*tolerance // Execute MIGRAD Int_t fitStatus = fitter->ExecuteCommand("MIGRAD", arglist, 2); if (fitStatus != 0) { - cerr<<"ERROR in LauSPlot::runFit : Error during MIGRAD minimisation."<ExecuteCommand("HESSE", arglist, 1); if (fitStatus != 0) { - cerr<<"ERROR in LauSPlot::runFit : Error during HESSE error calculation."<first<<","<first<<"] = "<first<<","<first<<"] = "<first<<","<first<<"] = "<first<<","<first<<"] = "<first); NumbMap::iterator free_iter = freeSpecies_.find(name); if (free_iter != freeSpecies_.end()) { free_iter->second = fitter->GetParameter(ispecies); - if (!opt.Contains("Q")) { - cout<<"Estimated # of events in species "<second<first; Double_t sumweight(0.0); for (Int_t iEvt(0); iEvt < nEvents_; ++iEvt) { const NumbMap& specWeightMap = sWeights_[iEvt].find(exclName)->second; Double_t weight = specWeightMap.find(specName)->second; sumweight += weight; } - cout<<"Sum of sWeights for species \""<first; // if the signal is split we need to do a dedicated search // for sigTM and sigSCF, so skip the signal here if ( specName == "sig" && this->signalSplit() ) { needSignalSearch = kTRUE; continue; } pdfTot_[iEvt][specName] = 1.0; // loop through the 2D histo list NameSet skipList; for ( TwoDMap::const_iterator twodim_iter = twodimPDFs_.begin(); twodim_iter != twodimPDFs_.end(); ++twodim_iter ) { // if the entry doesn't refer to this // species then skip on if ( specName != twodim_iter->first ) { continue; } // retrieve the two variable names const TString& firstVarName = twodim_iter->second.first; const TString& secondVarName = twodim_iter->second.second; if ( firstVarName != exclName && secondVarName != exclName ) { // if neither name is the one being excluded then... 
// add them both to the skip list skipList.insert( firstVarName ); skipList.insert( secondVarName ); // and multiply the total by the combined PDF value TString varName = firstVarName + secondVarName; pdfTot_[iEvt][specName] *= discPdf_[iEvt][specName][varName]; } } // loop through all the variables for (NameSet::const_iterator var_iter = variableNames_.begin(); var_iter != variableNames_.end(); ++var_iter) { const TString& varName = (*var_iter); // if the variable isn't the one being excluded if (exclName != varName) { // and it's not involved in a 2D PDF if ( skipList.find(varName) == skipList.end() ) { // multiply the total by its PDF value pdfTot_[iEvt][specName] *= discPdf_[iEvt][specName][varName]; } } } } for (NumbMap::const_iterator spec_iter = freeSpecies_.begin(); spec_iter != freeSpecies_.end(); ++spec_iter) { const TString& specName = spec_iter->first; // if the signal is split we need to do a dedicated search // for sigTM and sigSCF, so skip the signal here if ( specName == "sig" && this->signalSplit() ) { needSignalSearch = kTRUE; continue; } pdfTot_[iEvt][specName] = 1.0; // loop through the 2D histo list NameSet skipList; for ( TwoDMap::const_iterator twodim_iter = twodimPDFs_.begin(); twodim_iter != twodimPDFs_.end(); ++twodim_iter ) { // if the entry doesn't refer to this // species then skip on if ( specName != twodim_iter->first ) { continue; } // retrieve the two variable names const TString& firstVarName = twodim_iter->second.first; const TString& secondVarName = twodim_iter->second.second; if ( firstVarName != exclName && secondVarName != exclName ) { // if neither name is the one being excluded then... // add them both to the skip list skipList.insert( firstVarName ); skipList.insert( secondVarName ); // and multiply the total by the combined PDF value TString varName = firstVarName + secondVarName; pdfTot_[iEvt][specName] *= discPdf_[iEvt][specName][varName]; } } // loop through all the variables for (NameSet::const_iterator var_iter = variableNames_.begin(); var_iter != variableNames_.end(); ++var_iter) { const TString& varName = (*var_iter); // if the variable isn't the one being excluded if (exclName != varName) { // and it's not involved in a 2D PDF if ( skipList.find(varName) == skipList.end() ) { // multiply the total by its PDF value pdfTot_[iEvt][specName] *= discPdf_[iEvt][specName][varName]; } } } } if ( needSignalSearch ) { // loop through the 2D histo list TString specName("sigTM"); Double_t tmPDFVal(1.0); NameSet skipList; for ( TwoDMap::const_iterator twodim_iter = twodimPDFs_.begin(); twodim_iter != twodimPDFs_.end(); ++twodim_iter ) { // if the entry doesn't refer to this // species then skip on if ( specName != twodim_iter->first ) { continue; } // retrieve the two variable names const TString& firstVarName = twodim_iter->second.first; const TString& secondVarName = twodim_iter->second.second; if ( firstVarName != exclName && secondVarName != exclName ) { // if neither name is the one being excluded then... 
// add them both to the skip list skipList.insert( firstVarName ); skipList.insert( secondVarName ); // and multiply the total by the combined PDF value TString varName = firstVarName + secondVarName; tmPDFVal *= discPdf_[iEvt][specName][varName]; } } // loop through all the variables for (NameSet::const_iterator var_iter = variableNames_.begin(); var_iter != variableNames_.end(); ++var_iter) { const TString& varName = (*var_iter); // if the variable isn't the one being excluded if (exclName != varName) { // and it's not involved in a 2D PDF if ( skipList.find(varName) == skipList.end() ) { // multiply the total by its PDF value tmPDFVal *= discPdf_[iEvt][specName][varName]; } } } tmPDFVal *= (1.0 - scfFrac_[iEvt]); // loop through the 2D histo list specName = "sigSCF"; Double_t scfPDFVal(1.0); skipList.clear(); for ( TwoDMap::const_iterator twodim_iter = twodimPDFs_.begin(); twodim_iter != twodimPDFs_.end(); ++twodim_iter ) { // if the entry doesn't refer to this // species then skip on if ( specName != twodim_iter->first ) { continue; } // retrieve the two variable names const TString& firstVarName = twodim_iter->second.first; const TString& secondVarName = twodim_iter->second.second; if ( firstVarName != exclName && secondVarName != exclName ) { // if neither name is the one being excluded then... // add them both to the skip list skipList.insert( firstVarName ); skipList.insert( secondVarName ); // and multiply the total by the combined PDF value TString varName = firstVarName + secondVarName; scfPDFVal *= discPdf_[iEvt][specName][varName]; } } // loop through all the variables for (NameSet::const_iterator var_iter = variableNames_.begin(); var_iter != variableNames_.end(); ++var_iter) { const TString& varName = (*var_iter); // if the variable isn't the one being excluded if (exclName != varName) { // and it's not involved in a 2D PDF if ( skipList.find(varName) == skipList.end() ) { // multiply the total by its PDF value scfPDFVal *= discPdf_[iEvt][specName][varName]; } } } if ( exclName == "DP" || !this->scfDPSmear() ) { scfPDFVal *= scfFrac_[iEvt]; } pdfTot_[iEvt]["sig"] = tmPDFVal + scfPDFVal; } } } void LauSPlot::calcCNCoeffs(const TString& exclName, const Double_t *covmat) { // Computes the cN for the extended sPlots from the covariance matrix if (nFixdSpecies_ <= 0) { return; } Int_t species_n(0); for (NumbMap::const_iterator iter_n = freeSpecies_.begin(); iter_n != freeSpecies_.end(); ++iter_n) { Int_t species_j(0); const TString& specName = iter_n->first; Double_t value = iter_n->second; cN_[exclName][specName] = value; for (NumbMap::const_iterator iter_j = freeSpecies_.begin(); iter_j != freeSpecies_.end(); ++iter_j) { if (covmat) { cN_[exclName][specName] -= covmat[species_n*nSpecies_+species_j]; } else { cN_[exclName][specName] -= covMat_(species_n,species_j); } ++species_j; } ++species_n; } } void LauSPlot::calcSWeights(const TString& exclName, Double_t *covmat) { // Computes the sWeights from the covariance matrix // NB for the extended sPlot the sum in the denominator is still over all species, // while that in the numerator is only over the free species. // Similarly the sWeights can only be calculated for the free species. 
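Written out explicitly (for orientation only; the symbols follow the comment above and the standard sPlot prescription), the weight computed below for free species n and event e is

\[
  {}_{s}W_{n}(e) \;=\;
  \frac{\displaystyle\sum_{j \in \text{free}} V_{nj}\, f_{j}(e)}
       {\displaystyle\sum_{k \in \text{all}} N_{k}\, f_{k}(e)}
\]

where V is the covariance matrix of the fitted yields, f_j(e) is the total discriminating-variable PDF value for species j and event e (pdfTot_ in the code), the numerator sum runs over the free species only and, for the extended sPlot, the denominator sum runs over both free and fixed species with their yields N_k.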
Double_t numerator(0.0), denominator(0.0); for (Int_t iEvent = 0; iEvent < nEvents_; ++iEvent) { denominator = 0.0; for (NumbMap::const_iterator free_iter = freeSpecies_.begin(); free_iter != freeSpecies_.end(); ++free_iter) { denominator += free_iter->second * pdfTot_[iEvent][free_iter->first]; } for (NumbMap::const_iterator fixd_iter = fixdSpecies_.begin(); fixd_iter != fixdSpecies_.end(); ++fixd_iter) { denominator += fixd_iter->second * pdfTot_[iEvent][fixd_iter->first]; } Int_t species_n(0); for (NumbMap::const_iterator iter_n = freeSpecies_.begin(); iter_n != freeSpecies_.end(); ++iter_n) { numerator = 0.0; Int_t species_j(0); for (NumbMap::const_iterator iter_j = freeSpecies_.begin(); iter_j != freeSpecies_.end(); ++iter_j) { if (covmat) { numerator += covmat[species_n*nSpecies_+species_j] * pdfTot_[iEvent][iter_j->first]; } else { numerator += covMat_(species_n,species_j) * pdfTot_[iEvent][iter_j->first]; } ++species_j; } sWeights_[iEvent][exclName][iter_n->first] = numerator/denominator; ++species_n; } } } void LauSPlot::fillCNBranches() { if (!cnTree_) { - cerr<<"ERROR in LauSPlot::fillCNBranches : Tree not created, cannot fill branches."<definedCNBranches()) { this->defineCNBranches(); } cnTree_->Fill(); } void LauSPlot::fillSWeightBranches() { if (!sweightTree_) { - cerr<<"ERROR in LauSPlot::fillSWeightBranches : Tree not created, cannot fill branches."<definedSWeightBranches()) { this->copyEventWeights(0); this->defineSWeightBranches(); } for (Int_t iEvent = 0; iEvent < nEvents_; ++iEvent) { this->copyEventWeights(iEvent); sweightTree_->Fill(); } } void LauSPlot::copyEventWeights(Int_t iEvent) { for (std::map::const_iterator excl_iter = sWeights_[iEvent].begin(); excl_iter != sWeights_[iEvent].end(); ++excl_iter) { const TString& exclName = excl_iter->first; const NumbMap& species = excl_iter->second; for (NumbMap::const_iterator spec_iter = species.begin(); spec_iter != species.end(); ++spec_iter) { const TString& specName = spec_iter->first; sWeightsCurrent_[exclName][specName] = spec_iter->second; } } } void LauSPlot::writeOutResults() { // write out the results // remove the transient objects that we don't want (re-)written to the file if (eventList_) { delete eventList_; eventList_ = 0; } if (inputTree_) { delete inputTree_; inputTree_ = 0; } // first add the input tree as a friend of the output tree this->addFriendTree(); // then write everything to the file and clean up file_->cd(); file_->Write(); file_->Close(); delete file_; file_ = 0; } void LauSPlot::addFriendTree() { if (!sweightTree_) { - cerr<<"ERROR in LauSPlot::addFriendTree : Tree not created, cannot add friend."<AddFriend(inputTreeName_,fileName_); } void Yields(Int_t &, Double_t *, Double_t &f, Double_t *x, Int_t /*iflag*/) { // FCN-function for Minuit f = 0.0; TVirtualFitter *fitter = TVirtualFitter::GetFitter(); LauSPlot* theModel = dynamic_cast(fitter->GetObjectFit()); const std::vector& pdfTot = theModel->totalPdf(); Double_t ntot(0.0); for (std::vector::const_iterator evt_iter = pdfTot.begin(); evt_iter != pdfTot.end(); ++evt_iter) { // loop over events const LauSPlot::NumbMap& species = (*evt_iter); Int_t ispecies(0); Double_t lik(0.0); ntot = 0.0; for (LauSPlot::NumbMap::const_iterator spec_iter = species.begin(); spec_iter != species.end(); ++spec_iter) { // loop over species lik += x[ispecies] * spec_iter->second; ntot += x[ispecies]; ++ispecies; } if (lik < 0.0) { // make f the worst possible value to force MINUIT // out of this region of parameter space f = -DBL_MAX; break; } else { f += 
TMath::Log(lik);
		}
	}

	// extended likelihood
	f = (ntot-f);
}
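For reference, the quantity returned in f by the Yields FCN above is the negative logarithm of the extended likelihood in the species yields N_s, up to terms independent of the yields, with P_s(e) denoting the total discriminating-variable PDF value for species s and event e:

\[
  F(\{N_{s}\}) \;=\; \sum_{s} N_{s}
  \;-\; \sum_{e=1}^{N_{\text{evt}}} \ln\!\Big( \sum_{s} N_{s}\, P_{s}(e) \Big)
  \;=\; -\ln\mathcal{L}_{\text{ext}} + \text{const.}
\]

The branch that assigns -DBL_MAX when the summed PDF value goes negative leaves F at a very large value after the final f = (ntot-f) step, steering MINUIT away from unphysical yield combinations.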