Page MenuHomeHEPForge

No OneTemporary

diff --git a/doc/release.notes b/doc/release.notes
index ea98684..03b093d 100644
--- a/doc/release.notes
+++ b/doc/release.notes
@@ -1,76 +1,87 @@
///////////////////////////////////////////////////////////////
/// ///
/// This is the History file for the Laura++ package. ///
/// ///
///////////////////////////////////////////////////////////////
+20th December 2013 Daniel Craik
+
+* Added Lau2DCubicSpline which provides cubic spline interpolation of a histogram
+ - Added Lau2DSplineDP which uses a spline to model variation across a DP (e.g. efficiency)
+ - Added pABC, Lau2DAbsDP, for Lau2DHistDP and Lau2DSplineDP and moved common code
+ in Lau2DHistDP and Lau2DSplineDP into ABC Lau2DAbsHistDP
+ - LauEffModel, LauDPDepSumPdf and LauDPDepMapPdf modified to use Lau2DAbsDP instead of
+ Lau2DHistDP
+ - setEffSpline method added to LauEffModel and useSpline flag added to constructor for
+ LauDPDepSumPdf to allow use of splines
+
5th December 2013 Thomas Latham
* Fix small bug in GenFitKpipi example where background asymmetry parameter had
its limits the wrong way around
4th December 2013 Daniel Craik
* Updated 2D chi-squared code to use adaptive binning.
3rd December 2013 Thomas Latham
* Generalise the Makefile in the examples directory
- results in minor changes to the names of 3 of the binaries
29th November 2013 Thomas Latham
* Fixed bug in ResultsExtractor where the output file was not written
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Laura++ v1r1p1
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
22nd November 2013 Thomas Latham
* Fixed bug in LauCPFitModel where values of q = -1 extra PDFs
were used regardless of the event charge.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Laura++ v1r1
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
20th November 2013 Mark Whitehead
* Changed convention for the barrier factors, swapping from p* to p.
This seems to give more physically reasonable results.
The files updated are:
- src/LauGounarisSakuraiRes.cc
- src/LauRelBreitWignerRes.cc
18th October 2013 Thomas Latham
* Fix dependency problem in Makefile
8th October 2013 Thomas Latham
* Some fixes to yield implementation
* Minor bug fix in DP background histogram class
7th October 2013 Mark Whitehead
* Update to accept the yields and yield asymmetries as LauParameters.
All examples have been updated to reflect this change.
This updated the following files:
- inc/LauCPFitModel.hh
- inc/LauSimpleFitModel.hh
- inc/LauAbsFitModel.hh
- src/LauCPFitModel.cc
- src/LauSimpleFitModel.cc
* Addition of the following particles to src/LauResonanceMaker.cc
Ds*+-, Ds0*(2317)+-, Ds2*(2573)+-, Ds1*(2700)+- and Bs*0
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Laura++ v1r0
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
13th September 2013 Thomas Latham
* Initial import of the package into HEPforge
diff --git a/inc/Lau2DAbsDP.hh b/inc/Lau2DAbsDP.hh
new file mode 100644
index 0000000..fe7ee38
--- /dev/null
+++ b/inc/Lau2DAbsDP.hh
@@ -0,0 +1,57 @@
+
+// Copyright University of Warwick 2004 - 2013.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors:
+// Thomas Latham
+// John Back
+// Paul Harrison
+
+/*! \file Lau2DAbsDP.hh
+ \brief File containing declaration of Lau2DAbsDP class.
+*/
+
+/*! \class Lau2DAbsDP
+ \brief Pure abstract base class for defining a variation across a 2D DP.
+
+ Pure abstract base class for defining an unnormalised variation across a 2D DP.
+*/
+
+#ifndef LAU_2DABS_DP
+#define LAU_2DABS_DP
+
+#include "Rtypes.h"
+
+class TH2;
+class LauDaughters;
+class LauKinematics;
+
+class Lau2DAbsDP {
+
+ public:
+ //!Constructor
+ Lau2DAbsDP() {}
+
+ //! Destructor
+ virtual ~Lau2DAbsDP() {}
+
+ //! Perform the interpolation
+ /*!
+ \param [in] x the x-axis value
+ \param [in] y the y-axis value
+ \return the interpolated histogram value
+ */
+ virtual Double_t interpolateXY(Double_t x, Double_t y) const=0;
+
+ private:
+ //! Copy constructor - not implemented
+ Lau2DAbsDP( const Lau2DAbsDP& rhs );
+
+ //! Copy assignment operator - not implemented
+ Lau2DAbsDP& operator=(const Lau2DAbsDP& rhs);
+
+ ClassDef(Lau2DAbsDP,0) // Abstract base class for 2D DP variation
+};
+
+#endif
diff --git a/inc/Lau2DAbsHistDP.hh b/inc/Lau2DAbsHistDP.hh
new file mode 100644
index 0000000..7341beb
--- /dev/null
+++ b/inc/Lau2DAbsHistDP.hh
@@ -0,0 +1,117 @@
+
+// Copyright University of Warwick 2004 - 2013.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors:
+// Thomas Latham
+// John Back
+// Paul Harrison
+
+/*! \file Lau2DAbsHistDP.hh
+ \brief File containing declaration of Lau2DAbsHistDP class.
+*/
+
+/*! \class Lau2DAbsHistDP
+ \brief Abstract base class for defining a variation across a 2D DP based on a histogram.
+
+ Abstract base class for defining an unnormalised variation across a 2D DP based on a histogram.
+ Contains helper methods to vary bin contents in the input histogram within uncertainties and
+ scale the input histogram to match a given average value.
+ The histogram can be defined in the conventional DP (m13Sq vs m23Sq) or in the square DP and
+ the upper half can be used to describe symmetric DPs.
+*/
+
+#ifndef LAU_2DABSHIST_DP
+#define LAU_2DABSHIST_DP
+
+#include "Lau2DAbsDP.hh"
+
+class TH2;
+class LauDaughters;
+class LauKinematics;
+
+class Lau2DAbsHistDP : public Lau2DAbsDP {
+
+ public:
+ //! Constructor
+ /*!
+ \param [in] daughters the daughter particles
+ \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP
+ \param [in] squareDP boolean flag to specify whether the supplied histogram is in square DP coordinates
+ */
+ Lau2DAbsHistDP(const LauDaughters* daughters, Bool_t useUpperHalfOnly = kFALSE, Bool_t squareDP = kFALSE);
+
+ //! Copy constructor
+ //Lau2DAbsHistDP( const Lau2DAbsHistDP& rhs );
+
+ //! Destructor
+ virtual ~Lau2DAbsHistDP();
+
+ //! Perform the interpolation
+ /*!
+ \param [in] x the x-axis value
+ \param [in] y the y-axis value
+ \return the interpolated histogram value
+ */
+ virtual Double_t interpolateXY(Double_t x, Double_t y) const=0;
+
+ protected:
+ //! Fluctuate the contents of each histogram bin independently, in accordance with their errors
+ /*!
+ \param [in,out] hist the histogram
+ */
+ void doBinFluctuation(TH2* hist);
+
+ //! Rescale the histogram bin contents based on the desired average efficiency and its uncertainty
+ /*!
+ The new average is sampled from a Gaussian distribution G(x;avEff,avEffError).
+ The histogram is then rescaled according to newAvg/oldAvg.
+ \param [in,out] hist the histogram
+ \param [in] avEff the desired average efficiency
+ \param [in] avEffError the error on that efficiency
+ */
+ void raiseOrLowerBins(TH2* hist, Double_t avEff, Double_t avEffError);
+
+ //! Compute the average bin content for bins within the kinematic boundary
+ /*!
+ This method just uses the raw bin contents with no interpolation
+ \param [in,out] hist the histogram
+ \return the average value over the DP
+ */
+ Double_t computeAverageContents(TH2 const * const hist) const;
+
+ //! Check whether the given co-ordinates are within the kinematic boundary
+ /*!
+ \param [in] x the x co-ordinate
+ \param [in] y the y co-ordinate
+ \return true if the co-ordinates are within the kinematic boundary, otherwise false
+ */
+ Bool_t withinDPBoundaries(Double_t x, Double_t y) const;
+
+ //! If only using the upper half of the (symmetric) DP then transform into the correct half
+ /*!
+ \param [in,out] x the x co-ordinate
+ \param [in,out] y the y co-ordinate
+ */
+ void getUpperHalf(Double_t& x, Double_t& y) const;
+
+ private:
+ //! Copy constructor - not implemented
+ Lau2DAbsHistDP( const Lau2DAbsHistDP& rhs );
+
+ //! Copy assignment operator - not implemented
+ Lau2DAbsHistDP& operator=(const Lau2DAbsHistDP& rhs);
+
+ //! Kinematics used to check events are within DP boundary
+ const LauKinematics* kinematics_;
+
+ //! Boolean for using the upper half of DP
+ Bool_t upperHalf_;
+ //! Boolean for using square DP variables
+ Bool_t squareDP_;
+
+ ClassDef(Lau2DAbsHistDP,0) // Abstract base class for 2D DP variations based on a histogram
+};
+
+#endif
diff --git a/inc/Lau2DCubicSpline.hh b/inc/Lau2DCubicSpline.hh
new file mode 100644
index 0000000..87f6be7
--- /dev/null
+++ b/inc/Lau2DCubicSpline.hh
@@ -0,0 +1,201 @@
+
+// Copyright University of Warwick 2006 - 2013.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors:
+// Thomas Latham
+// John Back
+// Paul Harrison
+
+/*! \file Lau2DCubicSpline.hh
+ \brief File containing declaration of Lau2DCubicSpline class.
+*/
+
+/*! \class Lau2DCubicSpline
+ \brief Class for defining a 2D cubic spline based on an input histogram
+
+ Class for defining a 2D cubic spline based on RooBinned2DBicubicBase by
+ Manuel Tobias Schiller <manuel.schiller@nikhef.nl> (2012-08-29).
+
+ Smoothly interpolate between bin midpoints of a 2D histogram with
+ equal-sized bins
+
+ The interpolation function coincides with the saved histogram values in bin
+ midpoints; a cubic interpolation polynomial is used. Bins must all have the
+ same size. The interpolation polynomial in a "cell" between four bin
+ midpoints is:
+
+ \f[ p(x, y) = \sum_{i=0}^3 \sum_{j=0}^3 a_{ij} x^i y^j \f]
+
+ The coefficients \f$a_{ij}\f$ are determined by the requirement that
+ function value, first derivatives and the mixed second derivative must agree
+ with that of the binned function at the four bin midpoints at the edges of
+ the cell. Symmetric finite differences are used to approximate these
+ derivatives.
+
+ For each cell, the coefficients are determined at construction time. The
+ object also keeps a cache of 2D integrals over complete cells, such that 2D
+ integrations can be done analytically in a reasonably short amount of time.
+*/
+
+#ifndef LAU_2DCUBICSPLINE
+#define LAU_2DCUBICSPLINE
+
+class TH2;
+
+class Lau2DCubicSpline
+{
+ private:
+ //! Length of coefficient record in array
+ enum { NCoeff = 16, CoeffRecLen = 17 };
+
+ public:
+ //! Constructor from histogram
+ /*!
+ \param [in] h the histogram
+ */
+ Lau2DCubicSpline(const TH2& h);
+
+ //! Destructor
+ virtual ~Lau2DCubicSpline();
+
+ //! Evaluate the function at given point
+ /*!
+ \param [in] x the x co-ordinate
+ \param [in] y the y co-ordinate
+ */
+ virtual Double_t evaluate(Double_t x, Double_t y) const;
+
+ //! Evaluate analytical integral in x, y, or x and y
+ /*!
+ \param [in] x1 the lower x limit
+ \param [in] x2 the upper x limit
+ \param [in] y1 the lower y limit
+ \param [in] y2 the upper y limit
+ */
+ virtual Double_t analyticalIntegral(Double_t x1, Double_t x2, Double_t y1, Double_t y2) const;
+
+
+ private:
+ //! Copy constructor - not implemented
+ Lau2DCubicSpline( const Lau2DCubicSpline& rhs );
+
+ //! Copy assignment operator - not implemented
+ Lau2DCubicSpline& operator=(const Lau2DCubicSpline& rhs);
+
+ //! Number of bins in x axis
+ Int_t nBinsX;
+
+ //! Number of bins in y axis
+ Int_t nBinsY;
+
+ //! Bin size in x
+ Double_t binSizeX;
+
+ //! Bin size in y
+ Double_t binSizeY;
+
+ //! Minimum x value
+ Double_t xmin;
+
+ //! Maximum x value
+ Double_t xmax;
+
+ //! Minimum y value
+ Double_t ymin;
+
+ //! Maximum y value
+ Double_t ymax;
+
+ //! Coefficients of interpolation polynomials
+ std::vector<Double_t> coeffs;
+
+ //! Get contents of a given bin from a histogram
+ /*!
+ \param [in] h the histogram
+ \param [in] xbin the x bin index
+ \param [in] ybin the y bin index
+ \return the bin contents
+ */
+ Double_t histcont(const TH2& h, Int_t xbin, Int_t ybin) const;
+
+ //! Get d/dx finite difference in a given bin from a histogram
+ /*!
+ \param [in] h the histogram
+ \param [in] xbin the x bin index
+ \param [in] ybin the y bin index
+ \return the d/dx finite difference
+ */
+ Double_t dhistdx(const TH2& h, Int_t xbin, Int_t ybin) const;
+
+ //! Get d/dy finite difference in a given bin from a histogram
+ /*!
+ \param [in] h the histogram
+ \param [in] xbin the x bin index
+ \param [in] ybin the y bin index
+ \return the d/dy finite difference
+ */
+ Double_t dhistdy(const TH2& h, Int_t xbin, Int_t ybin) const;
+
+ //! Get d^2/dydx finite difference in a given bin from a histogram
+ /*!
+ \param [in] h the histogram
+ \param [in] xbin the x bin index
+ \param [in] ybin the y bin index
+ \return the d^2/dydx finite difference
+ */
+ Double_t d2histdxdy(const TH2& h, Int_t xbin, Int_t ybin) const;
+
+ //! Const access to coefficients
+ /*!
+ \param [in] binx the x bin index
+ \param [in] biny the y bin index
+ \param [in] theCoeff the coefficient index
+ \return the coefficient
+ */
+ inline const Double_t& coeff(Int_t binx, Int_t biny, Int_t theCoeff) const
+ { return coeffs[theCoeff + CoeffRecLen * (binx + nBinsX * biny)]; }
+
+ //! Access to coefficients
+ /*!
+ \param [in] binx the x bin index
+ \param [in] biny the y bin index
+ \param [in] theCoeff the coefficient index
+ \return the coefficient
+ */
+ inline Double_t& coeff(Int_t binx, Int_t biny, Int_t theCoeff)
+ { return coeffs[theCoeff + CoeffRecLen * (binx + nBinsX * biny)]; }
+
+ //! Evaluate integral over x at a given y from (x1, y) to (x2, y)
+ /*!
+ \param [in] x1 lower x limit
+ \param [in] x2 upper x limit
+ \param [in] y y value
+ \return integral over x
+ */
+ Double_t evalX(Double_t x1, Double_t x2, Double_t y) const;
+
+ //! Evaluate integral over y at a given x from (x, y1) to (x, y2)
+ /*!
+ \param [in] x x value
+ \param [in] y1 lower y limit
+ \param [in] y2 upper y limit
+ \return integral over y
+ */
+ Double_t evalY(Double_t x, Double_t y1, Double_t y2) const;
+
+ //! Evaluate integral over x and y from (x1, y1) to (x2, y2)
+ /*!
+ \param [in] x1 lower x limit
+ \param [in] x2 upper x limit
+ \param [in] y1 lower y limit
+ \param [in] y2 upper y limit
+ \return integral over x and y
+ */
+ Double_t evalXY(Double_t x1, Double_t x2, Double_t y1, Double_t y2) const;
+
+ ClassDef(Lau2DCubicSpline, 0);
+};
+
+#endif
diff --git a/inc/Lau2DHistDP.hh b/inc/Lau2DHistDP.hh
index eb3fba6..301d31d 100644
--- a/inc/Lau2DHistDP.hh
+++ b/inc/Lau2DHistDP.hh
@@ -1,134 +1,114 @@
// Copyright University of Warwick 2004 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file Lau2DHistDP.hh
\brief File containing declaration of Lau2DHistDP class.
*/
/*! \class Lau2DHistDP
\brief Class for defining a 2D DP histogram.
Class for defining a 2D DP histogram.
Employs linear interpolation to get the histogram value based on how far away a point in (x,y)
is to nearby bin centres. The returned values are not normalised to the total histogram area
(useful for efficiency histograms for example).
The histogram can be defined in the conventional DP (m13Sq vs m23Sq) or in the square DP.
*/
#ifndef LAU_2DHIST_DP
#define LAU_2DHIST_DP
-#include "Rtypes.h"
+#include "Lau2DAbsHistDP.hh"
class TH2;
class LauDaughters;
class LauKinematics;
-class Lau2DHistDP {
+class Lau2DHistDP : public Lau2DAbsHistDP {
public:
//! Constructor
/*!
\param [in] hist the 2D DP histogram
\param [in] daughters the daughter particles
\param [in] useInterpolation boolean flag to determine whether linear interpolation between bins should be used or simply the raw bin values
\param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors (useful for systematic error evaluation)
\param [in] avEff the desired average efficiency - see Lau2DHistDP::raiseOrLowerBins
\param [in] avEffError the error on that efficiency - see Lau2DHistDP::raiseOrLowerBins
\param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP
\param [in] squareDP boolean flag to specify whether the supplied histogram is in square DP coordinates
*/
Lau2DHistDP(const TH2* hist, const LauDaughters* daughters,
Bool_t useInterpolation = kTRUE, Bool_t fluctuateBins = kFALSE,
Double_t avEff = -1.0, Double_t avEffError = -1.0, Bool_t useUpperHalfOnly = kFALSE, Bool_t squareDP = kFALSE);
//! Copy constructor
- Lau2DHistDP( const Lau2DHistDP& rhs );
+ //Lau2DHistDP( const Lau2DHistDP& rhs );
//! Destructor
virtual ~Lau2DHistDP();
//! Perform the interpolation
/*!
\param [in] x the x-axis value
\param [in] y the y-aixs value
\return the interpolated histogram value
*/
Double_t interpolateXY(Double_t x, Double_t y) const;
protected:
//! Get the raw bin content from the histogram
/*!
\param [in] xBinNo the x-axis bin number
\param [in] yBinNo the y-axis bin number
\return the bin conent
*/
Double_t getBinHistValue(Int_t xBinNo, Int_t yBinNo) const;
- //! Fluctuate the contents of each histogram bin independently, in accordance with their errors
- void doBinFluctuation();
+ private:
+ //! Copy constructor - not implemented
+ Lau2DHistDP( const Lau2DHistDP& rhs );
- //! Rescale the histogram bin contents based on the desired average efficiency and its uncertainty
- /*!
- The new average is sampled from a Gaussian distribution G(x;avEff,avEffError).
- The histogram is then rescaled according to newAvg/oldAvg.
- \param [in] avEff the desired average efficiency
- \param [in] avEffError the error on that efficiency
- */
- void raiseOrLowerBins(Double_t avEff, Double_t avEffError);
+ //! Copy assignment operator - not implemented
+ Lau2DHistDP& operator=(const Lau2DHistDP& rhs);
- //! Compute the average bin content for bins within the kinematic boundary
- /*!
- This method just uses the raw bin contents with no interpolation
- \return the average value over the DP
- */
- Double_t computeAverageContents() const;
-
- private:
//! The underlying histogram
TH2* hist_;
-
- //! Kinematics used to check events are within DP boundary
- const LauKinematics* kinematics_;
//! The histogram x-axis minimum
Double_t minX_;
//! The histogram x-axis maximum
Double_t maxX_;
//! The histogram y-axis minimum
Double_t minY_;
//! The histogram y-axis maximum
Double_t maxY_;
//! The histogram x-axis range
Double_t rangeX_;
//! The histogram y-axis range
Double_t rangeY_;
//! The histogram x-axis bin width
Double_t binXWidth_;
//! The histogram y-axis bin width
Double_t binYWidth_;
//! The number of bins on the x-axis of the histogram
Int_t nBinsX_;
//! The number of bins on the y-axis of the histogram
Int_t nBinsY_;
-
+
//! Control boolean for using the linear interpolation
Bool_t useInterpolation_;
- //! Boolean for using the upper half of DP
- Bool_t upperHalf_;
- //! Boolean for using square DP variables
- Bool_t squareDP_;
ClassDef(Lau2DHistDP,0) // 2D Histogram utility class for DP analyses
};
#endif
diff --git a/inc/Lau2DSplineDP.hh b/inc/Lau2DSplineDP.hh
new file mode 100644
index 0000000..9ecaba7
--- /dev/null
+++ b/inc/Lau2DSplineDP.hh
@@ -0,0 +1,76 @@
+
+// Copyright University of Warwick 2004 - 2013.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors:
+// Thomas Latham
+// John Back
+// Paul Harrison
+
+/*! \file Lau2DSplineDP.hh
+ \brief File containing declaration of Lau2DSplineDP class.
+*/
+
+/*! \class Lau2DSplineDP
+ \brief Class for defining variations across a 2D DP using a spline.
+
+ Class for defining variations across a 2D DP using a spline.
+ Employs a 2D cubic spline to get the histogram value based on an input histogram.
+ The returned values are not normalised to the total histogram area
+ (useful for efficiency histograms for example).
+ The histogram can be defined in the conventional DP (m13Sq vs m23Sq) or in the square DP.
+*/
+
+#ifndef LAU_2DSPLINE_DP
+#define LAU_2DSPLINE_DP
+
+#include "Lau2DAbsHistDP.hh"
+
+class TH2;
+class Lau2DCubicSpline;
+class LauDaughters;
+class LauKinematics;
+
+class Lau2DSplineDP : public Lau2DAbsHistDP {
+
+ public:
+ //! Constructor
+ /*!
+ \param [in] hist the 2D DP histogram
+ \param [in] daughters the daughter particles
+ \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors (useful for systematic error evaluation)
+ \param [in] avEff the desired average efficiency - see Lau2DSplineDP::raiseOrLowerBins
+ \param [in] avEffError the error on that efficiency - see Lau2DSplineDP::raiseOrLowerBins
+ \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP
+ \param [in] squareDP boolean flag to specify whether the supplied histogram is in square DP coordinates
+ */
+ Lau2DSplineDP(const TH2* hist, const LauDaughters* daughters,
+ Bool_t fluctuateBins = kFALSE, Double_t avEff = -1.0,
+ Double_t avEffError = -1.0, Bool_t useUpperHalfOnly = kFALSE, Bool_t squareDP = kFALSE);
+
+ //! Destructor
+ virtual ~Lau2DSplineDP();
+
+ //! Perform the interpolation
+ /*!
+ \param [in] x the x-axis value
+ \param [in] y the y-axis value
+ \return the interpolated histogram value
+ */
+ Double_t interpolateXY(Double_t x, Double_t y) const;
+
+ private:
+ //! Copy constructor - not implemented
+ Lau2DSplineDP( const Lau2DSplineDP& rhs );
+
+ //! Copy assignment operator - not implemented
+ Lau2DSplineDP& operator=(const Lau2DSplineDP& rhs);
+
+ //! A 2D cubic spline generated from the histogram
+ Lau2DCubicSpline* spline_;
+
+ ClassDef(Lau2DSplineDP,0) // 2D Spline utility class for DP analyses
+};
+
+#endif
diff --git a/inc/LauDPDepMapPdf.hh b/inc/LauDPDepMapPdf.hh
index dae84cf..95fb24a 100644
--- a/inc/LauDPDepMapPdf.hh
+++ b/inc/LauDPDepMapPdf.hh
@@ -1,147 +1,150 @@
// Copyright University of Warwick 2009 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauDPDepMapPdf.hh
\brief File containing declaration of LauDPDepMapPdf class.
*/
/*! \class LauDPDepMapPdf
\brief Class to allow having different PDFs in different regions of the DP.
Class to allow having different PDFs in different regions of the DP.
The DP regions are specified in a 2D histogram or in a 1D histogram of some projection of the DP.
Each PDF is then associated with that region.
*/
#ifndef LAU_DPDEPMAP_PDF
#define LAU_DPDEPMAP_PDF
#include <vector>
#include "Rtypes.h"
#include "LauAbsPdf.hh"
class LauDaughters;
-class Lau2DHistDP;
+class Lau2DAbsDP;
class LauParameter;
class TH2;
class TH1;
class LauDPDepMapPdf : public LauAbsPdf {
public:
//! Define possibilties for the DP axes
enum DPAxis {
M12, /*!< dependence is on m^2_12 */
M13, /*!< dependence is on m^2_13 */
M23, /*!< dependence is on m^2_23 */
CentreDist, /*!< dependence is on the distance from the DP centre */
MMIN, /*!< dependence is on the minimum of m^2_13 and m^2_23 */
MMAX /*!< dependence is on the maximum of m^2_13 and m^2_23 */
};
//! Constructor - map described by 2D histogram
/*!
\param [in] pdfs the PDFs
\param [in] daughters the daughter particles
\param [in] dpHisto the 2D histogram
\param [in] upperHalf specifies whether only upper half of DP is supplied in the case of a symmetric DP
*/
LauDPDepMapPdf( const std::vector<LauAbsPdf*> &pdfs,
const LauDaughters* daughters,
const TH2* dpHisto, Bool_t upperHalf = kFALSE);
//! Constructor - map described by 1D histogram of a DP "axis"
/*!
\param [in] pdfs the PDFs
\param [in] daughters the daughter particles
\param [in] dpAxisHisto the 1D histogram
\param [in] dpAxis the DP axis
*/
LauDPDepMapPdf( const std::vector<LauAbsPdf*> &pdfs,
const LauDaughters* daughters,
const TH1* dpAxisHisto,
DPAxis dpAxis);
//! Destructor
virtual ~LauDPDepMapPdf();
//! Copy constructor
- LauDPDepMapPdf(const LauDPDepMapPdf& other);
+ //LauDPDepMapPdf(const LauDPDepMapPdf& other);
//! Specifies whether or not the PDF is DP dependent.
/*!
\return true if the PDF is DP-dependent (the default)
*/
virtual Bool_t isDPDependent() const {return kTRUE;}
//! Cache information from data
/*!
\param [in] inputData the input data
*/
virtual void cacheInfo(const LauFitDataTree& inputData);
//! Calculate the likelihood (and intermediate info) for a given abscissa
/*!
\param [in] abscissas the values of the abscissa(s)
*/
virtual void calcLikelihoodInfo(const LauAbscissas& abscissas);
//! Calculate the likelihood (and intermediate info) for a given event number
/*!
\param [in] iEvt event number
*/
virtual void calcLikelihoodInfo(UInt_t iEvt);
//! Check that PDF is positive
virtual void checkPositiveness(); // Nothing to check here.
//! Calculate the normalisation
virtual void calcNorm();
//! Calculate the PDF height
/*!
\param [in] kinematics the current DP kinematics
*/
virtual void calcPDFHeight( const LauKinematics* kinematics );
protected:
//! Determine the DP region
/*!
\param [in] m13Sq the invariant mass squared of daughters 1 and 3
\param [in] m23Sq the invariant mass squared of daughters 2 and 3
*/
UInt_t determineDPRegion( Double_t m13Sq, Double_t m23Sq ) const;
private:
+ //! Copy constructor - not implemented
+ LauDPDepMapPdf(const LauDPDepMapPdf& other);
+
//! Daughter particles
LauDaughters* daughters_;
//! The PDFs
std::vector<LauAbsPdf*> pdfs_;
//! 2D histogram - DP
- Lau2DHistDP* dpDependence_;
+ Lau2DAbsDP* dpDependence_;
//! 1D histogram - DP axis
TH1* dpAxisDependence_;
//! The DP axis we depend on
DPAxis dpAxis_;
//! Cached indices values
std::vector<UInt_t> indices_;
ClassDef(LauDPDepMapPdf,0) // Define the sum PDF
};
#endif
diff --git a/inc/LauDPDepSumPdf.hh b/inc/LauDPDepSumPdf.hh
index d935d87..1098da2 100644
--- a/inc/LauDPDepSumPdf.hh
+++ b/inc/LauDPDepSumPdf.hh
@@ -1,151 +1,156 @@
// Copyright University of Warwick 2009 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauDPDepSumPdf.hh
\brief File containing declaration of LauDPDepSumPdf class.
*/
/*! \class LauDPDepSumPdf
\brief Class for defining a PDF that is the DP-dependent sum of two other PDFs.
Class for defining a PDF that is the sum of two other PDFs, where the relative fraction of the two PDFs depends on the Dalitz-plot position.
*/
#ifndef LAU_DPDEPSUM_PDF
#define LAU_DPDEPSUM_PDF
#include "Rtypes.h"
#include "LauAbsPdf.hh"
class LauDaughters;
class LauEffModel;
class LauParameter;
class TH2;
class LauDPDepSumPdf : public LauAbsPdf {
public:
//! Define possibilties for the DP axes
enum DPAxis {
M12, /*!< dependence is on m^2_12 */
M13, /*!< dependence is on m^2_13 */
M23, /*!< dependence is on m^2_23 */
CentreDist, /*!< dependence is on the distance from the DP centre */
MMIN, /*!< dependence is on the minimum of m^2_13 and m^2_23 */
MMAX /*!< dependence is on the maximum of m^2_13 and m^2_23 */
};
//! Constructor - fraction determined by 2D histogram
/*!
\param [in] pdf1 the first PDF
\param [in] pdf2 the second PDF
\param [in] daughters the daughter particles
\param [in] dpHisto the 2D histogram
\param [in] upperHalf specifies whether only upper half of DP is supplied in the case of a symmetric DP
+ \param [in] useSpline specifies whether a spline is used to interpolate the histogram
*/
LauDPDepSumPdf(LauAbsPdf* pdf1, LauAbsPdf* pdf2,
const LauDaughters* daughters,
- const TH2* dpHisto, Bool_t upperHalf = kFALSE);
+ const TH2* dpHisto, Bool_t upperHalf = kFALSE,
+ Bool_t useSpline = kFALSE);
//! Constructor - fraction determined by a polynomial of a DP "axis"
/*!
\param [in] pdf1 the first PDF
\param [in] pdf2 the second PDF
\param [in] frac the fractional contribution of the first PDF to the final PDF
\param [in] daughters the daughter particles
\param [in] fracCoeffs the coefficients of the DP dependence of the PDF fraction
\param [in] dpAxis the DP axis that defines the parameter dependence
*/
LauDPDepSumPdf(LauAbsPdf* pdf1, LauAbsPdf* pdf2,
LauParameter* frac,
const LauDaughters* daughters,
const std::vector<Double_t>& fracCoeffs,
DPAxis dpAxis);
//! Destructor
virtual ~LauDPDepSumPdf();
//! Copy constructor
- LauDPDepSumPdf(const LauDPDepSumPdf& other);
+ //LauDPDepSumPdf(const LauDPDepSumPdf& other);
//! Specifies whether or not the PDF is DP dependent.
/*!
\return true if the PDF is DP-dependent (the default)
*/
virtual Bool_t isDPDependent() const {return kTRUE;}
//! Cache information from data
/*!
\param [in] inputData the input data
*/
virtual void cacheInfo(const LauFitDataTree& inputData);
//! Calculate the likelihood (and intermediate info) for a given abscissa
/*!
\param [in] abscissas the values of the abscissa(s)
*/
virtual void calcLikelihoodInfo(const LauAbscissas& abscissas);
//! Calculate the likelihood (and intermediate info) for a given event number
/*!
\param [in] iEvt event number
*/
virtual void calcLikelihoodInfo(UInt_t iEvt);
//! Check that PDF is positive
virtual void checkPositiveness();
//! Calculate the normalisation
virtual void calcNorm();
//! Calculate the PDF height
/*!
\param [in] kinematics the current DP kinematics
*/
virtual void calcPDFHeight( const LauKinematics* kinematics );
protected:
//! Scale fraction according to DP position
/*!
\param [in] dpPos the DP position
*/
void scaleFrac( Double_t dpPos );
private:
+ //! Copy constructor - not implemented
+ LauDPDepSumPdf(const LauDPDepSumPdf& other);
+
//! Daughter particles
LauDaughters* daughters_;
//! First PDF
LauAbsPdf* pdf1_;
//! Second PDF
LauAbsPdf* pdf2_;
//! Fractional contribution of first PDF to the final PDF
LauParameter* frac_;
//! Fractional contribution of first PDF to the final PDF
Double_t fracVal_;
//! DP dependence
LauEffModel* dpDependence_;
//! Polynomial used to scale fraction
const std::vector<Double_t> fracCoeffs_;
//! The DP axis we depend on
DPAxis dpAxis_;
//! Cached values of fractions
std::vector<Double_t> fractions_;
ClassDef(LauDPDepSumPdf,0) // Define the sum PDF
};
#endif
diff --git a/inc/LauEffModel.hh b/inc/LauEffModel.hh
index 14cf0de..aff5361 100644
--- a/inc/LauEffModel.hh
+++ b/inc/LauEffModel.hh
@@ -1,118 +1,131 @@
// Copyright University of Warwick 2004 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauEffModel.hh
\brief File containing declaration of LauEffModel class.
*/
/*! \class LauEffModel
\brief Class that implements the efficiency description across the signal Dalitz plot.
Class that defines the efficiency model variation across the signal Dalitz plot.
The method is based in a predetermined two-dimensional histogram to characterize the phase space acceptance.
The efficiency variation is defined in terms of x = m_13^2, y = m_23^2 for the Dalitz plot (default)
or x = m', y = theta' for the square Dalitz plot
*/
#ifndef LAUEFFMODEL
#define LAUEFFMODEL
#include "Rtypes.h"
class TH2;
class LauDaughters;
class LauKinematics;
class LauVetoes;
-class Lau2DHistDP;
+class Lau2DAbsDP;
class LauEffModel {
public:
//! Constructor
/*!
\param [in] daughters the daughters particles of the Dalitz plot model
\param [in] vetoes the object describing the vetoes applied in the phase space
*/
LauEffModel(const LauDaughters* daughters, const LauVetoes* vetoes);
//! Destructor
virtual ~LauEffModel();
- //! Copy constructor
- /*!
- \param [in] rhs the LauEffModel to be copied
- */
- LauEffModel( const LauEffModel& rhs );
-
//! Set the efficiency variation across the phase space using a predetermined 2D histogram.
/*!
The efficiency is defined in terms of x = m_13^2, y = m_23^2 or x = m', y = theta' for the square Dalitz plot
\param [in] effHisto the 2-dimensional histogram that describes the efficiency variation
\param [in] useInterpolation boolean flag decision to switch on/off linear interpolation between bins should be used or simply the raw bin values.
\param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors.
\param [in] avEff the desired average efficiency - see Lau2DHistDP::raiseOrLowerBins, values less than zero switch off this behaviour
\param [in] absError the error on that efficiency - see Lau2DHistDP::raiseOrLowerBins, values less than zero switch off this behaviour
\param [in] useUpperHalfOnly boolean flag to determine whether, in the case of a symmetric DP, the histogram supplied only includes the upper half of the DP.
\param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates
*/
void setEffHisto(const TH2* effHisto,
Bool_t useInterpolation = kTRUE, Bool_t fluctuateBins = kFALSE,
Double_t avEff = -1.0, Double_t absError = -1.0,
Bool_t useUpperHalfOnly = kFALSE, Bool_t squareDP = kFALSE);
+ //! Set the efficiency variation across the phase space using a spline based on a predetermined 2D histogram.
+ /*!
+ The efficiency is defined in terms of x = m_13^2, y = m_23^2 or x = m', y = theta' for the square Dalitz plot
+
+ \param [in] effHisto the 2-dimensional histogram that describes the efficiency variation
+ \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors.
+ \param [in] avEff the desired average efficiency - see Lau2DHistDP::raiseOrLowerBins, values less than zero switch off this behaviour
+ \param [in] absError the error on that efficiency - see Lau2DHistDP::raiseOrLowerBins, values less than zero switch off this behaviour
+ \param [in] useUpperHalfOnly boolean flag to determine whether, in the case of a symmetric DP, the histogram supplied only includes the upper half of the DP.
+ \param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates
+ */
+ void setEffSpline(const TH2* effHisto,
+ Bool_t fluctuateBins = kFALSE,
+ Double_t avEff = -1.0, Double_t absError = -1.0,
+ Bool_t useUpperHalfOnly = kFALSE, Bool_t squareDP = kFALSE);
+
//! Determine the efficiency for a given point in the Dalitz plot.
/*!
The method uses the 2D histogram set by the setEffHisto() function and the vetoes information.
\param [in] kinematics the object that defines the DP position
\return the efficiency value at the given point in the DP
*/
Double_t calcEfficiency( const LauKinematics* kinematics ) const;
//! Determine whether the given DP position is outside the vetoes
/*!
\param [in] kinematics the object that defines the DP position
\return kTRUE if the DP position is outside all veto regions, kFALSE otherwise
*/
Bool_t passVeto( const LauKinematics* kinematics ) const;
//! Determine whether the efficiency histogram has had its bins fluctuated within their errors
Bool_t fluctuateEffHisto() const {return fluctuateEffHisto_;}
private:
+ //! Copy constructor - not implemented
+ LauEffModel( const LauEffModel& rhs );
+
+ //! Copy assignment operator - not implemented
+ /*!
+ Declared private alongside the copy constructor: the class owns the
+ effHisto_ pointer, so the compiler-generated assignment would shallow-copy
+ it and lead to a double delete.
+ */
+ LauEffModel& operator=( const LauEffModel& rhs );
+
//! Get the efficiency from a two-dimensional histogram by interpolating in x and y
/*!
\param [in] xVal the value to be interpolated in x
\param [in] yVal the value to be interpolated in y
*/
Double_t getEffHistValue(Double_t xVal, Double_t yVal) const;
//! The daughters object
const LauDaughters* daughters_;
//! The vetoes object
const LauVetoes* vetoes_;
//! The efficiency histogram object
- Lau2DHistDP* effHisto_;
+ Lau2DAbsDP* effHisto_;
//! Use of the square Dalitz plot
Bool_t squareDP_;
//! Fluctuate histogram within the error
Bool_t fluctuateEffHisto_;
ClassDef(LauEffModel, 0) // Implement the signal efficiency across the DP
};
#endif
diff --git a/inc/Laura++_LinkDef.h b/inc/Laura++_LinkDef.h
index 6605016..fada6d8 100644
--- a/inc/Laura++_LinkDef.h
+++ b/inc/Laura++_LinkDef.h
@@ -1,88 +1,92 @@
#ifdef __CINT__
#pragma link off all globals;
#pragma link off all classes;
#pragma link off all functions;
#pragma link C++ class Lau1DHistPdf;
+#pragma link C++ class Lau2DAbsDP;
+#pragma link C++ class Lau2DAbsHistDP;
+#pragma link C++ class Lau2DCubicSpline;
#pragma link C++ class Lau2DHistDP;
#pragma link C++ class Lau2DHistDPPdf;
#pragma link C++ class Lau2DHistPdf;
+#pragma link C++ class Lau2DSplineDP;
#pragma link C++ class LauAbsBkgndDPModel;
#pragma link C++ class LauAbsCoeffSet;
#pragma link C++ class LauAbsDPDynamics;
#pragma link C++ class LauAbsFitModel;
#pragma link C++ class LauAbsPdf;
#pragma link C++ class LauAbsResonance;
#pragma link C++ class LauArgusPdf;
#pragma link C++ class LauAsymmCalc;
#pragma link C++ class LauBelleCPCoeffSet;
#pragma link C++ class LauBelleNR;
#pragma link C++ class LauBelleSymNR;
#pragma link C++ class LauBifurcatedGaussPdf;
#pragma link C++ class LauBkgndDPModel;
#pragma link C++ class LauBreitWignerRes;
#pragma link C++ class LauCacheData;
#pragma link C++ class LauCartesianCPCoeffSet;
#pragma link C++ class LauChebychevPdf;
#pragma link C++ class LauCleoCPCoeffSet;
#pragma link C++ class LauComplex;
#pragma link C++ class LauCPFitModel;
#pragma link C++ class LauCruijffPdf;
#pragma link C++ class LauCrystalBallPdf;
#pragma link C++ class LauDabbaRes;
#pragma link C++ class LauDatabasePDG;
#pragma link C++ class LauDaughters;
#pragma link C++ class LauDPDepBifurGaussPdf;
#pragma link C++ class LauDPDepCruijffPdf;
#pragma link C++ class LauDPDepGaussPdf;
#pragma link C++ class LauDPDepMapPdf;
#pragma link C++ class LauDPDepSumPdf;
#pragma link C++ class LauEffModel;
#pragma link C++ class LauEmbeddedData;
#pragma link C++ class LauExponentialPdf;
#pragma link C++ class LauFitDataTree;
#pragma link C++ class LauFitNtuple;
#pragma link C++ class LauFlatteRes;
#pragma link C++ class LauGaussPdf;
#pragma link C++ class LauGenNtuple;
#pragma link C++ class LauGounarisSakuraiRes;
#pragma link C++ class LauIntegrals;
#pragma link C++ class LauIsobarDynamics;
#pragma link C++ class LauKappaRes;
#pragma link C++ class LauKinematics;
#pragma link C++ class LauKMatrixProdPole;
#pragma link C++ class LauKMatrixProdSVP;
#pragma link C++ class LauKMatrixPropagator;
#pragma link C++ class LauKMatrixPropFactory;
#pragma link C++ class LauLASSBWRes;
#pragma link C++ class LauLASSNRRes;
#pragma link C++ class LauLASSRes;
#pragma link C++ class LauLinearPdf;
#pragma link C++ class LauMagPhaseCoeffSet;
#pragma link C++ class LauMagPhaseCPCoeffSet;
#pragma link C++ class LauNovosibirskPdf;
#pragma link C++ class LauNRAmplitude;
#pragma link C++ class LauParameter;
#pragma link C++ class LauParametricStepFuncPdf;
#pragma link C++ class LauParamFixed;
#pragma link C++ class LauParticlePDG;
#pragma link C++ class LauPrint;
#pragma link C++ class LauRealImagCoeffSet;
#pragma link C++ class LauRelBreitWignerRes;
#pragma link C++ class LauResonanceInfo;
#pragma link C++ class LauResonanceMaker;
#pragma link C++ class LauScfMap;
#pragma link C++ class LauSigmaRes;
#pragma link C++ class LauSigmoidPdf;
#pragma link C++ class LauSimpleFitModel;
#pragma link C++ class LauSPlot;
#pragma link C++ class LauString;
#pragma link C++ class LauSumPdf;
#pragma link C++ class LauTextFileParser;
#pragma link C++ class LauVetoes;
#pragma link C++ namespace LauConstants;
#pragma link C++ namespace LauFitter;
#pragma link C++ namespace LauRandom;
#endif
diff --git a/src/Lau2DAbsHistDP.cc b/src/Lau2DAbsHistDP.cc
new file mode 100644
index 0000000..a379fd1
--- /dev/null
+++ b/src/Lau2DAbsHistDP.cc
@@ -0,0 +1,123 @@
+
+// Copyright University of Warwick 2004 - 2013.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors:
+// Thomas Latham
+// John Back
+// Paul Harrison
+
+/*! \file Lau2DAbsHistDP.cc
+ \brief File containing implementation of Lau2DAbsHistDP class.
+*/
+
+#include <iostream>
+
+#include "TAxis.h"
+#include "TH2.h"
+#include "TRandom.h"
+#include "TSystem.h"
+
+#include "Lau2DAbsHistDP.hh"
+#include "LauDaughters.hh"
+#include "LauKinematics.hh"
+#include "LauRandom.hh"
+
+ClassImp(Lau2DAbsHistDP)
+
+
// Constructor: cache the kinematics of the supplied daughters (null-safe)
// and record the half-DP and square-DP flags used by the helper methods.
Lau2DAbsHistDP::Lau2DAbsHistDP(const LauDaughters* daughters, Bool_t useUpperHalfOnly, Bool_t squareDP) :
	kinematics_( (daughters!=0) ? daughters->getKinematics() : 0 ),
	upperHalf_(useUpperHalfOnly),
	squareDP_(squareDP)
{
}
+
// Destructor: nothing to release - kinematics_ is not owned by this class.
Lau2DAbsHistDP::~Lau2DAbsHistDP()
{
}
+
+void Lau2DAbsHistDP::doBinFluctuation(TH2* hist)
+{
+ TRandom* random = LauRandom::zeroSeedRandom();
+
+ Int_t nBinsX = static_cast<Int_t>(hist->GetNbinsX());
+ Int_t nBinsY = static_cast<Int_t>(hist->GetNbinsY());
+
+ for (Int_t i(0); i<nBinsX; ++i) {
+ for (Int_t j(0); j<nBinsY; ++j) {
+ Double_t currentContent = hist->GetBinContent(i+1,j+1);
+ Double_t currentError = hist->GetBinError(i+1,j+1);
+ Double_t newContent = random->Gaus(currentContent,currentError);
+ if (newContent<0.0) {
+ hist->SetBinContent(i+1,j+1,0.0);
+ } else {
+ hist->SetBinContent(i+1,j+1,newContent);
+ }
+ }
+ }
+}
+
+void Lau2DAbsHistDP::raiseOrLowerBins(TH2* hist, Double_t avEff, Double_t avEffError)
+{
+ TRandom* random = LauRandom::zeroSeedRandom();
+
+ Double_t curAvg = this->computeAverageContents(hist);
+ Double_t newAvg = random->Gaus(avEff,avEffError);
+
+ hist->Scale( newAvg / curAvg );
+}
+
+Double_t Lau2DAbsHistDP::computeAverageContents(TH2 const * const hist) const
+{
+ Double_t totalContent(0.0);
+ Int_t binsWithinDPBoundary(0);
+
+ Int_t nBinsX = static_cast<Int_t>(hist->GetNbinsX());
+ Int_t nBinsY = static_cast<Int_t>(hist->GetNbinsY());
+
+ // Loop through the bins and include any that have their centre or any
+ // of the four corners within the kinematic boundary
+ for ( Int_t i(0); i<nBinsX; ++i ) {
+ Double_t binXCentre = hist->GetXaxis()->GetBinCenter(i+1);
+ Double_t binXLowerEdge = hist->GetXaxis()->GetBinLowEdge(i+1);
+ Double_t binXUpperEdge = hist->GetXaxis()->GetBinUpEdge(i+1);
+
+ for ( Int_t j(0); j<nBinsY; ++j ) {
+ Double_t binYCentre = hist->GetYaxis()->GetBinCenter(i+1);
+ Double_t binYLowerEdge = hist->GetYaxis()->GetBinLowEdge(i+1);
+ Double_t binYUpperEdge = hist->GetYaxis()->GetBinUpEdge(i+1);
+
+ if ( withinDPBoundaries( binXCentre, binYCentre ) ||
+ withinDPBoundaries( binXLowerEdge, binYLowerEdge ) ||
+ withinDPBoundaries( binXUpperEdge, binYUpperEdge ) ||
+ withinDPBoundaries( binXLowerEdge, binYUpperEdge ) ||
+ withinDPBoundaries( binXUpperEdge, binYLowerEdge ) ) {
+
+ totalContent += hist->GetBinContent(i+1, j+1);
+ ++binsWithinDPBoundary;
+ }
+ }
+ }
+
+ return totalContent/binsWithinDPBoundary;
+}
+
+Bool_t Lau2DAbsHistDP::withinDPBoundaries(Double_t x, Double_t y) const
+{
+ return squareDP_ ? kinematics_->withinSqDPLimits(x,y) : kinematics_->withinDPLimits(x,y);
+}
+
+void Lau2DAbsHistDP::getUpperHalf(Double_t& x, Double_t& y) const
+{
+ if ( upperHalf_ == kTRUE ) {
+ if ( squareDP_ == kFALSE && x > y ) {
+ Double_t temp = y;
+ y = x;
+ x = temp;
+ } else if ( squareDP_ == kTRUE && y > 0.5 ) {
+ y = 1.0 - y;
+ }
+ }
+}
diff --git a/src/Lau2DCubicSpline.cc b/src/Lau2DCubicSpline.cc
new file mode 100644
index 0000000..6456740
--- /dev/null
+++ b/src/Lau2DCubicSpline.cc
@@ -0,0 +1,349 @@
+
+// Copyright University of Warwick 2004 - 2013.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors:
+// Thomas Latham
+// John Back
+// Paul Harrison
+
+/*! \file Lau2DCubicSpline.cc
+ \brief File containing implementation of Lau2DCubicSpline class.
+
+ Class for defining a 2D cubic spline based on RooBinned2DBicubicBase by
+ Manuel Tobias Schiller <manuel.schiller@nikhef.nl> (2012-08-29).
+*/
+
+#include <cmath>
+#include <cstdlib>
+#include <iostream>
+
+#include <TH2.h>
+#include <TSystem.h>
+
+#include "Lau2DCubicSpline.hh"
+
// Destructor: nothing to free - members manage their own storage.
Lau2DCubicSpline::~Lau2DCubicSpline()
{ }
+
// Look up the histogram content at 0-based (xbin, ybin), reflecting
// out-of-range indices back inside [0, nBins-2] so that the finite-difference
// helpers below can safely ask for neighbours one bin beyond the edges.
inline Double_t Lau2DCubicSpline::histcont(
	const TH2& h, Int_t xbin, Int_t ybin) const
{
	//reflect until we're in range
	while(xbin < 0 || xbin >= nBinsX - 1) {
		if(xbin < 0) xbin = -xbin-1;
		if(xbin >= nBinsX -1) xbin = 2*(nBinsX-1) - xbin - 1;
	}
	while(ybin < 0 || ybin >= nBinsY - 1) {
		if(ybin < 0) ybin = -ybin-1;
		if(ybin >= nBinsY -1) ybin = 2*(nBinsY-1) - ybin - 1;
	}

	// convert from 0-based indices to ROOT's 1-based bin numbering
	return h.GetBinContent(1 + xbin, 1 + ybin);
}
+
// Central finite-difference estimate of df/dx at the bin centre (xbin, ybin),
// in units of one bin width.
inline Double_t Lau2DCubicSpline::dhistdx(
	const TH2& h, Int_t xbin, Int_t ybin) const
{
	return 0.5 * (histcont(h, xbin + 1, ybin) -
		histcont(h, xbin - 1, ybin));
}

// Central finite-difference estimate of df/dy at the bin centre (xbin, ybin),
// in units of one bin width.
inline Double_t Lau2DCubicSpline::dhistdy(
	const TH2& h, Int_t xbin, Int_t ybin) const
{
	return 0.5 * (histcont(h, xbin, ybin + 1) -
		histcont(h, xbin, ybin - 1));
}

// Central finite-difference estimate of the mixed derivative d^2f/dxdy at the
// bin centre (xbin, ybin), in units of bin widths.
inline Double_t Lau2DCubicSpline::d2histdxdy(
	const TH2& h, Int_t xbin, Int_t ybin) const
{
	return 0.5 * (histcont(h, xbin - 1, ybin - 1) -
		histcont(h, xbin + 1, ybin - 1) +
		histcont(h, xbin + 1, ybin + 1) -
		histcont(h, xbin - 1, ybin + 1));
}
+
+Lau2DCubicSpline::Lau2DCubicSpline(const TH2& h) :
+ nBinsX(1 + h.GetNbinsX()), nBinsY(1 + h.GetNbinsY()),
+ binSizeX(h.GetXaxis()->GetBinWidth(1)),
+ binSizeY(h.GetYaxis()->GetBinWidth(1)),
+ xmin(h.GetXaxis()->GetBinCenter(1) - binSizeX),
+ xmax(h.GetXaxis()->GetBinCenter(nBinsX - 1) + binSizeX),
+ ymin(h.GetYaxis()->GetBinCenter(1) - binSizeY),
+ ymax(h.GetYaxis()->GetBinCenter(nBinsY - 1) + binSizeY),
+ coeffs(CoeffRecLen * nBinsX * nBinsY)
+{
+ TAxis *xaxis = h.GetXaxis(), *yaxis = h.GetYaxis();
+ // verify that all bins have same size
+ for (Int_t i = 1; i < nBinsX; ++i) {
+ if (std::abs(xaxis->GetBinWidth(i) / binSizeX - 1.) > 1e-9)
+ std::cerr << "ERROR in Lau2DCubicSpline constructor : the histogram has variable bin sizes." << std::endl;
+ gSystem->Exit(EXIT_FAILURE);
+ }
+ for (Int_t i = 1; i < nBinsY; ++i) {
+ if (std::abs(yaxis->GetBinWidth(i) / binSizeY - 1.) > 1e-9)
+ std::cerr << "ERROR in Lau2DCubicSpline constructor : the histogram has variable bin sizes." << std::endl;
+ gSystem->Exit(EXIT_FAILURE);
+ }
+ // ok, go through histogram to precalculate the interpolation coefficients
+ // in rectangles between bin centres
+ //
+ // for that purpose, we map each of those rectangles to the unit square
+ for (Int_t j = -1; j < nBinsY - 1; ++j) {
+ for (Int_t i = -1; i < nBinsX - 1; ++i) {
+ const Double_t rhs[NCoeff] = {
+ // function values in bin centres
+ histcont(h, i, j),
+ histcont(h, i + 1, j),
+ histcont(h, i, j + 1),
+ histcont(h, i + 1, j + 1),
+ // df/dx in bin centres (finite difference approximation)
+ dhistdx(h, i, j),
+ dhistdx(h, i + 1, j),
+ dhistdx(h, i, j + 1),
+ dhistdx(h, i + 1, j + 1),
+ // df/dy in bin centres (finite difference approximation)
+ dhistdy(h, i, j),
+ dhistdy(h, i + 1, j),
+ dhistdy(h, i, j + 1),
+ dhistdy(h, i + 1, j + 1),
+ // d^2f/dxdy in bin centres (finite difference approximation)
+ d2histdxdy(h, i, j),
+ d2histdxdy(h, i + 1, j),
+ d2histdxdy(h, i, j + 1),
+ d2histdxdy(h, i + 1, j + 1)
+ };
+ // work out solution - strange array placement is due to the fact
+ // that terms with x/y to high powers can be small, so they should
+ // be added up first during evaluation to avoid cancellation
+ // issues; at the same time you want to access them in order to
+ // not confuse the CPU cache, so they're stored back to front
+ //
+ // a_00 ... a_30
+ coeff(1 + i, 1 + j, 15) = rhs[0];
+ coeff(1 + i, 1 + j, 14) = rhs[4];
+ coeff(1 + i, 1 + j, 13) =
+ 3. * (-rhs[0] + rhs[1]) - 2. * rhs[4] - rhs[5];
+ coeff(1 + i, 1 + j, 12) =
+ 2. * (rhs[0] - rhs[1]) + rhs[4] + rhs[5];
+ // a_31 ... a_31
+ coeff(1 + i, 1 + j, 11) = rhs[8];
+ coeff(1 + i, 1 + j, 10) = rhs[12];
+ coeff(1 + i, 1 + j, 9) =
+ 3. * (-rhs[8] + rhs[9]) - 2. * rhs[12] - rhs[13];
+ coeff(1 + i, 1 + j, 8) =
+ 2. * (rhs[8] - rhs[9]) + rhs[12] + rhs[13];
+ // a_02 ... a_32
+ coeff(1 + i, 1 + j, 7) =
+ 3. * (-rhs[0] + rhs[2]) - 2. * rhs[8] - rhs[10];
+ coeff(1 + i, 1 + j, 6) =
+ 3. * (-rhs[4] + rhs[6]) - 2. * rhs[12] - rhs[14];
+ coeff(1 + i, 1 + j, 5) =
+ 9. * (rhs[0] - rhs[1] - rhs[2] + rhs[3]) +
+ 6. * (rhs[4] - rhs[6] + rhs[8] - rhs[9]) + 4. * rhs[12] +
+ 3. * (rhs[5] - rhs[7] + rhs[10] - rhs[11]) +
+ 2. * (rhs[13] + rhs[14]) + rhs[15];
+ coeff(1 + i, 1 + j, 4) =
+ 6. * (-rhs[0] + rhs[1] + rhs[2] - rhs[3]) +
+ 4. * (-rhs[8] + rhs[9]) +
+ 3. * (-rhs[4] - rhs[5] + rhs[6] + rhs[7]) +
+ 2. * (-rhs[10] + rhs[11] - rhs[12] - rhs[13]) -
+ rhs[14] - rhs[15];
+ // a_03 ... a_33
+ coeff(1 + i, 1 + j, 3) =
+ 2. * (rhs[0] - rhs[2]) + rhs[8] + rhs[10];
+ coeff(1 + i, 1 + j, 2) =
+ 2. * (rhs[4] - rhs[6]) + rhs[12] + rhs[14];
+ coeff(1 + i, 1 + j, 1) =
+ 6. * (-rhs[0] + rhs[1] + rhs[2] - rhs[3]) +
+ 4. * (-rhs[4] + rhs[6]) +
+ 3. * (-rhs[8] + rhs[9] - rhs[10] + rhs[11]) +
+ 2. * (- rhs[5] + rhs[7] - rhs[12] - rhs[14]) -
+ rhs[13] - rhs[15];
+ coeff(1 + i, 1 + j, 0) =
+ 4. * (rhs[0] - rhs[1] - rhs[2] + rhs[3]) +
+ 2. * (rhs[4] + rhs[5] - rhs[6] - rhs[7] +
+ rhs[8] - rhs[9] + rhs[10] - rhs[11]) +
+ rhs[12] + rhs[13] + rhs[14] + rhs[15];
+ // coeff(1 + i, 1 + j, 17) contains integral of function over the
+ // square in "unit square coordinates", i.e. neglecting bin widths
+ // this is done to help speed up calculations of 2D integrals
+ Double_t sum = 0.;
+ for (Int_t k = 0; k < NCoeff; ++k)
+ sum += coeff(1 + i, 1 + j, k) /
+ Double_t((4 - (k % 4)) * (4 - (k / 4)));
+ coeff(1 + i, 1 + j, NCoeff) = sum;
+ }
+ }
+}
+
// Evaluate the spline at (x,y).
// Returns 0 for NaN inputs or any point outside the spline's domain.
Double_t Lau2DCubicSpline::evaluate(Double_t x, Double_t y) const
{
	// protect against NaN and out of range
	if (x <= xmin || x >= xmax || y <= ymin || y >= ymax || x != x || y != y)
		return 0.;
	// find the bin in question
	const Int_t binx = Int_t(Double_t(nBinsX) * (x - xmin) / (xmax - xmin));
	const Int_t biny = Int_t(Double_t(nBinsY) * (y - ymin) / (ymax - ymin));
	// get low edge of bin
	const Double_t xlo = Double_t(nBinsX - binx) / Double_t(nBinsX) * xmin +
		Double_t(binx) / Double_t(nBinsX) * xmax;
	const Double_t ylo = Double_t(nBinsY - biny) / Double_t(nBinsY) * ymin +
		Double_t(biny) / Double_t(nBinsY) * ymax;
	// normalise to coordinates in unit square
	const Double_t hx = (x - xlo) / binSizeX;
	const Double_t hy = (y - ylo) / binSizeY;
	// monomials, highest power first (matches the back-to-front coefficient
	// storage so the smallest terms are summed first)
	const Double_t hxton[4] = { hx * hx * hx, hx * hx, hx, 1. };
	const Double_t hyton[4] = { hy * hy * hy, hy * hy, hy, 1. };
	// sum up
	Double_t retVal = 0.;
	for (Int_t k = 0; k < NCoeff; ++k)
		retVal += coeff(binx, biny, k) * hxton[k % 4] * hyton[k / 4];

	return retVal;
}
+
+Double_t Lau2DCubicSpline::analyticalIntegral(Double_t x1, Double_t x2, Double_t y1, Double_t y2) const
+{
+ if(y1==y2) return evalX(x1, x2, y1);
+ if(x1==x2) return evalY(x1, y1, y2);
+ return evalXY(x1, x2, y1, y2);
+}
+
// Integrate the spline over x in [x1,x2] at fixed y.
// Returns 0 if any input is NaN.
Double_t Lau2DCubicSpline::evalX(Double_t x1, Double_t x2, Double_t y) const
{
	// protect against NaN
	if (x1 != x1 || x2 != x2 || y != y) return 0.;
	// find the bin in question
	const Int_t biny = Int_t(Double_t(nBinsY) * (y - ymin) / (ymax - ymin));
	// get low edge of bin
	const Double_t ylo = Double_t(nBinsY - biny) / Double_t(nBinsY) * ymin +
		Double_t(biny) / Double_t(nBinsY) * ymax;
	// normalise to coordinates in unit square
	const Double_t hy = (y - ylo) / binSizeY;
	// monomials
	const Double_t hyton[4] = { hy * hy * hy, hy * hy, hy, 1. };
	// integral
	Double_t sum = 0.;
	for (Int_t binx = 0; binx < nBinsX; ++binx) {
		// get low/high edge of bin
		const Double_t xhi = Double_t(nBinsX - binx - 1) / Double_t(nBinsX) * xmin +
			Double_t(binx + 1) / Double_t(nBinsX) * xmax;
		if (xhi < x1) continue;	// bin entirely below the integration range
		const Double_t xlo = Double_t(nBinsX - binx) / Double_t(nBinsX) * xmin +
			Double_t(binx) / Double_t(nBinsX) * xmax;
		if (xlo > x2) break;	// this and all later bins lie above the range
		// work out integration range within this bin, in unit-square coords
		const Double_t a = ((xlo > x1) ? 0. : (x1 - xlo)) / binSizeX;
		const Double_t b = ((xhi < x2) ? binSizeX : (x2 - xlo)) / binSizeX;
		// integrated monomials
		const Double_t hxton[4] = { 0.25 * (b * b * b * b - a * a * a * a),
			(b * b * b - a * a * a) / 3., 0.5 * (b * b - a * a), b - a };
		Double_t lsum = 0.;
		for (Int_t k = 0; k < NCoeff; ++k)
			lsum += coeff(binx, biny, k) * hxton[k % 4] * hyton[k / 4];
		sum += lsum;
	}
	// move from unit square coordinates to user coordinates
	return sum * binSizeX;
}
+
// Integrate the spline over y in [y1,y2] at fixed x.
// Returns 0 if any input is NaN.
Double_t Lau2DCubicSpline::evalY(Double_t x, Double_t y1, Double_t y2) const
{
	// protect against NaN
	if (x != x || y1 != y1 || y2 != y2) return 0.;
	// find the bin in question
	const Int_t binx = Int_t(Double_t(nBinsX) * (x - xmin) / (xmax - xmin));
	// get low edge of bin
	const Double_t xlo = Double_t(nBinsX - binx) / Double_t(nBinsX) * xmin +
		Double_t(binx) / Double_t(nBinsX) * xmax;
	// normalise to coordinates in unit square
	const Double_t hx = (x - xlo) / binSizeX;
	// monomials
	const Double_t hxton[4] = { hx * hx * hx, hx * hx, hx, 1. };
	// integral
	Double_t sum = 0.;
	for (Int_t biny = 0; biny < nBinsY; ++biny) {
		// get low/high edge of bin
		const Double_t yhi = Double_t(nBinsY - biny - 1) / Double_t(nBinsY) * ymin +
			Double_t(biny + 1) / Double_t(nBinsY) * ymax;
		if (yhi < y1) continue;	// bin entirely below the integration range
		const Double_t ylo = Double_t(nBinsY - biny) / Double_t(nBinsY) * ymin +
			Double_t(biny) / Double_t(nBinsY) * ymax;
		if (ylo > y2) break;	// this and all later bins lie above the range
		// work out integration range within this bin, in unit-square coords
		const Double_t a = ((ylo > y1) ? 0. : (y1 - ylo)) / binSizeY;
		const Double_t b = ((yhi < y2) ? binSizeY : (y2 - ylo)) / binSizeY;
		// integrated monomials
		const Double_t hyton[4] = { 0.25 * (b * b * b * b - a * a * a * a),
			(b * b * b - a * a * a) / 3., 0.5 * (b * b - a * a), b - a };
		Double_t lsum = 0.;
		for (Int_t k = 0; k < NCoeff; ++k)
			lsum += coeff(binx, biny, k) * hxton[k % 4] * hyton[k / 4];
		sum += lsum;
	}
	// move from unit square coordinates to user coordinates
	return sum * binSizeY;
}
+
// Integrate the spline over the rectangle [x1,x2] x [y1,y2].
// For bins fully contained in the range the cached per-bin integral
// (coefficient slot NCoeff) is used instead of re-integrating.
// Returns 0 if any input is NaN.
Double_t Lau2DCubicSpline::evalXY(
	Double_t x1, Double_t x2, Double_t y1, Double_t y2) const
{
	// protect against NaN
	if (x1 != x1 || x2 != x2 || y1 != y1 || y2 != y2) return 0.;
	// integral
	Double_t sum = 0.;
	for (Int_t biny = 0; biny < nBinsY; ++biny) {
		// get low/high edge of bin
		const Double_t yhi = Double_t(nBinsY - biny - 1) / Double_t(nBinsY) * ymin +
			Double_t(biny + 1) / Double_t(nBinsY) * ymax;
		if (yhi < y1) continue;	// bin entirely below the y range
		const Double_t ylo = Double_t(nBinsY - biny) / Double_t(nBinsY) * ymin +
			Double_t(biny) / Double_t(nBinsY) * ymax;
		if (ylo > y2) break;	// this and all later bins lie above the y range
		// work out integration range within this bin, in unit-square coords
		const Double_t ay = ((ylo > y1) ? 0. : (y1 - ylo)) / binSizeY;
		const Double_t by = ((yhi < y2) ? binSizeY : (y2 - ylo)) / binSizeY;
		const Bool_t yFullyContained = std::abs(by - ay - 1.0) < 1e-15;
		// integrated monomials
		const Double_t hyton[4] = {
			0.25 * (by * by * by * by - ay * ay * ay * ay),
			(by * by * by - ay * ay * ay) / 3., 0.5 * (by * by - ay * ay),
			by - ay };
		for (Int_t binx = 0; binx < nBinsX; ++binx) {
			// get low/high edge of bin
			const Double_t xhi = Double_t(nBinsX - binx - 1) / Double_t(nBinsX) * xmin +
				Double_t(binx + 1) / Double_t(nBinsX) * xmax;
			if (xhi < x1) continue;	// bin entirely below the x range
			const Double_t xlo = Double_t(nBinsX - binx) / Double_t(nBinsX) * xmin +
				Double_t(binx) / Double_t(nBinsX) * xmax;
			if (xlo > x2) break;	// this and all later bins lie above the x range
			// work out integration range within this bin
			const Double_t ax = ((xlo > x1) ? 0. : (x1 - xlo)) / binSizeX;
			const Double_t bx = ((xhi < x2) ? binSizeX : (x2 - xlo)) / binSizeX;
			const Bool_t xFullyContained = std::abs(bx - ax - 1.0) < 1e-15;
			if (xFullyContained && yFullyContained) {
				// for fully contained bins, we have cached the integral
				sum += coeff(binx, biny, NCoeff);
				continue;
			}
			// integrated monomials
			const Double_t hxton[4] = {
				0.25 * (bx * bx * bx * bx - ax * ax * ax * ax),
				(bx * bx * bx - ax * ax * ax) / 3., 0.5 * (bx * bx - ax * ax),
				bx - ax };
			// integrate over bin

			Double_t lsum = 0.;
			for (Int_t k = 0; k < NCoeff; ++k)
				lsum += coeff(binx, biny, k) * hxton[k % 4] * hyton[k / 4];
			sum += lsum;
		}
	}
	// move from unit square coordinates to user coordinates
	return sum * binSizeX * binSizeY;
}
diff --git a/src/Lau2DHistDP.cc b/src/Lau2DHistDP.cc
index fd97779..d414aa9 100644
--- a/src/Lau2DHistDP.cc
+++ b/src/Lau2DHistDP.cc
@@ -1,367 +1,242 @@
// Copyright University of Warwick 2004 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file Lau2DHistDP.cc
\brief File containing implementation of Lau2DHistDP class.
*/
#include <iostream>
#include "TAxis.h"
#include "TH2.h"
#include "TRandom.h"
#include "TSystem.h"
#include "Lau2DHistDP.hh"
#include "LauDaughters.hh"
#include "LauKinematics.hh"
#include "LauRandom.hh"
ClassImp(Lau2DHistDP)
// Constructor: clones the supplied histogram, caches its axis ranges and bin
// geometry, and optionally fluctuates / rescales the bin contents.
// (Diff context: common state now lives in the new Lau2DAbsHistDP base class.)
Lau2DHistDP::Lau2DHistDP(const TH2* hist, const LauDaughters* daughters,
Bool_t useInterpolation, Bool_t fluctuateBins,
Double_t avEff, Double_t avEffError, Bool_t useUpperHalfOnly, Bool_t squareDP) :
+ Lau2DAbsHistDP(daughters,useUpperHalfOnly,squareDP),
hist_(hist ? dynamic_cast<TH2*>(hist->Clone()) : 0),
- kinematics_( (daughters!=0) ? daughters->getKinematics() : 0 ),
minX_(0.0), maxX_(0.0),
minY_(0.0), maxY_(0.0),
rangeX_(0.0), rangeY_(0.0),
binXWidth_(0.0), binYWidth_(0.0),
nBinsX_(0), nBinsY_(0),
- useInterpolation_(useInterpolation),
- upperHalf_(useUpperHalfOnly),
- squareDP_(squareDP)
+ useInterpolation_(useInterpolation)
{
// A null clone means either a null input pointer or a failed cast - fatal
if ( ! hist_ ) {
std::cerr << "ERROR in Lau2DHistDP constructor : the histogram pointer is null." << std::endl;
gSystem->Exit(EXIT_FAILURE);
}
// Save various attributes of the histogram
// (axis ranges, number of bins, areas)
TAxis* xAxis = hist_->GetXaxis();
minX_ = static_cast<Double_t>(xAxis->GetXmin());
maxX_ = static_cast<Double_t>(xAxis->GetXmax());
rangeX_ = maxX_ - minX_;
TAxis* yAxis = hist_->GetYaxis();
minY_ = static_cast<Double_t>(yAxis->GetXmin());
maxY_ = static_cast<Double_t>(yAxis->GetXmax());
rangeY_ = maxY_ - minY_;
nBinsX_ = static_cast<Int_t>(hist_->GetNbinsX());
nBinsY_ = static_cast<Int_t>(hist_->GetNbinsY());
binXWidth_ = static_cast<Double_t>(TMath::Abs(rangeX_)/(nBinsX_*1.0));
binYWidth_ = static_cast<Double_t>(TMath::Abs(rangeY_)/(nBinsY_*1.0));
// Optionally smear the bin contents within their errors (base-class helper)
if (fluctuateBins) {
- this->doBinFluctuation();
+ this->doBinFluctuation(hist_);
}
// Optionally rescale so the average efficiency matches avEff +- avEffError
if (avEff > 0.0 && avEffError > 0.0) {
- this->raiseOrLowerBins(avEff,avEffError);
+ this->raiseOrLowerBins(hist_,avEff,avEffError);
}
}
-Lau2DHistDP::Lau2DHistDP( const Lau2DHistDP& rhs ) :
- hist_(rhs.hist_ ? dynamic_cast<TH2*>(rhs.hist_->Clone()) : 0),
- kinematics_( rhs.kinematics_ ),
- minX_(rhs.minX_), maxX_(rhs.maxX_),
- minY_(rhs.minY_), maxY_(rhs.maxY_),
- rangeX_(rhs.rangeX_), rangeY_(rhs.rangeY_),
- binXWidth_(rhs.binXWidth_), binYWidth_(rhs.binYWidth_),
- nBinsX_(rhs.nBinsX_), nBinsY_(rhs.nBinsY_),
- useInterpolation_(rhs.useInterpolation_),
- upperHalf_(rhs.upperHalf_),
- squareDP_(rhs.squareDP_)
-{
-}
-
// Destructor: delete the cloned histogram owned by this object.
Lau2DHistDP::~Lau2DHistDP()
{
delete hist_;
hist_ = 0;
}
// Return the content of 0-based bin (xBinNo, yBinNo) of the cached histogram.
// Indices beyond the last bin give 0.0; negative indices are clamped to the
// first bin.
Double_t Lau2DHistDP::getBinHistValue(Int_t xBinNo, Int_t yBinNo) const
{
	// Above the histogram range there is no content
	if (xBinNo >= nBinsX_ || yBinNo >= nBinsY_) {
		return 0.0;
	}
	// Below the range, use the first bin
	if (xBinNo < 0) {
		xBinNo = 0;
	}
	if (yBinNo < 0) {
		yBinNo = 0;
	}
	// Convert from 0-based indices to ROOT's 1-based bin numbering
	return hist_->GetBinContent(xBinNo+1, yBinNo+1);
}
Double_t Lau2DHistDP::interpolateXY(Double_t x, Double_t y) const
{
// This function returns the interpolated value of the histogram function
// for the given values of x and y by finding the adjacent bins and extrapolating
// using weights based on the inverse distance of the point from the adjacent
// bin centres.
// Here, x = m13^2, y = m23^2, or m', theta' for square DP co-ordinates
// If we're only using one half then flip co-ordinates
// appropriately for conventional or square DP
- if ( upperHalf_ == kTRUE ) {
- if ( squareDP_ == kFALSE && x > y ) {
- Double_t temp = y;
- y = x;
- x = temp;
- } else if ( squareDP_ == kTRUE && y > 0.5 ) {
- y = 1.0 - y;
- }
- }
+ getUpperHalf(x,y);
// First ask whether the point is inside the kinematic region.
- Bool_t withinDP(kFALSE);
- if (squareDP_ == kTRUE) {
- withinDP = kinematics_->withinSqDPLimits(x,y);
- } else {
- withinDP = kinematics_->withinDPLimits(x,y);
- }
- if (withinDP == kFALSE) {
+ if (withinDPBoundaries(x,y) == kFALSE) {
std::cerr << "WARNING in Lau2DHistDP::interpolateXY : Given position is outside the DP boundary, returning 0.0." << std::endl;
return 0.0;
}
// Find the 2D histogram bin for x and y
Int_t i = Int_t((x - minX_)*nBinsX_/rangeX_);
Int_t j = Int_t((y - minY_)*nBinsY_/rangeY_);
if (i >= nBinsX_) {i = nBinsX_ - 1;}
if (j >= nBinsY_) {j = nBinsY_ - 1;}
// Ask whether we want to do the interpolation, since this method is
// not reliable for low statistics histograms.
if (useInterpolation_ == kFALSE) {return getBinHistValue(i,j);}
// Find the bin centres (actual co-ordinate positions, not histogram indices)
Double_t cbinx = Double_t(i+0.5)*rangeX_/nBinsX_ + minX_;
Double_t cbiny = Double_t(j+0.5)*rangeY_/nBinsY_ + minY_;
// If bin centres are outside kinematic region, do not extrapolate
- if (squareDP_ == kTRUE) {
- withinDP = kinematics_->withinSqDPLimits(cbinx, cbiny);
- } else {
- withinDP = kinematics_->withinDPLimits(cbinx, cbiny);
- }
- if (withinDP == kFALSE) {return getBinHistValue(i,j);}
+ if (withinDPBoundaries(cbinx,cbiny) == kFALSE) {return getBinHistValue(i,j);}
// Find the adjacent bins (direction chosen by which side of the
// bin centre the point lies on)
Double_t deltax = x - cbinx;
Double_t deltay = y - cbiny;
Int_t i_adj(0), j_adj(0);
if (deltax > 0.0) {
i_adj = i + 1;
} else {
i_adj = i - 1;
}
if (deltay > 0.0) {
j_adj = j + 1;
} else {
j_adj = j - 1;
}
Bool_t isXBoundary(kFALSE), isYBoundary(kFALSE);
Double_t value(0.0);
if (i_adj >= nBinsX_ || i_adj < 0) {isYBoundary = kTRUE;}
if (j_adj >= nBinsY_ || j_adj < 0) {isXBoundary = kTRUE;}
// In the corners, do no interpolation. Use entry in bin (i,j)
if (isXBoundary == kTRUE && isYBoundary == kTRUE) {
value = getBinHistValue(i,j);
} else if (isXBoundary == kTRUE && isYBoundary == kFALSE) {
// On an x boundary: 1D interpolation along x only
// Find the adjacent x bin centre
Double_t cbinx_adj = Double_t(i_adj+0.5)*rangeX_/nBinsX_ + minX_;
- if (squareDP_ == kTRUE) {
- withinDP = kinematics_->withinSqDPLimits(cbinx_adj, y);
- } else {
- withinDP = kinematics_->withinDPLimits(cbinx_adj, y);
- }
-
- if (withinDP == kFALSE) {
+ if (withinDPBoundaries(cbinx_adj, y) == kFALSE) {
// The adjacent bin is outside the DP range. Don't extrapolate.
value = getBinHistValue(i,j);
} else {
Double_t dx0 = TMath::Abs(x - cbinx);
Double_t dx1 = TMath::Abs(cbinx_adj - x);
Double_t inter_denom = dx0 + dx1;
Double_t value1 = getBinHistValue(i,j);
Double_t value2 = getBinHistValue(i_adj,j);
value = (value1*dx1 + value2*dx0)/inter_denom;
}
} else if (isYBoundary == kTRUE && isXBoundary == kFALSE) {
// On a y boundary: 1D interpolation along y only
// Find the adjacent y bin centre
Double_t cbiny_adj = Double_t(j_adj+0.5)*rangeY_/nBinsY_ + minY_;
- if (squareDP_ == kTRUE) {
- withinDP = kinematics_->withinSqDPLimits(x, cbiny_adj);
- } else {
- withinDP = kinematics_->withinDPLimits(x, cbiny_adj);
- }
-
- if (withinDP == kFALSE) {
+ if (withinDPBoundaries(x, cbiny_adj) == kFALSE) {
// The adjacent bin is outside the DP range. Don't extrapolate.
value = getBinHistValue(i,j);
} else {
Double_t dy0 = TMath::Abs(y - cbiny);
Double_t dy1 = TMath::Abs(cbiny_adj - y);
Double_t inter_denom = dy0 + dy1;
Double_t value1 = getBinHistValue(i,j);
Double_t value2 = getBinHistValue(i,j_adj);
value = (value1*dy1 + value2*dy0)/inter_denom;
}
} else {
// 2D linear interpolation using inverse distance as weights.
// Find the adjacent x and y bin centres
Double_t cbinx_adj = Double_t(i_adj+0.5)*rangeX_/nBinsX_ + minX_;
Double_t cbiny_adj = Double_t(j_adj+0.5)*rangeY_/nBinsY_ + minY_;
- if (squareDP_ == kTRUE) {
- withinDP = kinematics_->withinSqDPLimits(cbinx_adj, cbiny_adj);
- } else {
- withinDP = kinematics_->withinDPLimits(cbinx_adj, cbiny_adj);
- }
-
- if (withinDP == kFALSE) {
+ if (withinDPBoundaries(cbinx_adj, cbiny_adj) == kFALSE) {
// The adjacent bin is outside the DP range. Don't extrapolate.
value = getBinHistValue(i,j);
} else {
Double_t dx0 = TMath::Abs(x - cbinx);
Double_t dx1 = TMath::Abs(cbinx_adj - x);
Double_t dy0 = TMath::Abs(y - cbiny);
Double_t dy1 = TMath::Abs(cbiny_adj - y);
Double_t inter_denom = (dx0 + dx1)*(dy0 + dy1);
Double_t value1 = getBinHistValue(i,j);
Double_t value2 = getBinHistValue(i_adj,j);
Double_t value3 = getBinHistValue(i,j_adj);
Double_t value4 = getBinHistValue(i_adj,j_adj);
value = value1*dx1*dy1 + value2*dx0*dy1 + value3*dx1*dy0 + value4*dx0*dy0;
value /= inter_denom;
}
}
return value;
}
-void Lau2DHistDP::doBinFluctuation()
-{
- TRandom* random = LauRandom::zeroSeedRandom();
- for (Int_t i(0); i<nBinsX_; ++i) {
- for (Int_t j(0); j<nBinsY_; ++j) {
- Double_t currentContent = hist_->GetBinContent(i+1,j+1);
- Double_t currentError = hist_->GetBinError(i+1,j+1);
- Double_t newContent = random->Gaus(currentContent,currentError);
- if (newContent<0.0) {
- hist_->SetBinContent(i+1,j+1,0.0);
- } else {
- hist_->SetBinContent(i+1,j+1,newContent);
- }
- }
- }
-}
-
-void Lau2DHistDP::raiseOrLowerBins(Double_t avEff, Double_t avEffError)
-{
- TRandom* random = LauRandom::zeroSeedRandom();
-
- Double_t curAvg = this->computeAverageContents();
- Double_t newAvg = random->Gaus(avEff,avEffError);
-
- hist_->Scale( newAvg / curAvg );
-}
-
-Double_t Lau2DHistDP::computeAverageContents() const
-{
- Double_t totalContent(0.0);
- Int_t binsWithinDPBoundary(0);
-
- // Loop through the bins and include any that have their centre or any
- // of the four corners within the kinematic boundary
- for ( Int_t i(0); i<nBinsX_; ++i ) {
- Double_t binXCentre = hist_->GetXaxis()->GetBinCenter(i+1);
- Double_t binXLowerEdge = hist_->GetXaxis()->GetBinLowEdge(i+1);
- Double_t binXUpperEdge = hist_->GetXaxis()->GetBinUpEdge(i+1);
-
- for ( Int_t j(0); j<nBinsX_; ++j ) {
- Double_t binYCentre = hist_->GetYaxis()->GetBinCenter(i+1);
- Double_t binYLowerEdge = hist_->GetYaxis()->GetBinLowEdge(i+1);
- Double_t binYUpperEdge = hist_->GetYaxis()->GetBinUpEdge(i+1);
-
- if ( squareDP_ ) {
- if ( kinematics_->withinSqDPLimits( binXCentre, binYCentre ) ||
- kinematics_->withinSqDPLimits( binXLowerEdge, binYLowerEdge ) ||
- kinematics_->withinSqDPLimits( binXUpperEdge, binYUpperEdge ) ||
- kinematics_->withinSqDPLimits( binXLowerEdge, binYUpperEdge ) ||
- kinematics_->withinSqDPLimits( binXUpperEdge, binYLowerEdge ) ) {
-
- totalContent += this->getBinHistValue( i, j );
- ++binsWithinDPBoundary;
- }
- } else {
- if ( kinematics_->withinDPLimits( binXCentre, binYCentre ) ||
- kinematics_->withinDPLimits( binXLowerEdge, binYLowerEdge ) ||
- kinematics_->withinDPLimits( binXUpperEdge, binYUpperEdge ) ||
- kinematics_->withinDPLimits( binXLowerEdge, binYUpperEdge ) ||
- kinematics_->withinDPLimits( binXUpperEdge, binYLowerEdge ) ) {
-
- totalContent += this->getBinHistValue( i, j );
- ++binsWithinDPBoundary;
- }
- }
- }
- }
-
- return totalContent/binsWithinDPBoundary;
-}
-
-
diff --git a/src/Lau2DSplineDP.cc b/src/Lau2DSplineDP.cc
new file mode 100644
index 0000000..ba0f302
--- /dev/null
+++ b/src/Lau2DSplineDP.cc
@@ -0,0 +1,83 @@
+
+// Copyright University of Warwick 2004 - 2013.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors:
+// Thomas Latham
+// John Back
+// Paul Harrison
+
+/*! \file Lau2DSplineDP.cc
+ \brief File containing implementation of Lau2DSplineDP class.
+*/
+
+#include <iostream>
+
+#include "TAxis.h"
+#include "TH2.h"
+#include "TRandom.h"
+#include "TSystem.h"
+
+#include "Lau2DSplineDP.hh"
+#include "Lau2DCubicSpline.hh"
+#include "LauDaughters.hh"
+#include "LauKinematics.hh"
+#include "LauRandom.hh"
+
+ClassImp(Lau2DSplineDP)
+
+
+Lau2DSplineDP::Lau2DSplineDP(const TH2* hist, const LauDaughters* daughters,
+ Bool_t fluctuateBins, Double_t avEff, Double_t avEffError,
+ Bool_t useUpperHalfOnly, Bool_t squareDP) :
+ Lau2DAbsHistDP(daughters,useUpperHalfOnly,squareDP),
+ spline_(0)
+{
+ //We may need to modify the histogram so clone it
+ TH2* tempHist(hist ? dynamic_cast<TH2*>(hist->Clone()) : 0);
+
+ if ( ! tempHist ) {
+ std::cerr << "ERROR in Lau2DSplineDP constructor : the histogram pointer is null." << std::endl;
+ gSystem->Exit(EXIT_FAILURE);
+ }
+
+ if (fluctuateBins) {
+ this->doBinFluctuation(tempHist);
+ }
+ if (avEff > 0.0 && avEffError > 0.0) {
+ this->raiseOrLowerBins(tempHist,avEff,avEffError);
+ }
+
+ spline_ = new Lau2DCubicSpline(*tempHist);
+
+ delete tempHist;
+}
+
+Lau2DSplineDP::~Lau2DSplineDP()
+{
+ delete spline_;
+ spline_ = 0;
+}
+
+Double_t Lau2DSplineDP::interpolateXY(Double_t x, Double_t y) const
+{
+	// This function returns the interpolated value of the histogram function
+	// for the given values of x and y by evaluating the cubic spline that was
+	// fitted to the histogram, giving a smooth interpolation between the
+	// adjacent bin centres.
+ // Here, x = m13^2, y = m23^2, or m', theta' for square DP co-ordinates
+
+ // If we're only using one half then flip co-ordinates
+ // appropriately for conventional or square DP
+ getUpperHalf(x,y);
+
+ // First ask whether the point is inside the kinematic region.
+ if (withinDPBoundaries(x,y) == kFALSE) {
+ std::cerr << "WARNING in Lau2DSplineDP::interpolateXY : Given position is outside the DP boundary, returning 0.0." << std::endl;
+ return 0.0;
+ }
+
+ return spline_->evaluate(x,y);
+
+}
diff --git a/src/LauDPDepMapPdf.cc b/src/LauDPDepMapPdf.cc
index 244ba73..1209f56 100644
--- a/src/LauDPDepMapPdf.cc
+++ b/src/LauDPDepMapPdf.cc
@@ -1,375 +1,375 @@
// Copyright University of Warwick 2009 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauDPDepMapPdf.cc
\brief File containing implementation of LauDPDepMapPdf class.
*/
#include <iostream>
#include <vector>
using std::cout;
using std::cerr;
using std::endl;
#include "TH1.h"
#include "TMath.h"
#include "TSystem.h"
#include "LauConstants.hh"
#include "LauDaughters.hh"
#include "Lau2DHistDP.hh"
#include "LauParameter.hh"
#include "LauDPDepMapPdf.hh"
ClassImp(LauDPDepMapPdf)
LauDPDepMapPdf::LauDPDepMapPdf(const std::vector<LauAbsPdf*>& pdfs,
const LauDaughters* daughters,
const TH2* dpHisto, Bool_t upperHalf) :
LauAbsPdf((!pdfs.empty() && pdfs[0]) ? pdfs[0]->varNames() : std::vector<TString>(), std::vector<LauParameter*>(), (!pdfs.empty() && pdfs[0]) ? pdfs[0]->getMinAbscissas() : LauFitData(), (!pdfs.empty() && pdfs[0]) ? pdfs[0]->getMaxAbscissas() : LauFitData()),
daughters_( new LauDaughters(*daughters) ),
pdfs_(pdfs),
dpDependence_( new Lau2DHistDP(dpHisto, daughters, kFALSE, kFALSE, 0, 0, upperHalf, daughters->squareDP()) ),
dpAxisDependence_(0),
dpAxis_( CentreDist ),
indices_(0)
{
// Constructor for the PDF map.
// The index into the PDF collection is read from the DP histogram.
// So the first thing we have to do is check the pointers are all valid.
for ( std::vector<LauAbsPdf*>::const_iterator iter = pdfs_.begin(); iter != pdfs_.end(); ++iter){
if (!(*iter)) {
cerr<<"ERROR in LauDPDepMapPdf constructor: one of the PDF pointers is null."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Next check that the abscissa ranges are the same for each PDF
if (pdfs_[0]->getMinAbscissa() != (*iter)->getMinAbscissa()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: minimum abscissa values not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
if (pdfs_[0]->getMaxAbscissa() != (*iter)->getMaxAbscissa()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: maximum abscissa values not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same number of input variables
if (pdfs_[0]->nInputVars() != (*iter)->nInputVars()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: number of input variables not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same variable name(s)
if (pdfs_[0]->varNames() != (*iter)->varNames()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: variable name(s) not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
}
// Then we need to grab all the parameters and pass them to the base class.
// This is so that when we are asked for them they can be put into the fit.
for ( std::vector<LauAbsPdf*>::const_iterator iter = pdfs_.begin(); iter != pdfs_.end(); ++iter){
std::vector<LauParameter*>& pdfpars = (*iter)->getParameters();
this->addParameters(pdfpars);
}
// Cache the normalisation factor
this->calcNorm();
}
LauDPDepMapPdf::LauDPDepMapPdf(const std::vector<LauAbsPdf*>& pdfs,
const LauDaughters* daughters,
const TH1* dpAxisHisto,
DPAxis dpAxis) :
LauAbsPdf((!pdfs.empty() && pdfs[0]) ? pdfs[0]->varNames() : std::vector<TString>(), std::vector<LauParameter*>(), (!pdfs.empty() && pdfs[0]) ? pdfs[0]->getMinAbscissas() : LauFitData(), (!pdfs.empty() && pdfs[0]) ? pdfs[0]->getMaxAbscissas() : LauFitData()),
daughters_( new LauDaughters(*daughters) ),
pdfs_(pdfs),
dpDependence_( 0 ),
dpAxisDependence_(dpAxisHisto ? dynamic_cast<TH1*>(dpAxisHisto->Clone()) : 0),
dpAxis_( dpAxis ),
indices_( 0 )
{
// Constructor for the PDFs map.
// The index into the PDF collection is read from one of
// the DP axes.
if ( dpAxisDependence_ ) {
dpAxisDependence_->SetDirectory(0);
}
// So the first thing we have to do is check the pointers are all valid.
for ( std::vector<LauAbsPdf*>::const_iterator iter = pdfs_.begin(); iter != pdfs_.end(); ++iter){
if (!(*iter)) {
cerr<<"ERROR in LauDPDepMapPdf constructor: one of the PDF pointers is null."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Next check that the abscissa ranges are the same for each PDF
if (pdfs_[0]->getMinAbscissa() != (*iter)->getMinAbscissa()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: minimum abscissa values not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
if (pdfs_[0]->getMaxAbscissa() != (*iter)->getMaxAbscissa()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: maximum abscissa values not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same number of input variables
if (pdfs_[0]->nInputVars() != (*iter)->nInputVars()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: number of input variables not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same variable name(s)
if (pdfs_[0]->varNames() != (*iter)->varNames()) {
cerr<<"ERROR in LauDPDepMapPdf constructor: variable name(s) not the same for two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
}
// Then we need to grab all the parameters and pass them to the base class.
// This is so that when we are asked for them they can be put into the fit.
for ( std::vector<LauAbsPdf*>::const_iterator iter = pdfs_.begin(); iter != pdfs_.end(); ++iter){
std::vector<LauParameter*>& pdfpars = (*iter)->getParameters();
this->addParameters(pdfpars);
}
// Cache the normalisation factor
this->calcNorm();
}
LauDPDepMapPdf::~LauDPDepMapPdf()
{
// Destructor
delete daughters_; daughters_ = 0;
delete dpDependence_; dpDependence_ = 0;
delete dpAxisDependence_; dpAxisDependence_ = 0;
}
-LauDPDepMapPdf::LauDPDepMapPdf(const LauDPDepMapPdf& other) : LauAbsPdf(other.varName(), other.getParameters(), other.getMinAbscissa(), other.getMaxAbscissa()),
+/*LauDPDepMapPdf::LauDPDepMapPdf(const LauDPDepMapPdf& other) : LauAbsPdf(other.varName(), other.getParameters(), other.getMinAbscissa(), other.getMaxAbscissa()),
daughters_( other.daughters_ ),
pdfs_( other.pdfs_ ),
dpDependence_( (other.dpDependence_) ? new Lau2DHistDP( *other.dpDependence_ ) : 0 ),
dpAxisDependence_( other.dpAxisDependence_ ? dynamic_cast<TH1*>( other.dpAxisDependence_->Clone()) : 0),
dpAxis_( other.dpAxis_ ),
indices_( other.indices_ )
{
// Copy constructor
this->setRandomFun(other.getRandomFun());
this->calcNorm();
-}
+}*/
UInt_t LauDPDepMapPdf::determineDPRegion( Double_t m13Sq, Double_t m23Sq ) const
{
UInt_t regionIndex(0);
LauKinematics* kinematics = daughters_->getKinematics();
kinematics->updateKinematics( m13Sq, m23Sq );
if ( dpDependence_ ) {
if (daughters_->squareDP()){
Double_t mprime = kinematics->getmPrime();
Double_t thprime = kinematics->getThetaPrime();
regionIndex = static_cast<UInt_t>( dpDependence_->interpolateXY( mprime, thprime ) );
} else {
regionIndex = static_cast<UInt_t>( dpDependence_->interpolateXY( m13Sq, m23Sq ) );
}
} else if ( dpAxisDependence_ ) {
Double_t dpPos(0.0);
if ( dpAxis_ == M12 ) {
dpPos = kinematics->calcThirdMassSq(m13Sq,m23Sq);
} else if ( dpAxis_ == M13 ) {
dpPos = m13Sq;
} else if ( dpAxis_ == M23 ) {
dpPos = m23Sq;
} else if ( dpAxis_ == MMIN ) {
dpPos = TMath::Min( m13Sq, m23Sq );
} else if ( dpAxis_ == MMAX ) {
dpPos = TMath::Max( m13Sq, m23Sq );
} else {
dpPos = kinematics->distanceFromDPCentre(m13Sq,m23Sq);
}
Int_t bin = dpAxisDependence_->FindFixBin( dpPos );
regionIndex = static_cast<UInt_t>( dpAxisDependence_->GetBinContent( bin ) );
} else {
// This should never happen
cerr << "ERROR in LauDPDepMapPdf::determineDPRegion : No means of determining the region!" << endl;
gSystem->Exit(EXIT_FAILURE);
}
return regionIndex;
}
void LauDPDepMapPdf::calcLikelihoodInfo(const LauAbscissas& abscissas)
{
// Check that the given abscissa is within the allowed range
if (!this->checkRange(abscissas)) {
gSystem->Exit(EXIT_FAILURE);
}
// Create a set of absissas that doesn't contain the DP variables
UInt_t nVars = this->nInputVars();
LauAbscissas noDPVars( nVars );
for ( UInt_t i(0); i < nVars; ++i ) {
noDPVars[i] = abscissas[i];
}
// Find which region of the DP we're in
// The DP variables will be abscissas[nInputVars] and
// abscissas[nInputVars+1] (if present).
UInt_t regionIndex(0);
if ( abscissas.size() == nVars+2 ) {
Double_t m13Sq = abscissas[nVars];
Double_t m23Sq = abscissas[nVars+1];
regionIndex = this->determineDPRegion( m13Sq, m23Sq );
} else {
// This should never happen
cerr << "ERROR in LauDPDepMapPdf::calcLikelihoodInfo : DP vars not supplied, no means of determining the region!" << endl;
gSystem->Exit(EXIT_FAILURE);
}
// Check that the region index is valid
if ( regionIndex >= pdfs_.size() ) {
cerr << "ERROR in LauDPDepMapPdf::calcLikelihoodInfo : No PDF supplied for region " << regionIndex << endl;
gSystem->Exit(EXIT_FAILURE);
}
// Evaluate the normalised PDF values
LauAbsPdf* pdf = pdfs_[regionIndex];
if ( pdf->isDPDependent() ) {
pdf->calcLikelihoodInfo(abscissas);
} else {
pdf->calcLikelihoodInfo(noDPVars);
}
Double_t result = pdf->getUnNormLikelihood();
this->setUnNormPDFVal(result);
Double_t norm = pdf->getNorm();
this->setNorm(norm);
}
void LauDPDepMapPdf::calcNorm()
{
// Nothing to do here, since it is already normalized
this->setNorm(1.0);
}
void LauDPDepMapPdf::calcPDFHeight( const LauKinematics* kinematics )
{
// This method gives you the maximum possible height of the PDF.
// It combines the maximum heights of the two individual PDFs.
// So it would give the true maximum if the two individual maxima coincided.
// It is guaranteed to always be >= the true maximum.
Double_t m13Sq = kinematics->getm13Sq();
Double_t m23Sq = kinematics->getm23Sq();
// Find which region of the DP we're in
UInt_t regionIndex = this->determineDPRegion( m13Sq, m23Sq );
// Check that the region index is valid
if ( regionIndex >= pdfs_.size() ) {
cerr << "ERROR in LauDPDepMapPdf::calcPDFHeight : No PDF supplied for region " << regionIndex << endl;
gSystem->Exit(EXIT_FAILURE);
}
// Evaluate the normalised PDF values
LauAbsPdf* pdf = pdfs_[regionIndex];
// Update the heights of the individual PDFs
pdf->calcPDFHeight( kinematics );
// Find the PDF maximum
Double_t height = pdf->getMaxHeight();
this->setMaxHeight(height);
}
void LauDPDepMapPdf::checkPositiveness()
{
for ( std::vector<LauAbsPdf*>::const_iterator iter = pdfs_.begin(); iter != pdfs_.end(); ++iter){
(*iter)->checkPositiveness();
}
}
// Override the base class methods for cacheInfo and calcLikelihoodInfo(UInt_t iEvt).
// For both of these we delegate some functionality to the constituent PDFs.
void LauDPDepMapPdf::cacheInfo(const LauFitDataTree& inputData)
{
// delegate to the sub-PDFs to cache their information
for ( std::vector<LauAbsPdf*>::const_iterator iter = pdfs_.begin(); iter != pdfs_.end(); ++iter){
(*iter)->cacheInfo( inputData );
}
// now check that the DP variables are included in the data
Bool_t hasBranch = inputData.haveBranch( "m13Sq" );
hasBranch &= inputData.haveBranch( "m23Sq" );
if (!hasBranch) {
cerr<<"ERROR in LauDPDepMapPdf::cacheInfo : Input data does not contain Dalitz plot variables m13Sq and/or m23Sq."<<endl;
return;
}
// determine whether we are caching our PDF value
Bool_t doCaching( this->nFixedParameters() == this->nParameters() );
this->cachePDF( doCaching );
if ( !doCaching ) {
// in this case we seem to be doing a fit where the parameters are floating
// so need to mark that the PDF height is no longer up to date
this->heightUpToDate(kFALSE);
}
// clear the vector and reserve enough space
UInt_t nEvents = inputData.nEvents();
indices_.clear();
indices_.reserve(nEvents);
// loop through the events, determine the fraction and store
for (UInt_t iEvt = 0; iEvt < nEvents; ++iEvt) {
const LauFitData& dataValues = inputData.getData(iEvt);
Double_t m13Sq = dataValues.find("m13Sq")->second;
Double_t m23Sq = dataValues.find("m23Sq")->second;
UInt_t regionIndex = this->determineDPRegion( m13Sq, m23Sq );
indices_.push_back( regionIndex );
}
}
void LauDPDepMapPdf::calcLikelihoodInfo(UInt_t iEvt)
{
// Get the fraction value for this event
UInt_t regionIndex = indices_[iEvt];
// Evaluate the normalised PDF value
LauAbsPdf* pdf = pdfs_[regionIndex];
pdf->calcLikelihoodInfo(iEvt);
Double_t result = pdf->getUnNormLikelihood();
this->setUnNormPDFVal(result);
Double_t norm = pdf->getNorm();
this->setNorm(norm);
}
diff --git a/src/LauDPDepSumPdf.cc b/src/LauDPDepSumPdf.cc
index 21ee51d..8fe7a68 100644
--- a/src/LauDPDepSumPdf.cc
+++ b/src/LauDPDepSumPdf.cc
@@ -1,436 +1,440 @@
// Copyright University of Warwick 2009 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauDPDepSumPdf.cc
\brief File containing implementation of LauDPDepSumPdf class.
*/
#include <iostream>
#include <vector>
using std::cout;
using std::cerr;
using std::endl;
using std::vector;
#include "TMath.h"
#include "TSystem.h"
#include "LauConstants.hh"
#include "LauDaughters.hh"
#include "LauEffModel.hh"
#include "LauParameter.hh"
#include "LauDPDepSumPdf.hh"
ClassImp(LauDPDepSumPdf)
LauDPDepSumPdf::LauDPDepSumPdf(LauAbsPdf* pdf1, LauAbsPdf* pdf2,
const LauDaughters* daughters,
- const TH2* dpHisto, Bool_t upperHalf) :
+ const TH2* dpHisto, Bool_t upperHalf, Bool_t useSpline) :
LauAbsPdf(pdf1 ? pdf1->varNames() : vector<TString>(), vector<LauParameter*>(), pdf1 ? pdf1->getMinAbscissas() : LauFitData(), pdf1 ? pdf1->getMaxAbscissas() : LauFitData()),
daughters_( new LauDaughters(*daughters) ),
pdf1_(pdf1),
pdf2_(pdf2),
frac_(0),
fracVal_(0.5),
dpDependence_( new LauEffModel(daughters, 0) ),
dpAxis_( CentreDist )
{
// Constructor for the sum PDF.
// We are defining the sum as:
// f x (PDF1/S(PDF1)) + (1-f) x (PDF2/S(PDF2))
// where f is the fraction, x is multiplication, PDFi is the i'th PDF,
// and S(PDFi) is the integral of the i'th PDF.
// The value of the fraction is read from the DP histogram.
- dpDependence_->setEffHisto( dpHisto, kTRUE, kFALSE, 0.0, 0.0, upperHalf, daughters->squareDP() );
+ if(useSpline) {
+ dpDependence_->setEffSpline( dpHisto, kFALSE, 0.0, 0.0, upperHalf, daughters->squareDP());
+ } else {
+ dpDependence_->setEffHisto( dpHisto, kTRUE, kFALSE, 0.0, 0.0, upperHalf, daughters->squareDP() );
+ }
// So the first thing we have to do is check the pointers are all valid.
if (!pdf1 || !pdf2) {
cerr<<"ERROR in LauDPDepSumPdf constructor: one of the 2 PDF pointers is null."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Next check that the abscissa ranges are the same for each PDF
if (pdf1->getMinAbscissa() != pdf2->getMinAbscissa()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: minimum abscissa values not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
if (pdf1->getMaxAbscissa() != pdf2->getMaxAbscissa()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: maximum abscissa values not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same number of input variables
if (pdf1->nInputVars() != pdf2->nInputVars()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: number of input variables not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same variable name(s)
if (pdf1->varNames() != pdf2->varNames()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: variable name(s) not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Then we need to grab all the parameters and pass them to the base class.
// This is so that when we are asked for them they can be put into the fit.
// The number of parameters is the number in PDF1 + the number in PDF2.
UInt_t nPar(pdf1->nParameters()+pdf2->nParameters());
vector<LauParameter*> params; params.reserve(nPar);
vector<LauParameter*>& pdf1pars = pdf1->getParameters();
vector<LauParameter*>& pdf2pars = pdf2->getParameters();
for (vector<LauParameter*>::iterator iter = pdf1pars.begin(); iter != pdf1pars.end(); ++iter) {
params.push_back(*iter);
}
for (vector<LauParameter*>::iterator iter = pdf2pars.begin(); iter != pdf2pars.end(); ++iter) {
params.push_back(*iter);
}
this->addParameters(params);
// Cache the normalisation factor
this->calcNorm();
}
LauDPDepSumPdf::LauDPDepSumPdf(LauAbsPdf* pdf1, LauAbsPdf* pdf2,
LauParameter* frac,
const LauDaughters* daughters,
const std::vector<Double_t>& fracCoeffs,
DPAxis dpAxis) :
LauAbsPdf(pdf1 ? pdf1->varNames() : vector<TString>(), vector<LauParameter*>(), pdf1 ? pdf1->getMinAbscissas() : LauFitData(), pdf1 ? pdf1->getMaxAbscissas() : LauFitData()),
daughters_( new LauDaughters(*daughters) ),
pdf1_(pdf1),
pdf2_(pdf2),
frac_(frac),
fracVal_( frac ? frac->value() : 0.0 ),
dpDependence_( 0 ),
fracCoeffs_( fracCoeffs ),
dpAxis_( dpAxis )
{
// Constructor for the sum PDF.
// We are defining the sum as:
// f x (PDF1/S(PDF1)) + (1-f) x (PDF2/S(PDF2))
// where f is the fraction, x is multiplication, PDFi is the i'th PDF,
// and S(PDFi) is the integral of the i'th PDF.
// The value of the fraction has a polynomial dependence on one of
// the DP axes.
// So the first thing we have to do is check the pointers are all valid.
if (!pdf1 || !pdf2) {
cerr<<"ERROR in LauDPDepSumPdf constructor: one of the 2 PDF pointers is null."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
if ( !frac ) {
cerr<<"ERROR in LauDPDepSumPdf constructor: the fraction parameter pointer is null."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Next check that the abscissa ranges are the same for each PDF
if (pdf1->getMinAbscissa() != pdf2->getMinAbscissa()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: minimum abscissa values not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
if (pdf1->getMaxAbscissa() != pdf2->getMaxAbscissa()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: maximum abscissa values not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same number of input variables
if (pdf1->nInputVars() != pdf2->nInputVars()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: number of input variables not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Also check that both PDFs are expecting the same variable name(s)
if (pdf1->varNames() != pdf2->varNames()) {
cerr<<"ERROR in LauDPDepSumPdf constructor: variable name(s) not the same for the two PDFs."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Then we need to grab all the parameters and pass them to the base class.
// This is so that when we are asked for them they can be put into the fit.
// The number of parameters is the number in PDF1 + the number in PDF2.
UInt_t nPar( pdf1->nParameters() + pdf2->nParameters() + 1 );
vector<LauParameter*> params; params.reserve(nPar);
params.push_back(frac);
vector<LauParameter*>& pdf1pars = pdf1->getParameters();
vector<LauParameter*>& pdf2pars = pdf2->getParameters();
for (vector<LauParameter*>::iterator iter = pdf1pars.begin(); iter != pdf1pars.end(); ++iter) {
params.push_back(*iter);
}
for (vector<LauParameter*>::iterator iter = pdf2pars.begin(); iter != pdf2pars.end(); ++iter) {
params.push_back(*iter);
}
this->addParameters(params);
// Now check that we can find the fraction parameter ok
frac_ = this->findParameter("frac");
if (frac_ == 0) {
cerr<<"ERROR in LauDPDepSumPdf constructor: parameter \"frac\" not found."<<endl;
gSystem->Exit(EXIT_FAILURE);
}
// Cache the normalisation factor
this->calcNorm();
}
LauDPDepSumPdf::~LauDPDepSumPdf()
{
// Destructor
delete daughters_; daughters_ = 0;
delete dpDependence_; dpDependence_ = 0;
}
-LauDPDepSumPdf::LauDPDepSumPdf(const LauDPDepSumPdf& other) : LauAbsPdf(other.varName(), other.getParameters(), other.getMinAbscissa(), other.getMaxAbscissa()),
+/*LauDPDepSumPdf::LauDPDepSumPdf(const LauDPDepSumPdf& other) : LauAbsPdf(other.varName(), other.getParameters(), other.getMinAbscissa(), other.getMaxAbscissa()),
daughters_( other.daughters_ ),
pdf1_( other.pdf1_ ),
pdf2_( other.pdf2_ ),
frac_( other.frac_ ),
fracVal_( other.fracVal_ ),
dpDependence_( (other.dpDependence_) ? new LauEffModel( *other.dpDependence_ ) : 0 ),
fracCoeffs_( other.fracCoeffs_ ),
dpAxis_( other.dpAxis_ ),
fractions_( other.fractions_ )
{
// Copy constructor
this->setRandomFun(other.getRandomFun());
this->calcNorm();
-}
+}*/
void LauDPDepSumPdf::calcLikelihoodInfo(const LauAbscissas& abscissas)
{
// Check that the given abscissa is within the allowed range
if (!this->checkRange(abscissas)) {
gSystem->Exit(EXIT_FAILURE);
}
LauAbscissas noDPVars(1);
noDPVars[0] = abscissas[0];
// Evaluate the normalised PDF values
if ( pdf1_->isDPDependent() ) {
pdf1_->calcLikelihoodInfo(abscissas);
} else {
pdf1_->calcLikelihoodInfo(noDPVars);
}
if ( pdf2_->isDPDependent() ) {
pdf2_->calcLikelihoodInfo(abscissas);
} else {
pdf2_->calcLikelihoodInfo(noDPVars);
}
Double_t result1 = pdf1_->getLikelihood();
Double_t result2 = pdf2_->getLikelihood();
// Determine the fraction
// The DP variables will be abscissas[nInputVars] and
// abscissas[nInputVars+1] (if present).
UInt_t nVars = this->nInputVars();
if ( abscissas.size() == nVars+2 ) {
Double_t m13Sq = abscissas[nVars];
Double_t m23Sq = abscissas[nVars+1];
LauKinematics* kinematics = daughters_->getKinematics();
if ( dpDependence_ ) {
kinematics->updateKinematics( m13Sq, m23Sq );
fracVal_ = dpDependence_->calcEfficiency( kinematics );
} else {
fracVal_ = frac_->value();
Double_t dpPos(0.0);
if ( dpAxis_ == M12 ) {
dpPos = kinematics->calcThirdMassSq(m13Sq,m23Sq);
} else if ( dpAxis_ == M13 ) {
dpPos = m13Sq;
} else if ( dpAxis_ == M23 ) {
dpPos = m23Sq;
} else if ( dpAxis_ == MMIN ) {
dpPos = TMath::Min( m13Sq, m23Sq );
} else if ( dpAxis_ == MMAX ) {
dpPos = TMath::Max( m13Sq, m23Sq );
} else {
dpPos = kinematics->distanceFromDPCentre(m13Sq,m23Sq);
}
this->scaleFrac( dpPos );
}
}
// Add them together
Double_t result = fracVal_ * result1 + (1.0-fracVal_) * result2;
this->setUnNormPDFVal(result);
}
void LauDPDepSumPdf::scaleFrac( Double_t dpPos )
{
Int_t power = 1;
for (std::vector<Double_t>::const_iterator iter = fracCoeffs_.begin(); iter != fracCoeffs_.end(); ++iter) {
Double_t coeff = (*iter);
fracVal_ += coeff * TMath::Power(dpPos,power);
++power;
}
}
void LauDPDepSumPdf::calcNorm()
{
// Nothing to do here, since it is already normalized
this->setNorm(1.0);
}
void LauDPDepSumPdf::calcPDFHeight( const LauKinematics* kinematics )
{
// This method gives you the maximum possible height of the PDF.
// It combines the maximum heights of the two individual PDFs.
// So it would give the true maximum if the two individual maxima coincided.
// It is guaranteed to always be >= the true maximum.
// Update the heights of the individual PDFs
pdf1_->calcPDFHeight( kinematics );
pdf2_->calcPDFHeight( kinematics );
// Get the up to date parameter values
if ( dpDependence_ ) {
fracVal_ = dpDependence_->calcEfficiency( kinematics );
} else {
fracVal_ = frac_->value();
Double_t dpPos(0.0);
if ( dpAxis_ == M12 ) {
dpPos = kinematics->getm12Sq();
} else if ( dpAxis_ == M13 ) {
dpPos = kinematics->getm13Sq();
} else if ( dpAxis_ == M23 ) {
dpPos = kinematics->getm23Sq();
} else if ( dpAxis_ == MMIN ) {
Double_t m13Sq = kinematics->getm13Sq();
Double_t m23Sq = kinematics->getm23Sq();
dpPos = TMath::Min( m13Sq, m23Sq );
} else if ( dpAxis_ == MMAX ) {
Double_t m13Sq = kinematics->getm13Sq();
Double_t m23Sq = kinematics->getm23Sq();
dpPos = TMath::Max( m13Sq, m23Sq );
} else {
dpPos = kinematics->distanceFromDPCentre();
}
this->scaleFrac( dpPos );
}
// Find the (un-normalised) individual PDF maxima
Double_t height1 = pdf1_->getMaxHeight();
Double_t height2 = pdf2_->getMaxHeight();
// Get the individual PDF normalisation factors
Double_t norm1 = pdf1_->getNorm();
Double_t norm2 = pdf2_->getNorm();
// Calculate the normalised individual PDF maxima
height1 /= norm1;
height2 /= norm2;
// Combine these heights together
Double_t height = fracVal_ * height1 + (1-fracVal_) * height2;
this->setMaxHeight(height);
}
void LauDPDepSumPdf::checkPositiveness()
{
pdf1_->checkPositiveness();
pdf2_->checkPositiveness();
}
// Override the base class methods for cacheInfo and calcLikelihoodInfo(UInt_t iEvt).
// For both of these we delegate to the two constituent PDFs.
void LauDPDepSumPdf::cacheInfo(const LauFitDataTree& inputData)
{
// delegate to the two sub-PDFs to cache their information
pdf1_->cacheInfo(inputData);
pdf2_->cacheInfo(inputData);
// now check that the DP variables are included in the data
Bool_t hasBranch = inputData.haveBranch( "m13Sq" );
hasBranch &= inputData.haveBranch( "m23Sq" );
if (!hasBranch) {
cerr<<"ERROR in LauDPDepSumPdf::cacheInfo : Input data does not contain Dalitz plot variables m13Sq and/or m23Sq."<<endl;
return;
}
// determine whether we are caching our PDF value
Bool_t doCaching( this->nFixedParameters() == this->nParameters() );
this->cachePDF( doCaching );
if ( !doCaching ) {
// in this case we seem to be doing a fit where the parameters are floating
// so need to mark that the PDF height is no longer up to date
this->heightUpToDate(kFALSE);
}
// clear the vector and reserve enough space
UInt_t nEvents = inputData.nEvents();
fractions_.clear();
fractions_.reserve(nEvents);
// loop through the events, determine the fraction and store
for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) {
const LauFitData& dataValues = inputData.getData(iEvt);
Double_t m13Sq = dataValues.find("m13Sq")->second;
Double_t m23Sq = dataValues.find("m23Sq")->second;
LauKinematics* kinematics = daughters_->getKinematics();
if ( dpDependence_ ) {
// if we're using the histogram then just
// determine the fraction and store
kinematics->updateKinematics( m13Sq, m23Sq );
fracVal_ = dpDependence_->calcEfficiency( kinematics );
} else {
// if we're scaling the fraction parameter then we
// just store the scaling info since the parameter
// might be floating
fracVal_ = frac_->value();
Double_t dpPos(0.0);
if ( dpAxis_ == M12 ) {
dpPos = kinematics->calcThirdMassSq(m13Sq,m23Sq);
} else if ( dpAxis_ == M13 ) {
dpPos = m13Sq;
} else if ( dpAxis_ == M23 ) {
dpPos = m23Sq;
} else if ( dpAxis_ == MMIN ) {
dpPos = TMath::Min( m13Sq, m23Sq );
} else if ( dpAxis_ == MMAX ) {
dpPos = TMath::Max( m13Sq, m23Sq );
} else {
dpPos = kinematics->distanceFromDPCentre(m13Sq,m23Sq);
}
this->scaleFrac( dpPos );
fracVal_ -= frac_->value();
}
fractions_.push_back( fracVal_ );
}
}
void LauDPDepSumPdf::calcLikelihoodInfo(UInt_t iEvt)
{
	// Compute the un-normalised PDF value for the cached event iEvt as a
	// fraction-weighted sum of the two component PDFs.

	// Retrieve the per-event fraction stored by cacheInfo.
	fracVal_ = fractions_[iEvt];

	// When the fraction is driven by a (possibly floating) parameter, the
	// cache holds only the scaling offset, so the current parameter value
	// must be added back in here.
	if ( frac_ != 0 ) {
		fracVal_ += frac_->value();
	}

	// Evaluate both normalised component PDFs for this event.
	pdf1_->calcLikelihoodInfo(iEvt);
	pdf2_->calcLikelihoodInfo(iEvt);
	const Double_t like1 = pdf1_->getLikelihood();
	const Double_t like2 = pdf2_->getLikelihood();

	// Combine: frac * PDF1 + (1 - frac) * PDF2.
	this->setUnNormPDFVal( fracVal_*like1 + (1.0 - fracVal_)*like2 );
}
diff --git a/src/LauEffModel.cc b/src/LauEffModel.cc
index e7982ca..df1ec86 100644
--- a/src/LauEffModel.cc
+++ b/src/LauEffModel.cc
@@ -1,142 +1,175 @@
// Copyright University of Warwick 2004 - 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Authors:
// Thomas Latham
// John Back
// Paul Harrison
/*! \file LauEffModel.cc
\brief File containing implementation of LauEffModel class.
*/
#include <cstdlib>
#include <iostream>
using std::cout;
using std::cerr;
using std::endl;
#include "TSystem.h"
#include "Lau2DHistDP.hh"
+#include "Lau2DSplineDP.hh"
#include "LauDaughters.hh"
#include "LauEffModel.hh"
#include "LauKinematics.hh"
#include "LauVetoes.hh"
ClassImp(LauEffModel)
LauEffModel::LauEffModel(const LauDaughters* daughters, const LauVetoes* vetoes) :
	daughters_( daughters ),
	vetoes_( vetoes ),
	effHisto_( 0 ),
	squareDP_( kFALSE ),
	fluctuateEffHisto_( kFALSE )
{
	// The daughters object is mandatory - abort the job if it is missing.
	// A null vetoes pointer is permitted (no vetoes are then applied).
	if ( ! daughters_ ) {
		cerr << "ERROR in LauEffModel Constructor : invalid pointer to daughters object supplied." << endl;
		gSystem->Exit(EXIT_FAILURE);
	}
}
-LauEffModel::LauEffModel( const LauEffModel& rhs ) :
+/*LauEffModel::LauEffModel( const LauEffModel& rhs ) :
daughters_( rhs.daughters_ ),
vetoes_( rhs.vetoes_ ),
effHisto_( rhs.effHisto_ ? new Lau2DHistDP( *rhs.effHisto_ ) : 0 ),
squareDP_( rhs.squareDP_ ),
fluctuateEffHisto_( rhs.fluctuateEffHisto_ )
{
-}
+}*/
LauEffModel::~LauEffModel()
{
	// Release the owned efficiency representation.
	// (delete on a null pointer is a no-op, so no guard is required)
	delete effHisto_;
	effHisto_ = 0;
}
void LauEffModel::setEffHisto(const TH2* effHisto, Bool_t useInterpolation,
Bool_t fluctuateBins, Double_t avEff, Double_t absError,
Bool_t useUpperHalfOnly, Bool_t squareDP)
{
// Set the efficiency across the Dalitz plot using a predetermined 2D histogram
// with x = m_13^2, y = m_23^2.
Bool_t upperHalf( kFALSE );
if ( daughters_->gotSymmetricalDP() && useUpperHalfOnly == kTRUE) {upperHalf = kTRUE;}
cout<<"Efficiency histogram has upperHalf = "<<static_cast<Int_t>(upperHalf)<<endl;
squareDP_ = squareDP;
+ if(effHisto_) {
+ delete effHisto_;
+ effHisto_=0;
+ }
+
// Copy the histogram.
effHisto_ = new Lau2DHistDP(effHisto, daughters_,
useInterpolation, fluctuateBins,
avEff, absError, upperHalf, squareDP);
fluctuateEffHisto_ = fluctuateBins;
if (avEff > 0.0 && absError > 0.0) {
fluctuateEffHisto_ = kTRUE;
}
}
+void LauEffModel::setEffSpline(const TH2* effHisto,
+ Bool_t fluctuateBins, Double_t avEff, Double_t absError,
+ Bool_t useUpperHalfOnly, Bool_t squareDP)
+{
+ // Set the efficiency across the Dalitz plot using a predetermined 2D histogram
+ // with x = m_13^2, y = m_23^2.
+ Bool_t upperHalf( kFALSE );
+ if ( daughters_->gotSymmetricalDP() && useUpperHalfOnly == kTRUE) {upperHalf = kTRUE;}
+ cout<<"Efficiency histogram has upperHalf = "<<static_cast<Int_t>(upperHalf)<<endl;
+
+ squareDP_ = squareDP;
+
+ if(effHisto_) {
+ delete effHisto_;
+ effHisto_=0;
+ }
+
+ // Copy the histogram.
+ effHisto_ = new Lau2DSplineDP(effHisto, daughters_,
+ fluctuateBins, avEff, absError, upperHalf, squareDP);
+ fluctuateEffHisto_ = fluctuateBins;
+
+ if (avEff > 0.0 && absError > 0.0) {
+ fluctuateEffHisto_ = kTRUE;
+ }
+}
+
Double_t LauEffModel::getEffHistValue(Double_t xVal, Double_t yVal) const
{
	// Look up the efficiency at the given DP coordinates from the 2D
	// representation; with no histogram set the efficiency defaults to 1.
	if ( effHisto_ == 0 ) {
		return 1.0;
	}
	return effHisto_->interpolateXY(xVal, yVal);
}
Double_t LauEffModel::calcEfficiency( const LauKinematics* kinematics ) const
{
	// Calculate the efficiency for the given point in the Dalitz plot,
	// using the 2D histogram supplied via the setEffHisto() function.

	// Any point failing a veto has zero efficiency.
	if ( vetoes_ != 0 && ! vetoes_->passVeto(kinematics) ) {
		return 0.0;
	}

	// Look up the efficiency from the 2D representation, using either the
	// square-DP coordinates (m', theta') or the conventional coordinates
	// (m13^2, m23^2) depending on how the histogram was defined.
	Double_t eff(1.0);
	if ( squareDP_ == kTRUE ) {
		eff = this->getEffHistValue( kinematics->getmPrime(), kinematics->getThetaPrime() );
	} else {
		eff = this->getEffHistValue( kinematics->getm13Sq(), kinematics->getm23Sq() );
	}

	// Clamp the efficiency into the physically allowed range [0,1],
	// warning the user since out-of-range values indicate a bad histogram.
	if ( eff < 0.0 ) {
		std::cerr << "WARNING in LauEffModel::calcEfficiency : Efficiency is less than 0 - setting to 0. You may want to check your histogram!" << std::endl;
		eff = 0.0;
	} else if ( eff > 1.0 ) {
		std::cerr << "WARNING in LauEffModel::calcEfficiency : Efficiency is greater than 1 - setting to 1. You may want to check your histogram!" << std::endl;
		eff = 1.0;
	}

	return eff;
}
Bool_t LauEffModel::passVeto( const LauKinematics* kinematics ) const
{
	// Check whether the given DP point survives all configured vetoes.
	// With no veto object configured, every point passes.
	if ( vetoes_ == 0 ) {
		return kTRUE;
	}
	return vetoes_->passVeto( kinematics );
}

File Metadata

Mime Type
text/x-diff
Expires
Tue, Nov 19, 5:49 PM (1 d, 14 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
3805454
Default Alt Text
(105 KB)

Event Timeline