Page Menu
Home
HEPForge
Search
Configure Global Search
Log In
Files
F7878738
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Flag For Later
Size
37 KB
Subscribers
None
View Options
diff --git a/include/YODA/Histo1D.h b/include/YODA/Histo1D.h
--- a/include/YODA/Histo1D.h
+++ b/include/YODA/Histo1D.h
@@ -1,523 +1,551 @@
// -*- C++ -*-
//
// This file is part of YODA -- Yet more Objects for Data Analysis
// Copyright (C) 2008-2016 The YODA collaboration (see AUTHORS for details)
//
#ifndef YODA_Histo1D_h
#define YODA_Histo1D_h
#include "YODA/AnalysisObject.h"
#include "YODA/HistoBin1D.h"
#include "YODA/Dbn1D.h"
#include "YODA/Scatter2D.h"
#include "YODA/Axis1D.h"
#include "YODA/Exceptions.h"
#include <cassert>
#include <climits>
#include <cmath>
#include <map>
#include <string>
#include <vector>
namespace YODA {
/// Convenience typedef
typedef Axis1D<HistoBin1D, Dbn1D> Histo1DAxis;
/// A one-dimensional histogram.
class Histo1D : public AnalysisObject {
public:

  /// Convenience typedefs
  typedef Histo1DAxis Axis;
  typedef Axis::Bins Bins;
  typedef HistoBin1D Bin;

  /// @name Constructors
  //@{

  /// Default constructor
  Histo1D(const std::string& path="", const std::string& title="")
    : AnalysisObject("Histo1D", path, title),
      _axis()
  { }

  /// Constructor giving range and number of bins.
  Histo1D(size_t nbins, double lower, double upper,
          const std::string& path="", const std::string& title="")
    : AnalysisObject("Histo1D", path, title),
      _axis(nbins, lower, upper)
  { }

  /// @brief Constructor giving explicit bin edges.
  ///
  /// For n bins, binedges.size() == n+1, the last one being the upper bound
  /// of the last bin
  Histo1D(const std::vector<double>& binedges,
          const std::string& path="", const std::string& title="")
    : AnalysisObject("Histo1D", path, title),
      _axis(binedges)
  { }

  /// Constructor accepting an explicit collection of bins.
  Histo1D(const std::vector<Bin>& bins,
          const std::string& path="", const std::string& title="")
    : AnalysisObject("Histo1D", path, title),
      _axis(bins)
  { }

  /// Copy constructor with optional new path
  /// @todo Also allow title setting from the constructor?
  Histo1D(const Histo1D& h, const std::string& path="");

  /// Constructor from a Scatter2D's binning, with optional new path
  /// @todo Also allow title setting from the constructor?
  Histo1D(const Scatter2D& s, const std::string& path="");

  /// Constructor from a Profile1D's binning, with optional new path
  /// @todo Also allow title setting from the constructor?
  Histo1D(const Profile1D& p, const std::string& path="");

  /// @brief State-setting constructor
  ///
  /// Intended principally for internal persistency use.
  Histo1D(const std::vector<HistoBin1D>& bins,
          const Dbn1D& dbn_tot, const Dbn1D& dbn_uflow, const Dbn1D& dbn_oflow,
          const std::string& path="", const std::string& title="")
    : AnalysisObject("Histo1D", path, title),
      _axis(bins, dbn_tot, dbn_uflow, dbn_oflow)
  { }

  /// Assignment operator
  Histo1D& operator = (const Histo1D& h1) {
    AnalysisObject::operator = (h1); //< AO treatment of paths etc.
    _axis = h1._axis;
    return *this;
  }

  /// Make a copy on the stack
  Histo1D clone() const {
    return Histo1D(*this);
  }

  /// Make a copy on the heap, via 'new'
  Histo1D* newclone() const {
    return new Histo1D(*this);
  }

  //@}

  /// Fill dimension of this data object
  size_t dim() const { return 1; }

  /// @name Modifiers
  //@{

  /// @brief Reset the histogram.
  ///
  /// Keep the binning but set all bin contents and related quantities to zero
  virtual void reset() {
    _axis.reset();
  }

  /// Fill histo by value and weight
  void fill(double x, double weight=1.0);

  /// Fill histo bin i with the given weight
  void fillBin(size_t i, double weight=1.0);

  /// Rescale as if all fill weights had been different by factor @a scalefactor.
  void scaleW(double scalefactor) {
    setAnnotation("ScaledBy", annotation<double>("ScaledBy", 1.0) * scalefactor);
    _axis.scaleW(scalefactor);
  }

  /// Normalize the (visible) histo area to the @a normto value.
  ///
  /// If @a includeoverflows is true, the original normalisation is computed with
  /// the overflow bins included, so that the resulting visible normalisation can
  /// be less than @a normto. This is probably what you want.
  void normalize(double normto=1.0, bool includeoverflows=true) {
    const double oldintegral = integral(includeoverflows);
    if (oldintegral == 0) throw WeightError("Attempted to normalize a histogram with null area");
    /// @todo Check that this is the desired behaviour
    scaleW(normto / oldintegral);
  }

  /// Merge together the bin range with indices from @a from to @a to, inclusive
  void mergeBins(size_t from, size_t to) {
    _axis.mergeBins(from, to);
  }

  /// Merge every group of n bins, starting from the LHS
  void rebinBy(unsigned int n, size_t begin=0, size_t end=UINT_MAX) {
    _axis.rebinBy(n, begin, end);
  }

  /// Overloaded alias for rebinBy
  void rebin(unsigned int n, size_t begin=0, size_t end=UINT_MAX) {
    rebinBy(n, begin, end);
  }

  /// Rebin to the given list of bin edges
  void rebinTo(const std::vector<double>& newedges) {
    _axis.rebinTo(newedges);
  }

  /// Overloaded alias for rebinTo
  void rebin(const std::vector<double>& newedges) {
    rebinTo(newedges);
  }

  //@}

public:

  /// @name Bin accessors
  //@{

  /// Number of bins on this axis (not counting under/overflow)
  size_t numBins() const { return bins().size(); }

  /// Low edge of this histo's axis
  double xMin() const { return _axis.xMin(); }

  /// High edge of this histo's axis
  double xMax() const { return _axis.xMax(); }

  /// All bin edges on this histo's axis
  ///
  /// @note This only returns the finite edges, i.e. -inf and +inf are removed
  /// @todo Make the +-inf stripping controllable by a default-valued bool arg
  const std::vector<double> xEdges() const { return _axis.xEdges(); }

  /// Access the bin vector
  std::vector<YODA::HistoBin1D>& bins() { return _axis.bins(); }

  /// Access the bin vector (const version)
  const std::vector<YODA::HistoBin1D>& bins() const { return _axis.bins(); }

  /// Access a bin by index (non-const version)
  HistoBin1D& bin(size_t index) { return _axis.bins()[index]; }

  /// Access a bin by index (const version)
  const HistoBin1D& bin(size_t index) const { return _axis.bins()[index]; }

  /// Access a bin index by coordinate
  /// @todo Convert to ssize_t?
  int binIndexAt(double x) {
    return _axis.binIndexAt(x);
  }

  /// Access a bin by coordinate (const version)
  const HistoBin1D& binAt(double x) const { return _axis.binAt(x); }

  /// Access summary distribution, including gaps and overflows (non-const version)
  Dbn1D& totalDbn() { return _axis.totalDbn(); }

  /// Access summary distribution, including gaps and overflows (const version)
  const Dbn1D& totalDbn() const { return _axis.totalDbn(); }

  /// Set summary distribution, mainly for persistency: CAREFUL!
  void setTotalDbn(const Dbn1D& dbn) { _axis.setTotalDbn(dbn); }

  /// Access underflow (non-const version)
  Dbn1D& underflow() { return _axis.underflow(); }

  /// Access underflow (const version)
  const Dbn1D& underflow() const { return _axis.underflow(); }

  /// Set underflow distribution, mainly for persistency: CAREFUL!
  void setUnderflow(const Dbn1D& dbn) { _axis.setUnderflow(dbn); }

  /// Access overflow (non-const version)
  Dbn1D& overflow() { return _axis.overflow(); }

  /// Access overflow (const version)
  const Dbn1D& overflow() const { return _axis.overflow(); }

  /// Set overflow distribution, mainly for persistency: CAREFUL!
  void setOverflow(const Dbn1D& dbn) { _axis.setOverflow(dbn); }

  /// Add a new bin specifying its lower and upper bound
  void addBin(double from, double to) { _axis.addBin(from, to); }

  /// Add new bins by specifying a vector of edges
  void addBins(std::vector<double> edges) { _axis.addBins(edges); }

  // /// Add new bins specifying a beginning and end of each of them
  // void addBins(std::vector<std::pair<double,double> > edges) {
  //   _axis.addBins(edges);
  // }

  /// Add a new bin, perhaps already populated: CAREFUL!
  void addBin(const HistoBin1D& b) { _axis.addBin(b); }

  /// @brief Bins addition operator
  ///
  /// Add multiple bins without resetting
  void addBins(const Bins& bins) {
    _axis.addBins(bins);
  }

  /// Remove a bin
  void eraseBin(size_t index) { _axis.eraseBin(index); }

  //@}

  /// @name Whole histo data
  //@{

  /// Get the total area (sumW) of the histogram
  double integral(bool includeoverflows=true) const { return sumW(includeoverflows); }

  /// @brief Get the integrated area of the histogram between bins @a binindex1 and @a binindex2.
  ///
  /// @note The area of bin @a binindex2 _is_ included in the returned
  /// value. To include the underflow and overflow areas, you should add them
  /// explicitly with the underflow() and overflow() methods.
  ///
  /// @todo Allow int bin index args for type compatibility with binIndexAt()?
  double integralRange(size_t binindex1, size_t binindex2) const {
    assert(binindex2 >= binindex1);
    if (binindex1 >= numBins()) throw RangeError("binindex1 is out of range");
    if (binindex2 >= numBins()) throw RangeError("binindex2 is out of range");
    double rtn = 0;
    // BUGFIX: loop bound made inclusive of binindex2, as documented above
    // (it previously stopped at binindex2-1, also breaking integralTo()'s
    // documented inclusive behaviour)
    for (size_t i = binindex1; i <= binindex2; ++i) {
      rtn += bin(i).sumW();
    }
    return rtn;
  }

  /// @brief Get the integrated area of the histogram up to bin @a binindex.
  ///
  /// @note The area of bin @a binindex _is_ included in the returned
  /// value. To not include the underflow, set includeunderflow=false.
  ///
  /// @todo Allow int bin index args for type compatibility with binIndexAt()?
  double integralTo(size_t binindex, bool includeunderflow=true) const {
    double rtn = includeunderflow ? underflow().sumW() : 0;
    rtn += integralRange(0, binindex);
    return rtn;
  }

  /// Get the number of fills
  unsigned long numEntries(bool includeoverflows=true) const;

  /// Get the effective number of fills
  double effNumEntries(bool includeoverflows=true) const;

  /// Get sum of weights in histo
  double sumW(bool includeoverflows=true) const;

  /// Get sum of squared weights in histo
  double sumW2(bool includeoverflows=true) const;

  /// Get the mean in x
  double xMean(bool includeoverflows=true) const;

  /// Get the variance in x
  double xVariance(bool includeoverflows=true) const;

  /// Get the standard deviation in x
  double xStdDev(bool includeoverflows=true) const {
    if (includeoverflows) return _axis.totalDbn().xStdDev();
    return std::sqrt(xVariance(includeoverflows));
  }

  /// Get the standard error in x
  double xStdErr(bool includeoverflows=true) const;

  /// Get the RMS in x
  double xRMS(bool includeoverflows=true) const;

  //@}

  /// @name Adding and subtracting histograms
  //@{

  /// @brief Add another histogram to this one
  ///
  /// @note Adding histograms will unset any ScaledBy attribute from previous calls to scaleW or normalize.
  Histo1D& operator += (const Histo1D& toAdd) {
    if (hasAnnotation("ScaledBy")) rmAnnotation("ScaledBy");
    _axis += toAdd._axis;
    return *this;
    // if (!hasAnnotation("ScaledBy") && !toAdd.hasAnnotation("ScaledBy")) {
    //   _axis += toAdd._axis;
    // } else {
    //   // Undo scaling of both histograms
    //   double scaledBy = annotation<double>("ScaledBy", 1.0);
    //   _axis.scaleW(1.0/scaledBy);
    //   double toAddScaledBy = toAdd.annotation<double>("ScaledBy", 1.0);
    //   Axis1D<HistoBin1D, Dbn1D> toAddAxis = toAdd._axis;
    //   toAddAxis.scaleW(1.0/toAddScaledBy);
    //   _axis += toAddAxis;
    //   // Re-apply combined scaling
    //   double newScaledBy = scaledBy*toAddScaledBy/(scaledBy+toAddScaledBy);
    //   _axis.scaleW(newScaledBy);
    //   setAnnotation("ScaledBy", newScaledBy);
    // }
    /// @todo What about if one histo sets ScaledBy, and the other doesn't?!? Aaaargh
    // return *this;
  }

  /// @brief Subtract another histogram from this one
  ///
  /// @note Subtracting histograms will unset any ScaledBy attribute from previous calls to scaleW or normalize.
  Histo1D& operator -= (const Histo1D& toSubtract) {
    if (hasAnnotation("ScaledBy")) rmAnnotation("ScaledBy");
    _axis -= toSubtract._axis;
    return *this;
  }

  //@}

protected:

  /// Access a bin by coordinate (non-const version)
  HistoBin1D& binAt(double x) { return _axis.binAt(x); }

private:

  /// @name Bin data
  //@{

  /// Definition of bin edges and contents
  Axis1D<HistoBin1D, Dbn1D> _axis;

  //@}

};
/// Convenience typedef
typedef Histo1D H1D;
/// @name Combining histos: global operators
//@{
/// Add two histograms
/// Add two histograms, returning the sum.
///
/// The result's path is blanked if the two inputs' paths differ.
inline Histo1D add(const Histo1D& first, const Histo1D& second) {
  Histo1D result = first;
  if (second.path() != first.path()) result.setPath("");
  result += second;
  return result;
}
/// Add two histograms
/// Add two histograms (convenience operator form of add())
inline Histo1D operator + (const Histo1D& first, const Histo1D& second) {
return add(first, second);
}
/// Subtract two histograms
/// Subtract the second histogram from the first, returning the difference.
///
/// The result's path is blanked if the two inputs' paths differ.
inline Histo1D subtract(const Histo1D& first, const Histo1D& second) {
  Histo1D result = first;
  if (second.path() != first.path()) result.setPath("");
  result -= second;
  return result;
}
/// Subtract two histograms
/// Subtract two histograms (convenience operator form of subtract())
inline Histo1D operator - (const Histo1D& first, const Histo1D& second) {
return subtract(first, second);
}
/// @todo Add multiply(H1, H1) -> Scatter2D?
/// Divide two histograms, with an uncorrelated error treatment
///
/// @todo Wouldn't it be nice to be able to supply a correlation matrix or function as optional arg?
///
/// @note The two histos must have _exactly_ the same binning.
Scatter2D divide(const Histo1D& numer, const Histo1D& denom);
/// Divide two histograms, with an uncorrelated error treatment
///
/// @note The two histos must have _exactly_ the same binning.
/// Divide two histograms (convenience operator form of divide())
inline Scatter2D operator / (const Histo1D& numer, const Histo1D& denom) {
return divide(numer, denom);
}
+ /// Add histogram and scatter
+ Scatter2D add(const Histo1D& histo, const Scatter2D& scatt);
+
+ /// Add scatter and histogram (commutative: forwards to add(histo, scatt))
+ inline Scatter2D add(const Scatter2D& scatt, const Histo1D& histo) {
+ return add(histo, scatt);
+ }
+
+ /// Subtract scatter from histogram
+ Scatter2D subtract(const Histo1D& histo, const Scatter2D& scatt);
+
+ /// Subtract histogram from scatter
+ Scatter2D subtract(const Scatter2D& scatt, const Histo1D& histo);
+
+ /// Multiply histogram with scatter
+ Scatter2D multiply(const Histo1D& histo, const Scatter2D& scatt);
+
+ /// Multiply scatter with histogram (commutative: forwards to multiply(histo, scatt))
+ inline Scatter2D multiply(const Scatter2D& scatt, const Histo1D& histo) {
+ return multiply(histo, scatt);
+ }
+
+ /// Divide histogram by scatter
+ Scatter2D divide(const Histo1D& numer, const Scatter2D& denom);
+
+ /// Divide scatter by histogram
+ Scatter2D divide(const Scatter2D& numer, const Histo1D& denom);
+
/// @todo Add functions/operators on pointers
+
/// @brief Calculate a histogrammed efficiency ratio of two histograms
///
/// @note The two histos must have _exactly_ the same binning.
///
/// @note An efficiency is not the same thing as a standard division of two
/// histograms: the errors are treated as correlated via binomial statistics.
Scatter2D efficiency(const Histo1D& accepted, const Histo1D& total);
/// @brief Calculate the asymmetry (a-b)/(a+b) of two histograms
///
/// @note The two histos must have _exactly_ the same binning.
inline Scatter2D asymm(const Histo1D& a, const Histo1D& b) {
// Built from the histo +/- and / operators, so path handling and
// (uncorrelated) error treatment follow subtract()/add()/divide()
return (a-b) / (a+b);
}
/// @brief Convert a Histo1D to a Scatter2D representing the integral of the histogram
///
/// @note The integral histo errors are calculated as sqrt(binvalue), as if they
/// are uncorrelated. This is not in general true for integral histograms, so if you
/// need accurate errors you should explicitly monitor bin-to-bin correlations.
///
/// The includeunderflow param chooses whether the underflow bin is included
/// in the integral numbers as an offset.
///
/// @todo Rename/alias as mkIntegral
Scatter2D toIntegralHisto(const Histo1D& h, bool includeunderflow=true);
/// @brief Convert a Histo1D to a Scatter2D where each bin is a fraction of the total
///
/// @note This sounds weird: let's explain a bit more! Sometimes we want to
/// take a histo h, make an integral histogram H from it, and then divide H by
/// the total integral of h, such that every bin in H represents the
/// cumulative efficiency of that bin as a fraction of the total. I.e. an
/// integral histo, scaled by 1/total_integral and with binomial errors.
///
/// The includeunderflow param behaves as for toIntegral, and applies to both
/// the initial integration and the integral used for the scaling. The
/// includeoverflow param applies only to obtaining the scaling factor.
///
/// @todo Rename/alias as mkIntegralEff
Scatter2D toIntegralEfficiencyHisto(const Histo1D& h, bool includeunderflow=true, bool includeoverflow=true);
//@}
}
#endif
diff --git a/src/Histo1D.cc b/src/Histo1D.cc
--- a/src/Histo1D.cc
+++ b/src/Histo1D.cc
@@ -1,272 +1,577 @@
// -*- C++ -*-
//
// This file is part of YODA -- Yet more Objects for Data Analysis
// Copyright (C) 2008-2016 The YODA collaboration (see AUTHORS for details)
//
#include "YODA/Histo1D.h"
#include "YODA/Profile1D.h"
#include "YODA/Scatter2D.h"
#include "YODA/Utils/StringUtils.h"
using namespace std;
namespace YODA {
// Fill the histogram at x with the given weight; a NaN x is rejected with RangeError.
void Histo1D::fill(double x, double weight) {
if ( std::isnan(x) ) throw RangeError("X is NaN");
// Fill the overall distribution
_axis.totalDbn().fill(x, weight);
// Fill the bins and overflows
/// Unify this with Profile1D's version, when binning and inheritance are reworked
if (inRange(x, _axis.xMin(), _axis.xMax())) {
try {
/// @todo Replace try block with a check that there is a bin at x
binAt(x).fill(x, weight);
// NB. an in-range x that falls in a gap between bins is silently dropped
// (only the total distribution above sees it)
} catch (const RangeError& re) { }
} else if (x < _axis.xMin()) {
_axis.underflow().fill(x, weight);
} else if (x >= _axis.xMax()) {
// NOTE(review): x == xMax appears to be routed to the overflow here,
// presumably matching a half-open inRange() — confirm against Axis1D
_axis.overflow().fill(x, weight);
}
// Lock the axis now that a fill has happened
_axis._setLock(true);
}
// Fill bin i as if a fill had occurred at that bin's x midpoint.
// NB. routed through fill(), so the total/under/overflow distributions
// see the midpoint x, and the axis fill-lock is engaged as a side effect.
void Histo1D::fillBin(size_t i, double weight) {
fill(bin(i).xMid(), weight);
}
/////////////// COMMON TO ALL BINNED
unsigned long Histo1D::numEntries(bool includeoverflows) const {
if (includeoverflows) return totalDbn().numEntries();
unsigned long n = 0;
for (const Bin& b : bins()) n += b.numEntries();
return n;
}
double Histo1D::effNumEntries(bool includeoverflows) const {
if (includeoverflows) return totalDbn().effNumEntries();
double n = 0;
for (const Bin& b : bins()) n += b.effNumEntries();
return n;
}
double Histo1D::sumW(bool includeoverflows) const {
if (includeoverflows) return _axis.totalDbn().sumW();
double sumw = 0;
for (const Bin& b : bins()) sumw += b.sumW();
return sumw;
}
double Histo1D::sumW2(bool includeoverflows) const {
if (includeoverflows) return _axis.totalDbn().sumW2();
double sumw2 = 0;
for (const Bin& b : bins()) sumw2 += b.sumW2();
return sumw2;
}
// ^^^^^^^^^^^^^
double Histo1D::xMean(bool includeoverflows) const {
if (includeoverflows) return _axis.totalDbn().xMean();
Dbn1D dbn;
for (const HistoBin1D& b : bins()) dbn += b.dbn();
return dbn.xMean();
}
double Histo1D::xVariance(bool includeoverflows) const {
if (includeoverflows) return _axis.totalDbn().xVariance();
Dbn1D dbn;
for (const HistoBin1D& b : bins()) dbn += b.dbn();
return dbn.xVariance();
}
double Histo1D::xStdErr(bool includeoverflows) const {
if (includeoverflows) return _axis.totalDbn().xStdErr();
Dbn1D dbn;
for (const HistoBin1D& b : bins()) dbn += b.dbn();
return dbn.xStdErr();
}
double Histo1D::xRMS(bool includeoverflows) const {
if (includeoverflows) return _axis.totalDbn().xRMS();
Dbn1D dbn;
for (const HistoBin1D& b : bins()) dbn += b.dbn();
return dbn.xRMS();
}
////////////////////////////////////////
/// Copy constructor with optional new path
// An empty path argument keeps the source histo's path; annotations and
// title are copied via the AnalysisObject base.
Histo1D::Histo1D(const Histo1D& h, const std::string& path)
: AnalysisObject("Histo1D", (path.size() == 0) ? h.path() : path, h, h.title())
{
_axis = h._axis;
}
/// Constructor from a Scatter2D's binning, with optional new path
// Only the binning is taken over: one empty bin per scatter point, spanning
// the point's [xMin, xMax] range. Point y values are NOT filled in.
Histo1D::Histo1D(const Scatter2D& s, const std::string& path)
: AnalysisObject("Histo1D", (path.size() == 0) ? s.path() : path, s, s.title())
{
std::vector<HistoBin1D> bins;
for (const Scatter2D::Point& p : s.points()) {
bins.push_back(HistoBin1D(p.xMin(), p.xMax()));
}
_axis = Histo1DAxis(bins);
}
/// Constructor from a Profile1D's binning, with optional new path
// Only the binning is taken over: one empty bin per profile bin, with the
// same edges. Profile contents are NOT copied.
Histo1D::Histo1D(const Profile1D& p, const std::string& path)
: AnalysisObject("Histo1D", (path.size() == 0) ? p.path() : path, p, p.title())
{
std::vector<HistoBin1D> bins;
for (const ProfileBin1D& b : p.bins()) {
bins.push_back(HistoBin1D(b.xMin(), b.xMax()));
}
_axis = Histo1DAxis(bins);
}
////////////////////////////////////////
// Divide two histograms
// Divide two histograms bin-by-bin with uncorrelated error treatment.
// Requires identical binning; empty-denominator points are set to NaN.
Scatter2D divide(const Histo1D& numer, const Histo1D& denom) {
Scatter2D rtn;
for (size_t i = 0; i < numer.numBins(); ++i) {
const HistoBin1D& b1 = numer.bin(i);
const HistoBin1D& b2 = denom.bin(i);
/// @todo Create a compatibleBinning function? Or just compare vectors of edges().
if (!fuzzyEquals(b1.xMin(), b2.xMin()) || !fuzzyEquals(b1.xMax(), b2.xMax()))
throw BinningError("x binnings are not equivalent in " + numer.path() + " / " + denom.path());
// Assemble the x value and error
// Use the midpoint of the "bin" for the new central x value, in the absence of better information
const double x = b1.xMid();
const double exminus = x - b1.xMin();
const double explus = b1.xMax() - x;
// Assemble the y value and error
/// @todo Provide optional alt behaviours to fill with NaN or remove the invalid point or throw
double y, ey;
if (b2.height() == 0 || (b1.height() == 0 && b1.heightErr() != 0)) { ///< @todo Ok?
y = std::numeric_limits<double>::quiet_NaN();
ey = std::numeric_limits<double>::quiet_NaN();
// throw LowStatsError("Requested division of empty bin");
} else {
y = b1.height() / b2.height();
/// @todo Is this the exact error treatment for all (uncorrelated) cases? Behaviour around 0? +1 and -1 fills?
// Relative errors added in quadrature; a zero heightErr contributes nothing
const double relerr_1 = b1.heightErr() != 0 ? b1.relErr() : 0;
const double relerr_2 = b2.heightErr() != 0 ? b2.relErr() : 0;
ey = y * sqrt(sqr(relerr_1) + sqr(relerr_2));
}
/// Deal with +/- errors separately, inverted for the denominator contributions:
/// @todo check correctness with different signed numerator and denominator.
//const double eyplus = y * sqrt( sqr(p1.yErrPlus()/p1.y()) + sqr(p2.yErrMinus()/p2.y()) );
//const double eyminus = y * sqrt( sqr(p1.yErrMinus()/p1.y()) + sqr(p2.yErrPlus()/p2.y()) );
// NB. the y error is symmetrized: the same ey is used for both +/- here
rtn.addPoint(x, y, exminus, explus, ey, ey);
}
assert(rtn.numPoints() == numer.numBins());
return rtn;
}
+ ////////////////////////////////////////
+
+
+ /// Add a histogram and a scatter, returning a scatter.
+ ///
+ /// The histo bin edges must match the scatter points' x ranges. The bin
+ /// height error (symmetric) and the point's +/- errors are combined in
+ /// quadrature side-by-side.
+ Scatter2D add(const Histo1D& histo, const Scatter2D& scatt) {
+   if (histo.numBins() != scatt.numPoints()) throw BinningError("Histogram binning incompatible with number of scatter points");
+
+   Scatter2D rtn = scatt.clone();
+   if (histo.path() != scatt.path()) rtn.setPath("");
+   if (rtn.hasAnnotation("ScaledBy")) rtn.rmAnnotation("ScaledBy");
+
+   for (size_t i = 0; i < rtn.numPoints(); ++i) {
+     const HistoBin1D& b = histo.bin(i);
+     const Point2D& s = scatt.point(i);
+
+     /// @todo Create a compatibleBinning function? Or just compare vectors of edges().
+     if (!fuzzyEquals(b.xMin(), s.x() - s.xErrMinus()) || !fuzzyEquals(b.xMax(), s.x() + s.xErrPlus()))
+       throw BinningError("x binnings are not equivalent in " + histo.path() + " + " + scatt.path());
+
+     // Convert the bin to a value and (symmetric) error, treating an
+     // empty/unfillable bin (LowStatsError or WeightError) as zero
+     double biny;
+     try {
+       biny = b.height();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       biny = 0;
+     }
+     double biney;
+     try {
+       biney = b.heightErr();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       biney = 0;
+     }
+     // Combine with scatter values: both terms enter with a plus sign, so the
+     // point's + (-) error feeds the result's + (-) error
+     double newy = biny + s.y();
+     double newey_p = sqrt(sqr(biney) + sqr(s.yErrPlus()));
+     double newey_m = sqrt(sqr(biney) + sqr(s.yErrMinus()));
+     // Set new values
+     Point2D& t = rtn.point(i);
+     t.setY(newy);
+     // BUGFIX: the +/- errors were previously assigned swapped
+     t.setYErrPlus(newey_p);
+     t.setYErrMinus(newey_m);
+   }
+
+   assert(rtn.numPoints() == histo.numBins());
+   return rtn;
+ }
+
+
+ ////////////////////////////////////////
+
+
+ /// Subtract a scatter from a histogram, returning a scatter.
+ /// The histo bin edges must match the scatter points' x ranges.
+ Scatter2D subtract(const Histo1D& histo, const Scatter2D& scatt) {
+ if (histo.numBins() != scatt.numPoints()) throw BinningError("Histogram binning incompatible with number of scatter points");
+
+ Scatter2D rtn = scatt.clone();
+ if (histo.path() != scatt.path()) rtn.setPath("");
+ if (rtn.hasAnnotation("ScaledBy")) rtn.rmAnnotation("ScaledBy");
+
+ for (size_t i = 0; i < rtn.numPoints(); ++i) {
+ const HistoBin1D& b = histo.bin(i);
+ const Point2D& s = scatt.point(i);
+
+ /// @todo Create a compatibleBinning function? Or just compare vectors of edges().
+ if (!fuzzyEquals(b.xMin(), s.x() - s.xErrMinus()) || !fuzzyEquals(b.xMax(), s.x() + s.xErrPlus()))
+ throw BinningError("x binnings are not equivalent in " + histo.path() + " - " + scatt.path());
+
+
+ // convert bin to scatter point
+ // (an empty/unfillable bin -- LowStatsError or WeightError -- counts as zero)
+ double biny;
+ try {
+ biny = b.height();
+ } catch (const Exception&) { // LowStatsError or WeightError
+ biny = 0;
+ }
+ double biney;
+ try {
+ biney = b.heightErr();
+ } catch (const Exception&) { // LowStatsError or WeightError
+ biney = 0;
+ }
+ // combine with scatter values
+ double newy = biny - s.y();
+ double newey_p = sqrt(sqr(biney) + sqr(s.yErrPlus()));
+ double newey_m = sqrt(sqr(biney) + sqr(s.yErrMinus()));
+ // set new values
+ // NB. the scatter enters with a minus sign, so its + error feeds the
+ // result's - error and vice versa: the cross-assignment below is deliberate
+ Point2D& t = rtn.point(i);
+ t.setY(newy);
+ t.setYErrMinus(newey_p);
+ t.setYErrPlus(newey_m);
+ }
+
+ assert(rtn.numPoints() == histo.numBins());
+ return rtn;
+ }
+
+
+ ////////////////////////////////////////
+
+
+ /// Subtract a histogram from a scatter, returning a scatter.
+ ///
+ /// The histo bin edges must match the scatter points' x ranges. The bin
+ /// height error (symmetric) and the point's +/- errors are combined in
+ /// quadrature side-by-side.
+ Scatter2D subtract(const Scatter2D& scatt, const Histo1D& histo) {
+   if (histo.numBins() != scatt.numPoints()) throw BinningError("Histogram binning incompatible with number of scatter points");
+
+   Scatter2D rtn = scatt.clone();
+   if (histo.path() != scatt.path()) rtn.setPath("");
+   if (rtn.hasAnnotation("ScaledBy")) rtn.rmAnnotation("ScaledBy");
+
+   for (size_t i = 0; i < rtn.numPoints(); ++i) {
+     const HistoBin1D& b = histo.bin(i);
+     const Point2D& s = scatt.point(i);
+
+     /// @todo Create a compatibleBinning function? Or just compare vectors of edges().
+     if (!fuzzyEquals(b.xMin(), s.x() - s.xErrMinus()) || !fuzzyEquals(b.xMax(), s.x() + s.xErrPlus()))
+       throw BinningError("x binnings are not equivalent in " + scatt.path() + " - " + histo.path());
+
+     // Convert the bin to a value and (symmetric) error, treating an
+     // empty/unfillable bin (LowStatsError or WeightError) as zero
+     double biny;
+     try {
+       biny = b.height();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       biny = 0;
+     }
+     double biney;
+     try {
+       biney = b.heightErr();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       biney = 0;
+     }
+     // Combine with scatter values: the scatter is the minuend (enters with a
+     // plus sign), so its + (-) error feeds the result's + (-) error
+     double newy = s.y() - biny;
+     double newey_p = sqrt(sqr(biney) + sqr(s.yErrPlus()));
+     double newey_m = sqrt(sqr(biney) + sqr(s.yErrMinus()));
+     // Set new values
+     Point2D& t = rtn.point(i);
+     t.setY(newy);
+     // BUGFIX: the +/- errors were previously assigned swapped
+     t.setYErrPlus(newey_p);
+     t.setYErrMinus(newey_m);
+   }
+
+   assert(rtn.numPoints() == histo.numBins());
+   return rtn;
+ }
+
+
+ ////////////////////////////////////////
+
+
+ /// Multiply a histogram with a scatter, returning a scatter.
+ ///
+ /// The histo bin edges must match the scatter points' x ranges. The bin's
+ /// relative error and the point's relative +/- errors are combined in
+ /// quadrature side-by-side.
+ Scatter2D multiply(const Histo1D& histo, const Scatter2D& scatt) {
+   if (histo.numBins() != scatt.numPoints()) throw BinningError("Histogram binning incompatible with number of scatter points");
+
+   Scatter2D rtn = scatt.clone();
+   if (histo.path() != scatt.path()) rtn.setPath("");
+   if (rtn.hasAnnotation("ScaledBy")) rtn.rmAnnotation("ScaledBy");
+
+   for (size_t i = 0; i < rtn.numPoints(); ++i) {
+     const HistoBin1D& b = histo.bin(i);
+     const Point2D& s = scatt.point(i);
+
+     /// @todo Create a compatibleBinning function? Or just compare vectors of edges().
+     if (!fuzzyEquals(b.xMin(), s.x() - s.xErrMinus()) || !fuzzyEquals(b.xMax(), s.x() + s.xErrPlus()))
+       throw BinningError("x binnings are not equivalent in " + histo.path() + " * " + scatt.path());
+
+     // Convert the bin to a value and *relative* error, treating an
+     // empty/unfillable bin (LowStatsError or WeightError) as zero
+     double biny;
+     try {
+       biny = b.height();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       biny = 0;
+     }
+     double binerel;
+     try {
+       binerel = b.relErr();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       binerel = 0;
+     }
+     // Combine with scatter values: both factors enter positively, so the
+     // point's + (-) error feeds the result's + (-) error
+     // NOTE(review): s.y() == 0 produces a 0*inf = NaN error here — confirm desired
+     double newy = biny * s.y();
+     double newey_p = newy * sqrt(sqr(binerel) + sqr(s.yErrPlus() / s.y()));
+     double newey_m = newy * sqrt(sqr(binerel) + sqr(s.yErrMinus() / s.y()));
+     // Set new values
+     Point2D& t = rtn.point(i);
+     t.setY(newy);
+     // BUGFIX: the +/- errors were previously assigned swapped
+     t.setYErrPlus(newey_p);
+     t.setYErrMinus(newey_m);
+   }
+
+   assert(rtn.numPoints() == histo.numBins());
+   return rtn;
+ }
+
+
+ ////////////////////////////////////////
+
+
+ /// Divide a histogram by a scatter, returning a scatter.
+ /// The histo bin edges must match the scatter points' x ranges; points with
+ /// a zero denominator are set to NaN.
+ Scatter2D divide(const Histo1D& numer, const Scatter2D& denom) {
+ if (numer.numBins() != denom.numPoints()) throw BinningError("Histogram binning incompatible with number of scatter points");
+
+ Scatter2D rtn = denom.clone();
+ if (numer.path() != denom.path()) rtn.setPath("");
+ if (rtn.hasAnnotation("ScaledBy")) rtn.rmAnnotation("ScaledBy");
+
+ for (size_t i = 0; i < rtn.numPoints(); ++i) {
+ const HistoBin1D& b = numer.bin(i);
+ const Point2D& s = denom.point(i);
+
+ /// @todo Create a compatibleBinning function? Or just compare vectors of edges().
+ if (!fuzzyEquals(b.xMin(), s.x() - s.xErrMinus()) || !fuzzyEquals(b.xMax(), s.x() + s.xErrPlus()))
+ throw BinningError("x binnings are not equivalent in " + numer.path() + " / " + denom.path());
+
+
+ // convert bin to scatter point
+ // (an empty/unfillable bin -- LowStatsError or WeightError -- counts as zero)
+ double biny;
+ try {
+ biny = b.height();
+ } catch (const Exception&) { // LowStatsError or WeightError
+ biny = 0;
+ }
+ // NB. biney is a *relative* error here (b.relErr()), unlike add/subtract
+ double biney;
+ try {
+ biney = b.relErr();
+ } catch (const Exception&) { // LowStatsError or WeightError
+ biney = 0;
+ }
+ // combine with scatter values
+ double newy, newey_p, newey_m;
+ if (s.y() == 0 || (b.height() == 0 && b.heightErr() != 0)) { ///< @todo Ok?
+ newy = std::numeric_limits<double>::quiet_NaN();
+ newey_m = newey_p = std::numeric_limits<double>::quiet_NaN();
+ // throw LowStatsError("Requested division of empty bin");
+ } else {
+ newy = biny / s.y();
+ newey_p = newy * sqrt(sqr(biney) + sqr(s.yErrPlus() / s.y()));
+ newey_m = newy * sqrt(sqr(biney) + sqr(s.yErrMinus() / s.y()));
+ }
+ // set new values
+ // NB. the scatter is the denominator: a larger denominator lowers the ratio,
+ // so its + error feeds the result's - error and vice versa -- the
+ // cross-assignment below is deliberate
+ Point2D& t = rtn.point(i);
+ t.setY(newy);
+ t.setYErrMinus(newey_p);
+ t.setYErrPlus(newey_m);
+ }
+
+ assert(rtn.numPoints() == numer.numBins());
+ return rtn;
+ }
+
+
+ ////////////////////////////////////////
+
+
+ /// Divide a scatter by a histogram, returning a scatter.
+ ///
+ /// The histo bin edges must match the scatter points' x ranges; points with
+ /// a zero denominator bin are set to NaN. The bin's relative error and the
+ /// point's relative +/- errors are combined in quadrature side-by-side.
+ Scatter2D divide(const Scatter2D& numer, const Histo1D& denom) {
+   if (numer.numPoints() != denom.numBins()) throw BinningError("Histogram binning incompatible with number of scatter points");
+
+   Scatter2D rtn = numer.clone();
+   if (numer.path() != denom.path()) rtn.setPath("");
+   if (rtn.hasAnnotation("ScaledBy")) rtn.rmAnnotation("ScaledBy");
+
+   for (size_t i = 0; i < rtn.numPoints(); ++i) {
+     const Point2D& s = numer.point(i);
+     const HistoBin1D& b = denom.bin(i);
+
+     /// @todo Create a compatibleBinning function? Or just compare vectors of edges().
+     if (!fuzzyEquals(b.xMin(), s.x() - s.xErrMinus()) || !fuzzyEquals(b.xMax(), s.x() + s.xErrPlus()))
+       throw BinningError("x binnings are not equivalent in " + numer.path() + " / " + denom.path());
+
+     // Convert the bin to a value and *relative* error, treating an
+     // empty/unfillable bin (LowStatsError or WeightError) as zero
+     double biny;
+     try {
+       biny = b.height();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       biny = 0;
+     }
+     double binerel;
+     try {
+       binerel = b.relErr();
+     } catch (const Exception&) { // LowStatsError or WeightError
+       binerel = 0;
+     }
+     // Combine with scatter values
+     double newy, newey_p, newey_m;
+     if (b.height() == 0 || (s.y() == 0 && s.yErrAvg() != 0)) { ///< @todo Ok?
+       newy = std::numeric_limits<double>::quiet_NaN();
+       newey_m = newey_p = std::numeric_limits<double>::quiet_NaN();
+       // throw LowStatsError("Requested division of empty bin");
+     } else {
+       // The scatter is the *numerator*, so its + (-) error feeds the
+       // result's + (-) error; the denominator bin error is symmetric
+       newy = s.y() / biny;
+       newey_p = newy * sqrt(sqr(binerel) + sqr(s.yErrPlus() / s.y()));
+       newey_m = newy * sqrt(sqr(binerel) + sqr(s.yErrMinus() / s.y()));
+     }
+     // Set new values
+     Point2D& t = rtn.point(i);
+     t.setY(newy);
+     // BUGFIX: the +/- errors were previously assigned swapped
+     t.setYErrPlus(newey_p);
+     t.setYErrMinus(newey_m);
+   }
+
+   assert(rtn.numPoints() == denom.numBins());
+   return rtn;
+ }
+
+
+ /// @todo Add functions/operators on pointers
+
+
// Calculate a histogrammed efficiency ratio of two histograms
// Calculate a histogrammed efficiency ratio of two histograms
// (division as in divide(), but with correlated/binomial-style errors;
// requires the accepted histo to be a per-bin subset of the total)
Scatter2D efficiency(const Histo1D& accepted, const Histo1D& total) {
Scatter2D tmp = divide(accepted, total);
for (size_t i = 0; i < accepted.numBins(); ++i) {
const HistoBin1D& b_acc = accepted.bin(i);
const HistoBin1D& b_tot = total.bin(i);
Point2D& point = tmp.point(i);
/// BEGIN DIMENSIONALITY-INDEPENDENT BIT TO SHARE WITH H2
// Check that the numerator is consistent with being a subset of the denominator
/// @note Neither effNumEntries nor sumW are guaranteed to satisfy num <= den for general weights!
if (b_acc.numEntries() > b_tot.numEntries())
throw UserError("Attempt to calculate an efficiency when the numerator is not a subset of the denominator: "
+ Utils::toStr(b_acc.numEntries()) + " entries / " + Utils::toStr(b_tot.numEntries()) + " entries");
// If no entries on the denominator, set eff = err = 0 and move to the next bin
// NOTE(review): the values actually left in place below are NaN, not 0 —
// confirm which the comment above should say
double eff = std::numeric_limits<double>::quiet_NaN();
double err = std::numeric_limits<double>::quiet_NaN();
try {
if (b_tot.sumW() != 0) {
eff = b_acc.sumW() / b_tot.sumW(); //< Actually this is already calculated by the division...
// Weighted binomial-style error combination of the acc and tot sumW2s
err = sqrt(abs( ((1-2*eff)*b_acc.sumW2() + sqr(eff)*b_tot.sumW2()) / sqr(b_tot.sumW()) ));
}
} catch (const LowStatsError& e) {
//
}
/// END DIMENSIONALITY-INDEPENDENT BIT TO SHARE WITH H2
point.setY(eff, err);
}
return tmp;
}
// Convert a Histo1D to a Scatter2D representing the integral of the histogram
// Convert a Histo1D to a Scatter2D representing the integral of the histogram
// Each point's y is the running sum of bin sumWs (optionally seeded with the
// underflow); errors are sqrt(integral), i.e. treated as uncorrelated counts.
Scatter2D toIntegralHisto(const Histo1D& h, bool includeunderflow) {
/// @todo Check that the histogram binning has no gaps, otherwise throw a BinningError
Scatter2D tmp = mkScatter(h);
double integral = includeunderflow ? h.underflow().sumW() : 0.0;
for (size_t i = 0; i < h.numBins(); ++i) {
Point2D& point = tmp.point(i);
integral += h.bin(i).sumW();
const double err = sqrt(integral); //< @todo Should be sqrt(sumW2)? Or more complex, cf. Simon etc.?
point.setY(integral, err);
}
return tmp;
}
// Convert a Histo1D to a Scatter2D of cumulative efficiencies: the integral
// histo from toIntegralHisto, scaled by the (optionally overflow-including)
// total integral, with binomial-style errors.
Scatter2D toIntegralEfficiencyHisto(const Histo1D& h, bool includeunderflow, bool includeoverflow) {
Scatter2D rtn = toIntegralHisto(h, includeunderflow);
const double integral = h.integral() - (includeoverflow ? 0 : h.overflow().sumW());
// If the integral is empty, the (integrated) efficiency values may as well all be zero, so return here
/// @todo Or throw a LowStatsError exception if h.effNumEntries() == 0?
/// @todo Provide optional alt behaviours
/// @todo Need to check that bins are all positive? Integral could be zero due to large +ve/-ve in different bins :O
if (integral == 0) return rtn;
/// @todo Should the total integral *error* be sqrt(sumW2)? Or more complex, cf. Simon etc.?
const double integral_err = sqrt(integral);
// Normalize and compute binomial errors
// (p.y()/p.yErrAvg() squared plays the role of the effective entry count)
for (Point2D& p : rtn.points()) {
const double eff = p.y() / integral;
const double ey = sqrt(abs( ((1-2*eff)*sqr(p.y()/p.yErrAvg()) + sqr(eff)*sqr(integral_err)) / sqr(integral) ));
p.setY(eff, ey);
}
return rtn;
}
}
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Tue, Nov 19, 6:48 PM (1 d, 16 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
3805671
Default Alt Text
(37 KB)
Attached To
rYODAHG yodahg
Event Timeline
Log In to Comment