Page Menu
Home
HEPForge
Search
Configure Global Search
Log In
Files
F10881299
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Award Token
Flag For Later
Size
129 KB
Subscribers
None
View Options
diff --git a/analyses/pluginMC/TEST.cc b/analyses/pluginMC/TEST.cc
new file mode 100644
--- /dev/null
+++ b/analyses/pluginMC/TEST.cc
@@ -0,0 +1,80 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/ChargedFinalState.hh"
+#include "Rivet/Projections/PrimaryParticles.hh"
+#include "Rivet/Tools/Correlators.hh"
+
+
+namespace Rivet {
+
+
+  /// @brief Example cumulant-flow analysis using the Correlators framework.
+  ///
+  /// Books two-particle event correlators for harmonics 2 and 3 and turns
+  /// them into the integrated cumulants c2{2} and c3{2} vs. multiplicity.
+  class TEST : public CumulantAnalysis {
+  public:
+
+    /// @name Constructors etc.
+    //@{
+
+    /// Constructor
+    TEST() : CumulantAnalysis("TEST") {
+    }
+    //@}
+
+
+  public:
+
+    /// @name Analysis methods
+    //@{
+
+    /// Book histograms and initialise projections before the run
+    void init() {
+      // Central charged particles, used only to define the multiplicity axis.
+      ChargedFinalState cfs(-1.0, 1.0);
+      declare(cfs, "CFS");
+      // Wider acceptance used as input to the correlators.
+      ChargedFinalState pp(Cuts::abseta < 2.0);
+      declare(pp, "PP");
+      // Output scatters: cumulants vs. multiplicity (120 bins in [0, 120)).
+      h_c22 = bookScatter2D("c22", 120, 0, 120);
+      h_c23 = bookScatter2D("c23", 120, 0, 120);
+      // Event correlators: <harmonic, nParticles>. Each is bound to the
+      // scatter it will eventually be projected onto.
+      ec22 = bookECorrelator<2,2>("ec22", h_c22);
+      // BUGFIX: was booked as "ec32" and bound to h_c22, so the c3{2}
+      // correlator shadowed the c2{2} binning and was never output.
+      ec23 = bookECorrelator<3,2>("ec23", h_c23);
+      // Declare a single Correlators projection big enough for all booked
+      // correlators (max harmonic, max particle count).
+      pair<int, int> max = getMaxValues();
+      declare(Correlators(pp, max.first, max.second), "CRS");
+    }
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+      // Apply each projection once and reuse the results for both fills.
+      const double mult = apply<ChargedFinalState>(event, "CFS").particles().size();
+      const Correlators& crs = apply<Correlators>(event, "CRS");
+      ec22->fill(mult, crs, event.weight());
+      ec23->fill(mult, crs, event.weight());
+    }
+
+
+    /// Normalise histograms etc., after the run
+    void finalize() {
+      // Base-class finalize must run first: it combines the correlators
+      // across (parallel) runs before cumulants are extracted.
+      CumulantAnalysis::finalize();
+      // Project the two-particle correlators onto the output scatters.
+      cnTwoInt(h_c22, ec22);
+      // BUGFIX: h_c23 was booked but never filled in finalize().
+      cnTwoInt(h_c23, ec23);
+    }
+
+
+    //@}
+
+  private:
+
+    /// @name Histograms
+    //@{
+
+    Scatter2DPtr h_c22;   ///< c2{2} vs. multiplicity
+    ECorrPtr ec22;        ///< two-particle correlator, harmonic 2
+    Scatter2DPtr h_c23;   ///< c3{2} vs. multiplicity
+    ECorrPtr ec23;        ///< two-particle correlator, harmonic 3
+    //@}
+
+
+  };
+
+
+
+ // The hook for the plugin system
+ DECLARE_RIVET_PLUGIN(TEST);
+
+}
diff --git a/analyses/pluginMC/TEST.info b/analyses/pluginMC/TEST.info
new file mode 100644
--- /dev/null
+++ b/analyses/pluginMC/TEST.info
@@ -0,0 +1,28 @@
+Name: TEST
+Year: 2010
+Summary: Pseudorapidities at three energies, charged multiplicity at 7 TeV
+Experiment: ALICE
+RunInfo:
+ Diffractive events need to be enabled.
+NumEvents: 1000000
+Beams: [p+, p+]
+Energies: [900]
+Description:
+  'This is an ALICE publication with pseudorapidities for 0.9, 2.36 and $7\;\TeV$
+  and the charged multiplicity at $7\;\TeV$. The analysis requires at least one
+  charged particle in the event. Only the INEL distributions are considered here.'
+BibKey: Aamodt:2010pp
+BibTeX: '@article{Aamodt:2010pp,
+ author = "Aamodt, K. and others",
+ title = "{Charged-particle multiplicity measurement in
+ proton-proton collisions at $\sqrt{s} = 7$ TeV with ALICE at LHC}",
+ collaboration = "ALICE",
+ journal = "Eur.Phys.J.",
+ volume = "C68",
+ pages = "345-354",
+ doi = "10.1140/epjc/s10052-010-1350-2",
+ year = "2010",
+ eprint = "1004.3514",
+ archivePrefix = "arXiv",
+ primaryClass = "hep-ex",
+}'
diff --git a/analyses/pluginMC/TEST.plot b/analyses/pluginMC/TEST.plot
new file mode 100644
--- /dev/null
+++ b/analyses/pluginMC/TEST.plot
@@ -0,0 +1,6 @@
+# BEGIN PLOT /TEST/d04-x01-y01
+Title=Pseudorapidity $\sqrt{s}=0.9$ TeV, INEL $>0$
+XLabel=$\eta$
+YLabel=$\text{d}N/\text{d}\eta$
+LogY=0
+# END PLOT
diff --git a/analyses/pluginMC/TEST.yoda b/analyses/pluginMC/TEST.yoda
new file mode 100644
--- /dev/null
+++ b/analyses/pluginMC/TEST.yoda
@@ -0,0 +1,18 @@
+BEGIN YODA_SCATTER2D /REF/TEST/d04-x01-y01
+IsRef=1
+Path=/REF/TEST/d04-x01-y01
+Title=
+Type=Scatter2D
+# xval xerr- xerr+ yval yerr- yerr+
+-9.000000e-01 1.000000e-01 1.000000e-01 4.000000e+00 7.615773e-02 7.615773e-02
+-7.000000e-01 1.000000e-01 1.000000e-01 3.870000e+00 7.615773e-02 7.615773e-02
+-5.000000e-01 1.000000e-01 1.000000e-01 3.800000e+00 7.615773e-02 7.615773e-02
+-3.000000e-01 1.000000e-01 1.000000e-01 3.700000e+00 6.324555e-02 6.324555e-02
+-1.000000e-01 1.000000e-01 1.000000e-01 3.670000e+00 6.324555e-02 6.324555e-02
+1.000000e-01 1.000000e-01 1.000000e-01 3.730000e+00 6.324555e-02 6.324555e-02
+3.000000e-01 1.000000e-01 1.000000e-01 3.720000e+00 6.708204e-02 6.708204e-02
+5.000000e-01 1.000000e-01 1.000000e-01 3.770000e+00 7.615773e-02 7.615773e-02
+7.000000e-01 1.000000e-01 1.000000e-01 3.920000e+00 7.615773e-02 7.615773e-02
+9.000000e-01 1.000000e-01 1.000000e-01 4.010000e+00 7.615773e-02 7.615773e-02
+END YODA_SCATTER2D
+
diff --git a/include/Rivet/Analysis.hh b/include/Rivet/Analysis.hh
--- a/include/Rivet/Analysis.hh
+++ b/include/Rivet/Analysis.hh
@@ -1,1226 +1,1225 @@
// -*- C++ -*-
#ifndef RIVET_Analysis_HH
#define RIVET_Analysis_HH
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisInfo.hh"
#include "Rivet/Event.hh"
#include "Rivet/Projection.hh"
#include "Rivet/ProjectionApplier.hh"
#include "Rivet/ProjectionHandler.hh"
#include "Rivet/AnalysisLoader.hh"
#include "Rivet/Tools/Cuts.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Tools/ParticleUtils.hh"
#include "Rivet/Tools/BinnedHistogram.hh"
#include "Rivet/Tools/RivetMT2.hh"
#include "Rivet/Tools/RivetYODA.hh"
#include "Rivet/Tools/Percentile.hh"
#include "Rivet/Projections/CentralityProjection.hh"
/// @def vetoEvent
/// Preprocessor define for vetoing events, including the log message and return.
#define vetoEvent \
do { MSG_DEBUG("Vetoing event on line " << __LINE__ << " of " << __FILE__); return; } while(0)
namespace Rivet {
// Forward declaration
class AnalysisHandler;
/// @brief This is the base class of all analysis classes in Rivet.
///
/// There are
/// three virtual functions which should be implemented in base classes:
///
/// void init() is called by Rivet before a run is started. Here the
/// analysis class should book necessary histograms. The needed
/// projections should probably rather be constructed in the
/// constructor.
///
/// void analyze(const Event&) is called once for each event. Here the
/// analysis class should apply the necessary Projections and fill the
/// histograms.
///
/// void finalize() is called after a run is finished. Here the analysis
/// class should do whatever manipulations are necessary on the
/// histograms. Writing the histograms to a file is, however, done by
/// the Rivet class.
class Analysis : public ProjectionApplier {
/// The AnalysisHandler is a friend.
friend class AnalysisHandler;
-
public:
/// @name Standard constructors and destructors.
//@{
// /// The default constructor.
// Analysis();
/// Constructor
Analysis(const std::string& name);
/// The destructor.
virtual ~Analysis() {}
//@}
public:
/// @name Main analysis methods
//@{
/// Initialize this analysis object. A concrete class should here
/// book all necessary histograms. An overridden function must make
/// sure it first calls the base class function.
virtual void init() { }
/// Analyze one event. A concrete class should here apply the
/// necessary projections on the \a event and fill the relevant
/// histograms. An overridden function must make sure it first calls
/// the base class function.
virtual void analyze(const Event& event) = 0;
/// Finalize this analysis object. A concrete class should here make
/// all necessary operations on the histograms. Writing the
/// histograms to a file is, however, done by the Rivet class. An
/// overridden function must make sure it first calls the base class
/// function.
virtual void finalize() { }
//@}
public:
/// @name Metadata
/// Metadata is used for querying from the command line and also for
/// building web pages and the analysis pages in the Rivet manual.
//@{
/// Get the actual AnalysisInfo object in which all this metadata is stored.
const AnalysisInfo& info() const {
assert(_info && "No AnalysisInfo object :O");
return *_info;
}
/// @brief Get the name of the analysis.
///
/// By default this is computed by combining the results of the
/// experiment, year and Spires ID metadata methods and you should
/// only override it if there's a good reason why those won't
/// work. If options has been set for this instance, a
/// corresponding string is appended at the end.
virtual std::string name() const {
return ( (info().name().empty()) ? _defaultname : info().name() ) + _optstring;
}
// get name of reference data file, which could be different from plugin name
virtual std::string getRefDataName() const {
return (info().getRefDataName().empty()) ? _defaultname : info().getRefDataName();
}
// set name of reference data file, which could be different from plugin name
virtual void setRefDataName(const std::string& ref_data="") {
info().setRefDataName(!ref_data.empty() ? ref_data : name());
}
/// Get the Inspire ID code for this analysis.
virtual std::string inspireId() const {
return info().inspireId();
}
/// Get the SPIRES ID code for this analysis (~deprecated).
virtual std::string spiresId() const {
return info().spiresId();
}
/// @brief Names & emails of paper/analysis authors.
///
/// Names and email of authors in 'NAME \<EMAIL\>' format. The first
/// name in the list should be the primary contact person.
virtual std::vector<std::string> authors() const {
return info().authors();
}
/// @brief Get a short description of the analysis.
///
/// Short (one sentence) description used as an index entry.
/// Use @a description() to provide full descriptive paragraphs
/// of analysis details.
virtual std::string summary() const {
return info().summary();
}
/// @brief Get a full description of the analysis.
///
/// Full textual description of this analysis, what it is useful for,
/// what experimental techniques are applied, etc. Should be treated
/// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html),
/// with equations to be rendered as LaTeX with amsmath operators.
virtual std::string description() const {
return info().description();
}
/// @brief Information about the events needed as input for this analysis.
///
/// Event types, energies, kinematic cuts, particles to be considered
/// stable, etc. etc. Should be treated as a restructuredText bullet list
/// (http://docutils.sourceforge.net/rst.html)
virtual std::string runInfo() const {
return info().runInfo();
}
/// Experiment which performed and published this analysis.
virtual std::string experiment() const {
return info().experiment();
}
/// Collider on which the experiment ran.
virtual std::string collider() const {
return info().collider();
}
/// When the original experimental analysis was published.
virtual std::string year() const {
return info().year();
}
/// The luminosity in inverse femtobarn
virtual std::string luminosityfb() const {
return info().luminosityfb();
}
/// Journal, and preprint references.
virtual std::vector<std::string> references() const {
return info().references();
}
/// BibTeX citation key for this article.
virtual std::string bibKey() const {
return info().bibKey();
}
/// BibTeX citation entry for this article.
virtual std::string bibTeX() const {
return info().bibTeX();
}
/// Whether this analysis is trusted (in any way!)
virtual std::string status() const {
return (info().status().empty()) ? "UNVALIDATED" : info().status();
}
/// Any work to be done on this analysis.
virtual std::vector<std::string> todos() const {
return info().todos();
}
/// Return the allowed pairs of incoming beams required by this analysis.
virtual const std::vector<PdgIdPair>& requiredBeams() const {
return info().beams();
}
/// Declare the allowed pairs of incoming beams required by this analysis.
virtual Analysis& setRequiredBeams(const std::vector<PdgIdPair>& requiredBeams) {
info().setBeams(requiredBeams);
return *this;
}
/// Sets of valid beam energy pairs, in GeV
virtual const std::vector<std::pair<double, double> >& requiredEnergies() const {
return info().energies();
}
/// Get vector of analysis keywords
virtual const std::vector<std::string> & keywords() const {
return info().keywords();
}
/// Declare the list of valid beam energy pairs, in GeV
virtual Analysis& setRequiredEnergies(const std::vector<std::pair<double, double> >& requiredEnergies) {
info().setEnergies(requiredEnergies);
return *this;
}
/// Return true if this analysis needs to know the process cross-section.
/// @todo Remove this and require HepMC >= 2.06
bool needsCrossSection() const {
return info().needsCrossSection();
}
/// Declare whether this analysis needs to know the process cross-section from the generator.
/// @todo Remove this and require HepMC >= 2.06
Analysis& setNeedsCrossSection(bool needed=true) {
info().setNeedsCrossSection(needed);
return *this;
}
//@}
/// @name Internal metadata modifying methods
//@{
/// Get the actual AnalysisInfo object in which all this metadata is stored (non-const).
AnalysisInfo& info() {
assert(_info && "No AnalysisInfo object :O");
return *_info;
}
//@}
/// @name Run conditions
//@{
/// Incoming beams for this run
const ParticlePair& beams() const;
/// Incoming beam IDs for this run
const PdgIdPair beamIds() const;
/// Centre of mass energy for this run
double sqrtS() const;
//@}
/// @name Analysis / beam compatibility testing
//@{
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(const ParticlePair& beams) const;
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const;
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(const PdgIdPair& beams, const std::pair<double,double>& energies) const;
//@}
/// Set the cross section from the generator
Analysis& setCrossSection(double xs); //, double xserr=0.0);
/// Access the controlling AnalysisHandler object.
AnalysisHandler& handler() const { return *_analysishandler; }
protected:
/// Get a Log object based on the name() property of the calling analysis object.
Log& getLog() const;
/// Get the process cross-section in pb. Throws if this hasn't been set.
double crossSection() const;
/// Get the process cross-section per generated event in pb. Throws if this
/// hasn't been set.
double crossSectionPerEvent() const;
/// @brief Get the number of events seen (via the analysis handler).
///
/// @note Use in the finalize phase only.
size_t numEvents() const;
/// @brief Get the sum of event weights seen (via the analysis handler).
///
/// @note Use in the finalize phase only.
double sumW() const;
/// Alias
double sumOfWeights() const { return sumW(); }
/// @brief Get the sum of squared event weights seen (via the analysis handler).
///
/// @note Use in the finalize phase only.
double sumW2() const;
protected:
/// @name Histogram paths
//@{
/// Get the canonical histogram "directory" path for this analysis.
const std::string histoDir() const;
/// Get the canonical histogram path for the named histogram in this analysis.
const std::string histoPath(const std::string& hname) const;
/// Get the canonical histogram path for the numbered histogram in this analysis.
const std::string histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
/// Get the internal histogram name for given d, x and y (cf. HepData)
const std::string mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
/// Alias
/// @deprecated Prefer the "mk" form, consistent with other "making function" names
const std::string makeAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return mkAxisCode(datasetId, xAxisId, yAxisId);
}
//@}
/// @name Histogram reference data
//@{
/// Get reference data for a named histo
/// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
template <typename T=YODA::Scatter2D>
const T& refData(const string& hname) const {
_cacheRefData();
MSG_TRACE("Using histo bin edges for " << name() << ":" << hname);
if (!_refdata[hname]) {
MSG_ERROR("Can't find reference histogram " << hname);
throw Exception("Reference data " + hname + " not found.");
}
return dynamic_cast<T&>(*_refdata[hname]);
}
/// Get reference data for a numbered histo
/// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
template <typename T=YODA::Scatter2D>
const T& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
const string hname = makeAxisCode(datasetId, xAxisId, yAxisId);
return refData(hname);
}
//@}
/// @name Counter booking
//@{
/// Book a counter.
CounterPtr bookCounter(const std::string& name,
const std::string& title="");
// const std::string& valtitle=""
/// Book a counter, using a path generated from the dataset and axis ID codes
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
CounterPtr bookCounter(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="");
// const std::string& valtitle=""
//@}
/// @name 1D histogram booking
//@{
/// Book a 1D histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
Histo1DPtr bookHisto1D(const std::string& name,
size_t nbins, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Histo1DPtr bookHisto1D(const std::string& name,
const std::vector<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Histo1DPtr bookHisto1D(const std::string& name,
const std::initializer_list<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with binning from a reference scatter.
Histo1DPtr bookHisto1D(const std::string& name,
const Scatter2D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram, using the binnings in the reference data histogram.
Histo1DPtr bookHisto1D(const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Histo1DPtr bookHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
//@}
/// @name 2D histogram booking
//@{
/// Book a 2D histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a
/// ylower - @a yupper respectively along the x- and y-axis.
Histo2DPtr bookHisto2D(const std::string& name,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr bookHisto2D(const std::string& name,
const std::vector<double>& xbinedges,
const std::vector<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr bookHisto2D(const std::string& name,
const std::initializer_list<double>& xbinedges,
const std::initializer_list<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram with binning from a reference scatter.
Histo2DPtr bookHisto2D(const std::string& name,
const Scatter3D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram, using the binnings in the reference data histogram.
Histo2DPtr bookHisto2D(const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Histo2DPtr bookHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
//@}
/// @name 1D profile histogram booking
//@{
/// Book a 1D profile histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
Profile1DPtr bookProfile1D(const std::string& name,
size_t nbins, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr bookProfile1D(const std::string& name,
const std::vector<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr bookProfile1D(const std::string& name,
const std::initializer_list<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with binning from a reference scatter.
Profile1DPtr bookProfile1D(const std::string& name,
const Scatter2D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram, using the binnings in the reference data histogram.
Profile1DPtr bookProfile1D(const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Profile1DPtr bookProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
//@}
/// @name 2D profile histogram booking
//@{
/// Book a 2D profile histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a ylower - @a
/// yupper respectively along the x- and y-axis.
Profile2DPtr bookProfile2D(const std::string& name,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with non-uniform bins defined by the vectorx
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr bookProfile2D(const std::string& name,
const std::vector<double>& xbinedges,
const std::vector<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with non-uniform bins defined by the vectorx
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr bookProfile2D(const std::string& name,
const std::initializer_list<double>& xbinedges,
const std::initializer_list<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with binning from a reference scatter.
Profile2DPtr bookProfile2D(const std::string& name,
const Scatter3D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram, using the binnings in the reference data histogram.
Profile2DPtr bookProfile2D(const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Profile2DPtr bookProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
//@}
/// @name 2D scatter booking
//@{
/// @brief Book a 2-dimensional data point set with the given name.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed...
/// assuming that there is a reference histo with the same name: if there
/// isn't, an exception will be thrown.
Scatter2DPtr bookScatter2D(const std::string& name,
bool copy_pts=false,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed.
Scatter2DPtr bookScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
bool copy_pts=false,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set with equally spaced x-points in a range.
///
/// The y values and errors will be set to 0.
Scatter2DPtr bookScatter2D(const std::string& name,
size_t npts, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set based on provided contiguous "bin edges".
///
/// The y values and errors will be set to 0.
Scatter2DPtr bookScatter2D(const std::string& hname,
const std::vector<double>& binedges,
const std::string& title,
const std::string& xtitle,
const std::string& ytitle);
//@}
public:
/// @name accessing options for this Analysis instance.
//@{
/// Get an option for this analysis instance as a string.
std::string getOption(std::string optname) {
if ( _options.find(optname) != _options.end() )
return _options.find(optname)->second;
return "";
}
/// Get an option for this analysis instance converted to a
/// specific type (given by the specified @a def value).
template<typename T>
T getOption(std::string optname, T def) {
if (_options.find(optname) == _options.end()) return def;
std::stringstream ss;
ss << _options.find(optname)->second;
T ret;
ss >> ret;
return ret;
}
//@}
/// @brief Book a CentralityProjection
///
/// Using a SingleValueProjection, @a proj, giving the value of an
/// experimental observable to be used as a centrality estimator,
/// book a CentralityProjection based on the experimentally
/// measured pecentiles of this observable (as given by the
/// reference data for the @a calHistName histogram in the @a
/// calAnaName analysis. If a preloaded file with the output of a
/// run using the @a calAnaName analysis contains a valid
/// generated @a calHistName histogram, it will be used as an
/// optional percentile binning. Also if this preloaded file
/// contains a histogram with the name @a calHistName with an
/// appended "_IMP" This histogram will be used to add an optional
/// centrality percentile based on the generated impact
/// parameter. If @increasing is true, a low (high) value of @proj
/// is assumed to correspond to a more peripheral (central) event.
const CentralityProjection&
declareCentrality(const SingleValueProjection &proj,
string calAnaName, string calHistName,
const string projName, bool increasing = false);
/// @brief Book a Pecentile wrapper around AnalysisObjects.
///
/// Based on a previously registered CentralityProjection named @a
/// projName book one AnalysisObject for each @a centralityBin and
/// name them according to the corresponding code in the @a ref
/// vector.
template <class T>
Percentile<T> bookPercentile(string projName,
vector<pair<float, float> > centralityBins,
vector<tuple<int, int, int> > ref) {
typedef typename ReferenceTraits<T>::RefT RefT;
Percentile<T> pctl(this, projName);
const int nCent = centralityBins.size();
for (int iCent = 0; iCent < nCent; ++iCent) {
const string axisCode = makeAxisCode(std::get<0>(ref[iCent]),
std::get<1>(ref[iCent]),
std::get<2>(ref[iCent]));
shared_ptr<T> ao;
CounterPtr cnt;
try {
ao = getAnalysisObject<T>(axisCode);
MSG_TRACE("Found old " << histoPath(axisCode));
}
catch (Exception) {
const RefT & refscatter = refData<RefT>(axisCode);
ao = make_shared<T>(refscatter, histoPath(axisCode));
addAnalysisObject(ao);
MSG_TRACE("Created new " << histoPath(axisCode));
}
try {
cnt = getAnalysisObject<Counter>("TMP/COUNTER/" + axisCode);
MSG_TRACE("Found old " << histoPath("TMP/COUNTER/" + axisCode));
}
catch (Exception) {
cnt = make_shared<Counter>(histoPath("TMP/COUNTER/" + axisCode));
addAnalysisObject(cnt);
MSG_TRACE("Created new " << histoPath("TMP/COUNTER/" + axisCode));
}
pctl.add(ao, cnt, centralityBins[iCent]);
}
return pctl;
}
/// @brief Book Pecentile wrappers around AnalysisObjects.
///
/// Based on a previously registered CentralityProjection named @a
/// projName book one (or several) AnalysisObject(s) named
/// according to @a ref where the x-axis will be filled according
/// to the percentile output(s) of the @projName.
template <class T>
PercentileXaxis<T> bookPercentileXaxis(string projName,
tuple<int, int, int> ref) {
typedef typename ReferenceTraits<T>::RefT RefT;
PercentileXaxis<T> pctl(this, projName);
const string axisCode = makeAxisCode(std::get<0>(ref),
std::get<1>(ref),
std::get<2>(ref));
shared_ptr<T> ao;
CounterPtr cnt;
try {
ao = getAnalysisObject<T>(histoPath(axisCode));
}
catch (Exception) {
const RefT & refscatter = refData<RefT>(axisCode);
ao = make_shared<T>(refscatter, axisCode);
addAnalysisObject(ao);
}
pctl.add(proj, ao, make_shared<Counter>());
return pctl;
}
/// @name Analysis object manipulation
/// @todo Should really be protected: only public to keep BinnedHistogram happy for now...
//@{
/// Multiplicatively scale the given counter, @a cnt, by factor @s factor.
void scale(CounterPtr cnt, double factor);
/// Multiplicatively scale the given counters, @a cnts, by factor @s factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of CounterPtrs
void scale(const std::vector<CounterPtr>& cnts, double factor) {
for (auto& c : cnts) scale(c, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
void scale(const CounterPtr (&cnts)[array_size], double factor) {
// for (size_t i = 0; i < std::extent<decltype(cnts)>::value; ++i) scale(cnts[i], factor);
for (auto& c : cnts) scale(c, factor);
}
/// Normalize the given histogram, @a histo, to area = @a norm.
void normalize(Histo1DPtr histo, double norm=1.0, bool includeoverflows=true);
/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
void normalize(const std::vector<Histo1DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// @todo YUCK!
template <std::size_t array_size>
void normalize(const Histo1DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
void scale(Histo1DPtr histo, double factor);
/// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
void scale(const std::vector<Histo1DPtr>& histos, double factor) {
for (auto& h : histos) scale(h, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
void scale(const Histo1DPtr (&histos)[array_size], double factor) {
for (auto& h : histos) scale(h, factor);
}
/// Normalize the given histogram, @a histo, to area = @a norm.
void normalize(Histo2DPtr histo, double norm=1.0, bool includeoverflows=true);
/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
void normalize(const std::vector<Histo2DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// @todo YUCK!
template <std::size_t array_size>
void normalize(const Histo2DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
void scale(Histo2DPtr histo, double factor);
/// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
void scale(const std::vector<Histo2DPtr>& histos, double factor) {
for (auto& h : histos) scale(h, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
void scale(const Histo2DPtr (&histos)[array_size], double factor) {
for (auto& h : histos) scale(h, factor);
}
// Post-processing helpers: each takes input objects and assigns the result
// into a pre-booked target scatter (implementations live in Analysis.cc).
/// Helper for counter division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const;
/// Helper for histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Counter& c1, const YODA::Counter& c2, Scatter1DPtr s) const;
/// Helper for histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const;
/// Helper for profile histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile1D& p1, const YODA::Profile1D& p2, Scatter2DPtr s) const;
/// Helper for 2D histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const;
/// Helper for 2D histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo2D& h1, const YODA::Histo2D& h2, Scatter3DPtr s) const;
/// Helper for 2D profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const;
/// Helper for 2D profile histogram division with raw YODA objects
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile2D& p1, const YODA::Profile2D& p2, Scatter3DPtr s) const;
/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(Histo1DPtr h, Scatter2DPtr s) const;
/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(const Histo1D& h, Scatter2DPtr s) const;
//@}
public:
/// List of registered analysis data objects
/// @return a const reference to the internal storage; valid for the
///         lifetime of this Analysis instance.
const vector<AnalysisObjectPtr>& analysisObjects() const {
return _analysisobjects;
}
protected:
/// @name Data object registration, retrieval, and removal
//@{
/// Register a data object in the histogram system
/// (shares ownership via the AnalysisObjectPtr).
void addAnalysisObject(AnalysisObjectPtr ao);
/// Get a data object from the histogram system
/// @tparam AO the concrete YODA type to cast the stored object to
/// @throws LookupError if no registered object matches the path
template <typename AO=YODA::AnalysisObject>
const std::shared_ptr<AO> getAnalysisObject(const std::string& name) const {
// Plain range-for rather than the deprecated 'foreach' macro.
for (const AnalysisObjectPtr& ao : analysisObjects()) {
if (ao->path() == histoPath(name)) return dynamic_pointer_cast<AO>(ao);
}
throw LookupError("Data object " + histoPath(name) + " not found");
}
/// Get a data object from the histogram system (non-const)
/// @tparam AO the concrete YODA type to cast the stored object to
/// @throws LookupError if no registered object matches the path
template <typename AO=YODA::AnalysisObject>
std::shared_ptr<AO> getAnalysisObject(const std::string& name) {
// Plain range-for rather than the deprecated 'foreach' macro.
for (const AnalysisObjectPtr& ao : analysisObjects()) {
if (ao->path() == histoPath(name)) return dynamic_pointer_cast<AO>(ao);
}
throw LookupError("Data object " + histoPath(name) + " not found");
}
/// Unregister a data object from the histogram system (by name)
void removeAnalysisObject(const std::string& path);
/// Unregister a data object from the histogram system (by pointer)
void removeAnalysisObject(AnalysisObjectPtr ao);
/// Get all data objects from the AnalysisHandler.
/// @param includeorphans presumably also returns objects not owned by
///        any analysis -- confirm against the implementation
vector<AnalysisObjectPtr> getAllData(bool includeorphans) const;
/// Get a data object from another analysis (e.g. a preloaded
/// calibration histogram), identified by analysis name and object name.
/// @return a null shared_ptr if no matching object is registered.
template <typename AO=YODA::AnalysisObject>
std::shared_ptr<AO> getAnalysisObject(const std::string & ananame,
const std::string& name) {
const std::string path = "/" + ananame + "/" + name;
// Take the shared_ptr by const ref: copying would bump the atomic
// refcount on every iteration for no benefit.
for ( const AnalysisObjectPtr& ao : getAllData(true) ) {
if ( ao->path() == path )
return dynamic_pointer_cast<AO>(ao);
}
return std::shared_ptr<AO>();
}
/// Get a named Histo1D object from the histogram system
const Histo1DPtr getHisto1D(const std::string& name) const {
return getAnalysisObject<Histo1D>(name);
}
/// Get a named Histo1D object from the histogram system (non-const)
Histo1DPtr getHisto1D(const std::string& name) {
return getAnalysisObject<Histo1D>(name);
}
/// Get a Histo1D object from the histogram system by axis ID codes (const)
const Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return getAnalysisObject<Histo1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a Histo1D object from the histogram system by axis ID codes (non-const)
Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
return getAnalysisObject<Histo1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a named Histo2D object from the histogram system
const Histo2DPtr getHisto2D(const std::string& name) const {
return getAnalysisObject<Histo2D>(name);
}
/// Get a named Histo2D object from the histogram system (non-const)
Histo2DPtr getHisto2D(const std::string& name) {
return getAnalysisObject<Histo2D>(name);
}
/// Get a Histo2D object from the histogram system by axis ID codes (const)
const Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return getAnalysisObject<Histo2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a Histo2D object from the histogram system by axis ID codes (non-const)
Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
return getAnalysisObject<Histo2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a named Profile1D object from the histogram system
const Profile1DPtr getProfile1D(const std::string& name) const {
return getAnalysisObject<Profile1D>(name);
}
/// Get a named Profile1D object from the histogram system (non-const)
Profile1DPtr getProfile1D(const std::string& name) {
return getAnalysisObject<Profile1D>(name);
}
/// Get a Profile1D object from the histogram system by axis ID codes (const)
const Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return getAnalysisObject<Profile1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a Profile1D object from the histogram system by axis ID codes (non-const)
Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
return getAnalysisObject<Profile1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a named Profile2D object from the histogram system
const Profile2DPtr getProfile2D(const std::string& name) const {
return getAnalysisObject<Profile2D>(name);
}
/// Get a named Profile2D object from the histogram system (non-const)
Profile2DPtr getProfile2D(const std::string& name) {
return getAnalysisObject<Profile2D>(name);
}
/// Get a Profile2D object from the histogram system by axis ID codes (const)
const Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return getAnalysisObject<Profile2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a Profile2D object from the histogram system by axis ID codes (non-const)
Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
return getAnalysisObject<Profile2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a named Scatter2D object from the histogram system
const Scatter2DPtr getScatter2D(const std::string& name) const {
return getAnalysisObject<Scatter2D>(name);
}
/// Get a named Scatter2D object from the histogram system (non-const)
Scatter2DPtr getScatter2D(const std::string& name) {
return getAnalysisObject<Scatter2D>(name);
}
/// Get a Scatter2D object from the histogram system by axis ID codes (const)
const Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return getAnalysisObject<Scatter2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
/// Get a Scatter2D object from the histogram system by axis ID codes (non-const)
Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
return getAnalysisObject<Scatter2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
}
//@}
private:
/// Name passed to constructor (used to find .info analysis data file, and as a fallback)
string _defaultname;
/// Pointer to analysis metadata object
unique_ptr<AnalysisInfo> _info;
/// Storage of all plot objects
/// @todo Make this a map for fast lookup by path?
vector<AnalysisObjectPtr> _analysisobjects;
/// @name Cross-section variables
//@{
double _crossSection;
bool _gotCrossSection;
//@}
/// The controlling AnalysisHandler object.
AnalysisHandler* _analysishandler;
/// Collection of cached refdata to speed up many autobookings: the
/// reference data file should only be read once.
mutable std::map<std::string, AnalysisObjectPtr> _refdata;
/// Options of (this instance of) the analysis
map<string, string> _options;
/// The string of options.
string _optstring;
private:
/// @name Utility functions
//@{
/// Get the reference data for this paper and cache it.
void _cacheRefData() const;
//@}
/// The assignment operator is private and must never be called.
/// In fact, it should not even be implemented.
/// (In C++11 style this would normally be written as '= delete'.)
Analysis& operator=(const Analysis&);
};
}
// Include definition of analysis plugin system so that analyses automatically see it when including Analysis.hh
#include "Rivet/AnalysisBuilder.hh"
/// @def DECLARE_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism.
/// Instantiates a file-scope AnalysisBuilder whose constructor registers the analysis.
#define DECLARE_RIVET_PLUGIN(clsname) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname
/// @def DECLARE_ALIASED_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism, with an extra alias name for this analysis.
// Earlier token-pasting attempt, kept for reference:
// #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname ## ( ## #alias ## )
#define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) DECLARE_RIVET_PLUGIN(clsname)( #alias )
/// @def DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
/// Preprocessor define to prettify the manky constructor with name string argument
#define DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) clsname() : Analysis(# clsname) {}
/// @def DEFAULT_RIVET_ANALYSIS_CTOR
/// Slight abbreviation for DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
#define DEFAULT_RIVET_ANALYSIS_CTOR(clsname) DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname)
#endif
diff --git a/include/Rivet/Makefile.am b/include/Rivet/Makefile.am
--- a/include/Rivet/Makefile.am
+++ b/include/Rivet/Makefile.am
@@ -1,185 +1,186 @@
## Internal headers - not to be installed
nobase_dist_noinst_HEADERS =
## Public headers - to be installed
nobase_pkginclude_HEADERS =
## Rivet interface
nobase_pkginclude_HEADERS += \
Rivet.hh \
Run.hh \
Event.hh \
ParticleBase.hh \
Particle.fhh Particle.hh \
Jet.fhh Jet.hh \
Projection.fhh Projection.hh \
ProjectionApplier.hh \
ProjectionHandler.hh \
Analysis.hh \
AnalysisHandler.hh \
AnalysisInfo.hh \
AnalysisBuilder.hh \
AnalysisLoader.hh
## Build config stuff
nobase_pkginclude_HEADERS += \
Config/RivetCommon.hh \
Config/RivetConfig.hh \
Config/BuildOptions.hh
## Projections
nobase_pkginclude_HEADERS += \
Projections/AliceCommon.hh \
Projections/AxesDefinition.hh \
Projections/Beam.hh \
Projections/BeamThrust.hh \
Projections/CentralEtHCM.hh \
Projections/CentralityProjection.hh \
Projections/ChargedFinalState.hh \
Projections/ChargedLeptons.hh \
Projections/ConstLossyFinalState.hh \
Projections/DirectFinalState.hh \
Projections/DISFinalState.hh \
Projections/DISKinematics.hh \
Projections/DISLepton.hh \
Projections/DressedLeptons.hh \
Projections/FastJets.hh \
Projections/FinalPartons.hh \
Projections/FinalState.hh \
Projections/FoxWolframMoments.hh \
Projections/FParameter.hh \
Projections/GeneratedPercentileProjection.hh \
Projections/HadronicFinalState.hh \
Projections/HeavyHadrons.hh \
Projections/Hemispheres.hh \
Projections/IdentifiedFinalState.hh \
Projections/ImpactParameterProjection.hh \
Projections/IndirectFinalState.hh \
Projections/InitialQuarks.hh \
Projections/InvMassFinalState.hh \
Projections/JetAlg.hh \
Projections/JetShape.hh \
Projections/LeadingParticlesFinalState.hh \
Projections/LossyFinalState.hh \
Projections/MergedFinalState.hh \
Projections/MissingMomentum.hh \
Projections/MixedFinalState.hh \
Projections/NeutralFinalState.hh \
Projections/NonHadronicFinalState.hh \
Projections/NonPromptFinalState.hh \
Projections/ParisiTensor.hh \
Projections/ParticleFinder.hh \
Projections/PartonicTops.hh \
Projections/PercentileProjection.hh \
Projections/PrimaryHadrons.hh \
Projections/PrimaryParticles.hh \
Projections/PromptFinalState.hh \
Projections/SingleValueProjection.hh \
Projections/SmearedParticles.hh \
Projections/SmearedJets.hh \
Projections/SmearedMET.hh \
Projections/Sphericity.hh \
Projections/Spherocity.hh \
Projections/TauFinder.hh \
Projections/Thrust.hh \
Projections/TriggerCDFRun0Run1.hh \
Projections/TriggerCDFRun2.hh \
Projections/TriggerProjection.hh \
Projections/TriggerUA5.hh \
Projections/UnstableFinalState.hh \
Projections/UserCentEstimate.hh \
Projections/VetoedFinalState.hh \
Projections/VisibleFinalState.hh \
Projections/WFinder.hh \
Projections/ZFinder.hh
## Meta-projection convenience headers
nobase_pkginclude_HEADERS += \
Projections/FinalStates.hh \
Projections/Smearing.hh
## Analysis base class headers
# TODO: Move to Rivet/AnalysisTools header dir?
nobase_pkginclude_HEADERS += \
Analyses/MC_Cent_pPb.hh \
Analyses/MC_ParticleAnalysis.hh \
Analyses/MC_JetAnalysis.hh \
Analyses/MC_JetSplittings.hh
## Tools
nobase_pkginclude_HEADERS += \
Tools/AliceCommon.hh \
Tools/AtlasCommon.hh \
Tools/BeamConstraint.hh \
Tools/BinnedHistogram.hh \
Tools/CentralityBinner.hh \
Tools/Cmp.fhh \
Tools/Cmp.hh \
+ Tools/Correlators.hh \
Tools/Cutflow.hh \
Tools/Cuts.fhh \
Tools/Cuts.hh \
Tools/Exceptions.hh \
Tools/JetUtils.hh \
Tools/Logging.hh \
Tools/Random.hh \
Tools/ParticleBaseUtils.hh \
Tools/ParticleIdUtils.hh \
Tools/ParticleUtils.hh \
Tools/ParticleName.hh \
Tools/Percentile.hh \
Tools/PrettyPrint.hh \
Tools/RivetPaths.hh \
Tools/RivetSTL.hh \
Tools/RivetFastJet.hh \
Tools/RivetHepMC.hh \
Tools/RivetYODA.hh \
Tools/RivetMT2.hh \
Tools/SmearingFunctions.hh \
Tools/MomentumSmearingFunctions.hh \
Tools/ParticleSmearingFunctions.hh \
Tools/JetSmearingFunctions.hh \
Tools/TypeTraits.hh \
Tools/Utils.hh \
Tools/Nsubjettiness/AxesDefinition.hh \
Tools/Nsubjettiness/ExtraRecombiners.hh \
Tools/Nsubjettiness/MeasureDefinition.hh \
Tools/Nsubjettiness/Njettiness.hh \
Tools/Nsubjettiness/NjettinessPlugin.hh \
Tools/Nsubjettiness/Nsubjettiness.hh \
Tools/Nsubjettiness/TauComponents.hh \
Tools/Nsubjettiness/XConePlugin.hh
nobase_dist_noinst_HEADERS += \
Tools/osdir.hh
## Maths
nobase_pkginclude_HEADERS += \
Math/Matrices.hh \
Math/Vector3.hh \
Math/VectorN.hh \
Math/MatrixN.hh \
Math/MatrixDiag.hh \
Math/MathHeader.hh \
Math/Vectors.hh \
Math/LorentzTrans.hh \
Math/Matrix3.hh \
Math/MathUtils.hh \
Math/Vector4.hh \
Math/Math.hh \
Math/Units.hh \
Math/Constants.hh \
Math/eigen/util.h \
Math/eigen/regressioninternal.h \
Math/eigen/regression.h \
Math/eigen/vector.h \
Math/eigen/ludecompositionbase.h \
Math/eigen/ludecomposition.h \
Math/eigen/linearsolver.h \
Math/eigen/linearsolverbase.h \
Math/eigen/matrix.h \
Math/eigen/vectorbase.h \
Math/eigen/projective.h \
Math/eigen/matrixbase.h
diff --git a/include/Rivet/Tools/Correlators.hh b/include/Rivet/Tools/Correlators.hh
new file mode 100644
--- /dev/null
+++ b/include/Rivet/Tools/Correlators.hh
@@ -0,0 +1,1084 @@
+// -*- C++ -*-
+#ifndef RIVET_Correlators_HH
+#define RIVET_Correlators_HH
+
+#include "Rivet/Projection.hh"
+#include "Rivet/Projections/ParticleFinder.hh"
+#include <complex>
+#include "YODA/Scatter2D.h"
+#include "Rivet/Analysis.hh"
+/* File contains tools for calculating flow coefficients using
+ * correlators.
+ * Classes:
+ * Correlators: Calculates single event correlators of a given harmonic.
+ * Cumulants: An additional base class for flow analyses
+ * (use as: class MyAnalysis : public Analysis, Cumulants {};)
+ * Includes a framework for calculating cumulants and flow coefficients
+ * from single event correlators, including automatic handling of statistical
+ * errors. Contains multiple internal sub-classes:
+ * CorBinBase: Base class for correlators binned in event or particle observables.
+ * CorSingleBin: A simple bin for correlators.
+ * CorBin: Has the interface of a simple bin, but does automatic calculation
+ * of statistical errors by a bootstrap method.
+ * ECorrelator: Data type for event averaged correlators.
+ * @author Vytautas Vislavicius, Christine O. Rasmussen, Christian Bierlich.
+ */
+
+namespace Rivet {
+
+ /* @brief Projection for calculating correlators for flow measurements.
+ *
+ * A projection which calculates Q-vectors and P-vectors, and projects
+ * them out as correlators. Implementation follows the description of
+ * the ''Generic Framework''
+ * Phys. Rev. C 83 (2011) 044913, arXiv: 1010.0233
+ * Phys. Rev. C 89 (2014) 064904, arXiv: 1312.3572
+ *
+ */
+
+ class Correlators : public Projection {
+
+ public:
+
+ // Constructor. Takes parameters @parm fsp, the Final State
+ // projection correlators should be constructed from, @parm nMaxIn,
+ // the maximal sum of harmonics, eg. for
+ // c_2{2} = {2,-2} = 2 + 2 = 4
+ // c_2{4} = {2,2,-2,-2} = 2 + 2 + 2 + 2 = 8
+ // c_4{2} = {4,-4} = 4 + 4 = 8
+ // c_4{4} = {4,4,-4,-4} = 4 + 4 + 4 + 4 = 16.
+ // @parm pMaxIn is the maximal number of particles
+ // you want to correlate and @parm pTbinEdgesIn is the (lower)
+ // edges of pT bins, the last one the upper edge of the final bin.
+ Correlators(const ParticleFinder& fsp, int nMaxIn = 2,
+ int pMaxIn = 0, vector<double> pTbinEdgesIn = {});
+
+ // Constructor which takes a Scatter2D to estimate bin edges.
+ Correlators(const ParticleFinder& fsp, int nMaxIn,
+ int pMaxIn, const Scatter2DPtr hIn);
+
+ /// @brief Integrated correlator of @parm n harmonic, with the
+ /// number of powers being the size of @parm n.
+ /// Eg. @parm n should be:
+ /// <<2>>_2 => n = {2, -2}
+ /// <<4>>_2 => n = {2, 2, -2, -2}
+ /// <<2>>_4 => n = {4, -4}
+ /// <<4>>_4 => n = {4, 4, -4, -4} and so on.
+ const pair<double,double> intCorrelator(vector<int> n) const;
+
+ /// @brief pT differential correlator of @parm n harmonic, with the
+ /// number of powers being the size of @parm n.
+ /// The method can include overflow/underflow bins in the
+ /// beginning/end of the returned vector, by toggling
+ /// @parm overflow = true.
+ const vector<pair<double,double>> pTBinnedCorrelators(vector<int> n,
+ bool overflow = false) const;
+
+ /// @brief Integrated correlator of @parm n1 harmonic, with the
+ /// number of powers being the size of @parm n1. This method
+ /// imposes an eta gap, correlating with another phase space,
+ /// where another Correlators projection @parm other should be
+ /// defined. The harmonics of the other phase space is given
+ /// as @parm n2.
+ /// To get eg. integrated <<4>>_2, @parm n1 should be:
+ /// n1 = {2, 2} and n2 = {-2, -2}
+ const pair<double,double> intCorrelatorGap(const Correlators& other,
+ vector<int> n1, vector<int> n2) const;
+
+ /// @brief pT differential correlators of @parm n1 harmonic, with the
+ /// number of powers being the size of @parm n1. This method
+ /// imposes an eta gap, correlating with another phase space,
+ /// where another Correlators projection @parm other should be
+ /// defined. The harmonics of the other phase space is given
+ /// as @parm n2.
+ /// To get eg. differential <<4'>_2, @parm n1 should be:
+ /// n1 = {2, 2} and @parm n2: n2 = {-2, -2}.
+ /// To get eg. differential <<2'>>_4, @parm n1 should be:
+ /// n1 = {4} and @parm n2: n2 = {-4}.
+ /// The method can include overflow/underflow
+ /// bins in the beginning/end of the returned vector, by toggling
+ /// @parm overflow = true.
+ const vector<pair<double,double>> pTBinnedCorrelatorsGap(
+ const Correlators& other, vector<int> n1, vector<int> n2,
+ bool oveflow = false) const;
+
+ /// @brief Construct a harmonic vectors from @parm n harmonics
+ /// and @parm m number of particles.
+ /// TODO: In C++14 this can be done much nicer with TMP.
+ static vector<int> hVec(int n, int m) {
+ if (m%2 != 0) {
+ cout << "Harmonic Vector: Number of particles "
+ "must be an even number." << endl;
+ return {};
+ }
+ vector<int> ret = {};
+ for (int i = 0; i < m; ++i) {
+ if (i < m/2) ret.push_back(n);
+ else ret.push_back(-n);
+ }
+ return ret;
+ }
+
+ /// @brief Return the maximal values for n, p to be used in the
+ /// constructor of Correlators(xxx, nMax, pMax, xxxx)
+ static pair<int,int> getMaxValues(vector< vector<int> >& hList){
+ int nMax = 0, pMax = 0;
+ for (vector<int> h : hList) {
+ int tmpN = 0, tmpP = 0;
+ for ( int i = 0; i < int(h.size()); ++i) {
+ tmpN += abs(h[i]);
+ ++tmpP;
+ }
+ if (tmpN > nMax) nMax = tmpN;
+ if (tmpP > pMax) pMax = tmpP;
+ }
+ return make_pair(nMax,pMax);
+ }
+ // Clone on the heap.
+ DEFAULT_RIVET_PROJ_CLONE(Correlators);
+
+ protected:
+
+ // @brief Project function. Loops over array and calculates Q vectors
+ // and P vectors if needed
+ void project(const Event& e);
+
+ // @brief Compare against other projection. Test if harmonics, pT-bins
+ // and underlying final state are similar.
+ int compare(const Projection& p) const {
+ const Correlators* other = dynamic_cast<const Correlators*>(&p);
+ if (nMax != other->nMax) return UNDEFINED;
+ if (pMax != other->pMax) return UNDEFINED;
+ if (pTbinEdges != other->pTbinEdges) return UNDEFINED;
+ return mkPCmp(p, "FS");
+ };
+
+ // @brief Calculate correlators from one particle.
+ void fillCorrelators(const Particle& p, const double& weight);
+
+ // @brief Return a Q-vector.
+ const complex<double> getQ(int n, int p) const {
+ bool isNeg = (n < 0);
+ if (isNeg) return conj( qVec[abs(n)][p] );
+ else return qVec[n][p];
+ };
+
+ // @brief Return a P-vector.
+ const complex<double> getP(int n, int p, double pT = 0.) const {
+ bool isNeg = (n < 0);
+ map<double, Vec2D>::const_iterator pTitr = pVec.lower_bound(pT);
+ if (pTitr == pVec.end()) return DBL_NAN;
+ if (isNeg) return conj( pTitr->second[abs(n)][p] );
+ else return pTitr->second[n][p];
+ };
+
+ private:
+ // Find correlators by recursion. Order = M (# of particles),
+ // n's are harmonics, p's are the powers of the weights
+ const complex<double> recCorr(int order, vector<int> n,
+ vector<int> p, bool useP, double pT = 0.) const;
+
+ // Two-particle correlator Eq. (19) p. 6
+ // Flag if p-vectors or q-vectors should be used to
+ // calculate the correlator.
+ const complex<double> twoPartCorr(int n1, int n2, int p1 = 1,
+ int p2 = 1, double pT = 0., bool useP = false) const;
+
+ // Set elements in vectors to zero.
+ void setToZero();
+
+ // Shorthands for setting and comparing to zero.
+ const complex<double> _ZERO = {0., 0.};
+ const double _TINY = 1e-10;
+
+ // Shorthand typedefs for vec<vec<complex>>.
+ typedef vector< vector<complex<double>> > Vec2D;
+
+ // Define Q-vectors and p-vectors
+ Vec2D qVec; // Q[n][p]
+ map<double, Vec2D> pVec; // p[pT][n][p]
+
+ // The max values of n and p to be calculated.
+ int nMax, pMax;
+ // pT bin-edges
+ vector<double> pTbinEdges;
+ bool isPtDiff;
+
+ /// End class Correlators
+ };
+
+
+ /// @brief Tools for flow analyses.
+ /// The following are helper classes to construct event averaged correlators
+ /// as well as cumulants and flow coefficients from the basic event
+ // correlators defined above. They are all encapsulated in a Cumulants
+ // class, which can be used as a(nother) base class for flow analyses,
+ // to ensure access.
+
+ class CumulantAnalysis : public Analysis {
+ private:
+
+ // Number of bins used for bootstrap calculation of statistical
+ // uncertainties. It is hard coded, and should NOT be changed unless there
+ // are good reasons to do so.
+ static const int BOOT_BINS = 9;
+
+ // Enum for choosing the method of error calculation.
+ enum Error {
+ VARIANCE,
+ ENVELOPE
+ };
+
+ // The desired error method. Can be changed in the analysis constructor
+ // by setting it appropriately.
+ Error errorMethod;
+
+ /// @brief Base class for correlator bins.
+ class CorBinBase {
+ public:
+ // Derived class should have fill and mean defined.
+ virtual void fill(const pair<double, double>& cor, const double& weight) = 0;
+ virtual const double mean() const = 0;
+ };
+
+ /// @brief The CorSingleBin is the basic quantity filled in an ECorrelator.
+ /// It is a simple counter with an even simpler structure than normal
+ /// YODA type DBNs, but added functionality to test for out of
+ /// bounds correlators.
+ class CorSingleBin : public CorBinBase {
+
+ public:
+ /// @brief The default constructor.
+ CorSingleBin() : _sumWX(0.), _sumW(0.), _sumW2(0.), _numEntries(0.) {}
+
+ /// @brief Fill a correlator bin with the return type from a
+ /// Correlator (a pair giving numerator and denominator of <M>_event).
+ void fill(const pair<double, double>& cor, const double& weight) {
+ _numEntries += 1.;
+ // Test if denominator for the single event average is zero.
+ if (cor.second < 1e-10) return;
+ // The single event average <M> is then cor.first / cor.second.
+ // With weights this becomes just:
+ _sumWX += cor.first * weight;
+ _sumW += weight * cor.second;
+ _sumW2 += weight * weight * cor.second * cor.second;
+ }
+
+ const double mean() const {
+ if (_sumW < 1e-10) return 0;
+ return _sumWX / _sumW;
+ }
+
+ // @brief Sum of weights.
+ const double sumW() const {
+ return _sumW;
+ }
+
+ const double sumW2() const {
+ return _sumW2;
+ }
+
+ // @brief Sum of weight * X.
+ const double sumWX() const {
+ return _sumWX;
+ }
+
+ // @brief Number of entries.
+ const double numEntries() const {
+ return _numEntries;
+ }
+
+ private:
+ double _sumWX, _sumW, _sumW2, _numEntries;
+
+ }; // End of CorSingleBin sub-class.
+
+ /// @brief The CorBin is the basic bin quantity in ECorrelators.
+ /// It consists of several CorSingleBins, to facilitate
+ /// bootstrapping calculation of statistical uncertainties.
+ class CorBin : public CorBinBase {
+ public:
+ // @brief The constructor. nBins signifies the period of the bootstrap
+ // calculation, and should never be changed here, but in its definition
+ // above -- and only if there are good reasons to do so.
+ CorBin() : binIndex(0), nBins(BOOT_BINS) {
+ for(size_t i = 0; i < nBins; ++i) bins.push_back(CorSingleBin());
+ }
+
+ // @brief Fill the correct underlying bin and take a step.
+ void fill(const pair<double, double>& cor, const double& weight) {
+ // Test if denominator for the single event average is zero.
+ if (cor.second < 1e-10) return;
+ // Fill the correct bin.
+ bins[binIndex].fill(cor, weight);
+ if (binIndex == nBins - 1) binIndex = 0;
+ else ++binIndex;
+ }
+
+ // @brief Calculate the total sample mean with all
+ // available statistics.
+ const double mean() const {
+ double sow = 0;
+ double sowx = 0;
+ for(auto b : bins) {
+ if (b.sumW() < 1e-10) continue;
+ sow += b.sumW();
+ sowx += b.sumWX();
+ }
+ return sowx / sow;
+ }
+
+ // @brief Return a copy of the bins.
+ const vector<CorSingleBin> getBins() const {
+ return bins;
+ }
+
+ // @brief Return the bins as pointers to the base class.
+ template<class T=CorBinBase>
+ const vector<T*> getBinPtrs() {
+ vector<T*> ret(bins.size());
+ transform(bins.begin(), bins.end(), ret.begin(),
+ [](CorSingleBin& b) {return &b;});
+ return ret;
+ }
+
+ private:
+ vector<CorSingleBin> bins;
+ size_t binIndex;
+ size_t nBins;
+
+ }; // End of CorBin sub-class.
+
+ public:
+ /// @brief The ECorrelator is a helper class to calculate all event
+ /// averages of correlators, in order to construct cumulants.
+ /// It can be binned in any variable.
+ class ECorrelator {
+
+ public:
+ /// @brief Constructor. Takes as argument the desired harmonic and number
+ /// of correlated particles as a generic framework style vector, eg,
+ /// {2, -2} for <<2>>_2, no binning.
+ /// TODO: Implement functionality for this if needed.
+ //ECorrelator(vector<int> h) : h1(h), h2({}),
+ // binX(0), binContent(0), reference() {
+ //};
+
+ /// @brief Constructor. Takes as argument the desired harmonic and number
+ /// of correlated particles as a generic framework style vector, eg,
+ /// {2, -2} for <<2>>_2 and binning.
+ ECorrelator(vector<int> h, vector<double> binIn) : h1(h), h2({}),
+ binX(binIn), binContent(binIn.size() - 1), reference() {};
+
+ /// @brief Constructor for gapped correlator. Takes as argument the
+ /// desired harmonics for the two final states, and binning.
+ ECorrelator(vector<int> h1In, vector<int> h2In, vector<double> binIn) :
+ h1(h1In), h2(h2In), binX(binIn), binContent(binIn.size() - 1),
+ reference() {};
+
+ /// @brief Fill the appropriate bin given an input (per event)
+ /// observable, eg. centrality. Under- and overflow is silently dropped.
+ void fill(const double& obs, const Correlators& c,
+ const double weight = 1.0) {
+ int index = getBinIndex(obs);
+ if (index < 0) return;
+ binContent[index].fill(c.intCorrelator(h1), weight);
+ }
+
+ /// @brief Fill the appropriate bin given an input (per event)
+ /// observable, eg. centrality. Using a rapidity gap between
+ /// two Correlators.
+ void fill(const double& obs, const Correlators& c1,
+ const Correlators& c2, const double weight = 1.0) {
+ if (!h2.size()) {
+ cout << "Trying to fill gapped correlator, but harmonics behind "
+ "the gap (h2) are not given!" << endl;
+ return;
+ }
+ int index = getBinIndex(obs);
+ if (index < 0) return;
+ binContent[index].fill(c1.intCorrelatorGap(c2, h1, h2), weight);
+ }
+
+ /// @brief Fill the bins with the appropriate correlator, taking the
+ /// binning directly from the Correlators object, and filling also the
+ /// reference flow.
+ void fill(const Correlators& c, const double& weight = 1.0) {
+ vector< pair<double, double> > diffCorr = c.pTBinnedCorrelators(h1);
+ // We always skip overflow when calculating the all event average.
+ if (diffCorr.size() != binX.size() - 1)
+ cout << "Tried to fill event with wrong binning (ungapped)" << endl;
+ // NOTE(review): a negative index aborts the whole fill (return, not
+ // continue), skipping remaining bins and the reference -- confirm.
+ for (size_t i = 0; i < diffCorr.size(); ++i) {
+ int index = getBinIndex(binX[i]);
+ if (index < 0) return;
+ binContent[index].fill(diffCorr[i], weight);
+ }
+ reference.fill(c.intCorrelator(h1), weight);
+ }
+
+ /// @brief Fill bins with the appropriate correlator, taking the binning
+ /// directly from the Correlators object, and also the reference flow.
+ /// Using a rapidity gap between two Correlators.
+ void fill(const Correlators& c1, const Correlators& c2,
+ const double& weight = 1.0) {
+ if (!h2.size()) {
+ cout << "Trying to fill gapped correlator, but harmonics behind "
+ "the gap (h2) are not given!" << endl;
+ return;
+ }
+ vector< pair<double, double> > diffCorr = c1.pTBinnedCorrelatorsGap(c2, h1, h2);
+ // We always skip overflow when calculating the all event average.
+ if (diffCorr.size() != binX.size() - 1)
+ cout << "Tried to fill event with wrong binning (gapped)" << endl;
+ for (size_t i = 0; i < diffCorr.size(); ++i) {
+ int index = getBinIndex(binX[i]);
+ if (index < 0) return;
+ binContent[index].fill(diffCorr[i], weight);
+ }
+ reference.fill(c1.intCorrelatorGap(c2, h1, h2), weight);
+ }
+
+ /// @brief Get a copy of the bin contents.
+ const vector<CorBin> getBins() const {
+ return binContent;
+ }
+
+ // @brief Return the bins as pointers to the base class.
+ // Pointers refer into this object's storage; do not outlive it.
+ const vector<CorBinBase*> getBinPtrs() {
+ vector<CorBinBase*> ret(binContent.size());
+ transform(binContent.begin(), binContent.end(), ret.begin(),
+ [](CorBin& b) {return &b;});
+ return ret;
+ }
+
+ /// @brief Get a copy of the bin x-values.
+ const vector<double> getBinX() const {
+ return binX;
+ }
+
+ /// @brief Get a copy of the @ret h1 harmonic vector.
+ const vector<int> getH1() const {
+ return h1;
+ }
+
+ /// @brief Get a copy of the @ret h2 harmonic vector.
+ const vector<int> getH2() const {
+ return h2;
+ }
+
+ /// @brief Replace reference flow bin with another reference
+ /// flow bin, eg. calculated in another phase space or with
+ /// other pid.
+ void setReference(CorBin refIn) {
+ reference = refIn;
+ }
+
+ /// @brief Extract the reference flow from a differential event
+ /// averaged correlator.
+ const CorBin getReference() const {
+ if (reference.mean() < 1e-10)
+ cout << "Warning: ECorrelator, reference bin is zero." << endl;
+ return reference;
+ }
+ /// @brief set the @parm prIn list of profile histograms associated
+ /// with the internal bins. Is called automatically when booking, no
+ /// need to call it yourself.
+ // Bug fix: removed a leftover debug printout ("TEST: ...") that
+ // dereferenced the first profile and spammed stdout on every booking.
+ void setProfs(list<Profile1DPtr> prIn) {
+ profs = prIn;
+ }
+
+ /// @brief begin() iterator for the list of associated profile histograms.
+ list<Profile1DPtr>::iterator profBegin() {
+ return profs.begin();
+ }
+
+ /// @brief end() iterator for the list of associated profile histograms.
+ list<Profile1DPtr>::iterator profEnd() {
+ return profs.end();
+ }
+
+ private:
+ /// @brief Get correct bin index for a given @parm obs value.
+ /// Returns -1 for both under- and overflow (linear scan over edges).
+ const int getBinIndex(const double& obs) const {
+ // Find the correct index of binContent.
+ // If we are in overflow, just skip.
+ if (obs >= binX.back()) return -1;
+ // If we are in underflow, ditto.
+ if (obs < binX[0]) return -1;
+ int index = 0;
+ for (int i = 0, N = binX.size() - 1; i < N; ++i, ++index)
+ if (obs >= binX[i] && obs < binX[i + 1]) break;
+ return index;
+ }
+
+ // The harmonics vectors.
+ vector<int> h1;
+ vector<int> h2;
+ // The bins.
+ vector<double> binX;
+ vector<CorBin> binContent;
+ // The reference flow.
+ CorBin reference;
+ // The profile histograms associated with the CorBins for streaming.
+ list<Profile1DPtr> profs;
+
+ }; // End of ECorrelator sub-class.
+
+ // @brief Determine the maximal harmonic N and particle number P needed
+ // by the booked correlators, by collecting all registered harmonic
+ // vectors and delegating to Correlators::getMaxValues.
+ const pair<int, int> getMaxValues() const {
+ vector< vector<int>> harmVecs;
+ // Gather the non-empty harmonic vectors of every booked ECorrelator.
+ for (const auto& ec : eCorrPtrs) {
+ const vector<int> hA = ec->getH1();
+ const vector<int> hB = ec->getH2();
+ if (!hA.empty()) harmVecs.push_back(hA);
+ if (!hB.empty()) harmVecs.push_back(hB);
+ }
+ // Nothing booked yet: warn and return a default-constructed pair.
+ if (harmVecs.empty()) {
+ cout << "Warning: You tried to extract max values from harmonic "
+ "vectors, but have not booked any." << endl;
+ return pair<int,int>();
+ }
+ return Correlators::getMaxValues(harmVecs);
+ }
+
+ // Typedeffing shared pointer to ECorrelator.
+ typedef shared_ptr<ECorrelator> ECorrPtr;
+
+ // @brief Book an ECorrelator in the same way as a histogram: the
+ // x-binning is taken from the input scatter, and one bootstrap
+ // profile per sub-bin is attached for streaming to file.
+ ECorrPtr bookECorrelator(const string name, const vector<int>& h, const Scatter2DPtr hIn) {
+ // Extract the bin edges from the scatter points.
+ vector<double> edges;
+ for (auto b : hIn->points()) edges.push_back(b.xMin());
+ edges.push_back(hIn->points().back().xMax());
+ auto ec = ECorrPtr(new ECorrelator(h, edges));
+ // One profile histogram per bootstrap sub-bin.
+ list<Profile1DPtr> bootProfs;
+ for (int i = 0; i < BOOT_BINS; ++i)
+ bootProfs.push_back(bookProfile1D(name+"-e"+to_string(i),*hIn));
+ ec->setProfs(bootProfs);
+ eCorrPtrs.push_back(ec);
+ return ec;
+ }
+
+ // @brief Book a gapped ECorrelator with two harmonic vectors, one for
+ // each side of the rapidity gap. Binning and bootstrap profiles are
+ // set up exactly as in the ungapped booking.
+ ECorrPtr bookECorrelator(const string name, const vector<int>& h1,
+ const vector<int>& h2, const Scatter2DPtr hIn ) {
+ // Extract the bin edges from the scatter points.
+ vector<double> edges;
+ for (auto b : hIn->points()) edges.push_back(b.xMin());
+ edges.push_back(hIn->points().back().xMax());
+ auto ec = ECorrPtr(new ECorrelator(h1, h2, edges));
+ // One profile histogram per bootstrap sub-bin.
+ list<Profile1DPtr> bootProfs;
+ for (int i = 0; i < BOOT_BINS; ++i)
+ bootProfs.push_back(bookProfile1D(name+"-e"+to_string(i),*hIn));
+ ec->setProfs(bootProfs);
+ eCorrPtrs.push_back(ec);
+ return ec;
+ }
+
+ // @brief Short hand for gapped correlators: the harmonic vector is
+ // split into two halves, one for each side of the rapidity gap.
+ ECorrPtr bookECorrelatorGap (const string name, const vector<int>& h,
+ const Scatter2DPtr hIn) {
+ const auto mid = h.begin() + h.size() / 2;
+ const vector<int> hFirst(h.begin(), mid);
+ const vector<int> hSecond(mid, h.end());
+ return bookECorrelator(name, hFirst, hSecond, hIn);
+ }
+
+ // @brief Templated version of correlator booking which takes
+ // @parm N desired harmonic and @parm M number of particles.
+ // Delegates to the vector-based overload via Correlators::hVec.
+ template<unsigned int N, unsigned int M>
+ ECorrPtr bookECorrelator(const string name, const Scatter2DPtr hIn) {
+ return bookECorrelator(name, Correlators::hVec(N, M), hIn);
+ }
+
+ // @brief Templated version of gapped correlator booking which takes
+ // @parm N desired harmonic and @parm M number of particles.
+ // Delegates to the vector-based gapped overload via Correlators::hVec.
+ template<unsigned int N, unsigned int M>
+ ECorrPtr bookECorrelatorGap(const string name, const Scatter2DPtr hIn) {
+ return bookECorrelatorGap(name, Correlators::hVec(N, M), hIn);
+ }
+
+ // @brief Finalize MUST be explicitly called for this base class with a
+ // CumulantsAnalysis::finalize() call as the first thing in each analysis,
+ // in order to stream the contents of ECorrelators to a yoda file for
+ // reentrant finalize.
+ void finalize() {
+ // Stream every booked correlator into its bootstrap profiles.
+ for (auto& ec : eCorrPtrs) {
+ list<Profile1DPtr> bootProfs(ec->profBegin(), ec->profEnd());
+ corrPlot(bootProfs, ec);
+ }
+ }
+ private:
+
+ /// Bookkeeping of the event averaged correlators.
+ list<ECorrPtr> eCorrPtrs;
+
+ public:
+
+ // @brief Constructor. Use CumulantAnalysis as base class for the
+ // analysis to have access to functionality.
+ // Forwards the analysis name to Analysis and selects the ENVELOPE
+ // method for bootstrapped error estimation by default.
+ CumulantAnalysis (string n) : Analysis(n), errorMethod(ENVELOPE) {};
+ // @brief Helper method for turning correlators into Scatter2Ds.
+ // Takes @parm h a pointer to the resulting Scatter2D, @parm binx
+ // the x-bins and a function @parm func mapping bin index to y-value.
+ // Makes no attempt at statistical errors.
+ // Can also be used directly in an analysis for a custom
+ // transformation from correlators to Scatter2D.
+ template<typename T>
+ static void fillScatter(Scatter2DPtr h, vector<double>& binx, T func) {
+ vector<YODA::Point2D> points;
+ const int nBins = binx.size() - 1;
+ for (int i = 0; i < nBins; ++i) {
+ const double lo = binx[i];
+ const double hi = binx[i + 1];
+ const double xMid = (lo + hi) / 2.0;
+ double yVal = func(i);
+ // Map undefined (NaN) transformations onto zero.
+ if (isnan(yVal)) yVal = 0.;
+ points.push_back(YODA::Point2D(xMid, yVal, fabs(xMid - lo),
+ fabs(xMid - hi), 0, 0));
+ }
+ // Replace any existing content of the scatter with the new points.
+ h->reset();
+ h->points().clear();
+ for (const auto& p : points) h->addPoint(p);
+ }
+
+ // @brief Helper method for turning correlators into Scatter2Ds
+ // with error estimates.
+ // Takes @parm h a pointer to the resulting Scatter2D, @parm binx
+ // the x-bins, a function @parm func defining the transformation
+ // and a vector of errors @parm err.
+ // See usage in the methods below.
+ // Can also be used directly in the analysis if a user wants to
+ // perform an unforseen transformation from correlators to
+ // Scatter2D.
+ template<typename F>
+ const void fillScatter(Scatter2DPtr h, vector<double>& binx, F func,
+ vector<pair<double, double> >& yErr) const {
+ vector<YODA::Point2D> points;
+ for (int i = 0, N = binx.size() - 1; i < N; ++i) {
+ double xMid = (binx[i] + binx[i + 1]) / 2.0;
+ double xeMin = fabs(xMid - binx[i]);
+ double xeMax = fabs(xMid - binx[i + 1]);
+ double yVal = func(i);
+ // NaN transformations become zero points with zero y-error.
+ if (isnan(yVal))
+ points.push_back(YODA::Point2D(xMid, 0., xeMin, xeMax,0., 0.));
+ else
+ points.push_back(YODA::Point2D(xMid, yVal, xeMin, xeMax,
+ yErr[i].first, yErr[i].second));
+ }
+ // Clear any existing content before refilling the scatter.
+ h->reset();
+ h->points().clear();
+ for (int i = 0, N = points.size(); i < N; ++i)
+ h->addPoint(points[i]);
+ }
+
+
+ /// @brief Take the @parm n th power of all points in @parm hIn,
+ /// and put the result in @parm hOut. Optionally put a
+ /// @parm k constant below the root.
+ static void nthPow(Scatter2DPtr hOut, const Scatter2DPtr hIn,
+ const double& n, const double& k = 1.0) {
+ if (n == 0 || n == 1) {
+ cout << "Error: Do not take the 0th or 1st power of a Scatter2D,"
+ " use scale instead." << endl;
+ return;
+ }
+ if (hIn->points().size() != hOut->points().size()) {
+ cout << "nthRoot: Scatterplots: " << hIn->name() << " and " <<
+ hOut->name() << " not initialized with same length." << endl;
+ return;
+ }
+ vector<YODA::Point2D> points;
+ // The error pre-factor is k^(1/n) / n by Taylors formula.
+ double eFac = pow(k,1./n)/n;
+ for (auto b : hIn->points()) {
+ double yVal = pow(k * b.y(),n);
+ // NaN results (e.g. fractional power of a negative value) become
+ // zero points with zero error.
+ if (isnan(yVal))
+ points.push_back(YODA::Point2D(b.x(), 0., b.xErrMinus(),
+ b.xErrPlus(), 0, 0 ));
+ else {
+ // NOTE(review): the propagated error uses yVal^(1/(n-1)); linear
+ // propagation on (k*y)^n would suggest an exponent of (n-1)/n
+ // instead -- confirm the intended formula.
+ double yemin = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrMinus();
+ if (isnan(yemin)) yemin = b.yErrMinus();
+ double yemax = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrPlus();
+ if (isnan(yemax)) yemax = b.yErrPlus();
+ points.push_back(YODA::Point2D(b.x(), yVal, b.xErrMinus(),
+ b.xErrPlus(), yemin, yemax ));
+ }
+ }
+ // Replace the output scatter's content with the transformed points.
+ hOut->reset();
+ hOut->points().clear();
+ for (int i = 0, N = points.size(); i < N; ++i)
+ hOut->addPoint(points[i]);
+ }
+
+ /// @brief Take the @parm n th power of all points in @parm h,
+ /// and put the result back in the same Scatter2D. Optionally put a
+ /// @parm k constant below the root.
+ static void nthPow(Scatter2DPtr h, const double& n,
+ const double& k = 1.0) {
+ if (n == 0 || n == 1) {
+ cout << "Error: Do not take the 0th or 1st power of a Scatter2D,"
+ " use scale instead." << endl;
+ return;
+ }
+ vector<YODA::Point2D> points;
+ // Work on a copy of the points, since h is refilled below.
+ vector<YODA::Point2D> pIn = h->points();
+ // The error pre-factor is k^(1/n) / n by Taylors formula.
+ double eFac = pow(k,1./n)/n;
+ for (auto b : pIn) {
+ double yVal = pow(k * b.y(),n);
+ // NaN results become zero points with zero error.
+ if (isnan(yVal))
+ points.push_back(YODA::Point2D(b.x(), 0., b.xErrMinus(),
+ b.xErrPlus(), 0, 0 ));
+ else {
+ // NOTE(review): same error-propagation exponent question as in
+ // the two-argument nthPow above -- confirm 1/(n-1) vs (n-1)/n.
+ double yemin = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrMinus();
+ if (isnan(yemin)) yemin = b.yErrMinus();
+ double yemax = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrPlus();
+ if (isnan(yemax)) yemax = b.yErrPlus();
+ points.push_back(YODA::Point2D(b.x(), yVal, b.xErrMinus(),
+ b.xErrPlus(), yemin, yemax ));
+ }
+ }
+ h->reset();
+ h->points().clear();
+ for (int i = 0, N = points.size(); i < N; ++i) h->addPoint(points[i]);
+ }
+
+ // @brief Calculate the bootstrapped sample variance for the observable
+ // given by the transformation @parm func, evaluated over the BOOT_BINS
+ // bootstrap sub-samples. Returned as a symmetric (lower, upper) pair.
+ template<typename T>
+ static pair<double, double> sampleVariance(T func) {
+ // Two-pass calculation: the mean first ...
+ double mean = 0.;
+ for (int i = 0; i < BOOT_BINS; ++i) mean += func(i);
+ mean /= double(BOOT_BINS);
+ // ... then the (n - 1)-normalised variance.
+ double var = 0.;
+ for (int i = 0; i < BOOT_BINS; ++i) {
+ const double d = func(i) - mean;
+ var += d * d;
+ }
+ var /= (double(BOOT_BINS) - 1);
+ return pair<double, double>(var, var);
+ }
+
+ // @brief Calculate the bootstrapped sample envelope (largest spread
+ // below and above the mean) for the observable given by @parm func.
+ template<typename T>
+ static pair<double, double> sampleEnvelope(T func) {
+ // The mean over all bootstrap sub-samples.
+ double mean = 0.;
+ for (int i = 0; i < BOOT_BINS; ++i) mean += func(i);
+ mean /= double(BOOT_BINS);
+ // Track the extremes, seeded with the mean.
+ double lo = mean, hi = mean;
+ for (int i = 0; i < BOOT_BINS; ++i) {
+ const double y = func(i);
+ if (y < lo) lo = y;
+ else if (y > hi) hi = y;
+ }
+ return pair<double, double>(fabs(mean - lo), fabs(hi - mean));
+ }
+
+ // @brief Selection method for which sample error to use, given
+ // in the constructor.
+ template<typename T>
+ const pair<double, double> sampleError(T func) const {
+ if (errorMethod == VARIANCE) return sampleVariance(func);
+ else if (errorMethod == ENVELOPE) return sampleEnvelope(func);
+ else
+ cout << "Error: Error method not found!" << endl;
+ return pair<double, double>(0.,0.);
+ }
+
+ // @brief Two particle integrated cn.
+ // The lambda captures binPtrs by reference: it is first evaluated per
+ // x-bin over the bootstrap sub-bins (for errors), then re-seated on
+ // the full-statistics bins for the central values.
+ const void cnTwoInt(Scatter2DPtr h, ECorrPtr e2) const {
+ vector<CorBin> bins = e2->getBins();
+ vector<double> binx = e2->getBinX();
+ // Assert bin size.
+ if (binx.size() - 1 != bins.size()){
+ cout << "cnTwoInt: Bin size (x,y) differs!" << endl;
+ return;
+ }
+ vector<CorBinBase*> binPtrs;
+ // The mean value of the cumulant.
+ auto cn = [&] (int i) { return binPtrs[i]->mean(); };
+ // Error calculation.
+ vector<pair<double, double> > yErr;
+ for (int j = 0, N = bins.size(); j < N; ++j) {
+ binPtrs = bins[j].getBinPtrs();
+ yErr.push_back(sampleError(cn));
+ }
+ // Re-seat the pointers on the per-x-bin CorBins for the central value.
+ binPtrs = e2->getBinPtrs();
+ fillScatter(h, binx, cn, yErr);
+ }
+
+ // @brief Two particle integrated vn: v{2} = sqrt(c{2}), implemented
+ // as the 0.5th power of the two-particle cumulant.
+ const void vnTwoInt(Scatter2DPtr h, ECorrPtr e2) const {
+ cnTwoInt(h, e2);
+ nthPow(h, 0.5);
+ }
+
+ // @brief Put an event averaged correlator into a Scatter2D.
+ // Reduces to cnTwoInt, but better with a proper name.
+ const void corrPlot(Scatter2DPtr h, ECorrPtr e) const {
+ cnTwoInt(h, e);
+ }
+
+ // @brief Put an event averaged correlator into Profile1Ds, one
+ // for each bootstrapping bin.
+ // profs is expected to hold one Profile1D per bootstrap sub-bin:
+ // sub-bin i of every x-bin is streamed into profile i.
+ const void corrPlot(list<Profile1DPtr> profs, ECorrPtr e) const {
+ vector<CorBin> corBins = e->getBins();
+ vector<double> binx = e->getBinX();
+ // Assert bin size.
+ if (binx.size() - 1 != corBins.size()){
+ cout << "corrPlot: Bin size (x,y) differs!" << endl;
+ return;
+ }
+ list<Profile1DPtr>::iterator hItr = profs.begin();
+ // Loop over the boostrapped correlators.
+ for (size_t i = 0; i < profs.size(); ++i, ++hItr) {
+ vector<YODA::ProfileBin1D> profBins;
+ for (size_t j = 0, N = binx.size() - 1; j < N; ++j) {
+ vector<CorSingleBin*> binPtrs =
+ corBins[j].getBinPtrs<CorSingleBin>();
+ // Construct bin of the profiled quantities. We have no information
+ // (and no desire to add it) of sumWX of the profile, so really
+ // we should use a Dbn1D - but that does not work for Profile1D's.
+ profBins.push_back( YODA::ProfileBin1D((*hItr)->bin(j).xEdges(),
+ YODA::Dbn2D( binPtrs[i]->numEntries(), binPtrs[i]->sumW(),
+ binPtrs[i]->sumW2(), 0., 0., binPtrs[i]->sumWX(), 0, 0)));
+ }
+ // Reset the bins of the profiles.
+ (*hItr)->reset();
+ (*hItr)->bins().clear();
+ // Add our new bins.
+ for (int j = 0, N = profBins.size(); j < N; ++j) {
+ (*hItr)->addBin(profBins[j]);
+ }
+ } // End loop of bootstrapped correlators.
+ }
+
+ // @brief Four particle integrated cn:
+ // c{4} = <<4>> - 2 <<2>>^2, with bootstrapped errors.
+ const void cnFourInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4) const {
+ auto e2bins = e2->getBins();
+ auto e4bins = e4->getBins();
+ auto binx = e2->getBinX();
+ if (binx.size() - 1 != e2bins.size()){
+ cout << "cnFourInt: Bin size (x,y) differs!" << endl;
+ return;
+ }
+ if (binx != e4->getBinX()) {
+ cout << "Error in cnFourInt: Correlator x-binning differs!" << endl;
+ return;
+ }
+ vector<CorBinBase*> e2binPtrs;
+ vector<CorBinBase*> e4binPtrs;
+ // Central value / per-sub-bin transformation (pointers re-seated below).
+ auto cn = [&] (int i) {
+ double e22 = e2binPtrs[i]->mean() * e2binPtrs[i]->mean();
+ return e4binPtrs[i]->mean() - 2. * e22;
+ };
+ // Error calculation.
+ vector<pair<double, double> > yErr;
+ for (int j = 0, N = e2bins.size(); j < N; ++j) {
+ e2binPtrs = e2bins[j].getBinPtrs();
+ e4binPtrs = e4bins[j].getBinPtrs();
+ yErr.push_back(sampleError(cn));
+ }
+ // Put the bin ptrs back in place.
+ e2binPtrs = e2->getBinPtrs();
+ e4binPtrs = e4->getBinPtrs();
+ fillScatter(h, binx, cn, yErr);
+ }
+
+ // @brief Four particle integrated vn: v{4} = (-c{4})^(1/4),
+ // implemented via nthPow with k = -1.
+ const void vnFourInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4) const {
+ cnFourInt(h, e2, e4);
+ nthPow(h, 0.25, -1.0);
+ }
+
+ // @brief Six particle integrated cn:
+ // c{6} = <<6>> - 9 <<2>><<4>> + 12 <<2>>^3, with bootstrapped errors.
+ const void cnSixInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
+ ECorrPtr e6) const {
+ auto e2bins = e2->getBins();
+ auto e4bins = e4->getBins();
+ auto e6bins = e6->getBins();
+ auto binx = e2->getBinX();
+ if (binx.size() - 1 != e2bins.size()){
+ cout << "cnSixInt: Bin size (x,y) differs!" << endl;
+ return;
+ }
+ if (binx != e4->getBinX() || binx != e6->getBinX()) {
+ cout << "Error in cnSixInt: Correlator x-binning differs!" << endl;
+ return;
+ }
+ vector<CorBinBase*> e2binPtrs;
+ vector<CorBinBase*> e4binPtrs;
+ vector<CorBinBase*> e6binPtrs;
+ // Central value / per-sub-bin transformation (pointers re-seated below).
+ auto cn = [&] (int i) {
+ double e23 = pow(e2binPtrs[i]->mean(), 3.0);
+ return e6binPtrs[i]->mean() - 9.*e2binPtrs[i]->mean()*e4binPtrs[i]->mean() +
+ 12.*e23;
+ };
+ // Error calculation.
+ vector<pair<double, double> > yErr;
+ for (int j = 0, N = e2bins.size(); j < N; ++j) {
+ e2binPtrs = e2bins[j].getBinPtrs();
+ e4binPtrs = e4bins[j].getBinPtrs();
+ e6binPtrs = e6bins[j].getBinPtrs();
+ yErr.push_back(sampleError(cn));
+ }
+ // Put the bin ptrs back in place.
+ e2binPtrs = e2->getBinPtrs();
+ e4binPtrs = e4->getBinPtrs();
+ e6binPtrs = e6->getBinPtrs();
+ fillScatter(h, binx, cn, yErr);
+ }
+
+ // @brief Six particle integrated vn: v{6} = (c{6}/4)^(1/6),
+ // implemented via nthPow with k = 0.25.
+ const void vnSixInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
+ ECorrPtr e6) const {
+ cnSixInt(h, e2, e4, e6);
+ nthPow(h, 1./6., 0.25);
+ }
+
+ // @brief Eight particle integrated cn:
+ // c{8} = <<8>> - 16 <<6>><<2>> - 18 <<4>>^2 + 144 <<4>><<2>>^2
+ // - 144 <<2>>^4, with bootstrapped errors.
+ const void cnEightInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
+ ECorrPtr e6, ECorrPtr e8) const {
+ auto e2bins = e2->getBins();
+ auto e4bins = e4->getBins();
+ auto e6bins = e6->getBins();
+ auto e8bins = e8->getBins();
+ auto binx = e2->getBinX();
+ if (binx.size() - 1 != e2bins.size()){
+ cout << "cnEightInt: Bin size (x,y) differs!" << endl;
+ return;
+ }
+ if (binx != e4->getBinX() || binx != e6->getBinX() ||
+ binx != e8->getBinX()) {
+ cout << "Error in cnEightInt: Correlator x-binning differs!" << endl;
+ return;
+ }
+ vector<CorBinBase*> e2binPtrs;
+ vector<CorBinBase*> e4binPtrs;
+ vector<CorBinBase*> e6binPtrs;
+ vector<CorBinBase*> e8binPtrs;
+ // Central value / per-sub-bin transformation (pointers re-seated below).
+ auto cn = [&] (int i ) {
+ double e22 = e2binPtrs[i]->mean() * e2binPtrs[i]->mean();
+ double e24 = e22 * e22;
+ double e42 = e4binPtrs[i]->mean() * e4binPtrs[i]->mean();
+ return e8binPtrs[i]->mean() - 16. * e6binPtrs[i]->mean() *
+ e2binPtrs[i]->mean() - 18. * e42 + 144. * e4binPtrs[i]->mean()*e22
+ - 144. * e24;
+ };
+ // Error calculation.
+ vector<pair<double, double> > yErr;
+ for (int j = 0, N = e2bins.size(); j < N; ++j) {
+ e2binPtrs = e2bins[j].getBinPtrs();
+ e4binPtrs = e4bins[j].getBinPtrs();
+ e6binPtrs = e6bins[j].getBinPtrs();
+ e8binPtrs = e8bins[j].getBinPtrs();
+ yErr.push_back(sampleError(cn));
+ }
+ // Put the bin ptrs back in place.
+ e2binPtrs = e2->getBinPtrs();
+ e4binPtrs = e4->getBinPtrs();
+ e6binPtrs = e6->getBinPtrs();
+ e8binPtrs = e8->getBinPtrs();
+ fillScatter(h, binx, cn, yErr);
+ }
+
+ // @brief Eight particle integrated vn: v{8} = (-c{8}/33)^(1/8),
+ // implemented via nthPow with k = -1/33.
+ const void vnEightInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
+ ECorrPtr e6, ECorrPtr e8) const {
+ cnEightInt(h, e2, e4, e6, e8);
+ nthPow(h, 1./8., -1./33.);
+ }
+
+ // @brief Two particle differential vn: the differential two-particle
+ // correlator divided by the square root of the reference (integrated)
+ // flow, with bootstrapped errors attached to the points.
+ const void vnTwoDiff(Scatter2DPtr h, ECorrPtr e2Dif) const {
+ auto e2bins = e2Dif->getBins();
+ auto ref = e2Dif->getReference();
+ auto binx = e2Dif->getBinX();
+ // Assert bin size.
+ if (binx.size() -1 != e2bins.size()) {
+ cout << "vnTwoDif: Bin size (x,y) differs!" << endl;
+ return;
+ }
+ vector<CorBinBase*> e2binPtrs;
+ vector<CorBinBase*> refPtrs;
+ // Central value: differential correlator over sqrt of reference flow.
+ auto vn = [&] (int i) {
+ // Test reference flow.
+ if (ref.mean() <= 0) return 0.;
+ return e2binPtrs[i]->mean() / sqrt(ref.mean());
+ };
+ // We need here a seperate error function, as we don't
+ // iterate over the reference flow.
+ auto vnerr = [&] (int i) {
+ // Test reference flow.
+ if (refPtrs[i]->mean() <=0) return 0.;
+ return e2binPtrs[i]->mean() / sqrt(refPtrs[i]->mean());
+ };
+ // Error calculation.
+ vector<pair<double, double> > yErr;
+ refPtrs = ref.getBinPtrs();
+ for (int j = 0, N = e2bins.size(); j < N; ++j) {
+ e2binPtrs = e2bins[j].getBinPtrs();
+ yErr.push_back(sampleError(vnerr));
+ }
+ // Put the e2binPtrs back in place.
+ e2binPtrs = e2Dif->getBinPtrs();
+ // Bug fix: the bootstrapped errors computed above were previously
+ // discarded by calling the error-less fillScatter overload.
+ fillScatter(h, binx, vn, yErr);
+ }
+
+ // @brief Four particle differential vn:
+ // (2 <<2>> d<<2>> - d<<4>>) / (2 <<2>>^2 - <<4>>)^(3/4), where the
+ // central value uses the full-statistics reference and the errors are
+ // bootstrapped over sub-bins via the separate vnerr lambda.
+ const void vnFourDiff(Scatter2DPtr h, ECorrPtr e2Dif,
+ ECorrPtr e4Dif) const {
+ auto e2bins = e2Dif->getBins();
+ auto e4bins = e4Dif->getBins();
+ auto ref2 = e2Dif->getReference();
+ auto ref4 = e4Dif->getReference();
+ auto binx = e2Dif->getBinX();
+ if (binx.size() - 1 != e2bins.size()){
+ cout << "vnFourDif: Bin size (x,y) differs!" << endl;
+ return;
+ }
+ if (binx != e4Dif->getBinX()) {
+ cout << "Error in vnFourDif: Correlator x-binning differs!" << endl;
+ return;
+ }
+ vector<CorBinBase*> e2binPtrs;
+ vector<CorBinBase*> e4binPtrs;
+ vector<CorBinBase*> ref2Ptrs;
+ vector<CorBinBase*> ref4Ptrs;
+ // Gapless denominator, computed once from the full-statistics reference.
+ double denom = 2 * ref2.mean() * ref2.mean() - ref4.mean();
+ auto vn = [&] (int i) {
+ // Test denominator.
+ if (denom <= 0 ) return 0.;
+ return ((2 * ref2.mean() * e2bins[i].mean() - e4bins[i].mean()) /
+ pow(denom, 0.75));
+ };
+ // We need here a seperate error function, as we don't
+ // iterate over the reference flow.
+ auto vnerr = [&] (int i) {
+ double denom2 = 2 * ref2Ptrs[i]->mean() * ref2Ptrs[i]->mean() -
+ ref4Ptrs[i]->mean();
+ // Test denominator.
+ if (denom2 <= 0) return 0.;
+ return ((2 * ref2Ptrs[i]->mean() * e2binPtrs[i]->mean() -
+ e4binPtrs[i]->mean()) / pow(denom2, 0.75));
+ };
+ // Error calculation.
+ vector<pair<double, double> > yErr;
+ ref2Ptrs = ref2.getBinPtrs();
+ ref4Ptrs = ref4.getBinPtrs();
+ for (int j = 0, N = e2bins.size(); j < N; ++j) {
+ e2binPtrs = e2bins[j].getBinPtrs();
+ e4binPtrs = e4bins[j].getBinPtrs();
+ yErr.push_back(sampleError(vnerr));
+ }
+ // Put the binPtrs back in place.
+ e2binPtrs = e2Dif->getBinPtrs();
+ e4binPtrs = e4Dif->getBinPtrs();
+ fillScatter(h, binx, vn, yErr);
+ }
+ }; // End class CumulantAnalysis.
+} // End Rivet namespace.
+#endif
diff --git a/src/Core/AnalysisHandler.cc b/src/Core/AnalysisHandler.cc
--- a/src/Core/AnalysisHandler.cc
+++ b/src/Core/AnalysisHandler.cc
@@ -1,556 +1,553 @@
// -*- C++ -*-
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisHandler.hh"
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/ParticleName.hh"
#include "Rivet/Tools/BeamConstraint.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/Beam.hh"
#include "YODA/IO.h"
namespace Rivet {
// Construct a handler for the named run. The cross-section is unknown
// (NAN) until an event supplies it, and periodic dumping is disabled
// (_dumpPeriod = 0) until configured.
AnalysisHandler::AnalysisHandler(const string& runname)
: _runname(runname),
_eventcounter("/_EVTCOUNT"),
_xs(NAN), _xserr(NAN),
_initialised(false), _ignoreBeams(false), _dumpPeriod(0), _dumping(false)
{ }
// Destructor: nothing to release explicitly here; the analysis handles
// manage their own lifetime.
AnalysisHandler::~AnalysisHandler()
{ }
// Access the shared logger used by the analysis handler.
Log& AnalysisHandler::getLog() const {
return Log::getLog("Rivet.Analysis.Handler");
}
// One-time initialisation from the first event: record the run beams,
// drop beam-incompatible analyses, warn about non-validated ones, and
// call init() on the survivors. Throws UserError on re-initialisation.
void AnalysisHandler::init(const GenEvent& ge) {
if (_initialised)
throw UserError("AnalysisHandler::init has already been called: cannot re-initialize!");
setRunBeams(Rivet::beams(ge));
MSG_DEBUG("Initialising the analysis handler");
_eventcounter.reset();
// Check that analyses are beam-compatible, and remove those that aren't
const size_t num_anas_requested = analysisNames().size();
vector<string> anamestodelete;
for (const AnaHandle a : _analyses) {
if (!_ignoreBeams && !a->isCompatible(beams())) {
//MSG_DEBUG(a->name() << " requires beams " << a->requiredBeams() << " @ " << a->requiredEnergies() << " GeV");
anamestodelete.push_back(a->name());
}
}
for (const string& aname : anamestodelete) {
MSG_WARNING("Analysis '" << aname << "' is incompatible with the provided beams: removing");
removeAnalysis(aname);
}
// If analyses were requested but none survived, bail out loudly.
if (num_anas_requested > 0 && analysisNames().empty()) {
cerr << "All analyses were incompatible with the first event's beams\n"
<< "Exiting, since this probably wasn't intentional!" << endl;
exit(1);
}
// Warn if any analysis' status is not unblemished
for (const AnaHandle a : analyses()) {
if (toUpper(a->status()) == "PRELIMINARY") {
MSG_WARNING("Analysis '" << a->name() << "' is preliminary: be careful, it may change and/or be renamed!");
} else if (toUpper(a->status()) == "OBSOLETE") {
MSG_WARNING("Analysis '" << a->name() << "' is obsolete: please update!");
} else if (toUpper(a->status()).find("UNVALIDATED") != string::npos) {
MSG_WARNING("Analysis '" << a->name() << "' is unvalidated: be careful, it may be broken!");
}
}
// Initialize the remaining analyses
for (AnaHandle a : _analyses) {
MSG_DEBUG("Initialising analysis: " << a->name());
try {
// Allow projection registration in the init phase onwards
a->_allowProjReg = true;
a->init();
//MSG_DEBUG("Checking consistency of analysis: " << a->name());
//a->checkConsistency();
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
exit(1);
}
MSG_DEBUG("Done initialising analysis: " << a->name());
}
_initialised = true;
MSG_DEBUG("Analysis handler initialised");
}
// Process one generator event: lazily initialise from the first event,
// verify beam consistency, update the event counter and cross-section,
// run every analysis, and optionally dump intermediate results.
void AnalysisHandler::analyze(const GenEvent& ge) {
// Call init with event as template if not already initialised
if (!_initialised) init(ge);
assert(_initialised);
// Ensure that beam details match those from the first event (if we're checking beams)
if ( !_ignoreBeams ) {
const PdgIdPair beams = Rivet::beamIds(ge);
const double sqrts = Rivet::sqrtS(ge);
if (!compatible(beams, _beams) || !fuzzyEquals(sqrts, sqrtS())) {
cerr << "Event beams mismatch: "
<< PID::toBeamsString(beams) << " @ " << sqrts/GeV << " GeV" << " vs. first beams "
<< this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl;
exit(1);
}
}
// Create the Rivet event wrapper
/// @todo Filter/normalize the event here
Event event(ge);
// Weights
/// @todo Drop this / just report first weight when we support multiweight events
_eventcounter.fill(event.weight());
MSG_DEBUG("Event #" << _eventcounter.numEntries() << " weight = " << event.weight());
// Cross-section
#ifdef HEPMC_HAS_CROSS_SECTION
if (ge.cross_section()) {
_xs = ge.cross_section()->cross_section();
_xserr = ge.cross_section()->cross_section_error();
}
#endif
// Run the analyses
for (AnaHandle a : _analyses) {
MSG_TRACE("About to run analysis " << a->name());
try {
a->analyze(event);
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::analyze method: " << err.what() << endl;
exit(1);
}
MSG_TRACE("Finished running analysis " << a->name());
}
// Periodic intermediate dump of results, if enabled via _dumpPeriod.
if ( _dumpPeriod > 0 && numEvents()%_dumpPeriod == 0 ) {
MSG_INFO("Dumping intermediate results to " << _dumpFile << ".");
_dumping = true;
finalize();
_dumping = false;
writeData(_dumpFile);
}
}
void AnalysisHandler::analyze(const GenEvent* ge) {
if (ge == nullptr) {
MSG_ERROR("AnalysisHandler received null pointer to GenEvent");
//throw Error("AnalysisHandler received null pointer to GenEvent");
}
analyze(*ge);
}
// Run finalize() on all analyses. To keep the run re-entrant (for
// periodic dumps), the analysis objects are backed up first, the
// finalized copies are stashed in _finalizedAOs, and the originals are
// then restored so further events can still be accumulated.
void AnalysisHandler::finalize() {
if (!_initialised) return;
// First we make copies of all analysis objects.
map<string,AnalysisObjectPtr> backupAOs;
for (auto ao : getData(false, true) )
backupAOs[ao->path()] = AnalysisObjectPtr(ao->newclone());
// Now we run the (re-entrant) finalize() functions for all analyses.
MSG_INFO("Finalising analyses");
for (AnaHandle a : _analyses) {
a->setCrossSection(_xs);
try {
// During a periodic dump, only analyses declared reentrant are run.
if ( !_dumping || a->info().reentrant() ) a->finalize();
else if ( _dumping )
MSG_INFO("Skipping periodic dump of " << a->name()
<< " as it is not declared reentrant.");
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::finalize method: " << err.what() << endl;
exit(1);
}
}
// Now we copy all analysis objects to the list of finalized
// ones, and restore the value to their original ones.
_finalizedAOs.clear();
for ( auto ao : getData() )
_finalizedAOs.push_back(AnalysisObjectPtr(ao->newclone()));
for ( auto ao : getData(false, true) ) {
auto aoit = backupAOs.find(ao->path());
if ( aoit == backupAOs.end() ) {
// Object appeared during finalize: remove it from its analysis again.
AnaHandle ana = analysis(split(ao->path(), "/")[0]);
if ( ana ) ana->removeAnalysisObject(ao->path());
} else
copyao(aoit->second, ao);
}
// Print out number of events processed
const int nevts = _eventcounter.numEntries();
MSG_INFO("Processed " << nevts << " event" << (nevts != 1 ? "s" : ""));
// // Delete analyses
// MSG_DEBUG("Deleting analyses");
// _analyses.clear();
// Print out MCnet boilerplate
cout << endl;
cout << "The MCnet usage guidelines apply to Rivet: see http://www.montecarlonet.org/GUIDELINES" << endl;
cout << "Please acknowledge plots made with Rivet analyses, and cite arXiv:1003.0694 (http://arxiv.org/abs/1003.0694)" << endl;
}
// Add an analysis with options given as a key/value map. The options
// are serialised into the ":key=value" suffix form understood by the
// string-based overload, which performs the actual registration.
AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname, std::map<string, string> pars) {
  std::string optString;
  for (const auto& kv : pars) {
    optString += ":" + kv.first + "=" + kv.second;
  }
  return addAnalysis(analysisname + optString);
}
/// @brief Register an analysis by name, with options encoded as ":key=value"
/// suffixes on the name.
/// Unknown analyses, malformed options, invalid option values and duplicate
/// registrations are reported with a warning and skipped.
AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname) {
  // Check for a duplicate analysis
  /// @todo Might we want to be able to run an analysis twice, with different params?
  /// Requires avoiding histo tree clashes, i.e. storing the histos on the analysis objects.
  string ananame = analysisname;
  vector<string> anaopt = split(analysisname, ":");
  if ( anaopt.size() > 1 ) ananame = anaopt[0];
  AnaHandle analysis( AnalysisLoader::getAnalysis(ananame) );
  if (analysis != nullptr) { //< Check for null analysis.
    MSG_DEBUG("Adding analysis '" << analysisname << "'");
    // Parse and validate the ":key=value" option tokens.
    map<string,string> opts;
    for ( size_t i = 1, N = anaopt.size(); i < N; ++i ) {
      vector<string> opt = split(anaopt[i], "=");
      if ( opt.size() != 2 ) {
        MSG_WARNING("Error in option specification. Skipping analysis "
                    << analysisname);
        return *this;
      }
      if ( !analysis->info().validOption(opt[0], opt[1]) ) {
        MSG_WARNING("Cannot set option '" << opt[0] << "' to '" << opt[1]
                    << "'. Skipping analysis " << analysisname);
        return *this;
      }
      opts[opt[0]] = opt[1];
    }
    // Apply the validated options to the analysis object.
    for ( const auto& opt : opts ) {
      analysis->_options[opt.first] = opt.second;
      analysis->_optstring += ":" + opt.first + "=" + opt.second;
    }
    // Reject duplicates by (base) analysis name.
    for (const AnaHandle& a : _analyses) {
      if (a->name() == analysis->name() ) {
        MSG_WARNING("Analysis '" << analysisname << "' already registered: skipping duplicate");
        return *this;
      }
    }
    analysis->_analysishandler = this;
    _analyses.insert(analysis);
  } else {
    MSG_WARNING("Analysis '" << analysisname << "' not found.");
  }
  return *this;
}
/// Deregister the analysis with the given name, if it is present.
AnalysisHandler& AnalysisHandler::removeAnalysis(const string& analysisname) {
  // Locate the matching handle first; erasing while iterating would
  // invalidate the iterator.
  std::shared_ptr<Analysis> doomed;
  for (const AnaHandle& a : _analyses) {
    if (a->name() != analysisname) continue;
    doomed = a;
    break;
  }
  if (doomed) {
    MSG_DEBUG("Removing analysis '" << analysisname << "'");
    _analyses.erase(doomed);
  }
  return *this;
}
/////////////////////////////
/// Distribute preloaded analysis objects to their owning analyses. Only
/// objects under the "/RAW/" tree are adopted; everything else is parked
/// in the orphan list for later resolution.
void AnalysisHandler::addData(const std::vector<AnalysisObjectPtr>& aos) {
for (const AnalysisObjectPtr ao : aos) {
string path = ao->path();
if ( path.substr(0, 5) != "/RAW/" ) {
// Not a RAW object: keep it, but do not attach it to any analysis.
_orphanedPreloads.push_back(ao);
continue;
}
// Strip the leading "/RAW" so the path matches the analysis tree.
path = path.substr(4);
ao->setPath(path);
if (path.size() > 1) { // path > "/"
try {
const string ananame = split(path, "/")[0];
AnaHandle a = analysis(ananame);
a->addAnalysisObject(ao); /// @todo Need to statistically merge...
} catch (const Error& e) {
// analysis() throws when the analysis is not registered: orphan the object.
MSG_TRACE("Adding analysis object " << path <<
" to the list of orphans.");
_orphanedPreloads.push_back(ao);
}
}
}
}
void AnalysisHandler::
mergeYodas(const vector<string> & aofiles, bool equiv) {
vector< vector<AnalysisObjectPtr> > aosv;
vector<double> xsecs;
vector<double> xsecerrs;
vector<CounterPtr> sows;
set<string> ananames;
// First scan all files and extract analysis objects and add the
// corresponding anayses..
for ( auto file : aofiles ) {
Scatter1DPtr xsec;
CounterPtr sow;
// For each file make sure that cross section and sum-of-weights
// objects are present and stor all RAW ones in a vector;
vector<AnalysisObjectPtr> aos;
_eventcounter.reset();
try {
/// @todo Use new YODA SFINAE to fill the smart ptr vector directly
vector<YODA::AnalysisObject*> aos_raw;
YODA::read(file, aos_raw);
for (AnalysisObject* aor : aos_raw) {
AnalysisObjectPtr ao = AnalysisObjectPtr(aor);
if ( ao->path().substr(0, 5) != "/RAW/" ) continue;
ao->setPath(ao->path().substr(4));
if ( ao->path() == "/_XSEC" )
xsec = dynamic_pointer_cast<Scatter1D>(ao);
else if ( ao->path() == "/_EVTCOUNT" )
sow = dynamic_pointer_cast<Counter>(ao);
else {
string ananame = split(ao->path(), "/")[0];
if ( ananames.insert(ananame).second ) addAnalysis(ananame);
aos.push_back(ao);
}
}
if ( !xsec || !sow ) {
MSG_ERROR( "Error in AnalysisHandler::mergeYodas: The file " << file
<< " did not contain weights and cross section info.");
exit(1);
}
xsecs.push_back(xsec->point(0).x());
xsecerrs.push_back(sqr(xsec->point(0).xErrAvg()));
_eventcounter += *sow;
aosv.push_back(aos);
} catch (...) { //< YODA::ReadError&
throw UserError("Unexpected error in reading file: " + file);
}
}
// Now calculate the scale to be applied for all bins in a file
// and get the common cross section and sum of weights.
_xs = _xserr = 0.0;
for ( int i = 0, N = sows.size(); i < N; ++i ) {
double effnent = sows[i]->effNumEntries();
_xs += (equiv? effnent: 1.0)*xsecs[i];
_xserr += (equiv? sqr(effnent): 1.0)*xsecerrs[i];
}
vector<double> scales(sows.size(), 1.0);
if ( equiv ) {
_xs /= _eventcounter.effNumEntries();
_xserr = sqrt(_xserr)/_eventcounter.effNumEntries();
} else {
_xserr = sqrt(_xserr);
for ( int i = 0, N = sows.size(); i < N; ++i )
scales[i] = (_eventcounter.sumW()/sows[i]->sumW())*(xsecs[i]/_xs);
}
// Initialize the analyses allowing them to book analysis objects.
for (AnaHandle a : _analyses) {
MSG_DEBUG("Initialising analysis: " << a->name());
if ( !a->info().reentrant() )
MSG_WARNING("Analysis " << a->name() << " has not been validated to have "
<< "a reentrant finalize method. The result is unpredictable.");
try {
// Allow projection registration in the init phase onwards
a->_allowProjReg = true;
a->init();
//MSG_DEBUG("Checking consistency of analysis: " << a->name());
//a->checkConsistency();
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
exit(1);
}
MSG_DEBUG("Done initialising analysis: " << a->name());
}
_initialised = true;
-
// Get a list of all anaysis objects to handle.
map<string,AnalysisObjectPtr> current;
for ( auto ao : getData(false, true) ) current[ao->path()] = ao;
-
// Go through all objects to be merged and add them to current
// after appropriate scaling.
for ( int i = 0, N = aosv.size(); i < N; ++i)
for ( auto ao : aosv[i] ) {
if ( ao->path() == "/_XSEC" || ao->path() == "_EVTCOUNT" ) continue;
auto aoit = current.find(ao->path());
if ( aoit == current.end() ) {
MSG_WARNING("" << ao->path() << " was not properly booked.");
continue;
}
if ( !addaos(aoit->second, ao, scales[i]) )
MSG_WARNING("Cannot merge objects with path " << ao->path()
<<" of type " << ao->annotation("Type") );
}
-
// Now we can simply finalize() the analysis, leaving the
// controlling program to write it out some yoda-file.
finalize();
}
void AnalysisHandler::readData(const string& filename) {
vector<AnalysisObjectPtr> aos;
try {
/// @todo Use new YODA SFINAE to fill the smart ptr vector directly
vector<YODA::AnalysisObject*> aos_raw;
YODA::read(filename, aos_raw);
for (AnalysisObject* aor : aos_raw) aos.push_back(AnalysisObjectPtr(aor));
} catch (...) { //< YODA::ReadError&
throw UserError("Unexpected error in reading file: " + filename);
}
if (!aos.empty()) addData(aos);
}
/// @brief Collect all analysis objects for write-out.
/// Always includes the event counter and the cross-section scatter.
/// @param includeorphans also append preloaded objects that could not be
///        attached to any analysis (appended after the sort).
/// @param includetmps    also include objects whose path contains a
///        "/TMP/" layer (normally excluded from final write-out).
vector<AnalysisObjectPtr> AnalysisHandler::
getData(bool includeorphans, bool includetmps) const {
  vector<AnalysisObjectPtr> rtn;
  // Event counter
  rtn.push_back( make_shared<Counter>(_eventcounter) );
  // Cross-section + err as scatter
  YODA::Scatter1D::Points pts; pts.insert(YODA::Point1D(_xs, _xserr));
  rtn.push_back( make_shared<Scatter1D>(pts, "/_XSEC") );
  // Analysis histograms. Iterate by const reference: copying an AnaHandle
  // or AnalysisObjectPtr bumps a shared_ptr refcount on every iteration.
  for (const AnaHandle& a : analyses()) {
    const vector<AnalysisObjectPtr> aos = a->analysisObjects();
    for (const AnalysisObjectPtr& ao : aos) {
      // Exclude paths from final write-out if they contain a "TMP" layer (i.e. matching "/TMP/")
      /// @todo This needs to be much more nuanced for re-entrant histogramming
      if ( !includetmps && ao->path().find("/TMP/" ) != string::npos) continue;
      rtn.push_back(ao);
    }
  }
  // Sort histograms alphanumerically by path before write-out.
  // Comparator takes const refs to avoid shared_ptr copies per comparison.
  sort(rtn.begin(), rtn.end(),
       [](const AnalysisObjectPtr& a, const AnalysisObjectPtr& b) {
         return a->path() < b->path();
       });
  if ( includeorphans )
    rtn.insert(rtn.end(), _orphanedPreloads.begin(), _orphanedPreloads.end());
  return rtn;
}
void AnalysisHandler::writeData(const string& filename) const {
vector<AnalysisObjectPtr> out = _finalizedAOs;
out.reserve(2*out.size());
vector<AnalysisObjectPtr> aos = getData(false, true);
for ( auto ao : aos ) {
ao = AnalysisObjectPtr(ao->newclone());
ao->setPath("/RAW" + ao->path());
out.push_back(ao);
}
try {
YODA::write(filename, out.begin(), out.end());
} catch (...) { //< YODA::WriteError&
throw UserError("Unexpected error in writing file: " + filename);
}
}
std::vector<std::string> AnalysisHandler::analysisNames() const {
std::vector<std::string> rtn;
for (AnaHandle a : _analyses) {
rtn.push_back(a->name());
}
return rtn;
}
/// Look up a registered analysis by name.
/// @throws Error if no analysis with that name is registered.
const AnaHandle AnalysisHandler::analysis(const std::string& analysisname) const {
  // Const reference: 'const AnaHandle a' would copy the shared_ptr
  // (atomic refcount bump) on every iteration.
  for (const AnaHandle& a : analyses())
    if (a->name() == analysisname) return a;
  throw Error("No analysis named '" + analysisname + "' registered in AnalysisHandler");
}
/// Register each analysis in the given list of names.
AnalysisHandler& AnalysisHandler::addAnalyses(const std::vector<std::string>& analysisnames) {
  for (const string& name : analysisnames) addAnalysis(name);
  return *this;
}
/// Deregister each analysis in the given list of names.
AnalysisHandler& AnalysisHandler::removeAnalyses(const std::vector<std::string>& analysisnames) {
  for (const string& name : analysisnames) removeAnalysis(name);
  return *this;
}
/// True if any registered analysis declares that it needs the cross-section.
bool AnalysisHandler::needCrossSection() const {
  // Early return replaces the original flag-and-break loop; const
  // reference avoids a shared_ptr copy per iteration.
  for (const AnaHandle& a : _analyses)
    if (a->needsCrossSection()) return true;
  return false;
}
/// Record the externally supplied cross-section; it is pushed to each
/// analysis in finalize().
AnalysisHandler& AnalysisHandler::setCrossSection(double xs) {
_xs = xs;
return *this;
}
/// True once a (non-NaN) cross-section has been set.
bool AnalysisHandler::hasCrossSection() const {
return (!std::isnan(crossSection()));
}
/// Register an already-constructed analysis. The handler takes ownership
/// of the raw pointer by wrapping it in an AnaHandle.
AnalysisHandler& AnalysisHandler::addAnalysis(Analysis* analysis) {
analysis->_analysishandler = this;
_analyses.insert(AnaHandle(analysis));
return *this;
}
/// PDG IDs of the two beam particles, delegated to the free function.
PdgIdPair AnalysisHandler::beamIds() const {
return Rivet::beamIds(beams());
}
/// Centre-of-mass energy of the current beams, delegated to the free function.
double AnalysisHandler::sqrtS() const {
return Rivet::sqrtS(beams());
}
/// Toggle beam-compatibility checking when initialising analyses.
void AnalysisHandler::setIgnoreBeams(bool ignore) {
_ignoreBeams=ignore;
}
}
diff --git a/src/Tools/Correlators.cc b/src/Tools/Correlators.cc
new file mode 100644
--- /dev/null
+++ b/src/Tools/Correlators.cc
@@ -0,0 +1,233 @@
+// -*- C++ -*-
+#include "Rivet/Tools/Correlators.hh"
+
+namespace Rivet {
+
+ // Constructor. nMax/pMax are stored one larger than the requested
+ // maximum harmonic/power so indices can run over [0, nMaxIn]/[0, pMaxIn].
+ Correlators::Correlators(const ParticleFinder& fsp, int nMaxIn,
+ int pMaxIn, vector<double> pTbinEdgesIn) :
+ nMax(nMaxIn + 1), pMax(pMaxIn + 1), pTbinEdges(pTbinEdgesIn) {
+ setName("Correlators");
+ declareProjection(fsp, "FS");
+ // Differential (pT-binned) mode is enabled iff bin edges were supplied.
+ isPtDiff = !pTbinEdges.empty();
+ if (isPtDiff) {
+ // Prepend an underflow edge one unit below the lowest supplied edge.
+ vector<double>::iterator underflow = pTbinEdges.begin();
+ pTbinEdges.insert(underflow,pTbinEdges[0]-1);
+ }
+ setToZero();
+ }
+
+ // Alternative constructor: derive the pT bin edges from the x-binning of
+ // an existing scatter (low edge of every point, plus the final high edge).
+ Correlators::Correlators(const ParticleFinder& fsp, int nMaxIn,
+ int pMaxIn, const Scatter2DPtr hIn) : nMax(nMaxIn + 1), pMax(pMaxIn + 1) {
+ for (auto b : hIn->points()) pTbinEdges.push_back(b.xMin());
+ pTbinEdges.push_back(hIn->points().back().xMax());
+ setName("Correlators");
+ declareProjection(fsp, "FS");
+ // Edges derived above are non-empty, so differential mode is always on here.
+ isPtDiff = !pTbinEdges.empty();
+ if (isPtDiff) {
+ // Prepend an underflow edge one unit below the lowest edge.
+ vector<double>::iterator underflow = pTbinEdges.begin();
+ pTbinEdges.insert(underflow,pTbinEdges[0]-1);
+ }
+ setToZero();
+ }
+
+
+ // Reset all Q-vectors (and, in differential mode, the per-pT-bin vectors)
+ // to zero ahead of a new event.
+ void Correlators::setToZero(){
+ // nMax x pMax matrix of complex zeros.
+ vector< complex<double> > pTmp(pMax, _ZERO);
+ Vec2D qTmp(nMax, pTmp);
+ qVec = qTmp;
+ if (isPtDiff) {
+ // Rebuild the pT-keyed map with a zeroed matrix for every bin edge.
+ pVec.erase(pVec.begin(), pVec.end());
+ for (double pT : pTbinEdges)
+ pVec.insert(pair<double, Vec2D>(pT, qVec));
+ }
+ }
+
+ // Functions for output:
+ // Integrated multi-particle correlator for harmonics n. Returns the
+ // (numerator, denominator) pair; the denominator is set to 0 when below
+ // _TINY so downstream code can detect an underpopulated event.
+ const pair<double,double> Correlators::intCorrelator(vector<int> n) const {
+ // Create vector of zeros for normalisation and vector of initial
+ // powers
+ int m = n.size();
+ vector<int> powers(m, 1);
+ vector<int> zeros(m, 0);
+ complex<double> num = recCorr(m, n, powers, false);
+ complex<double> den = recCorr(m, zeros, powers, false);
+ pair<double, double> ret;
+ ret.second = (den.real() < _TINY) ? 0. : den.real();
+ ret.first = num.real();
+ return ret;
+ }
+
+ // pT-differential correlator: one (numerator, denominator) pair per pT
+ // bin edge, optionally including the underflow/overflow entries.
+ // NOTE(review): when the projection was not booked with a binning this
+ // only prints a message and carries on with an empty edge list -- confirm
+ // whether a hard failure was intended.
+ const vector<pair<double,double>> Correlators::pTBinnedCorrelators(vector<int> n,
+ bool overflow) const {
+ // Create vector of zeros for normalisation and vector of initial
+ // powers
+ if (!isPtDiff)
+ cout << "You must book the correlator with a binning if you want to"
+ " extract binned correlators! Failing." << endl;
+ int m = n.size();
+ vector<int> powers(m, 1);
+ vector<int> zeros(m, 0);
+ vector<pair<double,double>> ret;
+ for (double pT : pTbinEdges){
+ complex<double> num = recCorr(m, n, powers, true, pT);
+ complex<double> den = recCorr(m, zeros, powers, true, pT);
+ pair<double, double> tmp;
+ tmp.second = (den.real() < _TINY) ? 0. : den.real();
+ tmp.first = num.real();
+ ret.push_back(tmp);
+ }
+ // Strip the underflow (first) and overflow (last) entries on request.
+ if (!overflow)
+ return vector<pair<double, double> > (ret.begin() + 1, ret.end() - 1);
+ return ret;
+ }
+
+ // M-particle correlation with eta-gap: the two sub-event correlators
+ // (this object and 'other') are evaluated independently and multiplied.
+ // The denominator is zeroed if either sub-event denominator is below _TINY.
+ const pair<double,double> Correlators::intCorrelatorGap(const Correlators& other,
+ vector<int> n1, vector<int> n2) const {
+ // Create vectors of zeros for normalisation and vectors of initial
+ // powers
+ int m1 = n1.size();
+ int m2 = n2.size();
+ // Return if too few particles in event.
+ vector<int> zero1(m1, 0);
+ vector<int> zero2(m2, 0);
+ vector<int> p1(m1, 1);
+ vector<int> p2(m2, 1);
+ complex<double> num1 = recCorr(m1, n1, p1, false);
+ complex<double> den1 = recCorr(m1, zero1, p1, false);
+ // Second sub-event uses the other projection's Q-vectors.
+ complex<double> num2 = other.recCorr(m2, n2, p2, false);
+ complex<double> den2 = other.recCorr(m2, zero2, p2, false);
+ complex<double> num = num1 * num2;
+ complex<double> den = den1 * den2;
+ pair<double, double> ret;
+ ret.second = (den1.real() < _TINY || den2.real() < _TINY) ? 0. :
+ den.real();
+ ret.first = num.real();
+ return ret;
+ }
+
+ // M-particle correlation with eta-gap, differential in pT on this
+ // (pT-binned) side; the 'other' sub-event enters integrated.
+ const vector<pair<double,double>> Correlators::pTBinnedCorrelatorsGap(
+ const Correlators& other, vector<int> n1, vector<int> n2,
+ bool overflow) const {
+ if (!isPtDiff)
+ cout << "You must book the correlator with a binning if you want to"
+ " extract binned correlators! Failing." << endl;
+ // Create vectors of zeros for normalisation and vectors of initial
+ // powers
+ int m1 = n1.size();
+ int m2 = n2.size();
+ vector<int> zero1(m1, 0);
+ vector<int> zero2(m2, 0);
+ vector<int> p1(m1, 1);
+ vector<int> p2(m2, 1);
+ vector<pair<double,double>> ret;
+ for (double pT : pTbinEdges) {
+ complex<double> num1 = recCorr(m1, n1, p1, true, pT);
+ complex<double> den1 = recCorr(m1, zero1, p1, true, pT);
+ // NOTE(review): num2/den2 do not depend on pT and could be hoisted
+ // out of this loop -- behaviour is unchanged either way.
+ complex<double> num2 = other.recCorr(m2, n2, p2, false);
+ complex<double> den2 = other.recCorr(m2, zero2, p2, false);
+ complex<double> num = num1 * num2;
+ complex<double> den = den1 * den2;
+ pair<double, double> tmp;
+ tmp.second = (den1.real() < _TINY || den2.real() < _TINY)
+ ? 0. : den.real();
+ tmp.first = num.real();
+ ret.push_back(tmp);
+ }
+ //If we don't want to include underflow/overflow, remove them here
+ if (!overflow)
+ return vector<pair<double, double> > (ret.begin() + 1, ret.end() - 1);
+ return ret;
+ }
+
+
+ // Project function. Resets the accumulators, then loops over the event's
+ // particles and fills the Q (and pT-binned) vectors.
+ void Correlators::project(const Event& e) {
+ setToZero();
+ // @TODO: Weight could be implemented to account for non-uniform
+ // acceptance if detector simulation is needed. If not, the weight
+ // can be unity. Note that this weight is not the MC event weight, which
+ // should be used when filling histograms (as usual).
+ const double w = 1.0;;
+ const Particles& parts = applyProjection<ParticleFinder>(e, "FS").particles();
+ // Check that we have at least two particles in the event
+ // NOTE(review): 'parts.size() > 2' actually requires at least THREE
+ // particles, contradicting the comment above -- confirm whether '>= 2'
+ // was intended. (Also note the stray double ';' a few lines up.)
+ if (parts.size() > 2) {
+ for(const Particle& p : parts)
+ fillCorrelators(p, w);
+ }
+ }
+
+ // Calculate correlators from one particle: add w^p * exp(i*n*phi) to
+ // every (n, p) entry of the Q-vectors and, in differential mode, to the
+ // entry of the pT bin containing the particle.
+ // NOTE(review): the default argument appears on this out-of-line
+ // definition; that compiles only if the in-class declaration does not
+ // also supply one -- confirm against the header.
+ void Correlators::fillCorrelators(const Particle& p, const double& weight = 1.) {
+ for (int iN = 0; iN < nMax; ++iN)
+ for (int iP = 0; iP < pMax; ++iP) {
+ double real = cos(iN * p.phi());
+ double imag = sin(iN * p.phi());
+ complex<double> expi(real, imag);
+ complex<double> tmp = pow(weight, iP) * expi;
+ qVec[iN][iP] += tmp;
+ if (isPtDiff) {
+ // lower_bound finds the first edge not below pT...
+ map<double, Vec2D>::iterator pTitr = pVec.lower_bound(p.pT());
+ // Move to the correct bin.
+ if (pTitr != pVec.begin()) pTitr--;
+ pTitr->second[iN][iP] += tmp;
+ }
+ }
+ }
+
+ // Two-particle correlator Eq. (19) p. 6 in Generic Fr. paper.
+ // Combines two single-particle vectors and subtracts the self-correlation
+ // term; with useP the first factor is taken from the pT-binned vectors.
+ const complex<double> Correlators::twoPartCorr(int n1, int n2, int p1,
+ int p2, double pT, bool useP) const {
+ complex<double> tmp1 = (!useP) ? getQ(n1, p1) :
+ getP(n1, p1, pT);
+ complex<double> tmp2 = getQ(n2, p2);
+ // Self-correlation (same particle counted twice) is removed here.
+ complex<double> tmp3 = (!useP) ? getQ(n1+n2, p1+p2) :
+ getP(n1+n2, p1+p2, pT);
+ complex<double> sum = tmp1 * tmp2 - tmp3;
+ return sum;
+ }
+
+ // Find correlators by recursion. Order = M (# of particles),
+ // n's are harmonics, p's are the powers of the weights.
+ // NOTE(review): the two sanity checks below only print a message and then
+ // continue -- confirm whether an exception/abort was intended.
+ const complex<double> Correlators::recCorr(int order, vector<int> n,
+ vector<int> p, bool useP, double pT) const {
+ // Sanity checks
+ int nUsed = 0;
+ for (int i = 0, N = n.size(); i < N; ++i) nUsed += n[i];
+ if (nMax < nUsed)
+ cout <<"Requested n = " << nUsed << ", nMax = " << nMax << endl;
+ if (int(p.size()) > pMax)
+ cout << "Requested p = " << p.size() << ", pMax = " << pMax << endl;
+ // If order is 1, then return Q/p vector (important when dealing
+ // with gaps and one side has only one particle
+ if (order < 2)
+ return (!useP) ? getQ(n[0], p[0]) : getP(n[0], p[0], pT);
+ // Return 2-p correlator explicitly.
+ if ( order < 3 )
+ return twoPartCorr(n[0], n[1], p[0], p[1], pT, useP);
+
+ // Else find nth order harmonics by recursion
+ // at order M - 1
+ int orderNow = order - 1;
+ int nNow = n[orderNow];
+ int pNow = p[orderNow];
+ complex<double> recNow = getQ(n[orderNow], p[orderNow]);
+ recNow *= recCorr(orderNow, n, p, useP, pT);
+ // Subtract the self-correlation terms: fold the last harmonic/power
+ // into each of the remaining slots in turn and recurse.
+ for (int i = 0; i < orderNow; ++i){
+ vector<int> tmpN, tmpP;
+ for (int j = 0; j < orderNow; ++j){
+ tmpN.push_back(n[j]);
+ tmpP.push_back(p[j]);
+ }
+ tmpN[i] += nNow;
+ tmpP[i] += pNow;
+ recNow -= recCorr(orderNow, tmpN, tmpP, useP, pT);
+ }
+ return recNow;
+ }
+
+}
diff --git a/src/Tools/Makefile.am b/src/Tools/Makefile.am
--- a/src/Tools/Makefile.am
+++ b/src/Tools/Makefile.am
@@ -1,22 +1,23 @@
SUBDIRS = Nsubjettiness
noinst_LTLIBRARIES = libRivetTools.la
libRivetTools_la_SOURCES = \
BinnedHistogram.cc \
+ Correlators.cc \
Cuts.cc \
JetUtils.cc \
Random.cc \
Logging.cc \
ParticleUtils.cc \
ParticleName.cc \
Percentile.cc \
RivetYODA.cc \
RivetMT2.cc \
RivetPaths.cc \
Utils.cc \
binreloc.c
dist_noinst_HEADERS = binreloc.h lester_mt2_bisect.hh
libRivetTools_la_CPPFLAGS = $(AM_CPPFLAGS) -DENABLE_BINRELOC -DDEFAULTDATADIR=\"$(datadir)\" -DDEFAULTLIBDIR=\"$(libdir)\"
File Metadata
Details
Attached
Mime Type
text/x-diff
Expires
Sat, May 3, 6:08 AM (1 d, 7 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
4982935
Default Alt Text
(129 KB)
Attached To
rRIVETHG rivethg
Event Timeline
Log In to Comment