diff --git a/include/Rivet/Analysis.hh b/include/Rivet/Analysis.hh --- a/include/Rivet/Analysis.hh +++ b/include/Rivet/Analysis.hh @@ -1,1271 +1,1333 @@ // -*- C++ -*- #ifndef RIVET_Analysis_HH #define RIVET_Analysis_HH #include "Rivet/Config/RivetCommon.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/ProjectionApplier.hh" #include "Rivet/ProjectionHandler.hh" #include "Rivet/AnalysisLoader.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Tools/ParticleUtils.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Tools/RivetMT2.hh" #include "Rivet/Tools/RivetYODA.hh" #include "Rivet/Tools/Percentile.hh" #include "Rivet/Projections/CentralityProjection.hh" +#include /// @def vetoEvent /// Preprocessor define for vetoing events, including the log message and return. #define vetoEvent \ do { MSG_DEBUG("Vetoing event on line " << __LINE__ << " of " << __FILE__); return; } while(0) namespace Rivet { // Convenience for analysis writers using std::cout; using std::cerr; using std::endl; + using std::tuple; using std::stringstream; using std::swap; using std::numeric_limits; // Forward declaration class AnalysisHandler; /// @brief This is the base class of all analysis classes in Rivet. /// /// There are /// three virtual functions which should be implemented in base classes: /// /// void init() is called by Rivet before a run is started. Here the /// analysis class should book necessary histograms. The needed /// projections should probably rather be constructed in the /// constructor. /// /// void analyze(const Event&) is called once for each event. Here the /// analysis class should apply the necessary Projections and fill the /// histograms. /// /// void finalize() is called after a run is finished. Here the analysis /// class should do whatever manipulations are necessary on the /// histograms. Writing the histograms to a file is, however, done by /// the Rivet class. class Analysis : public ProjectionApplier { /// The AnalysisHandler is a friend. friend class AnalysisHandler; public: /// @name Standard constructors and destructors. //@{ // /// The default constructor. // Analysis(); /// Constructor Analysis(const std::string& name); /// The destructor. virtual ~Analysis() {} //@} public: /// @name Main analysis methods //@{ /// Initialize this analysis object. A concrete class should here /// book all necessary histograms. An overridden function must make /// sure it first calls the base class function. virtual void init() { } /// Analyze one event. A concrete class should here apply the /// necessary projections on the \a event and fill the relevant /// histograms. An overridden function must make sure it first calls /// the base class function. virtual void analyze(const Event& event) = 0; /// Finalize this analysis object. A concrete class should here make /// all necessary operations on the histograms. Writing the /// histograms to a file is, however, done by the Rivet class. An /// overridden function must make sure it first calls the base class /// function. virtual void finalize() { } //@} public: /// @name Metadata /// Metadata is used for querying from the command line and also for /// building web pages and the analysis pages in the Rivet manual. //@{ /// Get the actual AnalysisInfo object in which all this metadata is stored. const AnalysisInfo& info() const { assert(_info && "No AnalysisInfo object :O"); return *_info; } /// @brief Get the name of the analysis. 
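To make the init()/analyze()/finalize() contract described above concrete, here is a minimal sketch of a user analysis. The analysis name, cut value and histogram name are placeholders; the booking, projection and scaling calls are the ones declared later in this header and in ProjectionApplier, and the plugin/constructor macros are defined at the end of this header.

```cpp
// Minimal sketch of a Rivet analysis using the init/analyze/finalize
// lifecycle described above. Names, cuts and binning are illustrative only.
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"

namespace Rivet {

  class MY_EXAMPLE_ANALYSIS : public Analysis {
  public:

    DEFAULT_RIVET_ANALYSIS_CTOR(MY_EXAMPLE_ANALYSIS);

    /// Declare projections and book histograms
    void init() {
      declare(FinalState(Cuts::abseta < 2.5), "FS");
      book(_h_nch, "nch", 50, -0.5, 99.5);   // explicit uniform binning
    }

    /// Fill per-event quantities
    void analyze(const Event& event) {
      const Particles& parts = apply<FinalState>(event, "FS").particles();
      if (parts.empty()) vetoEvent;          // uses the vetoEvent macro defined above
      _h_nch->fill(parts.size());
    }

    /// Scale to a cross-section at the end of the run
    void finalize() {
      scale(_h_nch, crossSection()/sumOfWeights());
    }

  private:
    Histo1DPtr _h_nch;
  };

  // Hook the analysis into the plugin system (macro defined at the end of this header)
  DECLARE_RIVET_PLUGIN(MY_EXAMPLE_ANALYSIS);
}
```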
/// /// By default this is computed by combining the results of the /// experiment, year and Spires ID metadata methods and you should /// only override it if there's a good reason why those won't /// work. If options has been set for this instance, a /// corresponding string is appended at the end. virtual std::string name() const { return ( (info().name().empty()) ? _defaultname : info().name() ) + _optstring; } /// Get name of reference data file, which could be different from plugin name virtual std::string getRefDataName() const { return (info().getRefDataName().empty()) ? _defaultname : info().getRefDataName(); } /// Set name of reference data file, which could be different from plugin name virtual void setRefDataName(const std::string& ref_data="") { info().setRefDataName(!ref_data.empty() ? ref_data : name()); } /// Get the Inspire ID code for this analysis. virtual std::string inspireId() const { return info().inspireId(); } /// Get the SPIRES ID code for this analysis (~deprecated). virtual std::string spiresId() const { return info().spiresId(); } /// @brief Names & emails of paper/analysis authors. /// /// Names and email of authors in 'NAME \' format. The first /// name in the list should be the primary contact person. virtual std::vector authors() const { return info().authors(); } /// @brief Get a short description of the analysis. /// /// Short (one sentence) description used as an index entry. /// Use @a description() to provide full descriptive paragraphs /// of analysis details. virtual std::string summary() const { return info().summary(); } /// @brief Get a full description of the analysis. /// /// Full textual description of this analysis, what it is useful for, /// what experimental techniques are applied, etc. Should be treated /// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html), /// with equations to be rendered as LaTeX with amsmath operators. virtual std::string description() const { return info().description(); } /// @brief Information about the events needed as input for this analysis. /// /// Event types, energies, kinematic cuts, particles to be considered /// stable, etc. etc. Should be treated as a restructuredText bullet list /// (http://docutils.sourceforge.net/rst.html) virtual std::string runInfo() const { return info().runInfo(); } /// Experiment which performed and published this analysis. virtual std::string experiment() const { return info().experiment(); } /// Collider on which the experiment ran. virtual std::string collider() const { return info().collider(); } /// When the original experimental analysis was published. virtual std::string year() const { return info().year(); } /// The luminosity in inverse femtobarn virtual std::string luminosityfb() const { return info().luminosityfb(); } /// Journal, and preprint references. virtual std::vector references() const { return info().references(); } /// BibTeX citation key for this article. virtual std::string bibKey() const { return info().bibKey(); } /// BibTeX citation entry for this article. virtual std::string bibTeX() const { return info().bibTeX(); } /// Whether this analysis is trusted (in any way!) virtual std::string status() const { return (info().status().empty()) ? "UNVALIDATED" : info().status(); } /// Any work to be done on this analysis. virtual std::vector todos() const { return info().todos(); } /// Return the allowed pairs of incoming beams required by this analysis. 
virtual const std::vector& requiredBeams() const { return info().beams(); } /// Declare the allowed pairs of incoming beams required by this analysis. virtual Analysis& setRequiredBeams(const std::vector& requiredBeams) { info().setBeams(requiredBeams); return *this; } /// Sets of valid beam energy pairs, in GeV virtual const std::vector >& requiredEnergies() const { return info().energies(); } /// Get vector of analysis keywords virtual const std::vector & keywords() const { return info().keywords(); } /// Declare the list of valid beam energy pairs, in GeV virtual Analysis& setRequiredEnergies(const std::vector >& requiredEnergies) { info().setEnergies(requiredEnergies); return *this; } //@} /// @name Internal metadata modifying methods //@{ /// Get the actual AnalysisInfo object in which all this metadata is stored (non-const). AnalysisInfo& info() { assert(_info && "No AnalysisInfo object :O"); return *_info; } //@} /// @name Run conditions //@{ /// Incoming beams for this run const ParticlePair& beams() const; /// Incoming beam IDs for this run const PdgIdPair beamIds() const; /// Centre of mass energy for this run double sqrtS() const; /// Check if we are running rivet-merge. bool merging() const { return sqrtS() <= 0.0; } //@} /// @name Analysis / beam compatibility testing /// @todo Replace with beamsCompatible() with no args (calling beams() function internally) /// @todo Add beamsMatch() methods with same (shared-code?) tolerance as in beamsCompatible() //@{ /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(const ParticlePair& beams) const; /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const; /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(const PdgIdPair& beams, const std::pair& energies) const; //@} /// Access the controlling AnalysisHandler object. AnalysisHandler& handler() const { return *_analysishandler; } protected: /// Get a Log object based on the name() property of the calling analysis object. Log& getLog() const; /// Get the process cross-section in pb. Throws if this hasn't been set. double crossSection() const; /// Get the process cross-section per generated event in pb. Throws if this /// hasn't been set. double crossSectionPerEvent() const; /// @brief Get the number of events seen (via the analysis handler). /// /// @note Use in the finalize phase only. size_t numEvents() const; /// @brief Get the sum of event weights seen (via the analysis handler). /// /// @note Use in the finalize phase only. double sumW() const; /// Alias double sumOfWeights() const { return sumW(); } /// @brief Get the sum of squared event weights seen (via the analysis handler). /// /// @note Use in the finalize phase only. double sumW2() const; protected: /// @name Histogram paths //@{ /// Get the canonical histogram "directory" path for this analysis. const std::string histoDir() const; /// Get the canonical histogram path for the named histogram in this analysis. const std::string histoPath(const std::string& hname) const; /// Get the canonical histogram path for the numbered histogram in this analysis. const std::string histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const; /// Get the internal histogram name for given d, x and y (cf. 
HepData) const std::string mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const; //@} /// @name Histogram reference data //@{ /// Get reference data for a named histo /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject? template const T& refData(const string& hname) const { _cacheRefData(); MSG_TRACE("Using histo bin edges for " << name() << ":" << hname); if (!_refdata[hname]) { MSG_ERROR("Can't find reference histogram " << hname); throw Exception("Reference data " + hname + " not found."); } return dynamic_cast(*_refdata[hname]); } /// Get reference data for a numbered histo /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject? template const T& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { const string hname = mkAxisCode(datasetId, xAxisId, yAxisId); return refData(hname); } //@} /// @name Counter booking //@{ /// Book a counter. CounterPtr & book(CounterPtr &, const std::string& name, const std::string& title=""); // const std::string& valtitle="" /// Book a counter, using a path generated from the dataset and axis ID codes /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. CounterPtr & book(CounterPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title=""); // const std::string& valtitle="" //@} /// @name 1D histogram booking //@{ /// Book a 1D histogram with @a nbins uniformly distributed across the range @a lower - @a upper . Histo1DPtr & book(Histo1DPtr &,const std::string& name, size_t nbins, double lower, double upper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges . Histo1DPtr & book(Histo1DPtr &,const std::string& name, const std::vector& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges . Histo1DPtr & book(Histo1DPtr &,const std::string& name, const std::initializer_list& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with binning from a reference scatter. Histo1DPtr & book(Histo1DPtr &,const std::string& name, const Scatter2D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram, using the binnings in the reference data histogram. Histo1DPtr & book(Histo1DPtr &,const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. Histo1DPtr & book(Histo1DPtr &,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); //@} /// @name 2D histogram booking //@{ /// Book a 2D histogram with @a nxbins and @a nybins uniformly /// distributed across the ranges @a xlower - @a xupper and @a /// ylower - @a yupper respectively along the x- and y-axis. 
Histo2DPtr & book(Histo2DPtr &,const std::string& name, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with non-uniform bins defined by the /// vectors of bin edges @a xbinedges and @a ybinedges. Histo2DPtr & book(Histo2DPtr &,const std::string& name, const std::vector& xbinedges, const std::vector& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with non-uniform bins defined by the /// vectors of bin edges @a xbinedges and @a ybinedges. Histo2DPtr & book(Histo2DPtr &,const std::string& name, const std::initializer_list& xbinedges, const std::initializer_list& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with binning from a reference scatter. Histo2DPtr & book(Histo2DPtr &,const std::string& name, const Scatter3D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram, using the binnings in the reference data histogram. Histo2DPtr & book(Histo2DPtr &,const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. Histo2DPtr & book(Histo2DPtr &,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); //@} /// @name 1D profile histogram booking //@{ /// Book a 1D profile histogram with @a nbins uniformly distributed across the range @a lower - @a upper . Profile1DPtr & book(Profile1DPtr &, const std::string& name, size_t nbins, double lower, double upper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges . Profile1DPtr & book(Profile1DPtr &, const std::string& name, const std::vector& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges . Profile1DPtr & book(Profile1DPtr &, const std::string& name, const std::initializer_list& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram with binning from a reference scatter. Profile1DPtr & book(Profile1DPtr &, const std::string& name, const Scatter2D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram, using the binnings in the reference data histogram. Profile1DPtr & book(Profile1DPtr &, const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. 
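As a rough illustration of how the booking overloads above (and the dataset/axis-ID variants immediately below) are used, here is a sketch of typical init()-stage calls. The axis codes, names and bin edges are placeholders, and the autobooked variants assume matching entries in this analysis' reference-data file; _h_a, _h_b and _h_c are assumed Histo1DPtr members, _h2 a Histo2DPtr and _p_mean a Profile1DPtr.

```cpp
// Sketch of init()-stage booking with the overloads declared here.
// Axis IDs, names and edges are placeholders; autobooked variants assume
// a matching reference-data entry exists for this analysis.
void init() {
  book(_h_a, 1, 1, 1);                            // autobook "d01-x01-y01" with reference binning
  book(_h_b, "d02-x01-y01");                      // same, naming the histogram explicitly
  book(_h_c, "mass", {0., 10., 30., 70., 150.});  // explicit non-uniform bin edges
  book(_h2, "etaphi", 20, -5.0, 5.0, 32, 0.0, TWOPI); // uniform 2D binning
  book(_p_mean, 3, 1, 1);                         // Profile1D autobooked from reference data
}
```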
Profile1DPtr & book(Profile1DPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); //@} /// @name 2D profile histogram booking //@{ /// Book a 2D profile histogram with @a nxbins and @a nybins uniformly /// distributed across the ranges @a xlower - @a xupper and @a ylower - @a /// yupper respectively along the x- and y-axis. Profile2DPtr & book(Profile2DPtr &, const std::string& name, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D profile histogram with non-uniform bins defined by the vectorx /// of bin edges @a xbinedges and @a ybinedges. Profile2DPtr & book(Profile2DPtr &, const std::string& name, const std::vector& xbinedges, const std::vector& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D profile histogram with non-uniform bins defined by the vectorx /// of bin edges @a xbinedges and @a ybinedges. Profile2DPtr & book(Profile2DPtr &, const std::string& name, const std::initializer_list& xbinedges, const std::initializer_list& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); - /// Book a 2D profile histogram with binning from a reference scatter. - // Profile2DPtr bookProfile2D(const std::string& name, + /// @todo REINSTATE + + // /// Book a 2D profile histogram with binning from a reference scatter. + // Profile2DPtr & book(const Profile2DPtr &, const std::string& name, // const Scatter3D& refscatter, // const std::string& title="", // const std::string& xtitle="", // const std::string& ytitle="", // const std::string& ztitle=""); // /// Book a 2D profile histogram, using the binnings in the reference data histogram. - // Profile2DPtr bookProfile2D(const std::string& name, + // Profile2DPtr & book(const Profile2DPtr &, const std::string& name, // const std::string& title="", // const std::string& xtitle="", // const std::string& ytitle="", // const std::string& ztitle=""); // /// Book a 2D profile histogram, using the binnings in the reference data histogram. // /// // /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. - // Profile2DPtr bookProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, + // Profile2DPtr & book(const Profile2DPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, // const std::string& title="", // const std::string& xtitle="", // const std::string& ytitle="", // const std::string& ztitle=""); //@} /// @name 2D scatter booking //@{ /// @brief Book a 2-dimensional data point set with the given name. /// /// @note Unlike histogram booking, scatter booking by default makes no /// attempt to use reference data to pre-fill the data object. If you want /// this, which is sometimes useful e.g. when the x-position is not really /// meaningful and can't be extracted from the data, then set the @a /// copy_pts parameter to true. This creates points to match the reference /// data's x values and errors, but with the y values and errors zeroed... /// assuming that there is a reference histo with the same name: if there /// isn't, an exception will be thrown. 
Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname, bool copy_pts=false, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// @brief Book a 2-dimensional data point set, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. /// /// @note Unlike histogram booking, scatter booking by default makes no /// attempt to use reference data to pre-fill the data object. If you want /// this, which is sometimes useful e.g. when the x-position is not really /// meaningful and can't be extracted from the data, then set the @a /// copy_pts parameter to true. This creates points to match the reference /// data's x values and errors, but with the y values and errors zeroed. Scatter2DPtr & book(Scatter2DPtr & s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, bool copy_pts=false, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// @brief Book a 2-dimensional data point set with equally spaced x-points in a range. /// /// The y values and errors will be set to 0. Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname, size_t npts, double lower, double upper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// @brief Book a 2-dimensional data point set based on provided contiguous "bin edges". /// /// The y values and errors will be set to 0. Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname, const std::vector& binedges, const std::string& title, const std::string& xtitle, const std::string& ytitle); /// Book a 2-dimensional data point set with x-points from an existing scatter and a new path. - Scatter2DPtr book(Scatter2DPtr & s2d, const Scatter2DPtr & scPtr, - const std::string& path, const std::string& title = "", - const std::string& xtitle = "", const std::string& ytitle = "" ); - + Scatter2DPtr book(Scatter2DPtr & s2d, const Scatter2DPtr & scPtr, + const std::string& path, + const std::string& title="", + const std::string& xtitle="", + const std::string& ytitle=""); + //@} public: /// @name Accessing options for this Analysis instance. //@{ /// Get an option for this analysis instance as a string. std::string getOption(std::string optname) { if ( _options.find(optname) != _options.end() ) return _options.find(optname)->second; return ""; } /// Get an option for this analysis instance converted to a /// specific type (given by the specified @a def value). template T getOption(std::string optname, T def) { if (_options.find(optname) == _options.end()) return def; std::stringstream ss; ss << _options.find(optname)->second; T ret; ss >> ret; return ret; } //@} /// @brief Book a CentralityProjection /// /// Using a SingleValueProjection, @a proj, giving the value of an /// experimental observable to be used as a centrality estimator, /// book a CentralityProjection based on the experimentally /// measured pecentiles of this observable (as given by the /// reference data for the @a calHistName histogram in the @a /// calAnaName analysis. If a preloaded file with the output of a /// run using the @a calAnaName analysis contains a valid /// generated @a calHistName histogram, it will be used as an /// optional percentile binning. 
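The option accessors above are normally queried in init(); a small sketch follows, with illustrative option names and defaults (options are typically supplied on the command line as ANALYSISNAME:OPTION=value).

```cpp
// Sketch: reading per-instance options in init(). Option names and
// defaults here are illustrative; unset options fall back to the default.
void init() {
  const std::string mode = getOption("MODE");         // "" if the option was not given
  const double etamax    = getOption("ETAMAX", 2.5);  // converted via stringstream to double
  if (mode == "TIGHT") { /* ... tighten selection cuts ... */ }
  declare(FinalState(Cuts::abseta < etamax), "FS");
  // ... book histograms as usual ...
}
```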
Also if this preloaded file /// contains a histogram with the name @a calHistName with an /// appended "_IMP" This histogram will be used to add an optional /// centrality percentile based on the generated impact /// parameter. If @increasing is true, a low (high) value of @proj /// is assumed to correspond to a more peripheral (central) event. const CentralityProjection& declareCentrality(const SingleValueProjection &proj, string calAnaName, string calHistName, const string projName, bool increasing = false); - /// @brief Book a Pecentile wrapper around AnalysisObjects. + /// @brief Book a Percentile wrapper around AnalysisObjects. /// /// Based on a previously registered CentralityProjection named @a /// projName book one AnalysisObject for each @a centralityBin and /// name them according to the corresponding code in the @a ref /// vector. + /// + /// @todo Convert to just be called book() cf. others template Percentile bookPercentile(string projName, vector > centralityBins, vector > ref) { + typedef typename ReferenceTraits::RefT RefT; + typedef rivet_shared_ptr> WrapT; + Percentile pctl(this, projName); const int nCent = centralityBins.size(); for (int iCent = 0; iCent < nCent; ++iCent) { - const string axisCode = makeAxisCode(std::get<0>(ref[iCent]), - std::get<1>(ref[iCent]), - std::get<2>(ref[iCent])); - const RefT & refscatter = refData(axisCode); - shared_ptr ao = addOrGetCompatAO(make_shared(refscatter, histoPath(axisCode))); - CounterPtr cnt = - addOrGetCompatAO(make_shared(histoPath("TMP/COUNTER/" + axisCode))); - pctl.add(ao, cnt, centralityBins[iCent]); + const string axisCode = mkAxisCode(std::get<0>(ref[iCent]), + std::get<1>(ref[iCent]), + std::get<2>(ref[iCent])); + const RefT & refscatter = refData(axisCode); + + WrapT wtf(_weightNames(), T(refscatter, histoPath(axisCode))); + wtf = addAnalysisObject(wtf); + + CounterPtr cnt(_weightNames(), Counter(histoPath("TMP/COUNTER/" + axisCode))); + cnt = addAnalysisObject(cnt); + + pctl.add(wtf, cnt, centralityBins[iCent]); } return pctl; } - /// @brief Book Pecentile wrappers around AnalysisObjects. - /// - /// Based on a previously registered CentralityProjection named @a - /// projName book one (or several) AnalysisObject(s) named - /// according to @a ref where the x-axis will be filled according - /// to the percentile output(s) of the @projName. - template - PercentileXaxis bookPercentileXaxis(string projName, - tuple ref) { - typedef typename ReferenceTraits::RefT RefT; - PercentileXaxis pctl(this, projName); + // /// @brief Book Percentile wrappers around AnalysisObjects. + // /// + // /// Based on a previously registered CentralityProjection named @a + // /// projName book one (or several) AnalysisObject(s) named + // /// according to @a ref where the x-axis will be filled according + // /// to the percentile output(s) of the @projName. + // /// + // /// @todo Convert to just be called book() cf. 
others + // template + // PercentileXaxis bookPercentileXaxis(string projName, + // tuple ref) { - const string axisCode = makeAxisCode(std::get<0>(ref), - std::get<1>(ref), - std::get<2>(ref)); - const RefT & refscatter = refData(axisCode); - shared_ptr ao = addOrGetCompatAO(make_shared(refscatter, histoPath(axisCode))); - pctl.add(proj, ao, make_shared()); + // typedef typename ReferenceTraits::RefT RefT; + // typedef rivet_shared_ptr> WrapT; - return pctl; - } + // PercentileXaxis pctl(this, projName); + + // const string axisCode = mkAxisCode(std::get<0>(ref), + // std::get<1>(ref), + // std::get<2>(ref)); + // const RefT & refscatter = refData(axisCode); + + // WrapT wtf(_weightNames(), T(refscatter, histoPath(axisCode))); + // wtf = addAnalysisObject(wtf); + + // CounterPtr cnt(_weightNames(), Counter()); + // cnt = addAnalysisObject(cnt); + + // pctl.add(wtf, cnt); + // return pctl; + // } + + + private: + + // Functions that have to be defined in the .cc file to avoid circular #includes + + /// Get the list of weight names from the handler + vector _weightNames() const; + + /// Get the default/nominal weight index + size_t _defaultWeightIndex() const; + + /// Get an AO from another analysis + MultiweightAOPtr _getOtherAnalysisObject(const std::string & ananame, const std::string& name); + + /// Check that analysis objects aren't being booked/registered outside the init stage + void _checkBookInit() const; private: /// To be used in finalize context only: class CounterAdapter { public: CounterAdapter(double x) : x_(x ) {} CounterAdapter(const YODA::Counter & c) : x_(c.val() ) {} // CounterAdapter(CounterPtr cp) : x_(cp->val() ) {} CounterAdapter(const YODA::Scatter1D & s) : x_(s.points()[0].x()) { assert( s.numPoints() == 1 || "Can only scale by a single value."); } // CounterAdapter(Scatter1DPtr sp) : x_(sp->points()[0].x()) { // assert( sp->numPoints() == 1 || "Can only scale by a single value."); // } operator double() const { return x_; } private: double x_; }; public: double dbl(double x) { return x; } double dbl(const YODA::Counter & c) { return c.val(); } double dbl(const YODA::Scatter1D & s) { assert( s.numPoints() == 1 ); return s.points()[0].x(); } /// @name Analysis object manipulation /// @todo Should really be protected: only public to keep BinnedHistogram happy for now... //@{ /// Multiplicatively scale the given counter, @a cnt, by factor @s factor. void scale(CounterPtr cnt, CounterAdapter factor); /// Multiplicatively scale the given counters, @a cnts, by factor @s factor. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of CounterPtrs void scale(const std::vector& cnts, CounterAdapter factor) { for (auto& c : cnts) scale(c, factor); } /// @todo YUCK! template void scale(const CounterPtr (&cnts)[array_size], CounterAdapter factor) { // for (size_t i = 0; i < std::extent::value; ++i) scale(cnts[i], factor); for (auto& c : cnts) scale(c, factor); } /// Normalize the given histogram, @a histo, to area = @a norm. void normalize(Histo1DPtr histo, CounterAdapter norm=1.0, bool includeoverflows=true); /// Normalize the given histograms, @a histos, to area = @a norm. 
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo1DPtrs void normalize(const std::vector& histos, CounterAdapter norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// @todo YUCK! template void normalize(const Histo1DPtr (&histos)[array_size], CounterAdapter norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// Multiplicatively scale the given histogram, @a histo, by factor @s factor. void scale(Histo1DPtr histo, CounterAdapter factor); /// Multiplicatively scale the given histograms, @a histos, by factor @s factor. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo1DPtrs void scale(const std::vector& histos, CounterAdapter factor) { for (auto& h : histos) scale(h, factor); } /// @todo YUCK! template void scale(const Histo1DPtr (&histos)[array_size], CounterAdapter factor) { for (auto& h : histos) scale(h, factor); } /// Normalize the given histogram, @a histo, to area = @a norm. void normalize(Histo2DPtr histo, CounterAdapter norm=1.0, bool includeoverflows=true); /// Normalize the given histograms, @a histos, to area = @a norm. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo2DPtrs void normalize(const std::vector& histos, CounterAdapter norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// @todo YUCK! template void normalize(const Histo2DPtr (&histos)[array_size], CounterAdapter norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// Multiplicatively scale the given histogram, @a histo, by factor @s factor. void scale(Histo2DPtr histo, CounterAdapter factor); /// Multiplicatively scale the given histograms, @a histos, by factor @s factor. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo2DPtrs void scale(const std::vector& histos, CounterAdapter factor) { for (auto& h : histos) scale(h, factor); } /// @todo YUCK! template void scale(const Histo2DPtr (&histos)[array_size], CounterAdapter factor) { for (auto& h : histos) scale(h, factor); } /// Helper for counter division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const; /// Helper for histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Counter& c1, const YODA::Counter& c2, Scatter1DPtr s) const; /// Helper for histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const; /// Helper for histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const; /// Helper for profile histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. 
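A sketch of how the scaling, normalization and division/efficiency helpers declared in this block are typically combined in finalize(); the histogram and scatter members are assumed to have been booked in init(), and their names are illustrative.

```cpp
// Sketch of finalize()-stage post-processing with the helpers declared here.
// _h_pt, _h_shape, _h_pass, _h_all, _s_ratio and _s_eff are assumed members
// booked in init(); the names are placeholders.
void finalize() {
  scale(_h_pt, crossSection()/sumOfWeights());  // convert to a differential cross-section (pb)
  normalize(_h_shape);                          // normalize a shape distribution to unit area
  divide(_h_pass, _h_all, _s_ratio);            // ratio written into a pre-booked Scatter2D
  efficiency(_h_pass, _h_all, _s_eff);          // pass/total efficiency into a Scatter2D
}
```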
Preserves the path information of the target. void divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const; /// Helper for profile histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Profile1D& p1, const YODA::Profile1D& p2, Scatter2DPtr s) const; /// Helper for 2D histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const; /// Helper for 2D histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Histo2D& h1, const YODA::Histo2D& h2, Scatter3DPtr s) const; /// Helper for 2D profile histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const; /// Helper for 2D profile histogram division with raw YODA objects /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Profile2D& p1, const YODA::Profile2D& p2, Scatter3DPtr s) const; /// Helper for histogram efficiency calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const; /// Helper for histogram efficiency calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void efficiency(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const; /// Helper for histogram asymmetry calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const; /// Helper for histogram asymmetry calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void asymm(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const; /// Helper for converting a differential histo to an integral one. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void integrate(Histo1DPtr h, Scatter2DPtr s) const; /// Helper for converting a differential histo to an integral one. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void integrate(const Histo1D& h, Scatter2DPtr s) const; //@} public: /// List of registered analysis data objects const vector& analysisObjects() const { return _analysisobjects; } protected: /// @name Data object registration, retrieval, and removal //@{ /// Register a data object in the histogram system - void addAnalysisObject(const MultiweightAOPtr & ao); - /* - void addAnalysisObject(AnalysisObjectPtr ao); + template + AO addAnalysisObject(const AO & aonew) { + _checkBookInit(); - /// Register a data object in the system and return its pointer, - /// or, if an object of the same path is already there, check if it - /// is compatible (eg. same type and same binning) and return that - /// object instead. 
Emits a warning if an incompatible object with - /// the same name is found and replcaces that with the given data - /// object. - template - std::shared_ptr addOrGetCompatAO(std::shared_ptr aonew) { - foreach (const AnalysisObjectPtr& ao, analysisObjects()) { - if ( ao->path() == aonew->path() ) { - std::shared_ptr aoold = dynamic_pointer_cast(ao); - if ( aoold && bookingCompatible(aonew, aoold) ) { - MSG_TRACE("Bound pre-existing data object " << aonew->path() - << " for " << name()); - return aoold; - } else { - MSG_WARNING("Found incompatible pre-existing data object with same path " - << aonew->path() << " for " << name()); - } + for (const MultiweightAOPtr& ao : analysisObjects()) { + + // Check AO base-name first + ao.get()->setActiveWeightIdx(_defaultWeightIndex()); + aonew.get()->setActiveWeightIdx(_defaultWeightIndex()); + if (ao->path() != aonew->path()) continue; + + // If base-name matches, check compatibility + // NB. This evil is because dynamic_ptr_cast can't work on rivet_shared_ptr directly + AO aoold = AO(dynamic_pointer_cast(ao.get())); //< OMG + if ( !aoold || !bookingCompatible(aonew, aoold) ) { + MSG_WARNING("Found incompatible pre-existing data object with same base path " + << aonew->path() << " for " << name()); + throw LookupError("Found incompatible pre-existing data object with same base path during AO booking"); } + + // Finally, check all weight variations + for (size_t weightIdx = 0; weightIdx < _weightNames().size(); ++weightIdx) { + aoold.get()->setActiveWeightIdx(weightIdx); + aonew.get()->setActiveWeightIdx(weightIdx); + if (aoold->path() != aonew->path()) { + MSG_WARNING("Found incompatible pre-existing data object with different weight-path " + << aonew->path() << " for " << name()); + throw LookupError("Found incompatible pre-existing data object with same weight-path during AO booking"); + } + } + + // They're fully compatible: bind and return + aoold.get()->unsetActiveWeight(); + MSG_TRACE("Bound pre-existing data object " << aoold->path() << " for " << name()); + return aoold; } - MSG_TRACE("Registered " << aonew->annotation("Type") << " " << aonew->path() - << " for " << name()); - addAnalysisObject(aonew); + + // No equivalent found + MSG_TRACE("Registered " << aonew->annotation("Type") << " " << aonew->path() << " for " << name()); + aonew.get()->unsetActiveWeight(); + + _analysisobjects.push_back(aonew); return aonew; } - /// Get a data object from the histogram system - template - const std::shared_ptr getAnalysisObject(const std::string& name) const { - foreach (const AnalysisObjectPtr& ao, analysisObjects()) { - if (ao->path() == histoPath(name)) return dynamic_pointer_cast(ao); - } - throw LookupError("Data object " + histoPath(name) + " not found"); - } - - /// Get a data object from the histogram system (non-const) - template - std::shared_ptr getAnalysisObject(const std::string& name) { - foreach (const AnalysisObjectPtr& ao, analysisObjects()) { - if (ao->path() == histoPath(name)) return dynamic_pointer_cast(ao); - } - throw LookupError("Data object " + histoPath(name) + " not found"); - } - */ - /// Unregister a data object from the histogram system (by name) void removeAnalysisObject(const std::string& path); /// Unregister a data object from the histogram system (by pointer) void removeAnalysisObject(const MultiweightAOPtr & ao); - /* - void removeAnalysisObject(AnalysisObjectPtr ao); - /// Get all data object from the AnalysisHandler. 
- vector getAllData(bool includeorphans) const; + // /// Get all data objects, for all analyses, from the AnalysisHandler + // /// @todo Can we remove this? Why not call handler().getData()? + // vector getAllData(bool includeorphans) const; + + + /// Get a Rivet data object from the histogram system + template + const AO getAnalysisObject(const std::string& name) const { + for (const MultiweightAOPtr& ao : analysisObjects()) { + ao.get()->setActiveWeightIdx(_defaultWeightIndex()); + if (ao->path() == histoPath(name)) { + return dynamic_pointer_cast(ao); + } + } + throw LookupError("Data object " + histoPath(name) + " not found"); + } + + + // /// Get a data object from the histogram system + // template + // const std::shared_ptr getAnalysisObject(const std::string& name) const { + // foreach (const AnalysisObjectPtr& ao, analysisObjects()) { + // if (ao->path() == histoPath(name)) return dynamic_pointer_cast(ao); + // } + // throw LookupError("Data object " + histoPath(name) + " not found"); + // } + + // /// Get a data object from the histogram system (non-const) + // template + // std::shared_ptr getAnalysisObject(const std::string& name) { + // foreach (const AnalysisObjectPtr& ao, analysisObjects()) { + // if (ao->path() == histoPath(name)) return dynamic_pointer_cast(ao); + // } + // throw LookupError("Data object " + histoPath(name) + " not found"); + // } + /// Get a data object from another analysis (e.g. preloaded /// calibration histogram). - /// Get a data object from the histogram system (non-const) - template - std::shared_ptr getAnalysisObject(const std::string & ananame, - const std::string& name) { - std::string path = "/" + ananame + "/" + name; - for ( AnalysisObjectPtr ao : getAllData(true) ) { - if ( ao->path() == path ) - return dynamic_pointer_cast(ao); - } - return std::shared_ptr(); - } - */ - - /* - /// Get a named Histo1D object from the histogram system - const Histo1DPtr getHisto1D(const std::string& name) const { - return getAnalysisObject(name); + template + AO getAnalysisObject(const std::string & ananame, + const std::string& name) { + MultiweightAOPtr ao = _getOtherAnalysisObject(ananame, name); + return dynamic_pointer_cast(ao); } - /// Get a named Histo1D object from the histogram system (non-const) - Histo1DPtr getHisto1D(const std::string& name) { - return getAnalysisObject(name); - } - /// Get a Histo1D object from the histogram system by axis ID codes (non-const) - const Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Histo1D object from the histogram system + // const Histo1DPtr getHisto1D(const std::string& name) const { + // return getAnalysisObject(name); + // } - /// Get a Histo1D object from the histogram system by axis ID codes (non-const) - Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Histo1D object from the histogram system (non-const) + // Histo1DPtr getHisto1D(const std::string& name) { + // return getAnalysisObject(name); + // } + // /// Get a Histo1D object from the histogram system by axis ID codes (non-const) + // const Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Histo2D object from the histogram 
system - const Histo2DPtr getHisto2D(const std::string& name) const { - return getAnalysisObject(name); - } + // /// Get a Histo1D object from the histogram system by axis ID codes (non-const) + // Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Histo2D object from the histogram system (non-const) - Histo2DPtr getHisto2D(const std::string& name) { - return getAnalysisObject(name); - } - /// Get a Histo2D object from the histogram system by axis ID codes (non-const) - const Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Histo2D object from the histogram system + // const Histo2DPtr getHisto2D(const std::string& name) const { + // return getAnalysisObject(name); + // } - /// Get a Histo2D object from the histogram system by axis ID codes (non-const) - Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Histo2D object from the histogram system (non-const) + // Histo2DPtr getHisto2D(const std::string& name) { + // return getAnalysisObject(name); + // } + // /// Get a Histo2D object from the histogram system by axis ID codes (non-const) + // const Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Profile1D object from the histogram system - const Profile1DPtr getProfile1D(const std::string& name) const { - return getAnalysisObject(name); - } + // /// Get a Histo2D object from the histogram system by axis ID codes (non-const) + // Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Profile1D object from the histogram system (non-const) - Profile1DPtr getProfile1D(const std::string& name) { - return getAnalysisObject(name); - } - /// Get a Profile1D object from the histogram system by axis ID codes (non-const) - const Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Profile1D object from the histogram system + // const Profile1DPtr getProfile1D(const std::string& name) const { + // return getAnalysisObject(name); + // } - /// Get a Profile1D object from the histogram system by axis ID codes (non-const) - Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Profile1D object from the histogram system (non-const) + // Profile1DPtr getProfile1D(const std::string& name) { + // return getAnalysisObject(name); + // } + // /// Get a Profile1D object from the histogram system by axis ID codes (non-const) + // const Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Profile2D object from the histogram system - const Profile2DPtr getProfile2D(const std::string& name) const { - 
return getAnalysisObject(name); - } + // /// Get a Profile1D object from the histogram system by axis ID codes (non-const) + // Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Profile2D object from the histogram system (non-const) - Profile2DPtr getProfile2D(const std::string& name) { - return getAnalysisObject(name); - } - /// Get a Profile2D object from the histogram system by axis ID codes (non-const) - const Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Profile2D object from the histogram system + // const Profile2DPtr getProfile2D(const std::string& name) const { + // return getAnalysisObject(name); + // } - /// Get a Profile2D object from the histogram system by axis ID codes (non-const) - Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Profile2D object from the histogram system (non-const) + // Profile2DPtr getProfile2D(const std::string& name) { + // return getAnalysisObject(name); + // } + // /// Get a Profile2D object from the histogram system by axis ID codes (non-const) + // const Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Scatter2D object from the histogram system - const Scatter2DPtr getScatter2D(const std::string& name) const { - return getAnalysisObject(name); - } + // /// Get a Profile2D object from the histogram system by axis ID codes (non-const) + // Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } - /// Get a named Scatter2D object from the histogram system (non-const) - Scatter2DPtr getScatter2D(const std::string& name) { - return getAnalysisObject(name); - } - /// Get a Scatter2D object from the histogram system by axis ID codes (non-const) - const Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } + // /// Get a named Scatter2D object from the histogram system + // const Scatter2DPtr getScatter2D(const std::string& name) const { + // return getAnalysisObject(name); + // } - /// Get a Scatter2D object from the histogram system by axis ID codes (non-const) - Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { - return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); - } - */ + // /// Get a named Scatter2D object from the histogram system (non-const) + // Scatter2DPtr getScatter2D(const std::string& name) { + // return getAnalysisObject(name); + // } + + // /// Get a Scatter2D object from the histogram system by axis ID codes (non-const) + // const Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } + + // /// Get a Scatter2D object from the histogram system by axis ID codes (non-const) + // Scatter2DPtr getScatter2D(unsigned int datasetId, 
unsigned int xAxisId, unsigned int yAxisId) { + // return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); + // } //@} private: /// Name passed to constructor (used to find .info analysis data file, and as a fallback) string _defaultname; /// Pointer to analysis metadata object unique_ptr _info; /// Storage of all plot objects /// @todo Make this a map for fast lookup by path? vector _analysisobjects; /// @name Cross-section variables //@{ double _crossSection; bool _gotCrossSection; //@} /// The controlling AnalysisHandler object. AnalysisHandler* _analysishandler; /// Collection of cached refdata to speed up many autobookings: the /// reference data file should only be read once. mutable std::map _refdata; /// Options the (this instance of) the analysis map _options; /// The string of options. string _optstring; private: /// @name Utility functions //@{ /// Get the reference data for this paper and cache it. void _cacheRefData() const; //@} /// The assignment operator is private and must never be called. /// In fact, it should not even be implemented. Analysis& operator=(const Analysis&); }; } // Include definition of analysis plugin system so that analyses automatically see it when including Analysis.hh #include "Rivet/AnalysisBuilder.hh" /// @def DECLARE_RIVET_PLUGIN /// Preprocessor define to prettify the global-object plugin hook mechanism. #define DECLARE_RIVET_PLUGIN(clsname) Rivet::AnalysisBuilder plugin_ ## clsname /// @def DECLARE_ALIASED_RIVET_PLUGIN /// Preprocessor define to prettify the global-object plugin hook mechanism, with an extra alias name for this analysis. // #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) Rivet::AnalysisBuilder plugin_ ## clsname ## ( ## #alias ## ) #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) DECLARE_RIVET_PLUGIN(clsname)( #alias ) /// @def DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR /// Preprocessor define to prettify the manky constructor with name string argument #define DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) clsname() : Analysis(# clsname) {} /// @def DEFAULT_RIVET_ANALYSIS_CTOR /// Slight abbreviation for DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR #define DEFAULT_RIVET_ANALYSIS_CTOR(clsname) DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) #endif diff --git a/include/Rivet/AnalysisHandler.hh b/include/Rivet/AnalysisHandler.hh --- a/include/Rivet/AnalysisHandler.hh +++ b/include/Rivet/AnalysisHandler.hh @@ -1,344 +1,342 @@ // -*- C++ -*- #ifndef RIVET_RivetHandler_HH #define RIVET_RivetHandler_HH #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/AnalysisLoader.hh" #include "Rivet/Tools/RivetYODA.hh" namespace Rivet { // Forward declaration and smart pointer for Analysis class Analysis; typedef std::shared_ptr AnaHandle; // Needed to make smart pointers compare equivalent in the STL set struct CmpAnaHandle { bool operator() (const AnaHandle& a, const AnaHandle& b) const { return a.get() < b.get(); } }; /// A class which handles a number of analysis objects to be applied to /// generated events. An {@link Analysis}' AnalysisHandler is also responsible /// for handling the final writing-out of histograms. class AnalysisHandler { public: /// @name Constructors and destructors. */ //@{ /// Preferred constructor, with optional run name. AnalysisHandler(const string& runname=""); /// @brief Destructor /// The destructor is not virtual, as this class should not be inherited from. ~AnalysisHandler(); //@} private: /// Get a logger object. 
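For orientation, a hedged sketch of how external steering code can drive an AnalysisHandler via the run-control methods declared below; the run name, analysis name, cross-section numbers and output file are placeholders.

```cpp
// Sketch of external steering code driving an AnalysisHandler.
// Analysis/run/file names and the cross-section numbers are placeholders.
#include "Rivet/AnalysisHandler.hh"

void runRivet(const std::vector<HepMC::GenEvent*>& events) {
  Rivet::AnalysisHandler ah("example-run");
  ah.addAnalysis("MY_EXAMPLE_ANALYSIS");       // look up a registered analysis by name

  bool first = true;
  for (const HepMC::GenEvent* evt : events) {
    if (first) { ah.init(*evt); first = false; }  // run beams taken from the first event
    ah.analyze(*evt);
  }

  ah.setCrossSection(1.0e3, 10.0);             // cross-section and error (conventionally pb)
  ah.finalize();                               // run each analysis' finalize()
  ah.writeData("example-run.yoda");            // write all analysis objects to file
}
```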
Log& getLog() const; public: /// @name Run properties //@{ /// Get the name of this run. string runName() const; /// Get the number of events seen. Should only really be used by external /// steering code or analyses in the finalize phase. size_t numEvents() const; /// @brief Access the sum of the event weights seen /// /// This is the weighted equivalent of the number of events. It should only /// be used by external steering code or analyses in the finalize phase. double sumW() const { return _eventCounter->sumW(); } /// Access to the sum of squared-weights double sumW2() const { return _eventCounter->sumW2(); } /// Names of event weight categories const vector& weightNames() const { return _weightNames; } - /* - /// @brief Set the sum of weights - /// - /// This is useful if Rivet is steered externally and - /// the analyses are run for a sub-contribution of the events - /// (but of course have to be normalised to the total sum of weights) - /// - /// @todo What about the sumW2 term? That needs to be set coherently. Need a - /// new version, with all three N,sumW,sumW2 numbers (or a counter) - /// supplied. - /// - /// @deprecated Weight sums are no longer tracked this way... - void setSumOfWeights(const double& sum) { - //_sumOfWeights = sum; - throw Error("Can't set sum of weights independently, since it's now tracked by a Counter. " - "Please contact the Rivet authors if you need this."); + /// Get the index of the nominal weight-stream + size_t defaultWeightIndex() const { + return _defaultWeightIdx; } - */ /// Set the cross-section for the process being generated. AnalysisHandler& setCrossSection(double xs, double xserr); /// Get the cross-section known to the handler. Scatter1DPtr crossSection() const { return _xs; } + /// Get the nominal cross-section double nominalCrossSection() const { _xs.get()->setActiveWeightIdx(_defaultWeightIdx); const YODA::Scatter1D::Points& ps = _xs->points(); if (ps.size() != 1) { string errMsg = "cross section missing when requesting nominal cross section"; throw Error(errMsg); } double xs = ps[0].x(); _xs.get()->unsetActiveWeight(); return xs; } /// Set the beam particles for this run AnalysisHandler& setRunBeams(const ParticlePair& beams) { _beams = beams; MSG_DEBUG("Setting run beams = " << beams << " @ " << sqrtS()/GeV << " GeV"); return *this; } /// Get the beam particles for this run, usually determined from the first event. const ParticlePair& beams() const { return _beams; } /// Get beam IDs for this run, usually determined from the first event. /// @deprecated Use standalone beamIds(ah.beams()), to clean AH interface PdgIdPair beamIds() const; /// Get energy for this run, usually determined from the first event. /// @deprecated Use standalone sqrtS(ah.beams()), to clean AH interface double sqrtS() const; /// Setter for _ignoreBeams void setIgnoreBeams(bool ignore=true); //@} /// @name Handle analyses //@{ /// Get a list of the currently registered analyses' names. std::vector analysisNames() const; /// Get the collection of currently registered analyses. const std::set& analyses() const { return _analyses; } /// Get a registered analysis by name. const AnaHandle analysis(const std::string& analysisname) const; /// Add an analysis to the run list by object AnalysisHandler& addAnalysis(Analysis* analysis); /// @brief Add an analysis to the run list using its name. /// /// The actual Analysis to be used will be obtained via /// AnalysisLoader::getAnalysis(string). If no matching analysis is found, /// no analysis is added (i.e. 
the null pointer is checked and discarded). AnalysisHandler& addAnalysis(const std::string& analysisname); /// @brief Add an analysis with a map of analysis options. AnalysisHandler& addAnalysis(const std::string& analysisname, std::map<string, string> pars); - + /// @brief Add analyses to the run list using their names. /// /// The actual {@link Analysis}' to be used will be obtained via /// AnalysisHandler::addAnalysis(string), which in turn uses /// AnalysisLoader::getAnalysis(string). If no matching analysis is found /// for a given name, no analysis is added, but also no error is thrown. AnalysisHandler& addAnalyses(const std::vector<std::string>& analysisnames); /// Remove an analysis from the run list using its name. AnalysisHandler& removeAnalysis(const std::string& analysisname); /// Remove analyses from the run list using their names. AnalysisHandler& removeAnalyses(const std::vector<std::string>& analysisnames); //@} /// @name Main init/execute/finalise //@{ /// Initialize a run, with the run beams taken from the example event. void init(const GenEvent& event); /// @brief Analyze the given \a event by reference. /// /// This function will call the AnalysisBase::analyze() function of all /// included analysis objects. void analyze(const GenEvent& event); /// @brief Analyze the given \a event by pointer. /// /// This function will call the AnalysisBase::analyze() function of all /// included analysis objects, after checking the event pointer validity. void analyze(const GenEvent* event); /// Finalize a run. This function calls the AnalysisBase::finalize() /// functions of all included analysis objects. void finalize(); //@} /// @name Histogram / data object access //@{ /// Add a vector of analysis objects to the current state. void addData(const std::vector<YODA::AnalysisObjectPtr>& aos); /// Read analysis plots into the histo collection (via addData) from the named file. void readData(const std::string& filename); + /// Get all multi-weight Rivet analysis object wrappers + vector<MultiweightAOPtr> getRivetAOs() const; + + /// Get single-weight YODA analysis objects + vector<YODA::AnalysisObjectPtr> getYodaAOs(bool includeorphans, + bool includetmps, + bool usefinalized) const; + /// Get all analyses' plots as a vector of analysis objects. - std::vector<YODA::AnalysisObjectPtr> getData() const; + std::vector<YODA::AnalysisObjectPtr> getData(bool includeorphans = false, + bool includetmps = false, + bool usefinalized = true) const; /// Write all analyses' plots (via getData) to the named file. void writeData(const std::string& filename) const; /// Tell Rivet to dump intermediate result to a file named @a /// dumpfile every @a period'th event. If @a period is not positive, /// no dumping will be done. void dump(string dumpfile, int period) { _dumpPeriod = period; _dumpFile = dumpfile; } /// Take the vector of yoda files and merge them together using /// the cross section and weight information provided in each /// file. Each file in @a aofiles is assumed to have been produced /// by Rivet. By default the files are assumed to contain /// different processes (or the same process but mutually /// exclusive cuts), but if @a equiv is true, the files are /// assumed to contain output of completely equivalent (but /// statistically independent) Rivet runs. The corresponding /// analyses will be loaded and their analysis objects will be /// filled with the merged result. finalize() will be run on each - /// relevant anslysis. The resulting YODA file can then be rwitten + /// relevant analysis. The resulting YODA file can then be written /// out by writeData().
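
// Steering sketch (hypothetical external driver; the analysis name, file name
// and event source are made up for illustration): how generator-side code
// would drive this handler using only the interface declared here.
void steerExampleRun(const std::vector<const GenEvent*>& events) {
  AnalysisHandler ah("Run1");
  ah.addAnalysis("EXAMPLE_SPECTRUM");     // look up a plugin analysis by name
  ah.setCrossSection(1.2e3, 4.0e1);       // cross-section and its uncertainty
  for (size_t i = 0; i < events.size(); ++i) {
    if (i == 0) ah.init(*events[0]);      // run beams are taken from the first event
    ah.analyze(*events[i]);
  }
  ah.finalize();
  ah.writeData("Run1.yoda");
  // Equivalent but statistically independent runs written this way can later be
  // combined with mergeYodas({"Run1.yoda", "Run2.yoda"}, {}, /*equiv=*/true).
}
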
If delopts is non-empty, it is assumed to /// contain names different options to be merged into the same /// analysis objects. void mergeYodas(const vector & aofiles, const vector & delopts = vector(), bool equiv = false); /// Helper function to strip specific options from data object paths. - void stripOptions(AnalysisObjectPtr ao, + void stripOptions(YODA::AnalysisObjectPtr ao, const vector & delopts) const; //@} /// Indicate which Rivet stage we're in. /// At the moment, only INIT is used to enable booking. enum class Stage { OTHER, INIT }; /// Which stage are we in? Stage stage() const { return _stage; } private: /// Current handler stage Stage _stage = Stage::OTHER; /// The collection of Analysis objects to be used. set _analyses; /// A vector of pre-loaded object which do not have a valid /// Analysis plugged in. - vector _orphanedPreloads; + vector _orphanedPreloads; /// A vector containing copies of analysis objects after /// finalize() has been run. - vector _finalizedAOs; + vector _finalizedAOs; /// @name Run properties //@{ /// Weight names std::vector _weightNames; std::vector > _subEventWeights; size_t _numWeightTypes; // always == WeightVector.size() /// Run name std::string _runname; /// Event counter - mutable Counter _eventCounter; + mutable CounterPtr _eventCounter; /// Cross-section known to AH Scatter1DPtr _xs; /// Beams used by this run. ParticlePair _beams; /// Flag to check if init has been called bool _initialised; /// Flag whether input event beams should be ignored in compatibility check bool _ignoreBeams; /// Current event number int _eventNumber; + /// The index in the weight vector for the nominal weight stream size_t _defaultWeightIdx; /// Determines how often Rivet runs finalize() and writes the /// result to a YODA file. int _dumpPeriod; /// The name of a YODA file to which Rivet periodically dumps /// results. string _dumpFile; /// Flag to indicate periodic dumping is in progress bool _dumping; //@} private: /// The assignment operator is private and must never be called. /// In fact, it should not even be implemented. AnalysisHandler& operator=(const AnalysisHandler&); /// The copy constructor is private and must never be called. In /// fact, it should not even be implemented. AnalysisHandler(const AnalysisHandler&); }; } #endif diff --git a/include/Rivet/Projections/CentralityProjection.hh b/include/Rivet/Projections/CentralityProjection.hh --- a/include/Rivet/Projections/CentralityProjection.hh +++ b/include/Rivet/Projections/CentralityProjection.hh @@ -1,103 +1,102 @@ // -*- C++ -*- #ifndef RIVET_CENTRALITYPROJECTION_HH #define RIVET_CENTRALITYPROJECTION_HH #include "Rivet/Projections/PercentileProjection.hh" #include "Rivet/Tools/RivetYODA.hh" #include namespace Rivet { /** @brief CentralityProjection is used together with the percentile-based analysis objects Percentile and PercentileXaxsis. The interior actually defines several different centrality estimates: the centrality observable used in the experiment with a reference calibration ("REF"); the same but using a user-defined calibration done with the corresponding minimum bias analysis ("GEN"); a centrality based on the impact parameter reported in HepMC::HeavyIon::impact_parameter, using a calibration histogram generated with the same minimum bias analysis ("IMP"). 
For HepMC3 it may optionally also include a direct report from the generator about the centrality, if available in HepMC::HeavyIon::centrality ("RAW"), and a user-defined generated centrality estimate communicated via the HepMC::HeavyIon::user_cent_estimate ("USR"). @author Leif Lönnblad */ class CentralityProjection: public SingleValueProjection { public: /// Default constructor. CentralityProjection() {} - + DEFAULT_RIVET_PROJ_CLONE(CentralityProjection); /// @brief Add a new centrality estimate. /// /// The SingleValueProjection, @a p, should return a value between 0 /// and 100, and the @a pname should be one of "REF", "GEN", "IMP", /// "USR", or "RAW", as described above. void add(const SingleValueProjection & p, string pname) { _projNames.push_back(pname); declare(p, pname); } /// Perform all internal projections. void project(const Event& e) { _values.clear(); for ( string pname : _projNames ) _values.push_back(apply<SingleValueProjection>(e, pname)()); if ( !_values.empty() ) set(_values[0]); } /// Check if no internal projections have been added. bool empty() const { return _projNames.empty(); } - + /// Return the percentile of the @a i'th projection. /// /// Note that operator() will return the zero'th projection. double operator[](int i) const { return _values[i]; } // Standard comparison function. - int compare(const Projection& p) const { - const CentralityProjection* other = - dynamic_cast<const CentralityProjection*>(&p); - if (other->_projNames.size() == 0) return UNDEFINED; + CmpState compare(const Projection& p) const { + const CentralityProjection* other = dynamic_cast<const CentralityProjection*>(&p); + if (other->_projNames.size() == 0) return CmpState::NEQ; for (string pname : _projNames) { bool hasPname = true; for (string p2name : other->_projNames){ if (pname != p2name) hasPname = false; } - if (!hasPname) return UNDEFINED; + if (!hasPname) return CmpState::NEQ; } - return EQUIVALENT; + return CmpState::EQ; } - /// THe list of names of the internal projections. + /// The list of names of the internal projections. vector<string> projections() const { return _projNames; } private: - /// THe list of names of the internal projections. + /// The list of names of the internal projections. vector<string> _projNames; /// The list of percentiles resulting from the last projection. vector<double> _values; - + }; } #endif diff --git a/include/Rivet/Projections/GeneratedPercentileProjection.hh b/include/Rivet/Projections/GeneratedPercentileProjection.hh --- a/include/Rivet/Projections/GeneratedPercentileProjection.hh +++ b/include/Rivet/Projections/GeneratedPercentileProjection.hh @@ -1,41 +1,40 @@ // -*- C++ -*- #ifndef RIVET_GENERATEDPERCENTILEPROJECTION_HH #define RIVET_GENERATEDPERCENTILEPROJECTION_HH #include "Rivet/Projections/SingleValueProjection.hh" namespace Rivet { class GeneratedPercentileProjection: public SingleValueProjection { public: - + GeneratedPercentileProjection() { setName("GeneratedPercentileProjection"); } /// Clone on the heap.
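
// Usage sketch for the CentralityProjection interface above (the estimator
// labels and the projection name "CENT" are hypothetical). Inside an analysis'
// init() one would typically combine the available estimators:
//
//   CentralityProjection cent;
//   cent.add(ImpactParameterProjection(), "IMP");  // impact-parameter-based estimate
//   cent.add(UserCentEstimate(), "USR");           // generator-supplied estimate
//   declare(cent, "CENT");
//
// and read the result back per event in analyze():
//
//   const CentralityProjection& cp = apply<CentralityProjection>(event, "CENT");
//   const double c    = cp();    // primary estimate, as a percentile in [0, 100]
//   const double cImp = cp[0];   // estimate of the first projection added ("IMP")
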
DEFAULT_RIVET_PROJ_CLONE(GeneratedPercentileProjection); protected: void project(const Event& e) { clear(); #if HEPMC_VERSION_CODE >= 3000000 const HepMC::HeavyIon * hi = e.genEvent()->heavy_ion(); if ( hi && hi->centrality >= 0.0 ) set(hi->centrality*100.0); #endif } - - int compare(const Projection& p) const { - return 0; + + CmpState compare(const Projection& p) const { + return CmpState::EQ; } - + }; } #endif - diff --git a/include/Rivet/Projections/ImpactParameterProjection.hh b/include/Rivet/Projections/ImpactParameterProjection.hh --- a/include/Rivet/Projections/ImpactParameterProjection.hh +++ b/include/Rivet/Projections/ImpactParameterProjection.hh @@ -1,39 +1,38 @@ // -*- C++ -*- #ifndef RIVET_IMPACTPARAMETERPROJECTION_HH #define RIVET_IMPACTPARAMETERPROJECTION_HH #include "Rivet/Projections/SingleValueProjection.hh" namespace Rivet { class ImpactParameterProjection: public SingleValueProjection { public: - + ImpactParameterProjection() { setName("ImpactParameterProjection"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(ImpactParameterProjection); protected: void project(const Event& e) { clear(); const HepMC::HeavyIon * hi = e.genEvent()->heavy_ion(); if ( hi && hi->impact_parameter() >= 0.0 ) set(hi->impact_parameter()); } - - int compare(const Projection& p) const { - return 0; + + CmpState compare(const Projection& p) const { + return CmpState::EQ; } - + }; } #endif - diff --git a/include/Rivet/Projections/PercentileProjection.hh b/include/Rivet/Projections/PercentileProjection.hh --- a/include/Rivet/Projections/PercentileProjection.hh +++ b/include/Rivet/Projections/PercentileProjection.hh @@ -1,138 +1,138 @@ // -*- C++ -*- #ifndef RIVET_PERCENTILEPROJECTION_HH #define RIVET_PERCENTILEPROJECTION_HH #include "Rivet/Projections/SingleValueProjection.hh" #include "Rivet/Tools/RivetYODA.hh" #include namespace Rivet { /** @brief class for projections that reports the percentile for a given SingleValueProjection when initialized with a Histo1D of the distribution in the SingleValueProjection. @author Leif Lönnblad */ class PercentileProjection : public SingleValueProjection { public: // Constructor taking a SingleValueProjection and a calibration // histogram. If increasing it means that low values corresponds to // lower percentiles. PercentileProjection(const SingleValueProjection & sv, Histo1DPtr calhist, bool increasing = false) : _calhist("EMPTY"), _increasing(increasing) { declare(sv, "OBSERVABLE"); if ( !calhist ) return; MSG_INFO("Constructing PercentileProjection from " << calhist->path()); _calhist = calhist->path(); int N = calhist->numBins(); double sum = calhist->sumW(); - + if ( increasing ) { double acc = calhist->underflow().sumW(); _table.insert(make_pair(calhist->bin(0).xEdges().first, 100.0*acc/sum)); for ( int i = 0; i < N; ++i ) { acc += calhist->bin(i).sumW(); _table.insert(make_pair(calhist->bin(i).xEdges().second, 100.0*acc/sum)); } } else { double acc = calhist->overflow().sumW(); _table.insert(make_pair(calhist->bin(N - 1).xEdges().second, 100.0*acc/sum)); for ( int i = N - 1; i >= 0; --i ) { acc += calhist->bin(i).sumW(); _table.insert(make_pair(calhist->bin(i).xEdges().first, 100.0*acc/sum)); } - } + } } // Constructor taking a SingleValueProjection and a calibration // histogram. If increasing it means that low values corresponds to // lower percentiles. 
PercentileProjection(const SingleValueProjection & sv, Scatter2DPtr calscat, bool increasing = false) : _calhist("EMPTY"), _increasing(increasing) { declare(sv, "OBSERVABLE"); if ( !calscat ) return; MSG_INFO("Constructing PercentileProjection from " << calscat->path()); _calhist = calscat->path(); int N = calscat->numPoints(); double sum = 0.0; for ( const auto & p : calscat->points() ) sum += p.y(); double acc = 0.0; if ( increasing ) { _table.insert(make_pair(calscat->point(0).xMin(), 100.0*acc/sum)); for ( int i = 0; i < N; ++i ) { acc += calscat->point(i).y(); _table.insert(make_pair(calscat->point(i).xMax(), 100.0*acc/sum)); } } else { _table.insert(make_pair(calscat->point(N - 1).xMax(), 100.0*acc/sum)); for ( int i = N - 1; i >= 0; --i ) { acc += calscat->point(i).y(); _table.insert(make_pair(calscat->point(i).xMin(), 100.0*acc/sum)); } - } + } } DEFAULT_RIVET_PROJ_CLONE(PercentileProjection); // The projection function takes the assigned SingleValueProjection // and sets the value of this projection to the corresponding // percentile. If no calibration has been provided, -1 will be // returned. If values are outside of the calibration histogram, 0 // or 100 will be returned. void project(const Event& e) { clear(); if ( _table.empty() ) return; double obs = apply<SingleValueProjection>(e, "OBSERVABLE")(); double pcnt = lookup(obs); if ( pcnt >= 0.0 ) set(pcnt); } // Standard comparison function. - int compare(const Projection& p) const { - const PercentileProjection pp = - dynamic_cast<const PercentileProjection &>(p); - return mkNamedPCmp(p, "OBSERVABLE") || cmp(_increasing,pp._increasing) || + CmpState compare(const Projection& p) const { + const PercentileProjection pp = dynamic_cast<const PercentileProjection &>(p); + return mkNamedPCmp(p, "OBSERVABLE") || + cmp(_increasing, pp._increasing) || cmp(_calhist, pp._calhist); } private: // The (interpolated) lookup table double lookup(double obs) const { auto low = _table.upper_bound(obs); if ( low == _table.end() ) return _increasing? 100.0: 0.0; if ( low == _table.begin() ) return _increasing? 0.0: 100.0; auto high = low--; return low->second + (obs - low->first)*(high->second - low->second)/ (high->first - low->first); } // A string identifying the calibration histogram. string _calhist; // A lookup table to find (by interpolation) the percentile given // the value of the underlying SingleValueProjection. map<double, double> _table; // A flag to say whether the distribution should be integrated from // below or above. bool _increasing; }; } #endif diff --git a/include/Rivet/Projections/UserCentEstimate.hh b/include/Rivet/Projections/UserCentEstimate.hh --- a/include/Rivet/Projections/UserCentEstimate.hh +++ b/include/Rivet/Projections/UserCentEstimate.hh @@ -1,41 +1,40 @@ // -*- C++ -*- #ifndef RIVET_USERCENTESTIMATE_HH #define RIVET_USERCENTESTIMATE_HH #include "Rivet/Projections/SingleValueProjection.hh" namespace Rivet { class UserCentEstimate: public SingleValueProjection { public: - + UserCentEstimate() { setName("UserCentEstimate"); } /// Clone on the heap.
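
// Worked example for the PercentileProjection::lookup() interpolation above
// (numbers invented for illustration). If the calibration table holds the
// (edge, percentile) pairs
//
//   (10.0, 20.0)  (20.0, 50.0)  (30.0, 90.0)
//
// and the current observable is obs = 15.0, then upper_bound(15.0) points at
// (20.0, 50.0) and its predecessor is (10.0, 20.0), so
//
//   lookup(15.0) = 20.0 + (15.0 - 10.0) * (50.0 - 20.0) / (20.0 - 10.0) = 35.0
//
// i.e. the event is placed at the 35th percentile. Observables falling below
// the first or above the last edge return 0 or 100, as appropriate for the
// _increasing flag.
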
DEFAULT_RIVET_PROJ_CLONE(UserCentEstimate); protected: void project(const Event& e) { clear(); #if HEPMC_VERSION_CODE >= 3000000 const HepMC::HeavyIon * hi = e.genEvent()->heavy_ion(); if ( hi && hi->user_cent_estimate >= 0.0 ) set(hi->user_cent_estimate*100.0); #endif } - - int compare(const Projection& p) const { - return 0; + + CmpState compare(const Projection& p) const { + return CmpState::EQ; } - + }; } #endif - diff --git a/include/Rivet/Tools/Exceptions.hh b/include/Rivet/Tools/Exceptions.hh --- a/include/Rivet/Tools/Exceptions.hh +++ b/include/Rivet/Tools/Exceptions.hh @@ -1,67 +1,68 @@ #ifndef RIVET_EXCEPTIONS_HH #define RIVET_EXCEPTIONS_HH #include <exception> #include <stdexcept> #include <string> namespace Rivet { /// @brief Generic runtime Rivet error. struct Error : public std::runtime_error { Error(const std::string& what) : std::runtime_error(what) {} }; /// @brief Rivet::Exception is a synonym for Rivet::Error. typedef Error Exception; /// @brief Error for e.g. use of invalid bin ranges. struct RangeError : public Error { RangeError(const std::string& what) : Error(what) {} }; /// @brief Error specialisation for places where alg logic has failed. struct LogicError : public Error { LogicError(const std::string& what) : Error(what) {} }; /// @brief Error specialisation for failures relating to particle ID codes. struct PidError : public Error { PidError(const std::string& what) : Error(what) {} }; /// @brief Error specialisation for failures relating to analysis info. struct InfoError : public Error { InfoError(const std::string& what) : Error(what) {} }; /// @brief Errors relating to event/bin weights /// /// Arises in computing statistical quantities because e.g. the bin /// weight is zero or negative. struct WeightError : public Error { WeightError(const std::string& what) : Error(what) {} }; /// @brief Error specialisation for where the problem is between the chair and the computer. struct UserError : public Error { UserError(const std::string& what) : Error(what) {} }; - /// @brief Error relating to looking up analyis objects in the register + + /// @brief Error relating to looking up analysis objects in the register struct LookupError : public Error { LookupError(const std::string& what) : Error(what) {} }; } #endif diff --git a/include/Rivet/Tools/RivetSTL.hh b/include/Rivet/Tools/RivetSTL.hh --- a/include/Rivet/Tools/RivetSTL.hh +++ b/include/Rivet/Tools/RivetSTL.hh @@ -1,196 +1,192 @@ #ifndef RIVET_RivetSTL_HH #define RIVET_RivetSTL_HH #include #include #include #include #include #include - #include #include - +#include +#include +// #include // #include -// #include // #include // #include // #include - // #include // #include // #include -#include -#include - namespace Rivet { /// We implicitly use STL entities in the Rivet namespace // using namespace std; using std::string; using std::to_string; using std::array; using std::vector; using std::list; using std::set; using std::multiset; using std::map; using std::multimap; using std::pair; using std::make_pair; using std::unique_ptr; using std::shared_ptr; using std::make_shared; using std::dynamic_pointer_cast; using std::initializer_list; using std::function; /// @name Streaming containers as string reps /// @todo Make these named toStr rather than operator<< /// @todo Make these generic to any iterable //@{ /// Convenient function for streaming out vectors of any streamable object.
template inline std::ostream& operator<<(std::ostream& os, const std::vector& vec) { os << "[ "; for (size_t i=0; i inline std::ostream& operator<<(std::ostream& os, const std::list& vec) { os << "[ "; for (size_t i=0; i inline bool contains(const std::initializer_list& il, const T& x) { return find(begin(il), end(il), x) != end(il); } /// Does the vector @a v contain @a x? template inline bool contains(const std::vector& v, const T& x) { return find(begin(v), end(v), x) != end(v); } /// Does the list @a l contain @a x? template inline bool contains(const std::list& l, const T& x) { return find(begin(l), end(l), x) != end(l); } /// Does the set @a s contain @a x? template inline bool contains(const std::set& s, const T& x) { return find(begin(s), end(s), x) != end(s); } /// Does the map @a m contain the key @a key? template inline bool has_key(const std::map& m, const K& key) { return m.find(key) != end(m); } /// Does the map @a m contain the value @a val? template inline bool has_value(const std::map& m, const T& val) { for (typename std::map::const_iterator it = begin(m); it != end(m); ++it) { if (it->second == val) return true; } return false; } //@} } namespace std { /// @name Container filling and merging //@{ /// Append a single item to vector @a v template inline void operator+=(std::vector& v, const T& x) { v.push_back(x); } /// Append all the items from vector @a v2 to vector @a v1 template inline void operator+=(std::vector& v1, const std::vector& v2) { for (const auto& x : v2) v1.push_back(x); } /// Create a new vector from the concatenated items in vectors @a v1 and @a v2 template inline std::vector operator+(const std::vector& v1, const std::vector& v2) { std::vector rtn(v1); rtn += v2; return rtn; } /// Merge the contents of set @a s2 into @a s1 template inline void operator+=(std::set& s1, const std::set& s2) { for (const auto& x : s2) s1.insert(x); } /// Merge the contents of sets @a s1 and @a s2 template inline std::set operator+(const std::set& s1, const std::set& s2) { std::set rtn(s1); rtn += s2; return rtn; } //@} /// @name Function helpers //@{ /// Get a function pointer / hash integer from an std::function template inline size_t get_address(std::function f) { typedef T(fnType)(U...); fnType ** fnPointer = f.template target(); return (fnPointer != nullptr) ? reinterpret_cast(*fnPointer) : 0; } //@} } #endif diff --git a/include/Rivet/Tools/RivetYODA.hh b/include/Rivet/Tools/RivetYODA.hh --- a/include/Rivet/Tools/RivetYODA.hh +++ b/include/Rivet/Tools/RivetYODA.hh @@ -1,567 +1,566 @@ #ifndef RIVET_RIVETYODA_HH #define RIVET_RIVETYODA_HH #include "Rivet/Config/RivetCommon.hh" #include "YODA/AnalysisObject.h" #include "YODA/Counter.h" #include "YODA/Histo1D.h" #include "YODA/Histo2D.h" #include "YODA/Profile1D.h" #include "YODA/Profile2D.h" #include "YODA/Scatter1D.h" #include "YODA/Scatter2D.h" #include "YODA/Scatter3D.h" #include #include namespace YODA { typedef std::shared_ptr AnalysisObjectPtr; } namespace Rivet { class AnalysisObjectWrapper { public: virtual ~AnalysisObjectWrapper() {} virtual YODA::AnalysisObject* operator->() = 0; virtual YODA::AnalysisObject* operator->() const = 0; virtual const YODA::AnalysisObject & operator*() const = 0; /// @todo Rename to setActive(idx) virtual void setActiveWeightIdx(unsigned int iWeight) = 0; bool operator ==(const AnalysisObjectWrapper& p) { return (this == &p); } bool operator !=(const AnalysisObjectWrapper& p) { return (this != &p); } protected: /// @todo do we need this? 
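
// Usage sketch for the RivetSTL.hh container helpers defined above (values are
// invented for illustration): the operator+= / operator+ overloads and the
// contains() / has_key() predicates keep analysis bookkeeping code concise.
inline void exampleContainerHelpers() {
  std::vector<int> v{1, 2};
  v += 3;                                   // append a single element
  v += std::vector<int>{4, 5};              // append a whole vector
  const bool hasTwo = contains(v, 2);       // true
  std::map<std::string, int> m{{"pT", 1}};
  const bool hasPt = has_key(m, std::string("pT"));  // true
  std::cout << v << std::endl;              // printed via the operator<< overload above
  (void)hasTwo; (void)hasPt;
}
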
// virtual void reset() = 0; }; /// @todo /// implement scatter1dptr and scatter2dptr here /// these need to be multi-weighted eventually. /* class Scatter1DPtr : public AnalysisObjectPtr { public: Scatter1DPtr() : _persistent() { } Scatter1DPtr(size_t len_of_weightvec, const YODA::Scatter1D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared(p)); } bool operator!() const { return !_persistent; } explicit operator bool() const { return bool(_persistent); } YODA::Scatter1D* operator->() { return _persistent.get(); } YODA::Scatter1D* operator->() const { return _persistent.get(); } YODA::Scatter1D & operator*() { return *_persistent; } const YODA::Scatter1D & operator*() const { return *_persistent; } protected: vector _persistent; }; class Scatter2DPtr : public AnalysisObjectPtr { public: Scatter2DPtr(size_t len_of_weightvec, const YODA::Scatter2D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared(p)); } Scatter2DPtr() : _persistent() { } bool operator!() { return !_persistent; } explicit operator bool() { return bool(_persistent); } YODA::Scatter2D* operator->() { return _persistent.get(); } YODA::Scatter2D* operator->() const { return _persistent.get(); } YODA::Scatter2D & operator*() { return *_persistent; } const YODA::Scatter2D & operator*() const { return *_persistent; } protected: vector _persistent; }; class Scatter3DPtr : public AnalysisObjectPtr { public: Scatter3DPtr(size_t len_of_weightvec, const YODA::Scatter3D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared(p)); } Scatter3DPtr() : _persistent() { } bool operator!() { return !_persistent; } explicit operator bool() { return bool(_persistent); } YODA::Scatter3D* operator->() { return _persistent.get(); } YODA::Scatter3D* operator->() const { return _persistent.get(); } YODA::Scatter3D & operator*() { return *_persistent; } const YODA::Scatter3D & operator*() const { return *_persistent; } protected: vector _persistent; }; */ class MultiweightAOWrapper : public AnalysisObjectWrapper { public: using Inner = YODA::AnalysisObject; virtual void newSubEvent() = 0; virtual void pushToPersistent(const vector >& weight) = 0; virtual YODA::AnalysisObjectPtr activeYODAPtr() const = 0; }; using Weight = double; template using Fill = pair; template using Fills = multiset>; // TODO TODO TODO // need to override the old fill method too! // otherwise we can't intercept existing fill calls in analysis code // TODO TODO TODO /// Wrappers for analysis objects to store all fills unaggregated, until collapsed template class TupleWrapper; template<> class TupleWrapper : public YODA::Counter { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Counter & h) : YODA::Counter(h) {} // todo: do we need to deal with users using fractions directly? void fill( double weight=1.0, double fraction=1.0 ) { fills_.insert( {YODA::Counter::FillType(),weight} ); } void reset() { fills_.clear(); } const Fills & fills() const { return fills_; } private: // x / weight pairs Fills fills_; }; template<> class TupleWrapper : public YODA::Histo1D { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Histo1D & h) : YODA::Histo1D(h) {} // todo: do we need to deal with users using fractions directly? 
void fill( double x, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); fills_.insert( { x , weight } ); } void reset() { fills_.clear(); } const Fills & fills() const { return fills_; } private: // x / weight pairs Fills fills_; }; template<> class TupleWrapper : public YODA::Profile1D { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Profile1D & h) : YODA::Profile1D(h) {} // todo: do we need to deal with users using fractions directly? void fill( double x, double y, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN"); fills_.insert( { YODA::Profile1D::FillType{x,y}, weight } ); } void reset() { fills_.clear(); } const Fills & fills() const { return fills_; } private: // x / weight pairs Fills fills_; }; template<> class TupleWrapper : public YODA::Histo2D { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Histo2D & h) : YODA::Histo2D(h) {} // todo: do we need to deal with users using fractions directly? void fill( double x, double y, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN"); fills_.insert( { YODA::Histo2D::FillType{x,y}, weight } ); } void reset() { fills_.clear(); } const Fills & fills() const { return fills_; } private: // x / weight pairs Fills fills_; }; template<> class TupleWrapper : public YODA::Profile2D { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Profile2D & h) : YODA::Profile2D(h) {} // todo: do we need to deal with users using fractions directly? void fill( double x, double y, double z, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN"); if ( std::isnan(z) ) throw YODA::RangeError("Z is NaN"); fills_.insert( { YODA::Profile2D::FillType{x,y,z}, weight } ); } void reset() { fills_.clear(); } const Fills & fills() const { return fills_; } private: // x / weight pairs Fills fills_; }; template<> class TupleWrapper : public YODA::Scatter1D { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Scatter1D & h) : YODA::Scatter1D(h) {} }; template<> class TupleWrapper : public YODA::Scatter2D { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Scatter2D & h) : YODA::Scatter2D(h) {} }; template<> class TupleWrapper : public YODA::Scatter3D { public: typedef shared_ptr> Ptr; TupleWrapper(const YODA::Scatter3D & h) : YODA::Scatter3D(h) {} }; template class Wrapper : public MultiweightAOWrapper { friend class Analysis; public: using Inner = T; /* @todo * some things are not really well-defined here * for instance: fill() in the finalize() method and integral() in * the analyze() method. */ Wrapper() = default; Wrapper(const vector& weightnames, const T & p); ~Wrapper(); typename T::Ptr active() const; /* @todo this probably need to loop over all? */ bool operator!() const { return !_active; } // Don't use active() here, assert will catch explicit operator bool() const { return static_cast(_active); } // Don't use active() here, assert will catch T * operator->() { return active().get(); } T * operator->() const { return active().get(); } T & operator*() { return *active(); } const T & operator*() const { return *active(); } /* @todo * these need to be re-thought out. void reset() { active()->reset(); } */ /* @todo * these probably need to loop over all? 
* do we even want to provide equality? */ /* @todo * how about no. friend bool operator==(Wrapper a, Wrapper b){ if (a._persistent.size() != b._persistent.size()) return false; for (size_t i = 0; i < a._persistent.size(); i++) { if (a._persistent.at(i) != b._persistent.at(i)) { return false; } } return true; } friend bool operator!=(Wrapper a, Wrapper b){ return !(a == b); } friend bool operator<(Wrapper a, Wrapper b){ if (a._persistent.size() >= b._persistent.size()) return false; for (size_t i = 0; i < a._persistent.size(); i++) { if (*(a._persistent.at(i)) >= *(b._persistent.at(i))) { return false; } } return true; } */ private: void setActiveWeightIdx(unsigned int iWeight) { _active = _persistent.at(iWeight); } /* this is for dev only---we shouldn't need this in real runs. */ void unsetActiveWeight() { _active.reset(); } void newSubEvent(); virtual YODA::AnalysisObjectPtr activeYODAPtr() const { return _active; } const vector & persistent() const { return _persistent; } /* to be implemented for each type */ void pushToPersistent(const vector >& weight); /* M of these, one for each weight */ vector _persistent; /* N of these, one for each event in evgroup */ vector::Ptr> _evgroup; typename T::Ptr _active; // do we need implicit cast? // operator typename T::Ptr () { // return _active; // } friend class AnalysisHandler; }; /// We need our own shared_ptr class, so we can dispatch -> and * /// all the way down to the inner YODA analysis objects /// /// TODO: provide remaining functionality that shared_ptr has (not needed right now) /// template class rivet_shared_ptr { public: + typedef T value_type; + rivet_shared_ptr() = default; rivet_shared_ptr(decltype(nullptr)) : _p(nullptr) {} /// Convenience constructor, pass through to the Wrapper constructor rivet_shared_ptr(const vector& weightNames, const typename T::Inner & p) : _p( make_shared(weightNames, p) ) {} template rivet_shared_ptr(const shared_ptr & p) : _p(p) {} template rivet_shared_ptr(const rivet_shared_ptr & p) : _p(p.get()) {} // Goes right through to the active YODA object's members T & operator->() { return *_p; } const T & operator->() const { return *_p; } // The active YODA object typename T::Inner & operator*() { return **_p; } const typename T::Inner & operator*() const { return **_p; } bool operator!() const { return !_p || !(*_p); } explicit operator bool() const { return _p && bool(*_p); } template bool operator==(const rivet_shared_ptr & other) const { return _p == other._p; } template bool operator!=(const rivet_shared_ptr & other) const { return _p != other._p; } template bool operator<(const rivet_shared_ptr & other) const { return _p < other._p; } template bool operator>(const rivet_shared_ptr & other) const { return _p > other._p; } template bool operator<=(const rivet_shared_ptr & other) const { return _p <= other._p; } template bool operator>=(const rivet_shared_ptr & other) const { return _p >= other._p; } shared_ptr get() const { return _p; } private: shared_ptr _p; }; // every object listed here needs a virtual fill method in YODA, // otherwise the Tuple fakery won't work. 
using MultiweightAOPtr = rivet_shared_ptr; using Histo1DPtr = rivet_shared_ptr>; using Histo2DPtr = rivet_shared_ptr>; using Profile1DPtr = rivet_shared_ptr>; using Profile2DPtr = rivet_shared_ptr>; using CounterPtr = rivet_shared_ptr>; using Scatter1DPtr = rivet_shared_ptr>; using Scatter2DPtr = rivet_shared_ptr>; using Scatter3DPtr = rivet_shared_ptr>; using YODA::Counter; using YODA::Histo1D; using YODA::HistoBin1D; using YODA::Histo2D; using YODA::HistoBin2D; using YODA::Profile1D; using YODA::ProfileBin1D; using YODA::Profile2D; using YODA::ProfileBin2D; using YODA::Scatter1D; using YODA::Point1D; using YODA::Scatter2D; using YODA::Point2D; using YODA::Scatter3D; using YODA::Point3D; /// Function to get a map of all the refdata in a paper with the /// given @a papername. map getRefData(const string& papername); /// @todo Also provide a Scatter3D getRefData() version? /// Get the file system path to the reference file for this paper. string getDatafilePath(const string& papername); /// Traits class to access the type of the AnalysisObject in the /// reference files. template struct ReferenceTraits {}; template<> struct ReferenceTraits { typedef Counter RefT; }; template<> struct ReferenceTraits { typedef Scatter1D RefT; }; template<> struct ReferenceTraits { typedef Scatter2D RefT; }; template<> struct ReferenceTraits { typedef Scatter2D RefT; }; template<> struct ReferenceTraits { typedef Scatter2D RefT; }; template<> struct ReferenceTraits { typedef Scatter3D RefT; }; template<> struct ReferenceTraits { typedef Scatter3D RefT; }; template<> struct ReferenceTraits { typedef Scatter3D RefT; }; /// If @a dst and @a src both are of same subclass T, copy the /// contents of @a src into @a dst and return true. Otherwise return /// false. template - inline bool aocopy(AnalysisObjectPtr src, AnalysisObjectPtr dst) { + inline bool aocopy(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) { shared_ptr tsrc = dynamic_pointer_cast(src); if ( !tsrc ) return false; shared_ptr tdst = dynamic_pointer_cast(dst); if ( !tdst ) return false; *tdst = *tsrc; return true; } /// If @a dst and @a src both are of same subclass T, add the /// contents of @a src into @a dst and return true. Otherwise return /// false. template - inline bool aoadd(AnalysisObjectPtr dst, AnalysisObjectPtr src, double scale) { + inline bool aoadd(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) { shared_ptr tsrc = dynamic_pointer_cast(src); if ( !tsrc ) return false; shared_ptr tdst = dynamic_pointer_cast(dst); if ( !tdst ) return false; tsrc->scaleW(scale); *tdst += *tsrc; return true; } /// If @a dst is the same subclass as @a src, copy the contents of @a /// src into @a dst and return true. Otherwise return false. - bool copyao(AnalysisObjectPtr src, AnalysisObjectPtr dst); + bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst); /// If @a dst is the same subclass as @a src, scale the contents of /// @a src with @a scale and add it to @a dst and return true. Otherwise /// return false. - bool addaos(AnalysisObjectPtr dst, AnalysisObjectPtr src, double scale); + bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale); /// Check if two analysis objects have the same binning or, if not /// binned, are in other ways compatible. 
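
// Illustrative sketch of the copyao()/addaos() helpers declared above (the
// paths and binning are invented): they act only when both handles hold the
// same concrete YODA type, so generic merging code can try them on
// heterogeneous objects and simply skip on a type mismatch.
inline void exampleAoMerge() {
  YODA::AnalysisObjectPtr a = std::make_shared<YODA::Histo1D>(10, 0.0, 1.0, "/EX/h");
  YODA::AnalysisObjectPtr b = std::make_shared<YODA::Histo1D>(10, 0.0, 1.0, "/EX/h");
  if (copyao(a, b)) {
    // same concrete type, so b is now a copy of a
  }
  addaos(b, a, 2.0);  // scales a by 2 and adds it into b; returns false if the types differ
}
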
- // inline bool bookingCompatible(CounterPtr, CounterPtr) { - // return true; - // } template inline bool bookingCompatible(TPtr a, TPtr b) { return a->sameBinning(*b); } inline bool bookingCompatible(CounterPtr, CounterPtr) { return true; } inline bool bookingCompatible(Scatter1DPtr a, Scatter1DPtr b) { return a->numPoints() == b->numPoints(); } inline bool bookingCompatible(Scatter2DPtr a, Scatter2DPtr b) { return a->numPoints() == b->numPoints(); } inline bool bookingCompatible(Scatter3DPtr a, Scatter3DPtr b) { return a->numPoints() == b->numPoints(); } } #endif diff --git a/src/Core/Analysis.cc b/src/Core/Analysis.cc --- a/src/Core/Analysis.cc +++ b/src/Core/Analysis.cc @@ -1,1156 +1,1048 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Analysis.hh" #include "Rivet/AnalysisHandler.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Tools/BeamConstraint.hh" #include "Rivet/Projections/ImpactParameterProjection.hh" #include "Rivet/Projections/GeneratedPercentileProjection.hh" #include "Rivet/Projections/UserCentEstimate.hh" #include "Rivet/Projections/CentralityProjection.hh" namespace Rivet { Analysis::Analysis(const string& name) : _analysishandler(NULL) { ProjectionApplier::_allowProjReg = false; _defaultname = name; unique_ptr ai = AnalysisInfo::make(name); assert(ai); _info = move(ai); assert(_info); } double Analysis::sqrtS() const { return handler().sqrtS(); } const ParticlePair& Analysis::beams() const { return handler().beams(); } const PdgIdPair Analysis::beamIds() const { return handler().beamIds(); } const string Analysis::histoDir() const { /// @todo Cache in a member variable string _histoDir; if (_histoDir.empty()) { _histoDir = "/" + name(); if (handler().runName().length() > 0) { _histoDir = "/" + handler().runName() + _histoDir; } replace_all(_histoDir, "//", "/"); //< iterates until none } return _histoDir; } const string Analysis::histoPath(const string& hname) const { const string path = histoDir() + "/" + hname; return path; } const string Analysis::histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return histoDir() + "/" + mkAxisCode(datasetId, xAxisId, yAxisId); } const string Analysis::mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { std::stringstream axisCode; axisCode << "d"; if (datasetId < 10) axisCode << 0; axisCode << datasetId; axisCode << "-x"; if (xAxisId < 10) axisCode << 0; axisCode << xAxisId; axisCode << "-y"; if (yAxisId < 10) axisCode << 0; axisCode << yAxisId; return axisCode.str(); } Log& Analysis::getLog() const { string logname = "Rivet.Analysis." 
+ name(); return Log::getLog(logname); } /////////////////////////////////////////// size_t Analysis::numEvents() const { return handler().numEvents(); } double Analysis::sumW() const { return handler().sumW(); } double Analysis::sumW2() const { return handler().sumW2(); } /////////////////////////////////////////// bool Analysis::isCompatible(const ParticlePair& beams) const { return isCompatible(beams.first.pid(), beams.second.pid(), beams.first.energy(), beams.second.energy()); } bool Analysis::isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const { PdgIdPair beams(beam1, beam2); pair energies(e1, e2); return isCompatible(beams, energies); } // bool Analysis::beamIDsCompatible(const PdgIdPair& beams) const { // bool beamIdsOk = false; // for (const PdgIdPair& bp : requiredBeams()) { // if (compatible(beams, bp)) { // beamIdsOk = true; // break; // } // } // return beamIdsOk; // } // /// Check that the energies are compatible (within 1% or 1 GeV, whichever is larger, for a bit of UI forgiveness) // bool Analysis::beamEnergiesCompatible(const pair& energies) const { // /// @todo Use some sort of standard ordering to improve comparisons, esp. when the two beams are different particles // bool beamEnergiesOk = requiredEnergies().size() > 0 ? false : true; // typedef pair DoublePair; // for (const DoublePair& ep : requiredEnergies()) { // if ((fuzzyEquals(ep.first, energies.first, 0.01) && fuzzyEquals(ep.second, energies.second, 0.01)) || // (fuzzyEquals(ep.first, energies.second, 0.01) && fuzzyEquals(ep.second, energies.first, 0.01)) || // (abs(ep.first - energies.first) < 1*GeV && abs(ep.second - energies.second) < 1*GeV) || // (abs(ep.first - energies.second) < 1*GeV && abs(ep.second - energies.first) < 1*GeV)) { // beamEnergiesOk = true; // break; // } // } // return beamEnergiesOk; // } // bool Analysis::beamsCompatible(const PdgIdPair& beams, const pair& energies) const { bool Analysis::isCompatible(const PdgIdPair& beams, const pair& energies) const { // First check the beam IDs bool beamIdsOk = false; for (const PdgIdPair& bp : requiredBeams()) { if (compatible(beams, bp)) { beamIdsOk = true; break; } } if (!beamIdsOk) return false; // Next check that the energies are compatible (within 1% or 1 GeV, whichever is larger, for a bit of UI forgiveness) /// @todo Use some sort of standard ordering to improve comparisons, esp. when the two beams are different particles bool beamEnergiesOk = requiredEnergies().size() > 0 ? 
false : true; typedef pair DoublePair; for (const DoublePair& ep : requiredEnergies()) { if ((fuzzyEquals(ep.first, energies.first, 0.01) && fuzzyEquals(ep.second, energies.second, 0.01)) || (fuzzyEquals(ep.first, energies.second, 0.01) && fuzzyEquals(ep.second, energies.first, 0.01)) || (abs(ep.first - energies.first) < 1*GeV && abs(ep.second - energies.second) < 1*GeV) || (abs(ep.first - energies.second) < 1*GeV && abs(ep.second - energies.first) < 1*GeV)) { beamEnergiesOk = true; break; } } return beamEnergiesOk; } /////////////////////////////////////////// double Analysis::crossSection() const { const YODA::Scatter1D::Points& ps = handler().crossSection()->points(); if (ps.size() != 1) { string errMsg = "cross section missing for analysis " + name(); throw Error(errMsg); } return ps[0].x(); } double Analysis::crossSectionPerEvent() const { return crossSection()/sumW(); } //////////////////////////////////////////////////////////// // Histogramming void Analysis::_cacheRefData() const { if (_refdata.empty()) { MSG_TRACE("Getting refdata cache for paper " << name()); _refdata = getRefData(getRefDataName()); } } - vector Analysis::getAllData(bool includeorphans) const{ - return handler().getData(includeorphans); - } + // vector Analysis::getAllData(bool includeorphans) const{ + // return handler().getData(includeorphans); + // } + CounterPtr & Analysis::book(CounterPtr & ctr, const string& cname, const string& title) { const string path = histoPath(cname); ctr = CounterPtr(handler().weightNames(), Counter(path, title)); - addAnalysisObject(ctr); - MSG_TRACE("Made counter " << cname << " for " << name()); + ctr = addAnalysisObject(ctr); return ctr; - //return addOrGetCompatAO(make_shared(histoPath(cname), title)); } CounterPtr & Analysis::book(CounterPtr & ctr, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, - const string& title) { - // const string& xtitle, - // const string& ytitle) { + const string& title) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(ctr, axisCode, title); } + + Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, - size_t nbins, double lower, double upper, - const string& title, - const string& xtitle, - const string& ytitle) { + size_t nbins, double lower, double upper, + const string& title, + const string& xtitle, + const string& ytitle) { const string path = histoPath(hname); Histo1D hist = Histo1D(nbins, lower, upper, path, title); - hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); histo = Histo1DPtr(handler().weightNames(), hist); - - addAnalysisObject(histo); - MSG_TRACE("Made histogram " << hname << " for " << name()); + histo = addAnalysisObject(histo); return histo; - -// Histo1DPtr hist; -// try { // try to bind to pre-existing -// // AnalysisObjectPtr ao = getAnalysisObject(path); -// // hist = dynamic_pointer_cast(ao); -// hist = getHisto1D(hname); -// /// @todo Test that cast worked -// /// @todo Also test that binning is as expected? -// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name()); -// } catch (...) 
{ // binding failed; make it from scratch -// hist = make_shared(nbins, lower, upper, histoPath(hname), title); -// addAnalysisObject(hist); -// MSG_TRACE("Made histogram " << hname << " for " << name()); -// } - - //Histo1DPtr hist = make_shared(nbins, lower, upper, histoPath(hname), title); - //hist->setTitle(title); - //hist->setAnnotation("XLabel", xtitle); - //hist->setAnnotation("YLabel", ytitle); - //return addOrGetCompatAO(hist); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, - const initializer_list& binedges, - const string& title, - const string& xtitle, - const string& ytitle) { + const initializer_list& binedges, + const string& title, + const string& xtitle, + const string& ytitle) { return book(histo, hname, vector{binedges}, title, xtitle, ytitle); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, - const vector& binedges, - const string& title, - const string& xtitle, - const string& ytitle) { + const vector& binedges, + const string& title, + const string& xtitle, + const string& ytitle) { const string path = histoPath(hname); -// Histo1DPtr hist; -// try { // try to bind to pre-existing -// // AnalysisObjectPtr ao = getAnalysisObject(path); -// // hist = dynamic_pointer_cast(ao); -// hist = getHisto1D(hname); -// /// @todo Test that cast worked -// /// @todo Also test that binning is as expected? -// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name()); -// } catch (...) { // binding failed; make it from scratch -// hist = make_shared(binedges, histoPath(hname), title); -// addAnalysisObject(hist); -// MSG_TRACE("Made histogram " << hname << " for " << name()); -// } + Histo1D hist = Histo1D(binedges, path, title); - - hist.setAnnotation("XLabel", xtitle); - hist.setAnnotation("YLabel", ytitle); - - - histo = Histo1DPtr(handler().weightNames(), hist); - addAnalysisObject(histo); - - MSG_TRACE("Made histogram " << hname << " for " << name()); - return histo; - - //Histo1DPtr hist = make_shared(binedges, histoPath(hname), title); - //hist->setTitle(title); - //hist->setAnnotation("XLabel", xtitle); - //hist->setAnnotation("YLabel", ytitle); - //return addOrGetCompatAO(hist); - } - - - Histo1DPtr Analysis::book(Histo1DPtr& histo, const string& hname, - const initializer_list& binedges, - const string& title, - const string& xtitle, - const string& ytitle) { - return book(histo, hname, vector{binedges}, title, xtitle, ytitle); - } - - - Histo1DPtr Analysis::bookHisto1D(const string& hname, - const Scatter2D& refscatter, - const string& title, - const string& xtitle, - const string& ytitle) { - const string path = histoPath(hname); -// Histo1DPtr hist; -// try { // try to bind to pre-existing -// // AnalysisObjectPtr ao = getAnalysisObject(path); -// // hist = dynamic_pointer_cast(ao); -// hist = getHisto1D(hname); -// /// @todo Test that cast worked -// /// @todo Also test that binning is as expected? -// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name()); -// } catch (...) 
{ // binding failed; make it from scratch -// hist = make_shared(refscatter, histoPath(hname)); -// if (hist->hasAnnotation("IsRef")) hist->rmAnnotation("IsRef"); -// addAnalysisObject(hist); -// MSG_TRACE("Made histogram " << hname << " for " << name()); -// } - - Histo1D hist = Histo1D(refscatter, path); - - hist.setTitle(title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); histo = Histo1DPtr(handler().weightNames(), hist); - addAnalysisObject(histo); - - MSG_TRACE("Made histogram " << hname << " for " << name()); + histo = addAnalysisObject(histo); return histo; - - //Histo1DPtr hist = make_shared(refscatter, histoPath(hname)); - //hist->setTitle(title); - //hist->setAnnotation("XLabel", xtitle); - //hist->setAnnotation("YLabel", ytitle); - //return addOrGetCompatAO(hist); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, - const string& title, - const string& xtitle, - const string& ytitle) { + const string& title, + const string& xtitle, + const string& ytitle) { const Scatter2D& refdata = refData(hname); return book(histo, hname, refdata, title, xtitle, ytitle); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, - const string& title, - const string& xtitle, - const string& ytitle) { + const string& title, + const string& xtitle, + const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(histo, axisCode, title, xtitle, ytitle); } + Histo1DPtr & Analysis::book(Histo1DPtr& histo, + const string& hname, + const Scatter2D& refscatter, + const string& title, + const string& xtitle, + const string& ytitle) { + const string path = histoPath(hname); - /// @todo Add booking methods which take a path, titles and *a reference Scatter from which to book* + Histo1D hist = Histo1D(refscatter, path); + hist.setTitle(title); + hist.setAnnotation("XLabel", xtitle); + hist.setAnnotation("YLabel", ytitle); + if (hist.hasAnnotation("IsRef")) hist.rmAnnotation("IsRef"); + + histo = Histo1DPtr(handler().weightNames(), hist); + histo = addAnalysisObject(histo); + return histo; + } ///////////////// Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2D hist(nxbins, xlower, xupper, nybins, ylower, yupper, path, title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); hist.setAnnotation("ZLabel", ztitle); h2d = Histo2DPtr(handler().weightNames(), hist); - addAnalysisObject(h2d); - - MSG_TRACE("Made 2D histogram " << hname << " for " << name()); + h2d = addAnalysisObject(h2d); return h2d; - - //Histo2DPtr hist = make_shared(nxbins, xlower, xupper, nybins, ylower, yupper, path, title); - //hist->setAnnotation("XLabel", xtitle); - //hist->setAnnotation("YLabel", ytitle); - //hist->setAnnotation("ZLabel", ztitle); - //return addOrGetCompatAO(hist); } - Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname, const initializer_list& xbinedges, const initializer_list& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { return book(h2d, hname, vector{xbinedges}, vector{ybinedges}, title, xtitle, ytitle, ztitle); } Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname, const vector& xbinedges, const vector& ybinedges, const string& 
title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2D hist(xbinedges, ybinedges, path, title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); hist.setAnnotation("ZLabel", ztitle); h2d = Histo2DPtr(handler().weightNames(), hist); - addAnalysisObject(h2d); - - MSG_TRACE("Made 2D histogram " << hname << " for " << name()); + h2d = addAnalysisObject(h2d); return h2d; - - //return addOrGetCompatAO(hist); } Histo2DPtr & Analysis::book(Histo2DPtr & histo, const string& hname, const Scatter3D& refscatter, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2D hist = Histo2D(refscatter, path); - hist.setTitle(title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); hist.setAnnotation("ZLabel", ztitle); + if (hist.hasAnnotation("IsRef")) hist.rmAnnotation("IsRef"); histo = Histo2DPtr(handler().weightNames(), hist); - addAnalysisObject(histo); - - MSG_TRACE("Made histogram " << hname << " for " << name()); + histo = addAnalysisObject(histo); return histo; - - //return addOrGetCompatAO(hist); } Histo2DPtr & Analysis::book(Histo2DPtr & histo, const string& hname, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const Scatter3D& refdata = refData(hname); return book(histo, hname, refdata, title, xtitle, ytitle, ztitle); } Histo2DPtr & Analysis::book(Histo2DPtr & histo, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(histo, axisCode, title, xtitle, ytitle, ztitle); } ///////////////// Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname, size_t nbins, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Profile1D prof(nbins, lower, upper, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); p1d = Profile1DPtr(handler().weightNames(), prof); - addAnalysisObject(p1d); - - MSG_TRACE("Made profile histogram " << hname << " for " << name()); + p1d = addAnalysisObject(p1d); return p1d; - - //return addOrGetCompatAO(prof); } Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname, const initializer_list& binedges, const string& title, const string& xtitle, const string& ytitle) { return book(p1d, hname, vector{binedges}, title, xtitle, ytitle); } - Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname, + Profile1DPtr & Analysis::book(Profile1DPtr & p1d, const string& hname, const vector& binedges, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); + Profile1D prof(binedges, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); p1d = Profile1DPtr(handler().weightNames(), prof); - addAnalysisObject(p1d); - - MSG_TRACE("Made profile histogram " << hname << " for " << name()); + p1d = addAnalysisObject(p1d); return p1d; - - //return addOrGetCompatAO(prof); } - - Profile1DPtr Analysis::book(Profile1DPtr & prof, const string& hname, - const initializer_list& binedges, - const string& title, - const string& xtitle, - const string& ytitle) - { - return book(prof, hname, vector{binedges}, title, xtitle, ytitle); - } - - - Profile1DPtr 
Analysis::bookProfile1D(const string& hname, + Profile1DPtr & Analysis::book(Profile1DPtr & p1d, const string& hname, const Scatter2D& refscatter, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); + Profile1D prof(refscatter, path); prof.setTitle(title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); + if (prof.hasAnnotation("IsRef")) prof.rmAnnotation("IsRef"); p1d = Profile1DPtr(handler().weightNames(), prof); - addAnalysisObject(p1d); - - MSG_TRACE("Made profile histogram " << hname << " for " << name()); + p1d = addAnalysisObject(p1d); return p1d; - - // if (prof.hasAnnotation("IsRef")) prof.rmAnnotation("IsRef"); - //return addOrGetCompatAO(prof); } Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname, const string& title, const string& xtitle, const string& ytitle) { const Scatter2D& refdata = refData(hname); book(p1d, hname, refdata, title, xtitle, ytitle); return p1d; } Profile1DPtr & Analysis::book(Profile1DPtr & p1d,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(p1d, axisCode, title, xtitle, ytitle); } /////////////////// Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); + Profile2D prof(nxbins, xlower, xupper, nybins, ylower, yupper, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); prof.setAnnotation("ZLabel", ztitle); p2d = Profile2DPtr(handler().weightNames(), prof); - addAnalysisObject(p2d); - - MSG_TRACE("Made 2D profile histogram " << hname << " for " << name()); + p2d = addAnalysisObject(p2d); return p2d; - - //return addOrGetCompatAO(prof); } Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname, const initializer_list& xbinedges, const initializer_list& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { return book(p2d, hname, vector{xbinedges}, vector{ybinedges}, title, xtitle, ytitle, ztitle); } Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname, const vector& xbinedges, const vector& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); + Profile2D prof(xbinedges, ybinedges, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); prof.setAnnotation("ZLabel", ztitle); p2d = Profile2DPtr(handler().weightNames(), prof); - addAnalysisObject(p2d); - - MSG_TRACE("Made 2D profile histogram " << hname << " for " << name()); + p2d = addAnalysisObject(p2d); return p2d; - - //return addOrGetCompatAO(prof); } - Profile2DPtr Analysis::book(Profile2DPtr & prof, const string& hname, - const initializer_list& xbinedges, - const initializer_list& ybinedges, - const string& title, - const string& xtitle, - const string& ytitle, - const string& ztitle) - { - return book(prof, hname, vector{xbinedges}, vector{ybinedges}, - title, xtitle, ytitle, ztitle); - } + /// @todo REINSTATE + // Profile2DPtr Analysis::book(Profile2DPtr& prof,const string& hname, + // const Scatter3D& refscatter, + // const string& title, + // const string& xtitle, + // const string& 
ytitle, + // const string& ztitle) { + // const string path = histoPath(hname); -/* RE-ENABLE - Profile2DPtr Analysis::book(Profile2DPtr& prof,const string& hname, - const Scatter3D& refscatter, - const string& title, - const string& xtitle, - const string& ytitle, - const string& ztitle) { - const string path = histoPath(hname); - Profile2DPtr prof( new Profile2D(refscatter, path) ); - addAnalysisObject(prof); - MSG_TRACE("Made 2D profile histogram " << hname << " for " << name()); - if (prof->hasAnnotation("IsRef")) prof->rmAnnotation("IsRef"); - prof->setTitle(title); - prof->setAnnotation("XLabel", xtitle); - prof->setAnnotation("YLabel", ytitle); - prof->setAnnotation("ZLabel", ztitle); - return prof; - } -*/ + // /// @todo Add no-metadata argument to YODA copy constructors + // Profile2D prof(refscatter, path); + // prof.setTitle(title); + // prof.setAnnotation("XLabel", xtitle); + // prof.setAnnotation("YLabel", ytitle); + // prof.setAnnotation("ZLabel", ztitle); + // if (prof.hasAnnotation("IsRef")) prof.rmAnnotation("IsRef"); + // p2d = Profile2DPtr(handler().weightNames(), prof); + // p2d = addAnalysisObject(p2d); + // return p2d; + // } - Profile2DPtr Analysis::book(Profile2DPtr& prof, const string& hname, - const string& title, - const string& xtitle, - const string& ytitle, - const string& ztitle) { - const Scatter3D& refdata = refData(hname); - return bookProfile2D(prof, hname, refdata, title, xtitle, ytitle, ztitle); - } + + // Profile2DPtr Analysis::book(Profile2DPtr& prof, const string& hname, + // const string& title, + // const string& xtitle, + // const string& ytitle, + // const string& ztitle) { + // const Scatter3D& refdata = refData(hname); + // return book(prof, hname, refdata, title, xtitle, ytitle, ztitle); + // } + + + /////////////// + + + /// @todo Should be able to book Scatter1Ds + + + /////////////// Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, bool copy_pts, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(s2d, axisCode, copy_pts, title, xtitle, ytitle); } Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname, bool copy_pts, const string& title, const string& xtitle, const string& ytitle) { + const string path = histoPath(hname); + Scatter2D scat; - const string path = histoPath(hname); if (copy_pts) { const Scatter2D& refdata = refData(hname); scat = Scatter2D(refdata, path); for (Point2D& p : scat.points()) p.setY(0, 0); } else { scat = Scatter2D(path); } scat.setTitle(title); scat.setAnnotation("XLabel", xtitle); scat.setAnnotation("YLabel", ytitle); + if (scat.hasAnnotation("IsRef")) scat.rmAnnotation("IsRef"); s2d = Scatter2DPtr(handler().weightNames(), scat); - addAnalysisObject(s2d); - - MSG_TRACE("Made scatter " << hname << " for " << name()); -// if (scat.hasAnnotation("IsRef")) scat.rmAnnotation("IsRef"); + s2d = addAnalysisObject(s2d); return s2d; - - //return addOrGetCompatAO(s); } Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname, size_t npts, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { - // TODO: default branch has a read mechanism implemented, to start from an existing AO. 
- // need to work out how to implement that for multiweights const string path = histoPath(hname); + Scatter2D scat; const double binwidth = (upper-lower)/npts; for (size_t pt = 0; pt < npts; ++pt) { const double bincentre = lower + (pt + 0.5) * binwidth; scat.addPoint(bincentre, 0, binwidth/2.0, 0); } + scat.setTitle(title); scat.setAnnotation("XLabel", xtitle); scat.setAnnotation("YLabel", ytitle); s2d = Scatter2DPtr(handler().weightNames(), scat); - addAnalysisObject(s2d); - - MSG_TRACE("Made scatter " << hname << " for " << name()); + s2d = addAnalysisObject(s2d); return s2d; - - //return addOrGetCompatAO(s); } Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname, const vector& binedges, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); + Scatter2D scat; for (size_t pt = 0; pt < binedges.size()-1; ++pt) { const double bincentre = (binedges[pt] + binedges[pt+1]) / 2.0; const double binwidth = binedges[pt+1] - binedges[pt]; scat.addPoint(bincentre, 0, binwidth/2.0, 0); } scat.setTitle(title); scat.setAnnotation("XLabel", xtitle); scat.setAnnotation("YLabel", ytitle); s2d = Scatter2DPtr(handler().weightNames(), scat); - addAnalysisObject(s2d); + s2d = addAnalysisObject(s2d); + return s2d; + } - MSG_TRACE("Made scatter " << hname << " for " << name()); - return s2d; - - //return addOrGetCompatAO(s); - } + /// @todo Book Scatter3Ds? ///////////////////// void Analysis::divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const { const string path = s->path(); *s = *c1 / *c2; s->setPath(path); } void Analysis::divide(const Counter& c1, const Counter& c2, Scatter1DPtr s) const { const string path = s->path(); *s = c1 / c2; s->setPath(path); } void Analysis::divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const { const string path = s->path(); *s = *h1 / *h2; s->setPath(path); } void Analysis::divide(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const { const string path = s->path(); *s = h1 / h2; s->setPath(path); } void Analysis::divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const { const string path = s->path(); *s = *p1 / *p2; s->setPath(path); } void Analysis::divide(const Profile1D& p1, const Profile1D& p2, Scatter2DPtr s) const { const string path = s->path(); *s = p1 / p2; s->setPath(path); } void Analysis::divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const { const string path = s->path(); *s = *h1 / *h2; s->setPath(path); } void Analysis::divide(const Histo2D& h1, const Histo2D& h2, Scatter3DPtr s) const { const string path = s->path(); *s = h1 / h2; s->setPath(path); } void Analysis::divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const { const string path = s->path(); *s = *p1 / *p2; s->setPath(path); } void Analysis::divide(const Profile2D& p1, const Profile2D& p2, Scatter3DPtr s) const { const string path = s->path(); *s = p1 / p2; s->setPath(path); } /// @todo Counter and Histo2D efficiencies and asymms void Analysis::efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const { const string path = s->path(); *s = YODA::efficiency(*h1, *h2); s->setPath(path); } void Analysis::efficiency(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const { const string path = s->path(); *s = YODA::efficiency(h1, h2); s->setPath(path); } void Analysis::asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const { const string path = s->path(); *s = YODA::asymm(*h1, *h2); s->setPath(path); } void Analysis::asymm(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const { const 
string path = s->path(); *s = YODA::asymm(h1, h2); s->setPath(path); } void Analysis::scale(CounterPtr cnt, Analysis::CounterAdapter factor) { if (!cnt) { MSG_WARNING("Failed to scale counter=NULL in analysis " << name() << " (scale=" << double(factor) << ")"); return; } if (std::isnan(double(factor)) || std::isinf(double(factor))) { MSG_WARNING("Failed to scale counter=" << cnt->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")"); factor = 0; } MSG_TRACE("Scaling counter " << cnt->path() << " by factor " << double(factor)); try { cnt->scaleW(factor); } catch (YODA::Exception& we) { MSG_WARNING("Could not scale counter " << cnt->path()); return; } } void Analysis::normalize(Histo1DPtr histo, Analysis::CounterAdapter norm, bool includeoverflows) { if (!histo) { MSG_WARNING("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << double(norm) << ")"); return; } MSG_TRACE("Normalizing histo " << histo->path() << " to " << double(norm)); try { const double hint = histo->integral(includeoverflows); if (hint == 0) MSG_WARNING("Skipping histo with null area " << histo->path()); else histo->normalize(norm, includeoverflows); } catch (YODA::Exception& we) { MSG_WARNING("Could not normalize histo " << histo->path()); return; } } void Analysis::scale(Histo1DPtr histo, Analysis::CounterAdapter factor) { if (!histo) { MSG_WARNING("Failed to scale histo=NULL in analysis " << name() << " (scale=" << double(factor) << ")"); return; } if (std::isnan(double(factor)) || std::isinf(double(factor))) { MSG_WARNING("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")"); factor = 0; } MSG_TRACE("Scaling histo " << histo->path() << " by factor " << double(factor)); try { histo->scaleW(factor); } catch (YODA::Exception& we) { MSG_WARNING("Could not scale histo " << histo->path()); return; } } void Analysis::normalize(Histo2DPtr histo, Analysis::CounterAdapter norm, bool includeoverflows) { if (!histo) { MSG_ERROR("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << double(norm) << ")"); return; } MSG_TRACE("Normalizing histo " << histo->path() << " to " << double(norm)); try { const double hint = histo->integral(includeoverflows); if (hint == 0) MSG_WARNING("Skipping histo with null area " << histo->path()); else histo->normalize(norm, includeoverflows); } catch (YODA::Exception& we) { MSG_WARNING("Could not normalize histo " << histo->path()); return; } } void Analysis::scale(Histo2DPtr histo, Analysis::CounterAdapter factor) { if (!histo) { MSG_ERROR("Failed to scale histo=NULL in analysis " << name() << " (scale=" << double(factor) << ")"); return; } if (std::isnan(double(factor)) || std::isinf(double(factor))) { MSG_ERROR("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")"); factor = 0; } MSG_TRACE("Scaling histo " << histo->path() << " by factor " << double(factor)); try { histo->scaleW(factor); } catch (YODA::Exception& we) { MSG_WARNING("Could not scale histo " << histo->path()); return; } } void Analysis::integrate(Histo1DPtr h, Scatter2DPtr s) const { // preserve the path info const string path = s->path(); *s = toIntegralHisto(*h); s->setPath(path); } void Analysis::integrate(const Histo1D& h, Scatter2DPtr s) const { // preserve the path info const string path = s->path(); *s = toIntegralHisto(h); s->setPath(path); } } /// @todo 2D versions of integrate... defined how, exactly?!? 
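The scale() and normalize() helpers above all follow the same defensive pattern: bail out on a null histogram pointer, neutralise a NaN or infinite scale factor with a warning, and only then touch the underlying YODA object inside a try/catch. Below is a minimal, compilable sketch of that guard logic in plain C++; the Hist type and its scaleW() are illustrative stand-ins, not the real Rivet/YODA API.

    #include <cmath>
    #include <iostream>
    #include <limits>
    #include <memory>
    #include <stdexcept>
    #include <string>

    // Stand-in for a YODA histogram: scaleW() rescales the accumulated weights.
    struct Hist {
      double sumw = 1.0;
      void scaleW(double f) { sumw *= f; }  // the real call may throw an exception
    };

    // Mirrors the guard pattern of the Analysis::scale()/normalize() helpers above.
    void safeScale(std::shared_ptr<Hist> h, double factor, const std::string& who) {
      if (!h) {                                        // guard 1: null object
        std::cerr << who << ": cannot scale null histogram\n";
        return;
      }
      if (std::isnan(factor) || std::isinf(factor)) {  // guard 2: invalid factor
        std::cerr << who << ": invalid scale factor " << factor << ", using 0\n";
        factor = 0;
      }
      try {
        h->scaleW(factor);                             // guard 3: survive exceptions
      } catch (const std::exception& e) {
        std::cerr << who << ": scaling failed: " << e.what() << "\n";
      }
    }

    int main() {
      auto h = std::make_shared<Hist>();
      safeScale(h, 2.0, "DEMO");                                          // fine
      safeScale(h, std::numeric_limits<double>::infinity(), "DEMO");      // zeroed with a warning
      safeScale(nullptr, 2.0, "DEMO");                                    // null histogram is caught
      std::cout << "sumw = " << h->sumw << "\n";
    }

normalize() adds one more step before scaling: it computes the integral (optionally including overflows) and skips histograms with zero area rather than dividing by zero.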
////////////////////////////////// namespace { void errormsg(std::string name) { // #ifdef HAVE_BACKTRACE // void * buffer[4]; // backtrace(buffer, 4); // backtrace_symbols_fd(buffer, 4 , 1); // #endif std::cerr << name << ": Can't book objects outside of init().\n"; assert(false); } } + namespace Rivet { - void Analysis::addAnalysisObject(const MultiweightAOPtr & ao) { - if (handler().stage() == AnalysisHandler::Stage::INIT) { - _analysisobjects.push_back(ao); - } - else { - errormsg(name()); - } - } + + // void Analysis::addAnalysisObject(const MultiweightAOPtr & ao) { + // if (handler().stage() == AnalysisHandler::Stage::INIT) { + // _analysisobjects.push_back(ao); + // } + // else { + // errormsg(name()); + // } + // } void Analysis::removeAnalysisObject(const string& path) { for (auto it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) { if ((*it)->path() == path) { _analysisobjects.erase(it); break; } } } void Analysis::removeAnalysisObject(const MultiweightAOPtr & ao) { for (auto it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) { if ((*it) == ao) { _analysisobjects.erase(it); break; } } } const CentralityProjection & Analysis::declareCentrality(const SingleValueProjection &proj, string calAnaName, string calHistName, const string projName, bool increasing) { CentralityProjection cproj; // Select the centrality variable from option. Use REF as default. // Other selections are "GEN", "IMP" and "USR" (USR only in HEPMC 3). string sel = getOption("cent","REF"); set done; if ( sel == "REF" ) { Scatter2DPtr refscat; auto refmap = getRefData(calAnaName); if ( refmap.find(calHistName) != refmap.end() ) refscat = dynamic_pointer_cast(refmap.find(calHistName)->second); if ( !refscat ) { MSG_WARNING("No reference calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << " in " << calAnaName << ")"); } else { MSG_INFO("Found calibration histogram " << sel << " " << refscat->path()); cproj.add(PercentileProjection(proj, refscat, increasing), sel); } } else if ( sel == "GEN" ) { Histo1DPtr genhist; string histpath = "/" + calAnaName + "/" + calHistName; - for ( AnalysisObjectPtr ao : handler().getData(true) ) { + for ( YODA::AnalysisObjectPtr ao : handler().getData(true) ) { if ( ao->path() == histpath ) genhist = dynamic_pointer_cast(ao); } if ( !genhist || genhist->numEntries() <= 1 ) { MSG_WARNING("No generated calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << " in " << calAnaName << ")"); } else { MSG_INFO("Found calibration histogram " << sel << " " << genhist->path()); cproj.add(PercentileProjection(proj, genhist, increasing), sel); } } else if ( sel == "IMP" ) { Histo1DPtr imphist = - getAnalysisObject(calAnaName, calHistName + "_IMP"); + getAnalysisObject(calAnaName, calHistName + "_IMP"); if ( !imphist || imphist->numEntries() <= 1 ) { MSG_WARNING("No impact parameter calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << "_IMP in " << calAnaName << ")"); } else { MSG_INFO("Found calibration histogram " << sel << " " << imphist->path()); cproj.add(PercentileProjection(ImpactParameterProjection(), imphist, true), sel); } } else if ( sel == "USR" ) { #if HEPMC_VERSION_CODE >= 3000000 Histo1DPtr usrhist = - getAnalysisObject(calAnaName, calHistName + "_USR"); + getAnalysisObject(calAnaName, calHistName + "_USR"); if ( !usrhist || 
usrhist->numEntries() <= 1 ) { MSG_WARNING("No user-defined calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << "_USR in " << - calAnaName << ")"); + calAnaName << ")"); continue; } else { MSG_INFO("Found calibration histogram " << sel << " " << usrhist->path()); cproj.add((UserCentEstimate(), usrhist, true), sel); } #else MSG_WARNING("UserCentEstimate is only available with HepMC3."); #endif } else if ( sel == "RAW" ) { #if HEPMC_VERSION_CODE >= 3000000 cproj.add(GeneratedCentrality(), sel); #else MSG_WARNING("GeneratedCentrality is only available with HepMC3."); #endif } else MSG_WARNING("'" << sel << "' is not a valid PercentileProjection tag."); if ( cproj.empty() ) MSG_WARNING("CentralityProjection " << projName << " did not contain any valid PercentileProjections."); return declare(cproj, projName); } + + vector Analysis::_weightNames() const { + return handler().weightNames(); + } + + size_t Analysis::_defaultWeightIndex() const { + return handler().defaultWeightIndex(); + } + + MultiweightAOPtr Analysis::_getOtherAnalysisObject(const std::string & ananame, const std::string& name) { + std::string path = "/" + ananame + "/" + name; + const auto& ana = handler().analysis(ananame); + return ana->getAnalysisObject(name); //< @todo includeorphans check?? + } + + void Analysis::_checkBookInit() const { + if (handler().stage() != AnalysisHandler::Stage::INIT) { + MSG_ERROR("Can't book objects outside of init()"); + throw UserError(name() + ": Can't book objects outside of init()."); + } + } + + } diff --git a/src/Core/AnalysisHandler.cc b/src/Core/AnalysisHandler.cc --- a/src/Core/AnalysisHandler.cc +++ b/src/Core/AnalysisHandler.cc @@ -1,765 +1,763 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/AnalysisHandler.hh" #include "Rivet/Analysis.hh" #include "Rivet/Tools/ParticleName.hh" #include "Rivet/Tools/BeamConstraint.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/Beam.hh" #include "YODA/ReaderYODA.h" #include "YODA/WriterYODA.h" #include #include using std::cout; using std::cerr; namespace { - inline std::vector split(const std::string& input, const std::string& regex) { - // passing -1 as the submatch index parameter performs splitting - std::regex re(regex); - std::sregex_token_iterator - first{input.begin(), input.end(), re, -1}, - last; - return {first, last}; - } + + inline std::vector split(const std::string& input, const std::string& regex) { + // passing -1 as the submatch index parameter performs splitting + std::regex re(regex); + std::sregex_token_iterator + first{input.begin(), input.end(), re, -1}, + last; + return {first, last}; + } + } namespace Rivet { AnalysisHandler::AnalysisHandler(const string& runname) : _runname(runname), _eventCounter(vector(), Counter()), _xs(vector(), Scatter1D()), _initialised(false), _ignoreBeams(false), _defaultWeightIdx(0) {} AnalysisHandler::~AnalysisHandler() { } Log& AnalysisHandler::getLog() const { return Log::getLog("Rivet.Analysis.Handler"); } /// http://stackoverflow.com/questions/4654636/how-to-determine-if-a-string-is-a-number-with-c bool is_number(const std::string& s) { std::string::const_iterator it = s.begin(); while (it != s.end() && std::isdigit(*it)) ++it; return !s.empty() && it == s.end(); } /// Check if any of the weightnames is not a number bool AnalysisHandler::haveNamedWeights() { bool dec=false; for (unsigned int i=0;i<_weightNames.size();++i) { string s = _weightNames[i]; if (!is_number(s)) { dec=true; 
break; } } return dec; } void AnalysisHandler::init(const GenEvent& ge) { if (_initialised) throw UserError("AnalysisHandler::init has already been called: cannot re-initialize!"); - // TODO TODO - // should the rivet analysis objects know about weight names? + /// @todo Should the Rivet analysis objects know about weight names? setRunBeams(Rivet::beams(ge)); MSG_DEBUG("Initialising the analysis handler"); _eventNumber = ge.event_number(); setWeightNames(ge); if (haveNamedWeights()) MSG_INFO("Using named weights"); else MSG_INFO("NOT using named weights. Using first weight as nominal weight"); _eventCounter = CounterPtr(weightNames(), Counter("_EVTCOUNT")); - // set the cross section based on what is reported by this event. - // if no cross section + // Set the cross section based on what is reported by this event. if (ge.cross_section()) { - MSG_TRACE("getting cross section."); + MSG_TRACE("Getting cross section."); double xs = ge.cross_section()->cross_section(); double xserr = ge.cross_section()->cross_section_error(); setCrossSection(xs, xserr); } // Check that analyses are beam-compatible, and remove those that aren't const size_t num_anas_requested = analysisNames().size(); vector anamestodelete; for (const AnaHandle a : _analyses) { if (!_ignoreBeams && !a->isCompatible(beams())) { //MSG_DEBUG(a->name() << " requires beams " << a->requiredBeams() << " @ " << a->requiredEnergies() << " GeV"); anamestodelete.push_back(a->name()); } } for (const string& aname : anamestodelete) { MSG_WARNING("Analysis '" << aname << "' is incompatible with the provided beams: removing"); removeAnalysis(aname); } if (num_anas_requested > 0 && analysisNames().empty()) { cerr << "All analyses were incompatible with the first event's beams\n" << "Exiting, since this probably wasn't intentional!" 
<< endl; exit(1); } // Warn if any analysis' status is not unblemished for (const AnaHandle a : analyses()) { if (toUpper(a->status()) == "PRELIMINARY") { MSG_WARNING("Analysis '" << a->name() << "' is preliminary: be careful, it may change and/or be renamed!"); } else if (toUpper(a->status()) == "OBSOLETE") { MSG_WARNING("Analysis '" << a->name() << "' is obsolete: please update!"); } else if (toUpper(a->status()).find("UNVALIDATED") != string::npos) { MSG_WARNING("Analysis '" << a->name() << "' is unvalidated: be careful, it may be broken!"); } } // Initialize the remaining analyses _stage = Stage::INIT; for (AnaHandle a : _analyses) { MSG_DEBUG("Initialising analysis: " << a->name()); try { // Allow projection registration in the init phase onwards a->_allowProjReg = true; a->init(); //MSG_DEBUG("Checking consistency of analysis: " << a->name()); //a->checkConsistency(); } catch (const Error& err) { cerr << "Error in " << a->name() << "::init method: " << err.what() << endl; exit(1); } MSG_DEBUG("Done initialising analysis: " << a->name()); } _stage = Stage::OTHER; _initialised = true; MSG_DEBUG("Analysis handler initialised"); } + void AnalysisHandler::setWeightNames(const GenEvent& ge) { /// reroute the print output to a std::stringstream and process /// The iteration is done over a map in hepmc2 so this is safe std::ostringstream stream; ge.weights().print(stream); // Super lame, I know string str = stream.str(); std::regex re("(([^()]+))"); // Regex for stuff enclosed by parentheses () size_t idx = 0; - for(std::sregex_iterator i = std::sregex_iterator(str.begin(), str.end(), re); - i != std::sregex_iterator(); ++i ) { + for (std::sregex_iterator i = std::sregex_iterator(str.begin(), str.end(), re); i != std::sregex_iterator(); ++i ) { std::smatch m = *i; vector temp = ::split(m.str(), "[,]"); if (temp.size() ==2) { MSG_DEBUG("Name of weight #" << _weightNames.size() << ": " << temp[0]); // store the default weight based on weight names if (temp[0] == "Weight" || temp[0] == "0" || temp[0] == "Default") { MSG_DEBUG(_weightNames.size() << " is being used as the nominal."); _weightNames.push_back(""); _defaultWeightIdx = idx; } else _weightNames.push_back(temp[0]); idx++; } } } void AnalysisHandler::analyze(const GenEvent& ge) { // Call init with event as template if not already initialised if (!_initialised) init(ge); assert(_initialised); // Ensure that beam details match those from the first event (if we're checking beams) if ( !_ignoreBeams ) { const PdgIdPair beams = Rivet::beamIds(ge); const double sqrts = Rivet::sqrtS(ge); if (!compatible(beams, _beams) || !fuzzyEquals(sqrts, sqrtS())) { cerr << "Event beams mismatch: " << PID::toBeamsString(beams) << " @ " << sqrts/GeV << " GeV" << " vs. first beams " << this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl; exit(1); } } // Create the Rivet event wrapper /// @todo Filter/normalize the event here Event event(ge); // set the cross section based on what is reported by this event. // if no cross section MSG_TRACE("getting cross section."); if (ge.cross_section()) { MSG_TRACE("getting cross section from GenEvent."); double xs = ge.cross_section()->cross_section(); double xserr = ge.cross_section()->cross_section_error(); setCrossSection(xs, xserr); } #endif // won't happen for first event because _eventNumber is set in // init() if (_eventNumber != ge.event_number()) { /// @todo /// can we get away with not passing a matrix? 
MSG_TRACE("AnalysisHandler::analyze(): Pushing _eventCounter to persistent."); _eventCounter.get()->pushToPersistent(_subEventWeights); // if this is indeed a new event, push the temporary // histograms and reset for (const AnaHandle& a : _analyses) { for (auto ao : a->analysisObjects()) { MSG_TRACE("AnalysisHandler::analyze(): Pushing " << a->name() << "'s " << ao->name() << " to persistent."); ao.get()->pushToPersistent(_subEventWeights); } MSG_TRACE("AnalysisHandler::analyze(): finished pushing " << a->name() << "'s objects to persistent."); } _eventNumber = ge.event_number(); MSG_DEBUG("nominal event # " << _eventCounter.get()->_persistent[0]->numEntries()); MSG_DEBUG("nominal sum of weights: " << _eventCounter.get()->_persistent[0]->sumW()); MSG_DEBUG("Event has " << _subEventWeights.size() << " sub events."); _subEventWeights.clear(); } MSG_TRACE("starting new sub event"); _eventCounter.get()->newSubEvent(); for (const AnaHandle& a : _analyses) { for (auto ao : a->analysisObjects()) { ao.get()->newSubEvent(); } } _subEventWeights.push_back(event.weights()); MSG_DEBUG("Analyzing subevent #" << _subEventWeights.size() - 1 << "."); _eventCounter->fill(); // Run the analyses for (AnaHandle a : _analyses) { MSG_TRACE("About to run analysis " << a->name()); try { a->analyze(event); } catch (const Error& err) { cerr << "Error in " << a->name() << "::analyze method: " << err.what() << endl; exit(1); } MSG_TRACE("Finished running analysis " << a->name()); } if ( _dumpPeriod > 0 && numEvents()%_dumpPeriod == 0 ) { MSG_INFO("Dumping intermediate results to " << _dumpFile << "."); _dumping = true; finalize(); _dumping = false; writeData(_dumpFile); } } void AnalysisHandler::analyze(const GenEvent* ge) { if (ge == nullptr) { MSG_ERROR("AnalysisHandler received null pointer to GenEvent"); //throw Error("AnalysisHandler received null pointer to GenEvent"); } analyze(*ge); } void AnalysisHandler::finalize() { - if (!_initialised) return; - MSG_INFO("Finalising analyses"); + if (!_initialised) return; + MSG_INFO("Finalising analyses"); - MSG_TRACE("AnalysisHandler::finalize(): Pushing analysis objects to persistent."); - _eventCounter.get()->pushToPersistent(_subEventWeights); - for (const AnaHandle& a : _analyses) { - for (auto ao : a->analysisObjects()) - ao.get()->pushToPersistent(_subEventWeights); - } + // First push all analyses' objects to persistent + MSG_TRACE("AnalysisHandler::finalize(): Pushing analysis objects to persistent."); + _eventCounter.get()->pushToPersistent(_subEventWeights); + for (const AnaHandle& a : _analyses) { + for (auto ao : a->analysisObjects()) + ao.get()->pushToPersistent(_subEventWeights); + } - for (const AnaHandle& a : _analyses) { - for (size_t iW = 0; iW < numWeights(); iW++) { - _eventCounter.get()->setActiveWeightIdx(iW); - _xs.get()->setActiveWeightIdx(iW); - for (auto ao : a->analysisObjects()) - ao.get()->setActiveWeightIdx(iW); - - MSG_TRACE("running " << a->name() << "::finalize() for weight " << iW << "."); - - try { - a->finalize(); - } catch (const Error& err) { - cerr << "Error in " << a->name() << "::finalize method: " << err.what() << '\n'; - exit(1); - } - } - } - - -/* MERGE // First we make copies of all analysis objects. map backupAOs; - for (auto ao : getData(false, true) ) + for (auto ao : getYodaAOs(false, true, false) ) backupAOs[ao->path()] = AnalysisObjectPtr(ao->newclone()); - // Now we run the (re-entrant) finalize() functions for all analyses. 
- MSG_INFO("Finalising analyses"); - for (AnaHandle a : _analyses) { - a->setCrossSection(_xs); - try { - if ( !_dumping || a->info().reentrant() ) a->finalize(); - else if ( _dumping ) - MSG_INFO("Skipping periodic dump of " << a->name() - << " as it is not declared reentrant."); - } catch (const Error& err) { - cerr << "Error in " << a->name() << "::finalize method: " << err.what() << endl; - exit(1); + // Finalize all the histograms + for (const AnaHandle& a : _analyses) { + // a->setCrossSection(_xs); + for (size_t iW = 0; iW < numWeights(); iW++) { + _eventCounter.get()->setActiveWeightIdx(iW); + _xs.get()->setActiveWeightIdx(iW); + for (auto ao : a->analysisObjects()) + ao.get()->setActiveWeightIdx(iW); + + MSG_TRACE("Running " << a->name() << "::finalize() for weight " << iW << "."); + + try { + if ( !_dumping || a->info().reentrant() ) a->finalize(); + else if ( _dumping == 1 && iW == 0 ) + MSG_INFO("Skipping periodic dump of " << a->name() + << " as it is not declared reentrant."); + } catch (const Error& err) { + cerr << "Error in " << a->name() << "::finalize method: " << err.what() << endl; + exit(1); + } + } } -*/ + + // Now we copy all analysis objects to the list of finalized + // ones, and restore the value to their original ones. + _finalizedAOs.clear(); + for ( auto ao : getYodaAOs(false, false, false) ) + _finalizedAOs.push_back(AnalysisObjectPtr(ao->newclone())); + for ( auto ao : getYodaAOs(false, true, false) ) { + // TODO: This should be possible to do in a nicer way, with a flag etc. + if (ao->path().find("/FINAL") != std::string::npos) continue; + auto aoit = backupAOs.find(ao->path()); + if ( aoit == backupAOs.end() ) { + AnaHandle ana = analysis(split(ao->path(), "/")[0]); + if ( ana ) ana->removeAnalysisObject(ao->path()); + } else + copyao(aoit->second, ao); + } // Print out number of events processed const int nevts = numEvents(); MSG_INFO("Processed " << nevts << " event" << (nevts != 1 ? "s" : "")); // // Delete analyses // MSG_DEBUG("Deleting analyses"); // _analyses.clear(); // Print out MCnet boilerplate cout << '\n'; cout << "The MCnet usage guidelines apply to Rivet: see http://www.montecarlonet.org/GUIDELINES" << '\n'; cout << "Please acknowledge plots made with Rivet analyses, and cite arXiv:1003.0694 (http://arxiv.org/abs/1003.0694)" << endl; } AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname, std::map pars) { // Make an option handle. std::string parHandle = ""; for (map::iterator par = pars.begin(); par != pars.end(); ++par) { parHandle +=":"; parHandle += par->first + "=" + par->second; } return addAnalysis(analysisname + parHandle); } AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname) { // Check for a duplicate analysis /// @todo Might we want to be able to run an analysis twice, with different params? /// Requires avoiding histo tree clashes, i.e. storing the histos on the analysis objects. string ananame = analysisname; vector anaopt = split(analysisname, ":"); if ( anaopt.size() > 1 ) ananame = anaopt[0]; AnaHandle analysis( AnalysisLoader::getAnalysis(ananame) ); if (analysis.get() != 0) { // < Check for null analysis. MSG_DEBUG("Adding analysis '" << analysisname << "'"); map opts; for ( int i = 1, N = anaopt.size(); i < N; ++i ) { vector opt = split(anaopt[i], "="); if ( opt.size() != 2 ) { MSG_WARNING("Error in option specification. 
Skipping analysis " << analysisname); return *this; } if ( !analysis->info().validOption(opt[0], opt[1]) ) { MSG_WARNING("Cannot set option '" << opt[0] << "' to '" << opt[1] << "'. Skipping analysis " << analysisname); return *this; } opts[opt[0]] = opt[1]; } for ( auto opt: opts) { analysis->_options[opt.first] = opt.second; analysis->_optstring += ":" + opt.first + "=" + opt.second; } for (const AnaHandle& a : _analyses) { if (a->name() == analysis->name() ) { MSG_WARNING("Analysis '" << analysisname << "' already registered: skipping duplicate"); return *this; } } analysis->_analysishandler = this; _analyses.insert(analysis); } else { MSG_WARNING("Analysis '" << analysisname << "' not found."); } // MSG_WARNING(_analyses.size()); // for (const AnaHandle& a : _analyses) MSG_WARNING(a->name()); return *this; } AnalysisHandler& AnalysisHandler::removeAnalysis(const string& analysisname) { std::shared_ptr toremove; for (const AnaHandle a : _analyses) { if (a->name() == analysisname) { toremove = a; break; } } if (toremove.get() != 0) { MSG_DEBUG("Removing analysis '" << analysisname << "'"); _analyses.erase(toremove); } return *this; } void AnalysisHandler::addData(const std::vector& aos) { for (const YODA::AnalysisObjectPtr ao : aos) { string path = ao->path(); if ( path.substr(0, 5) != "/RAW/" ) { _orphanedPreloads.push_back(ao); continue; } path = path.substr(4); ao->setPath(path); if (path.size() > 1) { // path > "/" try { const string ananame = ::split(path, "/")[0]; AnaHandle a = analysis(ananame); //MultiweightAOPtr mao = ????; /// @todo generate right Multiweight object from ao a->addAnalysisObject(mao); /// @todo Need to statistically merge... } catch (const Error& e) { MSG_TRACE("Adding analysis object " << path << " to the list of orphans."); _orphanedPreloads.push_back(ao); } } } } void AnalysisHandler::stripOptions(AnalysisObjectPtr ao, const vector & delopts) const { string path = ao->path(); string ananame = split(path, "/")[0]; vector anaopts = split(ananame, ":"); for ( int i = 1, N = anaopts.size(); i < N; ++i ) for ( auto opt : delopts ) if ( opt == "*" || anaopts[i].find(opt + "=") == 0 ) path.replace(path.find(":" + anaopts[i]), (":" + anaopts[i]).length(), ""); ao->setPath(path); } void AnalysisHandler:: mergeYodas(const vector & aofiles, const vector & delopts, bool equiv) { vector< vector > aosv; vector xsecs; vector xsecerrs; vector sows; set ananames; _eventcounter.reset(); // First scan all files and extract analysis objects and add the // corresponding anayses.. 
for ( auto file : aofiles ) { Scatter1DPtr xsec; CounterPtr sow; // For each file make sure that cross section and sum-of-weights // objects are present and stor all RAW ones in a vector; vector aos; try { /// @todo Use new YODA SFINAE to fill the smart ptr vector directly vector aos_raw; YODA::read(file, aos_raw); for (AnalysisObject* aor : aos_raw) { AnalysisObjectPtr ao = AnalysisObjectPtr(aor); if ( ao->path().substr(0, 5) != "/RAW/" ) continue; ao->setPath(ao->path().substr(4)); if ( ao->path() == "/_XSEC" ) xsec = dynamic_pointer_cast(ao); else if ( ao->path() == "/_EVTCOUNT" ) sow = dynamic_pointer_cast(ao); else { stripOptions(ao, delopts); string ananame = split(ao->path(), "/")[0]; if ( ananames.insert(ananame).second ) addAnalysis(ananame); aos.push_back(ao); } } if ( !xsec || !sow ) { MSG_ERROR( "Error in AnalysisHandler::mergeYodas: The file " << file << " did not contain weights and cross section info."); exit(1); } xsecs.push_back(xsec->point(0).x()); sows.push_back(sow); xsecerrs.push_back(sqr(xsec->point(0).xErrAvg())); _eventcounter += *sow; sows.push_back(sow); aosv.push_back(aos); } catch (...) { //< YODA::ReadError& throw UserError("Unexpected error in reading file: " + file); } } // Now calculate the scale to be applied for all bins in a file // and get the common cross section and sum of weights. _xs = _xserr = 0.0; for ( int i = 0, N = sows.size(); i < N; ++i ) { double effnent = sows[i]->effNumEntries(); _xs += (equiv? effnent: 1.0)*xsecs[i]; _xserr += (equiv? sqr(effnent): 1.0)*xsecerrs[i]; } vector scales(sows.size(), 1.0); if ( equiv ) { _xs /= _eventcounter.effNumEntries(); _xserr = sqrt(_xserr)/_eventcounter.effNumEntries(); } else { _xserr = sqrt(_xserr); for ( int i = 0, N = sows.size(); i < N; ++i ) scales[i] = (_eventcounter.sumW()/sows[i]->sumW())*(xsecs[i]/_xs); } // Initialize the analyses allowing them to book analysis objects. for (AnaHandle a : _analyses) { MSG_DEBUG("Initialising analysis: " << a->name()); if ( !a->info().reentrant() ) MSG_WARNING("Analysis " << a->name() << " has not been validated to have " << "a reentrant finalize method. The result is unpredictable."); try { // Allow projection registration in the init phase onwards a->_allowProjReg = true; cerr << "sqrtS " << sqrtS() << endl; a->init(); //MSG_DEBUG("Checking consistency of analysis: " << a->name()); //a->checkConsistency(); } catch (const Error& err) { cerr << "Error in " << a->name() << "::init method: " << err.what() << endl; exit(1); } MSG_DEBUG("Done initialising analysis: " << a->name()); } _initialised = true; // Get a list of all anaysis objects to handle. map current; for ( auto ao : getData(false, true) ) current[ao->path()] = ao; // Go through all objects to be merged and add them to current // after appropriate scaling. for ( int i = 0, N = aosv.size(); i < N; ++i) for ( auto ao : aosv[i] ) { if ( ao->path() == "/_XSEC" || ao->path() == "_EVTCOUNT" ) continue; auto aoit = current.find(ao->path()); if ( aoit == current.end() ) { MSG_WARNING("" << ao->path() << " was not properly booked."); continue; } if ( !addaos(aoit->second, ao, scales[i]) ) MSG_WARNING("Cannot merge objects with path " << ao->path() <<" of type " << ao->annotation("Type") ); } // Now we can simply finalize() the analysis, leaving the // controlling program to write it out some yoda-file. 
finalize(); } void AnalysisHandler::readData(const string& filename) { vector aos; try { /// @todo Use new YODA SFINAE to fill the smart ptr vector directly vector aos_raw; YODA::ReaderYODA::read(filename, aos_raw); for (YODA::AnalysisObject* aor : aos_raw) aos.push_back(YODA::AnalysisObjectPtr(aor)); //} catch (const YODA::ReadError & e) { } catch (...) { //< YODA::ReadError& throw UserError("Unexpected error in reading file: " + filename); } if (!aos.empty()) addData(aos); } vector AnalysisHandler::getRivetAOs() const { vector rtn; for (AnaHandle a : _analyses) { for (const auto & ao : a->analysisObjects()) { rtn.push_back(ao); } } rtn.push_back(_eventCounter); rtn.push_back(_xs); return rtn; } - vector AnalysisHandler::getYodaAOs() const { + vector AnalysisHandler::getYodaAOs(bool includeorphans, + bool includetmps, + bool usefinalized) const { vector rtn; - - for (auto rao : getRivetAOs()) { + if (usefinalized) + rtn = _finalizedAOs; + else { + for (auto rao : getRivetAOs()) { // need to set the index // before we can search the PATH rao.get()->setActiveWeightIdx(_defaultWeightIdx); - if (rao->path().find("/TMP/") != string::npos) - continue; + // Exclude paths from final write-out if they contain a "TMP" layer (i.e. matching "/TMP/") + if (!includetmps && rao->path().find("/TMP/") != string::npos) + continue; for (size_t iW = 0; iW < numWeights(); iW++) { - rao.get()->setActiveWeightIdx(iW); - rtn.push_back(rao.get()->activeYODAPtr()); + rao.get()->setActiveWeightIdx(iW); + rtn.push_back(rao.get()->activeYODAPtr()); } + } } + // Sort histograms alphanumerically by path before write-out sort(rtn.begin(), rtn.end(), [](YODA::AnalysisObjectPtr a, YODA::AnalysisObjectPtr b) { return a->path() < b->path(); } ); return rtn; } - vector AnalysisHandler::getData() const { - return getYodaAOs(); + + vector AnalysisHandler::getData(bool includeorphans, + bool includetmps, + bool usefinalized) const { + return getYodaAOs(includeorphans, includetmps, usefinalized); } -/* MERGE - vector AnalysisHandler:: - getData(bool includeorphans, bool includetmps) const { - vector rtn; - // Event counter - rtn.push_back( make_shared(_eventcounter) ); - // Cross-section + err as scatter - YODA::Scatter1D::Points pts; pts.insert(YODA::Point1D(_xs, _xserr)); - rtn.push_back( make_shared(pts, "/_XSEC") ); - // Analysis histograms - for (const AnaHandle a : analyses()) { - vector aos = a->analysisObjects(); - // MSG_WARNING(a->name() << " " << aos.size()); - for (const AnalysisObjectPtr ao : aos) { - // Exclude paths from final write-out if they contain a "TMP" layer (i.e. 
matching "/TMP/") - /// @todo This needs to be much more nuanced for re-entrant histogramming - if ( !includetmps && ao->path().find("/TMP/" ) != string::npos) continue; - rtn.push_back(ao); - } - } - // Sort histograms alphanumerically by path before write-out - sort(rtn.begin(), rtn.end(), [](AnalysisObjectPtr a, AnalysisObjectPtr b) {return a->path() < b->path();}); - if ( includeorphans ) - rtn.insert(rtn.end(), _orphanedPreloads.begin(), _orphanedPreloads.end()); - return rtn; - } -*/ void AnalysisHandler::writeData(const string& filename) const { vector out = _finalizedAOs; + set finalana; + for ( auto ao : out) finalana.insert(ao->path()); out.reserve(2*out.size()); - vector aos = getData(false, true); + vector aos = getData(false, true, false); + + if ( _dumping ) { + for ( auto ao : aos ) { + if ( finalana.find(ao->path()) == finalana.end() ) + out.push_back(AnalysisObjectPtr(ao->newclone())); + } + } + for ( auto ao : aos ) { ao = YODA::AnalysisObjectPtr(ao->newclone()); ao->setPath("/RAW" + ao->path()); out.push_back(ao); } try { YODA::WriterYODA::write(filename, aos.begin(), aos.end()); } catch (...) { //< YODA::WriteError& throw UserError("Unexpected error in writing file: " + filename); } } string AnalysisHandler::runName() const { return _runname; } size_t AnalysisHandler::numEvents() const { return _eventCounter->numEntries(); } std::vector AnalysisHandler::analysisNames() const { std::vector rtn; for (AnaHandle a : _analyses) { rtn.push_back(a->name()); } return rtn; } const AnaHandle AnalysisHandler::analysis(const std::string& analysisname) const { for (const AnaHandle a : analyses()) if (a->name() == analysisname) return a; throw Error("No analysis named '" + analysisname + "' registered in AnalysisHandler"); } AnalysisHandler& AnalysisHandler::addAnalyses(const std::vector& analysisnames) { for (const string& aname : analysisnames) { //MSG_DEBUG("Adding analysis '" << aname << "'"); addAnalysis(aname); } return *this; } AnalysisHandler& AnalysisHandler::removeAnalyses(const std::vector& analysisnames) { for (const string& aname : analysisnames) { removeAnalysis(aname); } return *this; } AnalysisHandler& AnalysisHandler::setCrossSection(double xs, double xserr) { _xs = Scatter1DPtr(weightNames(), Scatter1D("_XSEC")); _eventCounter.get()->setActiveWeightIdx(_defaultWeightIdx); double nomwgt = sumOfWeights(); // The cross section of each weight variation is the nominal cross section // times the sumOfWeights(variation) / sumOfWeights(nominal). 
// This way the cross section will work correctly for (size_t iW = 0; iW < numWeights(); iW++) { _eventCounter.get()->setActiveWeightIdx(iW); double s = sumOfWeights() / nomwgt; _xs.get()->setActiveWeightIdx(iW); _xs->addPoint(xs*s, xserr*s); } _eventCounter.get()->unsetActiveWeight(); _xs.get()->unsetActiveWeight(); return *this; } AnalysisHandler& AnalysisHandler::addAnalysis(Analysis* analysis) { analysis->_analysishandler = this; _analyses.insert(AnaHandle(analysis)); return *this; } PdgIdPair AnalysisHandler::beamIds() const { return Rivet::beamIds(beams()); } double AnalysisHandler::sqrtS() const { return Rivet::sqrtS(beams()); } void AnalysisHandler::setIgnoreBeams(bool ignore) { _ignoreBeams=ignore; } } diff --git a/src/Core/Run.cc b/src/Core/Run.cc --- a/src/Core/Run.cc +++ b/src/Core/Run.cc @@ -1,157 +1,138 @@ // -*- C++ -*- #include "Rivet/Run.hh" #include "Rivet/AnalysisHandler.hh" #include "HepMC/IO_GenEvent.h" #include "Rivet/Math/MathUtils.hh" #include "Rivet/Tools/RivetPaths.hh" #include "zstr/zstr.hpp" #include #include using std::cout; +using std::endl; namespace Rivet { Run::Run(AnalysisHandler& ah) : _ah(ah), _fileweight(1.0), _xs(NAN) { } Run::~Run() { } Run& Run::setCrossSection(const double xs) { _xs = xs; return *this; } Run& Run::setListAnalyses(const bool dolist) { _listAnalyses = dolist; return *this; } // Fill event and check for a bad read state bool Run::readEvent() { /// @todo Clear rather than new the GenEvent object per-event? _evt.reset(new GenEvent()); if (_io->rdstate() != 0 || !_io->fill_next_event(_evt.get()) ) { Log::getLog("Rivet.Run") << Log::DEBUG << "Read failed. End of file?" << endl; return false; } // Rescale event weights by file-level weight, if scaling is non-trivial if (!fuzzyEquals(_fileweight, 1.0)) { for (size_t i = 0; i < (size_t) _evt->weights().size(); ++i) { _evt->weights()[i] *= _fileweight; } } return true; } // Fill event and check for a bad read state --- to skip, maybe HEPMC3 will have a better way bool Run::skipEvent() { if (_io->rdstate() != 0 || !_io->fill_next_event(_evt.get()) ) { Log::getLog("Rivet.Run") << Log::DEBUG << "Read failed. End of file?" << endl; return false; } return true; } bool Run::openFile(const std::string& evtfile, double weight) { // Set current weight-scaling member _fileweight = weight; // Set up HepMC input reader objects if (evtfile == "-") { _io.reset(new HepMC::IO_GenEvent(std::cin)); } else { if (!fileexists(evtfile)) throw Error("Event file '" + evtfile + "' not found"); #ifdef HAVE_LIBZ // NB. zstr auto-detects if file is deflated or plain-text _istr.reset(new zstr::ifstream(evtfile.c_str())); #else _istr.reset(new std::fstream(evtfile.c_str(), std::ios::in)); #endif _io.reset(new HepMC::IO_GenEvent(*_istr)); } if (_io->rdstate() != 0) { Log::getLog("Rivet.Run") << Log::ERROR << "Read error on file " << evtfile << endl; return false; } return true; } bool Run::init(const std::string& evtfile, double weight) { if (!openFile(evtfile, weight)) return false; // Read first event to define run conditions bool ok = readEvent(); if (!ok) return false; if (_evt->particles_size() == 0) { Log::getLog("Rivet.Run") << Log::ERROR << "Empty first event." 
<< endl; return false; } // Initialise AnalysisHandler with beam information from first event _ah.init(*_evt); - // Set cross-section from command line + // Tell the user about using the cross-section from the command line if (!std::isnan(_xs)) { - Log::getLog("Rivet.Run") - << Log::DEBUG << "Setting user cross-section = " << _xs << " pb" << endl; - _ah.setCrossSection(_xs); + Log::getLog("Rivet.Run") << Log::DEBUG << "Setting user cross-section = " << _xs << " pb" << endl; + // Actually do the setting in finalize() } // List the chosen & compatible analyses if requested if (_listAnalyses) { for (const std::string& ana : _ah.analysisNames()) { cout << ana << endl; } } return true; } bool Run::processEvent() { - // Set cross-section if found in event and not from command line - if (std::isnan(_xs) && _evt->cross_section()) { - const double xs = _evt->cross_section()->cross_section()*picobarn; - const double xserr = _evt->cross_section()->cross_section_error()*picobarn; - Log::getLog("Rivet.Run") - << Log::DEBUG << "Setting cross-section = " << xs << " +- " << xserr << " pb" << endl; - _ah.setCrossSection(xs, xserr); - } - // Complain about absence of cross-section if required! - if (_ah.needCrossSection() && !_ah.hasCrossSection()) { - Log::getLog("Rivet.Run") - << Log::ERROR - << "Total cross-section needed for at least one of the analyses. " - << "Please set it (on the command line with '-x' if using the 'rivet' program)" << endl; - return false; - } - - // Analyze event _ah.analyze(*_evt); - return true; } bool Run::finalize() { _evt.reset(); _istr.reset(); _io.reset(); if (!std::isnan(_xs)) _ah.setCrossSection(_xs, 0); _ah.finalize(); return true; } } diff --git a/src/Tools/RivetYODA.cc b/src/Tools/RivetYODA.cc --- a/src/Tools/RivetYODA.cc +++ b/src/Tools/RivetYODA.cc @@ -1,406 +1,406 @@ #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/RivetYODA.hh" #include "Rivet/Tools/RivetPaths.hh" #include "YODA/ReaderYODA.h" #include "YODA/ReaderAIDA.h" // use execinfo for backtrace if available #include "DummyConfig.hh" #ifdef HAVE_EXECINFO_H #include #endif using namespace std; namespace Rivet { template Wrapper::~Wrapper() {} template Wrapper::Wrapper(const vector& weightNames, const T & p) { for (const string& weightname : weightNames) { _persistent.push_back(make_shared(p)); auto obj = _persistent.back(); if (weightname != "") - obj->setPath(obj->path() + "[" + weightname + "]"); + obj->setPath(obj->path() + "[" + weightname + "]"); } } template typename T::Ptr Wrapper::active() const { if ( !_active ) { #ifdef HAVE_BACKTRACE void * buffer[4]; backtrace(buffer, 4); backtrace_symbols_fd(buffer, 4 , 1); #endif assert(false && "No active pointer set. 
Was this object booked in init()?"); } return _active; } template void Wrapper::newSubEvent() { typename TupleWrapper::Ptr tmp = make_shared>(_persistent[0]->clone()); tmp->reset(); _evgroup.push_back( tmp ); _active = _evgroup.back(); assert(_active); } string getDatafilePath(const string& papername) { /// Try to find YODA otherwise fall back to try AIDA const string path1 = findAnalysisRefFile(papername + ".yoda"); if (!path1.empty()) return path1; const string path2 = findAnalysisRefFile(papername + ".aida"); if (!path2.empty()) return path2; throw Rivet::Error("Couldn't find ref data file '" + papername + ".yoda" + " in data path, '" + getRivetDataPath() + "', or '.'"); } map getRefData(const string& papername) { const string datafile = getDatafilePath(papername); // Make an appropriate data file reader and read the data objects /// @todo Remove AIDA support some day... YODA::Reader& reader = (datafile.find(".yoda") != string::npos) ? \ YODA::ReaderYODA::create() : YODA::ReaderAIDA::create(); vector aovec; reader.read(datafile, aovec); // Return value, to be populated map rtn; for ( YODA::AnalysisObject* ao : aovec ) { YODA::AnalysisObjectPtr refdata(ao); if (!refdata) continue; const string plotpath = refdata->path(); // Split path at "/" and only return the last field, i.e. the histogram ID const size_t slashpos = plotpath.rfind("/"); const string plotname = (slashpos+1 < plotpath.size()) ? plotpath.substr(slashpos+1) : ""; rtn[plotname] = refdata; } return rtn; } } namespace { using Rivet::Fill; using Rivet::Fills; using Rivet::TupleWrapper; template double get_window_size(const typename T::Ptr & histo, typename T::BinType x) { // the bin index we fall in const auto binidx = histo->binIndexAt(x); // gaps, overflow, underflow don't contribute if ( binidx == -1 ) return 0; const auto & b = histo->bin(binidx); // if we don't have a valid neighbouring bin, // we use infinite width typename T::Bin b1(-1.0/0.0, 1.0/0.0); // points in the top half compare to the upper neighbour if ( x > b.xMid() ) { size_t nextidx = binidx + 1; if ( nextidx < histo->bins().size() ) b1 = histo->bin(nextidx); } else { // compare to the lower neighbour int nextidx = binidx - 1; if ( nextidx >= 0 ) b1 = histo->bin(nextidx); } // the factor 2 is arbitrary, could poss. be smaller return min( b.width(), b1.width() ) / 2.0; } template typename T::BinType fillT2binT(typename T::FillType a) { return a; } template <> YODA::Profile1D::BinType fillT2binT(YODA::Profile1D::FillType a) { return get<0>(a); } template <> YODA::Profile2D::BinType fillT2binT(YODA::Profile2D::FillType a) { return YODA::Profile2D::BinType{ get<0>(a), get<1>(a) }; } template void commit(vector & persistent, const vector< vector> > & tuple, const vector> & weights ) { // TODO check if all the xs are in the same bin anyway! // Then no windowing needed assert(persistent.size() == weights[0].size()); for ( const auto & x : tuple ) { double maxwindow = 0.0; for ( const auto & xi : x ) { // TODO check for NOFILL here // persistent[0] has the same binning as all the other persistent objects double window = get_window_size(persistent[0], fillT2binT(xi.first)); if ( window > maxwindow ) maxwindow = window; } const double wsize = maxwindow; // all windows have same size set edgeset; // bin edges need to be in here! 
for ( const auto & xi : x ) { edgeset.insert(fillT2binT(xi.first) - wsize); edgeset.insert(fillT2binT(xi.first) + wsize); } vector< std::tuple,double> > hfill; double sumf = 0.0; auto edgit = edgeset.begin(); double ehi = *edgit; while ( ++edgit != edgeset.end() ) { double elo = ehi; ehi = *edgit; valarray sumw(0.0, persistent.size()); // need m copies of this bool gap = true; // Check for gaps between the sub-windows. for ( size_t i = 0; i < x.size(); ++i ) { // check equals comparisons here! if ( fillT2binT(x[i].first) + wsize >= ehi && fillT2binT(x[i].first) - wsize <= elo ) { sumw += x[i].second * weights[i]; gap = false; } } if ( gap ) continue; hfill.push_back( make_tuple( (ehi + elo)/2.0, sumw, (ehi - elo) ) ); sumf += ehi - elo; } for ( auto f : hfill ) for ( size_t m = 0; m < persistent.size(); ++m ) persistent[m]->fill( get<0>(f), get<1>(f)[m], get<2>(f)/sumf ); // Note the scaling to one single fill } } template<> void commit(vector & persistent, const vector< vector> > & tuple, const vector> & weights) {} template<> void commit(vector & persistent, const vector< vector> > & tuple, const vector> & weights) {} template double distance(T a, T b) { return abs(a - b); } template <> double distance >(tuple a, tuple b) { return Rivet::sqr(get<0>(a) - get<0>(b)) + Rivet::sqr(get<1>(a) - get<1>(b)); } - bool copyao(AnalysisObjectPtr src, AnalysisObjectPtr dst) { + bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) { for (const std::string& a : src->annotations()) dst->setAnnotation(a, src->annotation(a)); if ( aocopy(src,dst) ) return true; if ( aocopy(src,dst) ) return true; if ( aocopy(src,dst) ) return true; if ( aocopy(src,dst) ) return true; if ( aocopy(src,dst) ) return true; if ( aocopy(src,dst) ) return true; if ( aocopy(src,dst) ) return true; if ( aocopy(src,dst) ) return true; return false; } - bool addaos(AnalysisObjectPtr dst, AnalysisObjectPtr src, double scale) { + bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) { if ( aoadd(dst,src,scale) ) return true; if ( aoadd(dst,src,scale) ) return true; if ( aoadd(dst,src,scale) ) return true; if ( aoadd(dst,src,scale) ) return true; if ( aoadd(dst,src,scale) ) return true; return false; } } /// fills is a vector of sub-event with an ordered set of x-values of /// the fills in each sub-event. NOFILL should be an "impossible" /// value for this histogram. Returns a vector of sub-events with /// an ordered vector of fills (including NOFILLs) for each sub-event. template vector< vector > > match_fills(const vector::Ptr> & evgroup, const Fill & NOFILL) { vector< vector > > matched; // First just copy subevents into vectors and find the longest vector. unsigned int maxfill = 0; // length of biggest vector int imax = 0; // index position of biggest vector for ( const auto & it : evgroup ) { const auto & subev = it->fills(); if ( subev.size() > maxfill ) { maxfill = subev.size(); imax = matched.size(); } matched.push_back(vector >(subev.begin(), subev.end())); } // Now, go through all subevents with missing fills. const vector> & full = matched[imax]; // the longest one for ( auto & subev : matched ) { if ( subev.size() == maxfill ) continue; // Add NOFILLs to the end; while ( subev.size() < maxfill ) subev.push_back(NOFILL); // Iterate from the back and shift all fill values backwards by // swapping with NOFILLs so that they better match the full // subevent. 
for ( int i = maxfill - 1; i >= 0; --i ) { if ( subev[i] == NOFILL ) continue; size_t j = i; while ( j + 1 < maxfill && subev[j + 1] == NOFILL && distance(fillT2binT(subev[j].first), fillT2binT(full[j].first)) > distance(fillT2binT(subev[j].first), fillT2binT(full[j + 1].first)) ) { swap(subev[j], subev[j + 1]); ++j; } } } // transpose vector>> result(maxfill,vector>(matched.size())); for (size_t i = 0; i < matched.size(); ++i) for (size_t j = 0; j < maxfill; ++j) result.at(j).at(i) = matched.at(i).at(j); return result; } namespace Rivet { template void Wrapper::pushToPersistent(const vector >& weight) { assert( _evgroup.size() == weight.size() ); // have we had subevents at all? const bool have_subevents = _evgroup.size() > 1; if ( ! have_subevents ) { // simple replay of all tuple entries // each recorded fill is inserted into all persistent weightname histos for ( size_t m = 0; m < _persistent.size(); ++m ) for ( const auto & f : _evgroup[0]->fills() ) _persistent[m]->fill( f.first, f.second * weight[0][m] ); } else { // outer index is subevent, inner index is jets in the event vector>> linedUpXs = match_fills(_evgroup, {typename T::FillType(), 0.0}); commit( _persistent, linedUpXs, weight ); } _evgroup.clear(); _active.reset(); } template <> void Wrapper::pushToPersistent(const vector >& weight) { for ( size_t m = 0; m < _persistent.size(); ++m ) { for ( size_t n = 0; n < _evgroup.size(); ++n ) { for ( const auto & f : _evgroup[n]->fills() ) { _persistent[m]->fill( f.second * weight[n][m] ); } } } _evgroup.clear(); _active.reset(); } template <> void Wrapper::pushToPersistent(const vector >& weight) { _evgroup.clear(); _active.reset(); } template <> void Wrapper::pushToPersistent(const vector >& weight) { _evgroup.clear(); _active.reset(); } template <> void Wrapper::pushToPersistent(const vector >& weight) { _evgroup.clear(); _active.reset(); } // explicitly instantiate all wrappers template class Wrapper; template class Wrapper; template class Wrapper; template class Wrapper; template class Wrapper; template class Wrapper; template class Wrapper; template class Wrapper; }
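For the common case without sub-events, pushToPersistent() above simply replays the event's recorded fills once per weight stream, scaling each fill weight by that stream's event weight; the multi-sub-event case first aligns the fills with match_fills() and commits them with the fractional-window weighting shown in commit(). A compilable sketch of the simple replay in plain C++, using std::vector accumulators in place of the YODA wrappers (the names here are illustrative, not the Rivet API):

    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
      // Recorded fills for one event: (x position, fill weight)
      std::vector<std::pair<double,double>> fills = {{0.5, 1.0}, {1.5, 2.0}};

      // Event weights for three weight streams (nominal + two variations)
      std::vector<double> evtWeights = {1.0, 0.9, 1.1};

      // One "persistent" sum of weights per stream, standing in for a histogram
      std::vector<double> persistent(evtWeights.size(), 0.0);

      for (std::size_t m = 0; m < persistent.size(); ++m)   // loop over weight streams
        for (const auto& f : fills)                         // replay each recorded fill
          persistent[m] += f.second * evtWeights[m];        // fill weight times stream weight

      for (std::size_t m = 0; m < persistent.size(); ++m)
        std::cout << "stream " << m << ": sumW = " << persistent[m] << "\n";
    }

The real wrappers do the same bookkeeping, but each "persistent" slot is a full YODA object whose path carries the weight-name suffix assigned at booking time.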