diff --git a/analyses/pluginALICE/ALICE_2016_I1507157.cc b/analyses/pluginALICE/ALICE_2016_I1507157.cc --- a/analyses/pluginALICE/ALICE_2016_I1507157.cc +++ b/analyses/pluginALICE/ALICE_2016_I1507157.cc @@ -1,186 +1,186 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/AliceCommon.hh" #include "Rivet/Projections/PrimaryParticles.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/EventMixingFinalState.hh" namespace Rivet { /// @brief Correlations of identified particles in pp. /// Also showcasing use of EventMixingFinalState. class ALICE_2016_I1507157 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2016_I1507157); /// @name Analysis methods //@{ /// @brief Calculate angular distance between particles. double phaseDif(double a1, double a2){ double dif = a1 - a2; while (dif < -M_PI/2) dif += 2*M_PI; while (dif > 3*M_PI/2) dif -= 2*M_PI; return dif; } /// Book histograms and initialise projections before the run void init() { double etamax = 0.8; double pTmin = 0.5; // GeV // Trigger declare(ALICE::V0AndTrigger(), "V0-AND"); // Charged tracks used to manage the mixing observable. ChargedFinalState cfsMult(Cuts::abseta < etamax); declare(cfsMult, "CFSMult"); // Primary particles. PrimaryParticles pp({Rivet::PID::PIPLUS, Rivet::PID::KPLUS, Rivet::PID::K0S, Rivet::PID::K0L, Rivet::PID::PROTON, Rivet::PID::NEUTRON, Rivet::PID::LAMBDA, Rivet::PID::SIGMAMINUS, Rivet::PID::SIGMAPLUS, Rivet::PID::XIMINUS, Rivet::PID::XI0, Rivet::PID::OMEGAMINUS},Cuts::abseta < etamax && Cuts::pT > pTmin*GeV); declare(pp,"APRIM"); // The event mixing projection declare(EventMixingFinalState(cfsMult, pp, 5, 0, 100, 10),"EVM"); // The particle pairs. pid = {{211, -211}, {321, -321}, {2212, -2212}, {3122, -3122}, {211, 211}, {321, 321}, {2212, 2212}, {3122, 3122}, {2212, 3122}, {2212, -3122}}; // The associated histograms in the data file. vector<string> refdata = {"d04-x01-y01","d04-x01-y02","d04-x01-y03", "d06-x01-y02","d05-x01-y01","d05-x01-y02","d05-x01-y03","d06-x01-y01", "d01-x01-y02","d02-x01-y02"}; ratio.resize(refdata.size()); signal.resize(refdata.size()); background.resize(refdata.size()); for (int i = 0, N = refdata.size(); i < N; ++i) { // The ratio plots. book(ratio[i], refdata[i], true); // Signal and mixed background. book(signal[i], "/TMP/" + refdata[i] + "-s", *ratio[i], refdata[i] + "-s"); book(background[i], "/TMP/" + refdata[i] + "-b", *ratio[i], refdata[i] + "-b"); // Number of signal and mixed pairs. nsp.push_back(0.); nmp.push_back(0.); } } /// Perform the per-event analysis void analyze(const Event& event) { // Triggering if (!apply<ALICE::V0AndTrigger>(event, "V0-AND")()) return; // The projections const PrimaryParticles& pp = - apply<PrimaryParticles>(event,"APRIM"); + applyProjection<PrimaryParticles>(event,"APRIM"); const EventMixingFinalState& evm = - apply<EventMixingFinalState>(event, "EVM"); + applyProjection<EventMixingFinalState>(event, "EVM"); // Test if we have enough mixing events available to continue.
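// Editor's note: for orientation, a hedged sketch of the observable built below.
// With S(dphi) the same-event pair distribution, B(dphi) the mixed-event pair
// distribution, and nsp/nmp the accumulated signal and mixed pair counts,
// finalize() effectively forms
//
//   C(dphi) = ( nmp / nsp ) * S(dphi) / B(dphi)
//
// i.e. B is normalised to the same number of pairs as S, so that pair-acceptance
// effects common to both distributions cancel in the ratio.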
if (!evm.hasMixingEvents()) return; for(const Particle& p1 : pp.particles()) { // Start by doing the signal distributions for(const Particle& p2 : pp.particles()) { if(isSame(p1,p2)) continue; double dEta = abs(p1.eta() - p2.eta()); double dPhi = phaseDif(p1.phi(), p2.phi()); if(dEta < 1.3) { for (int i = 0, N = pid.size(); i < N; ++i) { int pid1 = pid[i].first; int pid2 = pid[i].second; bool samesign = (pid1 * pid2 > 0); if (samesign && ((pid1 == p1.pid() && pid2 == p2.pid()) || (pid1 == -p1.pid() && pid2 == -p2.pid()))) { signal[i]->fill(dPhi); nsp[i] += 1.0; } if (!samesign && abs(pid1) == abs(pid2) && pid1 == p1.pid() && pid2 == p2.pid()) { signal[i]->fill(dPhi); nsp[i] += 1.0; } if (!samesign && abs(pid1) != abs(pid2) && ( (pid1 == p1.pid() && pid2 == p2.pid()) || (pid2 == p1.pid() && pid1 == p2.pid()) ) ) { signal[i]->fill(dPhi); nsp[i] += 1.0; } } } } // Then do the background distribution for(const Particle& pMix : evm.particles()){ double dEta = abs(p1.eta() - pMix.eta()); double dPhi = phaseDif(p1.phi(), pMix.phi()); if(dEta < 1.3) { for (int i = 0, N = pid.size(); i < N; ++i) { int pid1 = pid[i].first; int pid2 = pid[i].second; bool samesign = (pid1 * pid2 > 0); if (samesign && ((pid1 == p1.pid() && pid2 == pMix.pid()) || (pid1 == -p1.pid() && pid2 == -pMix.pid()))) { background[i]->fill(dPhi); nmp[i] += 1.0; } if (!samesign && abs(pid1) == abs(pid2) && pid1 == p1.pid() && pid2 == pMix.pid()) { background[i]->fill(dPhi); nmp[i] += 1.0; } if (!samesign && abs(pid1) != abs(pid2) && ( (pid1 == p1.pid() && pid2 == pMix.pid()) || (pid2 == p1.pid() && pid1 == pMix.pid()) ) ) { background[i]->fill(dPhi); nmp[i] += 1.0; } } } } } } /// Normalise histograms etc., after the run void finalize() { for (int i = 0, N = pid.size(); i < N; ++i) { double sc = nmp[i] / nsp[i]; signal[i]->scaleW(sc); divide(signal[i],background[i],ratio[i]); } } //@} /// @name Histograms //@{ vector<pair<int, int> > pid; vector<Histo1DPtr> signal; vector<Histo1DPtr> background; vector<Scatter2DPtr> ratio; vector<double> nsp; vector<double> nmp; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2016_I1507157); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1386475.cc b/analyses/pluginATLAS/ATLAS_2015_I1386475.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1386475.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1386475.cc @@ -1,83 +1,85 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/AtlasCommon.hh" namespace Rivet { /// @brief Add a short analysis description here class ATLAS_2015_I1386475 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1386475); /// Book histograms and initialise projections before the run void init() { // The centrality projection. declareCentrality(ATLAS::SumET_PB_Centrality(), "ATLAS_pPb_Calib", "SumETPb", "CENT"); // The trigger projection. declare(ATLAS::MinBiasTrigger(), "Trigger"); // The particles to be analysed. declare(ChargedFinalState(Cuts::eta > -2.7 && Cuts::eta < 2.7 && Cuts::pT > 0.1*GeV), "CFS"); - + // The centrality bins' upper edges. centralityBins = {90., 60., 40., 30., 20., 10., 5., 1.}; for (int i = 0; i < 8; ++i) { book(histEta[centralityBins[i]], 2, 1, i + 1); - sow[centralityBins[i]] = 0.; + book(sow[centralityBins[i]], histEta[centralityBins[i]]->name() + "Counter"); } } /// Perform the per-event analysis void analyze(const Event& event) { + // Apply event triggers. if ( !apply<ATLAS::MinBiasTrigger>(event, "Trigger")() ) vetoEvent; // We must have direct access to the centrality projection.
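// Editor's note: a minimal sketch of the bin lookup used below. The map keys
// are the bins' upper edges, which std::map keeps sorted ascending
// (1, 5, 10, 20, 30, 40, 60, 90), so upper_bound() returns the first bin whose
// upper edge lies above the event's centrality:
//
//   // map<double, Histo1DPtr> histEta;        // keyed by upper edge
//   // auto hItr = histEta.upper_bound(7.3);   // -> key 10: the 5-10% bin
//   // if (hItr == histEta.end()) return;      // c > 90: outside all bins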
- const CentralityProjection& cent = + const CentralityProjection& cent = apply(event,"CENT"); double c = cent(); // Find the correct centrality histogram auto hItr = histEta.upper_bound(c); if (hItr == histEta.end()) return; // Find the correct sow. auto sItr = sow.upper_bound(c); if (sItr == sow.end()) return; - sItr->second += 1; + sItr->second->fill(); for ( const auto &p : apply(event,"CFS").particles() ) hItr->second->fill(p.eta()); } - + /// Finalize void finalize() { - // Scale by the inverse sum of event weights in each centrality bin + // Scale by the inverse sum of event weights in each centrality + // bin. for (int i = 0; i < 8; ++i) - histEta[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]); + histEta[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]->sumW()); } private: /// The histograms binned in centrality. vector centralityBins; map histEta; - map sow; + map sow; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2015_I1386475); } diff --git a/analyses/pluginCMS/CMS_2013_I1223519.cc b/analyses/pluginCMS/CMS_2013_I1223519.cc --- a/analyses/pluginCMS/CMS_2013_I1223519.cc +++ b/analyses/pluginCMS/CMS_2013_I1223519.cc @@ -1,249 +1,250 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalStates.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/Smearing.hh" #include namespace Rivet { /// @brief Add a short analysis description here class CMS_2013_I1223519 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1223519); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections FinalState calofs(Cuts::abseta < 5.0); declare(calofs, "Clusters"); MissingMomentum mm(calofs); declare(mm, "TruthMET"); declare(SmearedMET(mm, MET_SMEAR_CMS_RUN2), "MET"); FastJets fj(calofs, FastJets::ANTIKT, 0.5); declare(fj, "TruthJets"); declare(SmearedJets(fj, JET_SMEAR_CMS_RUN2, [](const Jet& j) { if (j.abseta() > 2.4) return 0.; return j.bTagged() ? 0.65 : 0.01; }), "Jets"); ///< @note Charm mistag and exact b-tag eff not given FinalState ys(Cuts::abspid == PID::PHOTON && Cuts::abseta < 5.0); declare(ys, "TruthPhotons"); declare(SmearedParticles(ys, PHOTON_EFF_CMS_RUN2 /*, PHOTON_SMEAR_CMS_RUN2 */), "Photons"); FinalState es(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.5); declare(es, "TruthElectrons"); declare(SmearedParticles(es, ELECTRON_EFF_CMS_RUN2, ELECTRON_SMEAR_CMS_RUN2), "Electrons"); FinalState mus(Cuts::abspid == PID::MUON && Cuts::abseta < 2.4); declare(mus, "TruthMuons"); declare(SmearedParticles(mus, MUON_EFF_CMS_RUN2, MUON_SMEAR_CMS_RUN2), "Muons"); ChargedFinalState cfs(Cuts::abseta < 2.5); declare(cfs, "TruthTracks"); declare(SmearedParticles(cfs, TRK_EFF_CMS_RUN2), "Tracks"); // Book histograms book(_h_alphaT23, "alphaT23", 15, 0, 3); book(_h_alphaT4 , "alphaT4", 15, 0, 3); /// @todo Add HT histograms // Book counters _h_srcounters.resize(8*7 + 3); for (size_t inj = 0; inj < 2; ++inj) { const size_t njmax = inj + 3; for (size_t nb = 0; nb < njmax; ++nb) { for (size_t iht = 0; iht < 8; ++iht) { const size_t i = 8 * ((inj == 0 ? 
0 : 3) + nb) + iht; book(_h_srcounters[i], "srcount_j" + toString(njmax) + "_b" + toString(nb) + "_ht" + toString(iht+1)); } } } // Special nj >= 4, nb >= 4 bins for (size_t iht = 0; iht < 3; ++iht) { book(_h_srcounters[8*7 + iht], "srcount_j4_b4_ht" + toString(iht+1)); } } /// Perform the per-event analysis void analyze(const Event& event) { // Get baseline photons, electrons & muons Particles photons = apply(event, "Photons").particles(Cuts::pT > 25*GeV); Particles elecs = apply(event, "Electrons").particles(Cuts::pT > 10*GeV); Particles muons = apply(event, "Muons").particles(Cuts::pT > 10*GeV); // Electron/muon isolation (guesswork/copied from other CMS analysis -- paper is unspecific) const Particles calofs = apply(event, "Clusters").particles(); ifilter_discard(photons, [&](const Particle& y) { double ptsum = -y.pT(); for (const Particle& p : calofs) if (deltaR(p,y) < 0.3) ptsum += p.pT(); return ptsum / y.pT() > 0.1; }); ifilter_discard(elecs, [&](const Particle& e) { double ptsum = -e.pT(); for (const Particle& p : calofs) if (deltaR(p,e) < 0.3) ptsum += p.pT(); return ptsum / e.pT() > 0.1; }); ifilter_discard(muons, [&](const Particle& m) { double ptsum = -m.pT(); for (const Particle& p : calofs) if (deltaR(p,m) < 0.3) ptsum += p.pT(); return ptsum / m.pT() > 0.2; }); // Veto the event if there are any remaining baseline photons or leptons if (!photons.empty()) vetoEvent; if (!elecs.empty()) vetoEvent; if (!muons.empty()) vetoEvent; // Get jets and apply jet-based event-selection cuts const JetAlg& jetproj = apply(event, "Jets"); const Jets alljets = jetproj.jetsByPt(Cuts::abseta < 3.0 && Cuts::Et > 37*GeV); //< most inclusive jets requirement if (filter_select(alljets, Cuts::Et > 73*GeV).size() < 2) vetoEvent; //< most inclusive lead jets requirement // Filter jets into different Et requirements & compute corresponding HTs /// @note It's not clear if different HTs are used to choose the HT bins const Jets jets37 = filter_select(alljets, Cuts::Et > 37*GeV); const Jets jets43 = filter_select(jets37, Cuts::Et > 43*GeV); const Jets jets50 = filter_select(jets43, Cuts::Et > 50*GeV); const double ht37 = sum(jets37, Kin::Et, 0.0); const double ht43 = sum(jets43, Kin::Et, 0.0); const double ht50 = sum(jets50, Kin::Et, 0.0); // Find the relevant HT bin and apply leading jet event-selection cuts static const vector htcuts = { /* 275., 325., */ 375., 475., 575., 675., 775., 875.}; //< comment to avoid jets50 "fall-down" const int iht = inRange(ht37, 275*GeV, 325*GeV) ? 0 : inRange(ht43, 325*GeV, 375*GeV) ? 1 : (2+binIndex(ht50, htcuts, true)); MSG_TRACE("HT = {" << ht37 << ", " << ht43 << ", " << ht50 << "} => IHT = " << iht); if (iht < 0) vetoEvent; if (iht == 1 && filter_select(jets43, Cuts::Et > 78*GeV).size() < 2) vetoEvent; if (iht >= 2 && filter_select(jets50, Cuts::Et > 100*GeV).size() < 2) vetoEvent; // Create references for uniform access to relevant set of jets & HT const double etcut = iht == 0 ? 37. : iht == 1 ? 43. : 50.; const double& ht = iht == 0 ? ht37 : iht == 1 ? ht43 : ht50; const Jets& jets = iht == 0 ? jets37 : iht == 1 ? 
jets43 : jets50; if (!jetproj.jets(Cuts::abseta > 3 && Cuts::Et > etcut*GeV).empty()) vetoEvent; const size_t nj = jets.size(); const size_t nb = count_if(jets.begin(), jets.end(), [](const Jet& j) { return j.bTagged(Cuts::pT > 5*GeV); }); // Compute HTmiss = pT of 4-vector sum of jet momenta const FourMomentum jsum = sum(jets, mom, FourMomentum()); const double htmiss = jsum.pT(); // Require HTmiss / ETmiss < 1.25 const double etmiss = apply(event, "MET").met(); if (htmiss/etmiss > 1.25) vetoEvent; // Compute DeltaHT = minimum difference of "dijet" ETs, i.e. max(|1+2-3|, |1+3-2|, |2+3-1|) double deltaht = -1; vector jetets; transform(jets, jetets, Kin::Et); for (int i = 1; i < (1 << (jetets.size()-1)); ++i) { // count from 1 to 2**N-1, i.e. through all heterogeneous bitmasks with MSB(2**N)==0 const std::bitset<10> bits(i); /// @warning There'd better not be more than 10 jets... const double htdiff = partition_diff(bits, jetets); // MSG_INFO(bits.to_string() << " => " << htdiff); if (deltaht < 0 || htdiff < deltaht) deltaht = htdiff; } MSG_DEBUG("dHT_bitmask = " << deltaht); // Cross-check calculation in 2- and 3-jet cases // if (jets.size() == 2) { // MSG_INFO("dHT2 = " << fabs(jets[0].Et() - jets[1].Et())); // } else if (jets.size() == 3) { // double deltaht_01_2 = fabs(jets[0].Et()+jets[1].Et()-jets[2].Et()); // double deltaht_02_1 = fabs(jets[0].Et()+jets[2].Et()-jets[1].Et()); // double deltaht_12_0 = fabs(jets[1].Et()+jets[2].Et()-jets[0].Et()); // MSG_INFO("dHT3 = " << min({deltaht_01_2, deltaht_02_1, deltaht_12_0})); // } // Compute alphaT from the above double alphaT = fabs(0.5*((ht-deltaht)/(sqrt((ht*ht)-(htmiss*htmiss))))); if (alphaT < 0.55) vetoEvent; /// @todo Need to include trigger efficiency sampling or weighting? // Fill histograms const double weight = 1.0; const size_t inj = nj < 4 ? 0 : 1; const size_t inb = nb < 4 ? nb : 4; if (iht >= 2) (inj == 0 ? _h_alphaT23 : _h_alphaT4)->fill(alphaT, weight); // Fill the appropriate counter -- after working out the irregular SR bin index! *sigh* size_t i = 8 * ((inj == 0 ? 0 : 3) + inb) + iht; if (inj == 1 && inb == 4) i = 8*7 + (iht < 3 ? iht : 2); MSG_INFO("inj = " << inj << ", inb = " << inb << ", i = " << i); _h_srcounters[i]->fill(weight); } /// Normalise histograms etc., after the run void finalize() { const double sf = crossSection()/femtobarn*11.7/sumOfWeights(); scale({_h_alphaT23,_h_alphaT4}, sf); for (size_t i = 0; i < 8*7+3; ++i) scale(_h_srcounters[i], sf); } //@} /// @name Utility functions for partitioning jet pTs into two groups and summing/diffing them //@{ /// Sum the given values into two subsets according to the provided bitmask template - pair partition_sum(const std::bitset& mask, const vector& vals) const { + pair partition_sum(const std::bitset& mask, + const vector& vals) const { pair rtn(0., 0.); for (size_t i = 0; i < vals.size(); ++i) { (!mask[vals.size()-1-i] ? 
rtn.first : rtn.second) += vals[i]; } return rtn; } /// Return the difference between summed subsets according to the provided bitmask template double partition_diff(const std::bitset& mask, const vector& vals) const { const pair sums = partition_sum(mask, vals); const double diff = fabs(sums.first - sums.second); MSG_TRACE(mask.to_string() << ": " << sums.first << "/" << sums.second << " => " << diff); return diff; } //@} /// @name Histograms //@{ Histo1DPtr _h_alphaT23, _h_alphaT4; vector _h_srcounters; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1223519); } diff --git a/analyses/pluginMC/MC_Cent_pPb_Eta.cc b/analyses/pluginMC/MC_Cent_pPb_Eta.cc new file mode 100644 --- /dev/null +++ b/analyses/pluginMC/MC_Cent_pPb_Eta.cc @@ -0,0 +1,77 @@ +// -*- C++ -*- +#include "Rivet/Analyses/MC_Cent_pPb.hh" +#include "Rivet/Tools/Percentile.hh" + +namespace Rivet { + + +class MC_Cent_pPb_Eta : public Analysis { + +public: + + DEFAULT_RIVET_ANALYSIS_CTOR(MC_Cent_pPb_Eta); + + /// Book histograms and initialise projections before the run + void init() { + + MSG_INFO("CENT parameter set to " << getOption("cent","REF")); + + // The centrality projection. + declareCentrality(MC_SumETFwdPbCentrality(), + "MC_Cent_pPb_Calib", "SumETPb", "CENT"); + + // The trigger projection. + declare(MC_pPbMinBiasTrigger(), "Trigger"); + + // The particles to be analysed. + declare(ChargedFinalState(Cuts::eta > -2.7 && Cuts::eta < 2.7 && + Cuts::pT > 0.1*GeV), "CFS"); + + // The centrality bins and the corresponding histograms. + std::vector< std::pair > centralityBins = + { {0, 1}, {1, 5}, {5, 10}, {10, 20}, + {20, 30}, {30, 40}, {40, 60}, {60, 90} }; + // std::vector< std::tuple > refData = + // { {2, 1, 8}, {2, 1, 7}, {2, 1, 6}, {2, 1, 5}, + // {2, 1, 4}, {2, 1, 3}, {2, 1, 2}, {2, 1, 1} }; + std::vector< std::tuple > refData; + for ( int i = 8; i > 0; --i ) + refData.push_back(std::tuple(2, 1, i)); + + // The centrality-binned histograms. + _hEta = bookPercentile("CENT", centralityBins, refData); + + } + + /// Perform the per-event analysis + void analyze(const Event& event) { + + if ( !apply(event, "Trigger")() ) vetoEvent; + + _hEta->init(event); + for ( const auto &p : apply(event,"CFS").particles() ) + _hEta->fill(p.eta()); + + } + + /// Finalize + void finalize() { + + // Scale by the inverse sum of event weights in each centrality + // bin. + _hEta->normalizePerEvent(); + + } + +private: + + /// The histograms binned in centrality. + Percentile _hEta; + +}; + + +// The hook for the plugin system +DECLARE_RIVET_PLUGIN(MC_Cent_pPb_Eta); + +} diff --git a/analyses/pluginMC/TEST.cc b/analyses/pluginMC/TEST.cc new file mode 100644 --- /dev/null +++ b/analyses/pluginMC/TEST.cc @@ -0,0 +1,81 @@ +// -*- C++ -*- +#include "Rivet/Analysis.hh" +#include "Rivet/Projections/ChargedFinalState.hh" +#include "Rivet/Projections/PrimaryParticles.hh" +#include "Rivet/Tools/Correlators.hh" + + +namespace Rivet { + + + class TEST : public CumulantAnalysis { + public: + + /// @name Constructors etc. 
+ //@{ + + /// Constructor + TEST() : CumulantAnalysis("TEST") { + } + //@} + + public: + + /// @name Analysis methods + //@{ + /// Book histograms and initialise projections before the run + void init() { + + ChargedFinalState cfs(Cuts::abseta < 1.0); + declare(cfs, "CFS"); + ChargedFinalState pp(Cuts::abseta < 2.0); + declare(pp, "PP"); + book(h_c22, "c22",120,0,120); + book(h_c23, "c23",120,0,120); + book(h_v22pT, "v22pT",10,0,10); + ec22 = bookECorrelator<2,2>("ec22",h_c22); + ec23 = bookECorrelator<3,2>("ec32",h_c22); + ec22pT = bookECorrelator<2,2>("ec22pT",h_v22pT); + pair max = getMaxValues(); + // Declare correlator projections. + declare(Correlators(pp, max.first, max.second, h_v22pT),"CRS"); + } + /// Perform the per-event analysis + void analyze(const Event& event) { + const Correlators& c = apply(event,"CRS"); + ec22->fill(apply(event,"CFS").particles().size(), c); + ec23->fill(apply(event,"CFS").particles().size(), c); + ec22pT->fill(c); + } + /// Normalise histograms etc., after the run + void finalize() { + stream(); + cnTwoInt(h_c22,ec22); + cnTwoInt(h_c23,ec23); + vnTwoDiff(h_v22pT,ec22pT); + + } + + + //@} + private: + + + /// @name Histograms + //@{ + Scatter2DPtr h_c22; + Scatter2DPtr h_v22pT; + ECorrPtr ec22; + ECorrPtr ec22pT; + Scatter2DPtr h_c23; + ECorrPtr ec23; + //@} + + }; + + + + // The hook for the plugin system + DECLARE_RIVET_PLUGIN(TEST); + +} diff --git a/include/Rivet/Projections/ConstLossyFinalState.hh b/include/Rivet/Projections/ConstLossyFinalState.hh --- a/include/Rivet/Projections/ConstLossyFinalState.hh +++ b/include/Rivet/Projections/ConstLossyFinalState.hh @@ -1,72 +1,75 @@ // -*- C++ -*- #ifndef RIVET_ConstLossyFinalState_HH #define RIVET_ConstLossyFinalState_HH +#include "Rivet/Tools/Logging.hh" +#include "Rivet/Tools/Random.hh" #include "Rivet/Config/RivetCommon.hh" +#include "Rivet/Particle.hh" +#include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LossyFinalState.hh" -#include "Rivet/Tools/Random.hh" namespace Rivet { /// Functor used to implement constant random lossiness. class ConstRandomFilter { public: ConstRandomFilter(double lossFraction) : _lossFraction(lossFraction) { assert(_lossFraction >= 0); } // If operator() returns true, particle is deleted ("lost") bool operator()(const Particle&) { return rand01() < _lossFraction; } CmpState compare(const ConstRandomFilter& other) const { return cmp(_lossFraction, other._lossFraction); } private: double _lossFraction; }; /// @brief Randomly lose a constant fraction of particles. class ConstLossyFinalState : public LossyFinalState { public: /// @name Constructors //@{ /// Constructor from a FinalState. ConstLossyFinalState(const FinalState& fsp, double lossfraction) : LossyFinalState(fsp, ConstRandomFilter(lossfraction)) { setName("ConstLossyFinalState"); } /// Stand-alone constructor. Initialises the base FinalState projection. ConstLossyFinalState(double lossfraction, const Cut& c=Cuts::open()) : LossyFinalState(ConstRandomFilter(lossfraction), c) { setName("ConstLossyFinalState"); } /// Clone on the heap. 
DEFAULT_RIVET_PROJ_CLONE(ConstLossyFinalState); //@} }; } #endif diff --git a/include/Rivet/Projections/EventMixingFinalState.hh b/include/Rivet/Projections/EventMixingFinalState.hh --- a/include/Rivet/Projections/EventMixingFinalState.hh +++ b/include/Rivet/Projections/EventMixingFinalState.hh @@ -1,185 +1,228 @@ // -*- C++ -*- #ifndef RIVET_EventMixingFinalState_HH #define RIVET_EventMixingFinalState_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/ParticleFinder.hh" #include "Rivet/Tools/Random.hh" #include <deque> #include <random> - namespace Rivet { // @brief Projects out an event mixed of several events, given // a mixing observable (eg. number of final state particles), // defining what should qualify as a mixable event. // Binning in the mixing observable is defined in the constructor, // as is the number of events one wants to mix with. // The method calculateMixingObs() must be overloaded // in derived classes, to provide the definition of the mixing observable, // on the provided projection, eg. centrality or something more elaborate. // + // The implementation can correctly handle mixing of weighted events, but + // not multi-weighted events. Nor does the implementation attempt to handle + // calculation of one (or more) event weights for the mixed events. For most + // common use cases, like calculating a background sample, this is sufficient. + // If a more elaborate use case ever turns up, this must be reevaluated. + // + // // @author Christian Bierlich // Weighted random shuffle, similar to std::random_shuffle, which // allows the passing of a weight for each element to be shuffled. template <class RandomAccessIterator, class WeightIterator, class RandomNumberGenerator> void weighted_shuffle(RandomAccessIterator first, RandomAccessIterator last, WeightIterator fw, WeightIterator lw, RandomNumberGenerator& g) { while(first != last && fw != lw) { - discrete_distribution<int> weightDist(fw, lw); + std::discrete_distribution<int> weightDist(fw, lw); int i = weightDist(g); if(i){ - iter_swap(first, next(first, i)); - iter_swap(fw, next(fw, i)); + std::iter_swap(first, next(first, i)); + std::iter_swap(fw, next(fw, i)); } ++first; ++fw; } } // A MixEvent is a vector of particles with an associated weight. typedef pair<Particles, double> MixEvent;
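// Editor's note: a minimal usage sketch of weighted_shuffle() above (the
// variable names are hypothetical; any standard URBG, e.g. std::mt19937 from
// <random>, can serve as the generator argument):
//
//   Particles parts = ...;                   // elements to shuffle
//   vector<double> ws(parts.size(), 1.0);    // one weight per element
//   std::mt19937 gen(42);
//   weighted_shuffle(parts.begin(), parts.end(), ws.begin(), ws.end(), gen);
//
// Higher-weight elements are more likely to be drawn early; with equal weights
// this reduces to an ordinary random shuffle.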
- typedef map<double, deque<MixEvent> > MixMap; - // EventMixingBase is the base class for event mixing projections. - // Most methods are defined in this base class as they should. - // In order to use it, a derived class should be implemented where: - // - The constructor is reimplmented, giving the derived projection type - // from which the mixing observable is calculated. The constructor must also - // be declared public in the derived class. - // - The calculateMixingObs is implemented. - // To examples of such derived classes are given below, - // 1) EventMixingFinalState, where the mixing observable are calculated - // on a multiplicity of a charged final state, and: - // 2) EventMixingCentrality, where the mixing observable is centrality. + typedef map<double, deque<MixEvent> > MixMap; + /// EventMixingBase is the base class for event mixing projections. + /// + /// Most methods are defined in this base class as they should. + /// In order to use it, a derived class should be implemented where: + /// - The constructor is reimplemented, giving the derived projection type + /// from which the mixing observable is calculated. The constructor must also + /// be declared public in the derived class. + /// - The calculateMixingObs is implemented. + /// Two examples of such derived classes are given below, + /// 1) EventMixingFinalState, where the mixing observable is calculated + /// from the multiplicity of a charged final state, and: + /// 2) EventMixingCentrality, where the mixing observable is centrality. + /// class EventMixingBase : public Projection { protected: // Constructor EventMixingBase(const Projection* mixObsProjPtr, const ParticleFinder& mix, size_t nMixIn, double oMin, double oMax, double deltao) : nMix(nMixIn), unitWeights(true) { // The base class constructor should be called explicitly in derived classes // to add projections below. setName("EventMixingBase"); - addProjection(*mixObsProjPtr,"OBS"); - addProjection(mix,"MIX"); - MSG_WARNING("EventMixing is not currently validated. Use with caution."); + declare(*mixObsProjPtr,"OBS"); + declare(mix,"MIX"); + MSG_WARNING("EventMixing is not fully validated. Use with caution."); // Set up the map for mixing events. for(double o = oMin; o < oMax; o+=deltao ) - mixEvents[o] = deque<MixEvent>(); + mixEvents[o] = std::deque<MixEvent>(); } public: // Test if we have enough mixing events available for projected, // current mixing observable. bool hasMixingEvents() const { MixMap::const_iterator mixItr = mixEvents.lower_bound(mObs); if(mixItr == mixEvents.end() || mixItr->second.size() < nMix + 1) return false; return true; } // Return a vector of mixing events. vector<MixEvent> getMixingEvents() const { if (!hasMixingEvents()) return vector<MixEvent>(); MixMap::const_iterator mixItr = mixEvents.lower_bound(mObs); return vector<MixEvent>(mixItr->second.begin(), mixItr->second.end() - 1); } // Return a vector of particles from the mixing events. Can // be overloaded in derived classes, though normally not necessary. virtual const Particles particles() const { // Test if we have enough mixing events. if(!hasMixingEvents()) return vector<Particle>(); // Get mixing events for the current, projected mixing observable. MixMap::const_iterator mixItr = mixEvents.lower_bound(mObs); vector<MixEvent> mixEvents = vector<MixEvent>(mixItr->second.begin(), mixItr->second.end() - 1); // Make the vector of mixed particles. Particles mixParticles; vector<double> weights; size_t pSize = 0; for(size_t i = 0; i < mixEvents.size(); ++i) pSize+=mixEvents[i].first.size(); mixParticles.reserve(pSize); weights.reserve(pSize); // Put the particles in the vector. for(size_t i = 0; i < mixEvents.size(); ++i) { mixParticles.insert(mixParticles.end(), mixEvents[i].first.begin(), mixEvents[i].first.end()); vector<double> tmp = vector<double>(mixEvents[i].first.size(), mixEvents[i].second); weights.insert(weights.end(), tmp.begin(), tmp.end()); } // Shuffle the particles. if (unitWeights) { // Use the thread safe random number generator. auto rnd = [&] (int i) {return rng()()%i;}; random_shuffle(mixParticles.begin(), mixParticles.end(), rnd); return mixParticles; } else { weighted_shuffle(mixParticles.begin(), mixParticles.end(), weights.begin(), weights.end(), rng()); Particles tmp = vector<Particle>(mixParticles.begin(), mixParticles.begin() + size_t(ceil(mixParticles.size() / 2))); return tmp; } } - +
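// Editor's note: a small worked example of the event binning above, assuming
// the constructor arguments used by ALICE_2016_I1507157 (oMin = 0, oMax = 100,
// deltao = 10, nMix = 5): the map keys become 0, 10, ..., 90, and an event
// with mObs = 37 is routed by mixEvents.lower_bound(37) to the key-40 deque.
// Each deque caches at most nMix + 1 recent events of its class, and
// hasMixingEvents() only returns true once that buffer is full.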
MSG_DEBUG("Mixing observable out of bounds."); return; } const Particles mix = applyProjection(e, "MIX").particles(); - mixItr->second.push_back(make_pair(mix,e.weight())); + mixItr->second.push_back(make_pair(mix,e.weights()[0])); // Assume unit weights until we see otherwise. - if (unitWeights && e.weight() != 1.0 ) { + if (unitWeights && e.weights()[0] != 1.0 ) { unitWeights = false; nMix *= 2; } if(mixItr->second.size() > nMix + 1) mixItr->second.pop_front(); } /// Compare with other projections CmpState compare(const Projection& p) const { - return mkNamedPCmp(p, "FS"); + return mkNamedPCmp(p,"OBS"); } - - + + /// The mixing observable of the current event. + double mObs; + private: - /// The mixing observable of the current event. - double mObs; - // The number of event to mix with. + /// The number of event to mix with. size_t nMix; - // The event map. + /// The event map. MixMap mixEvents; - // Using unit weights or not. + /// Using unit weights or not. bool unitWeights; - }; + // EventMixingFinalState has multiplicity in the mixing projection. + // as the mixing observable. + class EventMixingFinalState : public EventMixingBase { + public: + EventMixingFinalState(const ParticleFinder* mixObsProjPtr, + const ParticleFinder& mix, size_t nMixIn, double oMin, double oMax, + double deltao) : + EventMixingBase(mixObsProjPtr, mix, nMixIn, oMin, oMax, deltao) { + setName("EventMixingFinalState"); + } + DEFAULT_RIVET_PROJ_CLONE(EventMixingFinalState); + + protected: + // Calulate mixing observable. + virtual void calculateMixingObs(const Projection* mProj) { + mObs = ((ParticleFinder*) mProj)->particles().size(); + } + }; + + // EventMixingCentrality has centrality as the mixing observable. + class EventMixingCentrality : public EventMixingBase { + public: + EventMixingCentrality(const CentralityProjection* mixObsProjPtr, + const ParticleFinder& mix, size_t nMixIn, double oMin, double oMax, + double deltao) : + EventMixingBase(mixObsProjPtr, mix, nMixIn, oMin, oMax, deltao) { + setName("EventMixingCentrality"); + } + + DEFAULT_RIVET_PROJ_CLONE(EventMixingCentrality); + protected: + virtual void calculateMixingObs(const Projection* mProj) { + mObs = ((CentralityProjection*) mProj)->operator()(); + } + }; } + #endif diff --git a/include/Rivet/Tools/Correlators.hh b/include/Rivet/Tools/Correlators.hh --- a/include/Rivet/Tools/Correlators.hh +++ b/include/Rivet/Tools/Correlators.hh @@ -1,1147 +1,1147 @@ // -*- C++ -*- #ifndef RIVET_Correlators_HH #define RIVET_Correlators_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/ParticleFinder.hh" #include #include "YODA/Scatter2D.h" #include "Rivet/Analysis.hh" /* File contains tools for calculating flow coefficients using * correlators. * Classes: * Correlators: Calculates single event correlators of a given harmonic. * Cumulants: An additional base class for flow analyses * (use as: class MyAnalysis : public Analysis, Cumulants {};) * Includes a framework for calculating cumulants and flow coefficients * from single event correlators, including automatic handling of statistical * errors. Contains multiple internal sub-classes: * CorBinBase: Base class for correlators binned in event or particle observables. * CorSingleBin: A simple bin for correlators. * CorBin: Has the interface of a simple bin, but does automatic calculation * of statistical errors by a bootstrap method. * ECorrelator: Data type for event averaged correlators. * @author Vytautas Vislavicius, Christine O. Rasmussen, Christian Bierlich. 
*/ namespace Rivet { using std::complex; /* @brief Projection for calculating correlators for flow measurements. * * A projection which calculates Q-vectors and P-vectors, and projects * them out as correlators. Implementation follows the description of * the ''Generic Framework'' * Phys. Rev. C 83 (2011) 044913, arXiv: 1010.0233 * Phys. Rev. C 89 (2014) 064904, arXiv: 1312.3572 * */ class Correlators : public Projection { public: // Constructor. Takes parameters @parm fsp, the Final State // projection correlators should be constructed from, @parm nMaxIn, // the maximal sum of harmonics, eg. for // c_2{2} = {2,-2} = 2 + 2 = 4 // c_2{4} = {2,2,-2,-2} = 2 + 2 + 2 + 2 = 8 // c_4{2} = {4,-4} = 4 + 4 = 8 // c_4{4} = {4,4,-4,-4} = 4 + 4 + 4 + 4 = 16. // @parm pMaxIn is the maximal number of particles // you want to correlate and @parm pTbinEdgesIn are the (lower) // edges of pT bins, the last one being the upper edge of the final bin. Correlators(const ParticleFinder& fsp, int nMaxIn = 2, int pMaxIn = 0, vector<double> pTbinEdgesIn = {}); // Constructor which takes a Scatter2D to estimate bin edges. Correlators(const ParticleFinder& fsp, int nMaxIn, int pMaxIn, const Scatter2DPtr hIn); /// @brief Integrated correlator of @parm n harmonic, with the /// number of powers being the size of @parm n. /// Eg. @parm n should be: /// <<2>>_2 => n = {2, -2} /// <<4>>_2 => n = {2, 2, -2, -2} /// <<2>>_4 => n = {4, -4} /// <<4>>_4 => n = {4, 4, -4, -4} and so on. const pair<double, double> intCorrelator(vector<int> n) const; /// @brief pT differential correlator of @parm n harmonic, with the /// number of powers being the size of @parm n. /// The method can include overflow/underflow bins in the /// beginning/end of the returned vector, by toggling /// @parm overflow = true. const vector<pair<double, double>> pTBinnedCorrelators(vector<int> n, bool overflow = false) const; /// @brief Integrated correlator of @parm n1 harmonic, with the /// number of powers being the size of @parm n1. This method /// imposes an eta gap, correlating with another phase space, /// where another Correlators projection @parm other should be /// defined. The harmonics of the other phase space is given /// as @parm n2. /// To get eg. integrated <<4>>_2, @parm n1 should be: /// n1 = {2, 2} and n2 = {-2, -2} const pair<double, double> intCorrelatorGap(const Correlators& other, vector<int> n1, vector<int> n2) const; /// @brief pT differential correlators of @parm n1 harmonic, with the /// number of powers being the size of @parm n1. This method /// imposes an eta gap, correlating with another phase space, /// where another Correlators projection @parm other should be /// defined. The harmonics of the other phase space is given /// as @parm n2. /// To get eg. differential <<4'>>_2, @parm n1 should be: /// n1 = {2, 2} and @parm n2: n2 = {-2, -2}. /// To get eg. differential <<2'>>_4, @parm n1 should be: /// n1 = {4} and @parm n2: n2 = {-4}. /// The method can include overflow/underflow /// bins in the beginning/end of the returned vector, by toggling /// @parm overflow = true. const vector<pair<double, double>> pTBinnedCorrelatorsGap( const Correlators& other, vector<int> n1, vector<int> n2, bool overflow = false) const; /// @brief Construct a harmonic vector from @parm n harmonics /// and @parm m number of particles. /// TODO: In C++14 this can be done much nicer with TMP. static vector<int> hVec(int n, int m) { if (m%2 != 0) { cout << "Harmonic Vector: Number of particles " "must be an even number." << endl; return {}; } vector<int> ret = {}; for (int i = 0; i < m; ++i) { if (i < m/2) ret.push_back(n); else ret.push_back(-n); } return ret; }
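// Editor's note: two concrete values of hVec(), matching the conventions in
// the comments above:
//
//   Correlators::hVec(2, 2);  // -> {2, -2}         for <<2>>_2
//   Correlators::hVec(2, 4);  // -> {2, 2, -2, -2}  for <<4>>_2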
/// @brief Return the maximal values for n, p to be used in the /// constructor of Correlators(xxx, nMax, pMax, xxxx) static pair<int, int> getMaxValues(vector< vector<int> >& hList){ int nMax = 0, pMax = 0; for (vector<int> h : hList) { int tmpN = 0, tmpP = 0; for ( int i = 0; i < int(h.size()); ++i) { tmpN += abs(h[i]); ++tmpP; } if (tmpN > nMax) nMax = tmpN; if (tmpP > pMax) pMax = tmpP; } return make_pair(nMax,pMax); } // Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(Correlators); protected: // @brief Project function. Loops over array and calculates Q vectors // and P vectors if needed void project(const Event& e); // @brief Compare against other projection. Test if harmonics, pT-bins // and underlying final state are similar. CmpState compare(const Projection& p) const { const Correlators* other = dynamic_cast<const Correlators*>(&p); if (nMax != other->nMax) return CmpState::NEQ; if (pMax != other->pMax) return CmpState::NEQ; if (pTbinEdges != other->pTbinEdges) return CmpState::NEQ; return mkPCmp(p, "FS"); }; // @brief Calculate correlators from one particle. void fillCorrelators(const Particle& p, const double& weight); // @brief Return a Q-vector. const complex<double> getQ(int n, int p) const { bool isNeg = (n < 0); if (isNeg) return conj( qVec[abs(n)][p] ); else return qVec[n][p]; }; // @brief Return a P-vector. const complex<double> getP(int n, int p, double pT = 0.) const { bool isNeg = (n < 0); map<double, Vec2D>::const_iterator pTitr = pVec.lower_bound(pT); if (pTitr == pVec.end()) return DBL_NAN; if (isNeg) return conj( pTitr->second[abs(n)][p] ); else return pTitr->second[n][p]; }; private: // Find correlators by recursion. Order = M (# of particles), // n's are harmonics, p's are the powers of the weights const complex<double> recCorr(int order, vector<int> n, vector<int> p, bool useP, double pT = 0.) const; // Two-particle correlator Eq. (19) p. 6 // Flag if p-vectors or q-vectors should be used to // calculate the correlator. const complex<double> twoPartCorr(int n1, int n2, int p1 = 1, int p2 = 1, double pT = 0., bool useP = false) const; // Set elements in vectors to zero. void setToZero(); // Shorthands for setting and comparing to zero. const complex<double> _ZERO = {0., 0.}; const double _TINY = 1e-10; // Shorthand typedefs for vec<vec<complex>>. typedef vector< vector<complex<double>> > Vec2D; // Define Q-vectors and p-vectors Vec2D qVec; // Q[n][p] map<double, Vec2D> pVec; // p[pT][n][p] // The max values of n and p to be calculated. int nMax, pMax; // pT bin-edges vector<double> pTbinEdges; bool isPtDiff; /// End class Correlators }; /// @brief Tools for flow analyses. /// The following are helper classes to construct event averaged correlators /// as well as cumulants and flow coefficients from the basic event // correlators defined above. They are all encapsulated in a Cumulants // class, which can be used as a(nother) base class for flow analyses, // to ensure access. class CumulantAnalysis : public Analysis { private: // Number of bins used for bootstrap calculation of statistical // uncertainties. It is hard coded, and should NOT be changed unless there // are good reasons to do so. static const int BOOT_BINS = 9; // Enum for choosing the method of error calculation. enum Error { VARIANCE, ENVELOPE }; // The desired error method. Can be changed in the analysis constructor // by setting it appropriately. Error errorMethod; /// @brief Base class for correlator bins.
class CorBinBase { public: CorBinBase() {} virtual ~CorBinBase() {}; // Derived class should have fill and mean defined. - virtual void fill(const pair& cor, const double& weight) = 0; + virtual void fill(const pair& cor, const double& weight = 1.0) = 0; virtual const double mean() const = 0; }; /// @brief The CorSingleBin is the basic quantity filled in an ECorrelator. /// It is a simple counter with an even simpler structure than normal /// YODA type DBNs, but added functionality to test for out of /// bounds correlators. class CorSingleBin : public CorBinBase { public: /// @brief The default constructor. CorSingleBin() : _sumWX(0.), _sumW(0.), _sumW2(0.), _numEntries(0.) {} ~CorSingleBin() {} /// @brief Fill a correlator bin with the return type from a /// Correlator (a pair giving numerator and denominator of _event). - void fill(const pair& cor, const double& weight) { + void fill(const pair& cor, const double& weight = 1.0) { // Test if denominator for the single event average is zero. if (cor.second < 1e-10) return; // The single event average is then cor.first / cor.second. // With weights this becomes just: _sumWX += cor.first * weight; _sumW += weight * cor.second; _sumW2 += weight * weight * cor.second * cor.second; _numEntries += 1.; } const double mean() const { if (_sumW < 1e-10) return 0; return _sumWX / _sumW; } // @brief Sum of weights. const double sumW() const { return _sumW; } const double sumW2() const { return _sumW2; } // @brief Sum of weight * X. const double sumWX() const { return _sumWX; } // @brief Number of entries. const double numEntries() const { return _numEntries; } void addContent(double ne, double sw, double sw2, double swx) { _numEntries += ne; _sumW += sw; _sumW2 += sw2; _sumWX += swx; } private: double _sumWX, _sumW, _sumW2, _numEntries; }; // End of CorSingleBin sub-class. /// @brief The CorBin is the basic bin quantity in ECorrelators. /// It consists of several CorSingleBins, to facilitate /// bootstrapping calculation of statistical uncertainties. class CorBin : public CorBinBase { public: // @brief The constructor. nBins signifies the period of the bootstrap // calculation, and should never be changed here, but in its definition // above -- and only if there are good reasons to do so. CorBin() : binIndex(0), nBins(BOOT_BINS) { for(size_t i = 0; i < nBins; ++i) bins.push_back(CorSingleBin()); } // Destructor must be implemented. ~CorBin() {} // @brief Fill the correct underlying bin and take a step. - void fill(const pair& cor, const double& weight) { + void fill(const pair& cor, const double& weight = 1.0) { // Test if denominator for the single event average is zero. if (cor.second < 1e-10) return; // Fill the correct bin. bins[binIndex].fill(cor, weight); if (binIndex == nBins - 1) binIndex = 0; else ++binIndex; } // @brief Calculate the total sample mean with all // available statistics. const double mean() const { double sow = 0; double sowx = 0; for(auto b : bins) { if (b.sumW() < 1e-10) continue; sow += b.sumW(); sowx += b.sumWX(); } return sowx / sow; } // @brief Return a copy of the bins. const vector getBins() const { return bins; } // @brief Return the bins as pointers to the base class. template const vector getBinPtrs() { vector ret(bins.size()); transform(bins.begin(), bins.end(), ret.begin(), [](CorSingleBin& b) {return &b;}); return ret; } private: vector bins; size_t binIndex; size_t nBins; }; // End of CorBin sub-class. 
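// Editor's note: a brief sketch of how the bootstrap above is meant to work
// (numbers illustrative; num, den and w are placeholders): fills are
// distributed round-robin over the BOOT_BINS = 9 CorSingleBins, e.g.
//
//   CorBin bin;
//   for (int i = 0; i < 9000; ++i)
//     bin.fill(make_pair(num, den), w);   // each sub-bin gets ~1000 fills
//   double est = bin.mean();              // pooled over all sub-bins
//
// so the spread of the nine statistically equivalent sub-sample means can be
// turned into an uncertainty by sampleVariance()/sampleEnvelope() further down.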
public: /// @brief The ECorrelator is a helper class to calculate all event /// averages of correlators, in order to construct cumulants. /// It can be binned in any variable. class ECorrelator { public: /// @brief Constructor. Takes as argument the desired harmonic and number /// of correlated particles as a generic framework style vector, eg, /// {2, -2} for <<2>>_2, no binning. /// TODO: Implement functionality for this if needed. //ECorrelator(vector h) : h1(h), h2({}), // binX(0), binContent(0), reference() { //}; /// @brief Constructor. Takes as argument the desired harmonic and number /// of correlated particles as a generic framework style vector, eg, /// {2, -2} for <<2>>_2 and binning. ECorrelator(vector h, vector binIn) : h1(h), h2({}), binX(binIn), binContent(binIn.size() - 1), reference() {}; /// @brief Constructor for gapped correlator. Takes as argument the /// desired harmonics for the two final states, and binning. ECorrelator(vector h1In, vector h2In, vector binIn) : h1(h1In), h2(h2In), binX(binIn), binContent(binIn.size() - 1), reference() {}; /// @brief Fill the appropriate bin given an input (per event) /// observable, eg. centrality. void fill(const double& obs, const Correlators& c, const double weight = 1.0) { int index = getBinIndex(obs); if (index < 0) return; binContent[index].fill(c.intCorrelator(h1), weight); } /// @brief Fill the appropriate bin given an input (per event) /// observable, eg. centrality. Using a rapidity gap between /// two Correlators. void fill(const double& obs, const Correlators& c1, const Correlators& c2, const double weight = 1.0) { if (!h2.size()) { cout << "Trying to fill gapped correlator, but harmonics behind " "the gap (h2) are not given!" << endl; return; } int index = getBinIndex(obs); if (index < 0) return; binContent[index].fill(c1.intCorrelatorGap(c2, h1, h2), weight); } /// @brief Fill the bins with the appropriate correlator, taking the /// binning directly from the Correlators object, and filling also the /// reference flow. void fill(const Correlators& c, const double& weight = 1.0) { vector< pair > diffCorr = c.pTBinnedCorrelators(h1); // We always skip overflow when calculating the all event average. if (diffCorr.size() != binX.size() - 1) cout << "Tried to fill event with wrong binning (ungapped)" << endl; for (size_t i = 0; i < diffCorr.size(); ++i) { int index = getBinIndex(binX[i]); if (index < 0) return; binContent[index].fill(diffCorr[i], weight); } reference.fill(c.intCorrelator(h1), weight); } /// @brief Fill bins with the appropriate correlator, taking the binning /// directly from the Correlators object, and also the reference flow. /// Using a rapidity gap between two Correlators. void fill(const Correlators& c1, const Correlators& c2, const double& weight = 1.0) { if (!h2.size()) { cout << "Trying to fill gapped correlator, but harmonics behind " "the gap (h2) are not given!" << endl; return; } vector< pair > diffCorr = c1.pTBinnedCorrelatorsGap(c2, h1, h2); // We always skip overflow when calculating the all event average. if (diffCorr.size() != binX.size() - 1) cout << "Tried to fill event with wrong binning (gapped)" << endl; for (size_t i = 0; i < diffCorr.size(); ++i) { int index = getBinIndex(binX[i]); if (index < 0) return; binContent[index].fill(diffCorr[i], weight); } reference.fill(c1.intCorrelatorGap(c2, h1, h2), weight); } /// @brief Get a copy of the bin contents. const vector getBins() const { return binContent; } // @brief Return the bins as pointers to the base class. 
const vector getBinPtrs() { vector ret(binContent.size()); transform(binContent.begin(), binContent.end(), ret.begin(), [](CorBin& b) {return &b;}); return ret; } /// @brief Get a copy of the bin x-values. const vector getBinX() const { return binX; } /// @brief Get a copy of the @ret h1 harmonic vector. const vector getH1() const { return h1; } /// @brief Get a copy of the @ret h2 harmonic vector. const vector getH2() const { return h2; } /// @brief Replace reference flow bin with another reference /// flow bin, eg. calculated in another phase space or with /// other pid. void setReference(CorBin refIn) { reference = refIn; } /// @brief Extract the reference flow from a differential event /// averaged correlator. const CorBin getReference() const { if (reference.mean() < 1e-10) cout << "Warning: ECorrelator, reference bin is zero." << endl; return reference; } /// @brief set the @parm prIn list of profile histograms associated /// with the internal bins. Is called automatically when booking, no /// need to call it yourself. void setProfs(list prIn) { profs = prIn; } /// @brief Fill bins with content from preloaded histograms. void fillFromProfs() { list::iterator hItr = profs.begin(); auto refs = reference.getBinPtrs(); for (size_t i = 0; i < profs.size(); ++i, ++hItr) { for (size_t j = 0; j < binX.size() - 1; ++j) { const YODA::ProfileBin1D& pBin = (*hItr)->binAt(binX[j]); auto tmp = binContent[j].getBinPtrs(); tmp[i]->addContent(pBin.numEntries(), pBin.sumW(), pBin.sumW2(), pBin.sumWY()); } // Get the reference flow from the underflow bin of the histogram. const YODA::Dbn2D& uBin = (*hItr)->underflow(); refs[i]->addContent(uBin.numEntries(), uBin.sumW(), uBin.sumW2(), uBin.sumWY()); } // End loop of bootstrapped correlators. } /// @brief begin() iterator for the list of associated profile histograms. list::iterator profBegin() { return profs.begin(); } /// @brief end() iterator for the list of associated profile histograms. list::iterator profEnd() { return profs.end(); } private: /// @brief Get correct bin index for a given @parm obs value. const int getBinIndex(const double& obs) const { // Find the correct index of binContent. // If we are in overflow, just skip. if (obs >= binX.back()) return -1; // If we are in underflow, ditto. if (obs < binX[0]) return -1; int index = 0; for (int i = 0, N = binX.size() - 1; i < N; ++i, ++index) if (obs >= binX[i] && obs < binX[i + 1]) break; return index; } // The harmonics vectors. vector h1; vector h2; // The bins. vector binX; vector binContent; // The reference flow. CorBin reference; // The profile histograms associated with the CorBins for streaming. list profs; }; // End of ECorrelator sub-class. // @brief Get the correct max N and max P for the set of // booked correlators. const pair getMaxValues() const { vector< vector> harmVecs; for ( auto eItr = eCorrPtrs.begin(); eItr != eCorrPtrs.end(); ++eItr) { vector h1 = (*eItr)->getH1(); vector h2 = (*eItr)->getH2(); if (h1.size() > 0) harmVecs.push_back(h1); if (h2.size() > 0) harmVecs.push_back(h2); } if (harmVecs.size() == 0) { cout << "Warning: You tried to extract max values from harmonic " "vectors, but have not booked any." << endl; return pair(); } return Correlators::getMaxValues(harmVecs); } // Typedeffing shared pointer to ECorrelator. typedef shared_ptr ECorrPtr; /// @brief Book an ECorrelator in the same way as a histogram. /// @todo Rename to book(ECorrPtr, ...) 
ECorrPtr bookECorrelator(const string name, const vector& h, const Scatter2DPtr hIn) { vector binIn; for (auto b : hIn->points()) binIn.push_back(b.xMin()); binIn.push_back(hIn->points().back().xMax()); ECorrPtr ecPtr = ECorrPtr(new ECorrelator(h, binIn)); list eCorrProfs; for (int i = 0; i < BOOT_BINS; ++i) { Profile1DPtr tmp; book(tmp, name+"-"+to_string(i),*hIn); tmp->setPath(this->name()+"/FINAL/" + name+"-"+to_string(i)); //tmp->setPath(tmp->path()+"FINAL"); eCorrProfs.push_back(tmp); } ecPtr->setProfs(eCorrProfs); eCorrPtrs.push_back(ecPtr); return ecPtr; } /// @brief Book a gapped ECorrelator with two harmonic vectors. /// @todo Rename to book(ECorrPtr, ...) ECorrPtr bookECorrelator(const string name, const vector& h1, const vector& h2, const Scatter2DPtr hIn ) { vector binIn; for (auto b : hIn->points()) binIn.push_back(b.xMin()); binIn.push_back(hIn->points().back().xMax()); ECorrPtr ecPtr = ECorrPtr(new ECorrelator(h1, h2, binIn)); list eCorrProfs; for (int i = 0; i < BOOT_BINS; ++i) { Profile1DPtr tmp; book(tmp, name+"-"+to_string(i),*hIn); tmp->setPath(this->name()+"/FINAL/" + name+"-"+to_string(i)); //tmp->setPath(tmp->path()+"FINAL"); eCorrProfs.push_back(tmp); } ecPtr->setProfs(eCorrProfs); eCorrPtrs.push_back(ecPtr); return ecPtr; } /// @brief Short hand for gapped correlators which splits the harmonic /// vector into negative and positive components. /// @todo Rename to book(ECorrPtr, ...) ECorrPtr bookECorrelatorGap (const string name, const vector& h, const Scatter2DPtr hIn) { const vector h1(h.begin(), h.begin() + h.size() / 2); const vector h2(h.begin() + h.size() / 2, h.end()); return bookECorrelator(name, h1, h2, hIn); } /// @brief Templated version of correlator booking which takes /// @parm N desired harmonic and @parm M number of particles. /// @todo Rename to book(ECorrPtr, ...) template ECorrPtr bookECorrelator(const string name, const Scatter2DPtr hIn) { return bookECorrelator(name, Correlators::hVec(N, M), hIn); } /// @brief Templated version of gapped correlator booking which takes /// @parm N desired harmonic and @parm M number of particles. /// @todo Rename to book(ECorrPtr, ...) template ECorrPtr bookECorrelatorGap(const string name, const Scatter2DPtr hIn) { return bookECorrelatorGap(name, Correlators::hVec(N, M), hIn); } // @brief The stream method MUST be called in finalize() if one wants to stream // correlators to the yoda file, in order to do reentrant finalize // (ie. multi-histogram merging) for the analysis. void stream() { for (auto ecItr = eCorrPtrs.begin(); ecItr != eCorrPtrs.end(); ++ecItr){ (*ecItr)->fillFromProfs(); corrPlot(list((*ecItr)->profBegin(), (*ecItr)->profEnd()), *ecItr); } } private: /// Bookkeeping of the event averaged correlators. list eCorrPtrs; public: // @brief Constructor. Use CumulantAnalysis as base class for the // analysis to have access to functionality. CumulantAnalysis (string n) : Analysis(n), errorMethod(VARIANCE) {}; // @brief Helper method for turning correlators into Scatter2Ds. // Takes @parm h a pointer to the resulting Scatter2D, @parm binx // the x-bins and a function @parm func defining the transformation. // Makes no attempt at statistical errors. // See usage in the methods below. // Can also be used directly in the analysis if a user wants to // perform an unforseen transformation from correlators to // Scatter2D. 
template static void fillScatter(Scatter2DPtr h, vector& binx, T func) { vector points; for (int i = 0, N = binx.size() - 1; i < N; ++i) { double xMid = (binx[i] + binx[i + 1]) / 2.0; double xeMin = fabs(xMid - binx[i]); double xeMax = fabs(xMid - binx[i + 1]); double yVal = func(i); if (std::isnan(yVal)) yVal = 0.; double yErr = 0; points.push_back(YODA::Point2D(xMid, yVal, xeMin, xeMax, yErr, yErr)); } h->reset(); h->points().clear(); for (int i = 0, N = points.size(); i < N; ++i) h->addPoint(points[i]); } // @brief Helper method for turning correlators into Scatter2Ds // with error estimates. // Takes @parm h a pointer to the resulting Scatter2D, @parm binx // the x-bins, a function @parm func defining the transformation // and a vector of errors @parm err. // See usage in the methods below. // Can also be used directly in the analysis if a user wants to // perform an unforseen transformation from correlators to // Scatter2D. template const void fillScatter(Scatter2DPtr h, vector& binx, F func, vector >& yErr) const { vector points; for (int i = 0, N = binx.size() - 1; i < N; ++i) { double xMid = (binx[i] + binx[i + 1]) / 2.0; double xeMin = fabs(xMid - binx[i]); double xeMax = fabs(xMid - binx[i + 1]); double yVal = func(i); if (std::isnan(yVal)) points.push_back(YODA::Point2D(xMid, 0., xeMin, xeMax,0., 0.)); else points.push_back(YODA::Point2D(xMid, yVal, xeMin, xeMax, yErr[i].first, yErr[i].second)); } h->reset(); h->points().clear(); for (int i = 0, N = points.size(); i < N; ++i) h->addPoint(points[i]); } /// @brief Take the @parm n th power of all points in @parm hIn, /// and put the result in @parm hOut. Optionally put a /// @parm k constant below the root. static void nthPow(Scatter2DPtr hOut, const Scatter2DPtr hIn, const double& n, const double& k = 1.0) { if (n == 0 || n == 1) { cout << "Error: Do not take the 0th or 1st power of a Scatter2D," " use scale instead." << endl; return; } if (hIn->points().size() != hOut->points().size()) { cout << "nthRoot: Scatterplots: " << hIn->name() << " and " << hOut->name() << " not initialized with same length." << endl; return; } vector points; // The error pre-factor is k^(1/n) / n by Taylors formula. double eFac = pow(k,1./n)/n; for (auto b : hIn->points()) { double yVal = pow(k * b.y(),n); if (std::isnan(yVal)) points.push_back(YODA::Point2D(b.x(), 0., b.xErrMinus(), b.xErrPlus(), 0, 0 )); else { double yemin = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrMinus(); if (std::isnan(yemin)) yemin = b.yErrMinus(); double yemax = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrPlus(); if (std::isnan(yemax)) yemax = b.yErrPlus(); points.push_back(YODA::Point2D(b.x(), yVal, b.xErrMinus(), b.xErrPlus(), yemin, yemax )); } } hOut->reset(); hOut->points().clear(); for (int i = 0, N = points.size(); i < N; ++i) hOut->addPoint(points[i]); } /// @brief Take the @parm n th power of all points in @parm h, /// and put the result back in the same Scatter2D. Optionally put a /// @parm k constant below the root. static void nthPow(Scatter2DPtr h, const double& n, const double& k = 1.0) { if (n == 0 || n == 1) { cout << "Error: Do not take the 0th or 1st power of a Scatter2D," " use scale instead." << endl; return; } vector points; vector pIn = h->points(); // The error pre-factor is k^(1/n) / n by Taylors formula. 
double eFac = pow(k,1./n)/n; for (auto b : pIn) { double yVal = pow(k * b.y(),n); if (std::isnan(yVal)) points.push_back(YODA::Point2D(b.x(), 0., b.xErrMinus(), b.xErrPlus(), 0, 0 )); else { double yemin = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrMinus(); if (std::isnan(yemin)) yemin = b.yErrMinus(); double yemax = abs(eFac * pow(yVal,1./(n - 1.))) * b.yErrPlus(); if (std::isnan(yemax)) yemax = b.yErrPlus(); points.push_back(YODA::Point2D(b.x(), yVal, b.xErrMinus(), b.xErrPlus(), yemin, yemax )); } } h->reset(); h->points().clear(); for (int i = 0, N = points.size(); i < N; ++i) h->addPoint(points[i]); } // @brief Calculate the bootstrapped sample variance for the observable // given by correlators and the transformation @parm func. template static pair sampleVariance(T func) { // First we calculate the mean (two pass calculation). double avg = 0.; for (int i = 0; i < BOOT_BINS; ++i) avg += func(i); avg /= double(BOOT_BINS); // Then we find the variance. double var = 0.; for (int i = 0; i < BOOT_BINS; ++i) var += pow(func(i) - avg, 2.); var /= (double(BOOT_BINS) - 1); return pair(var, var); } // @brief Calculate the bootstrapped sample envelope for the observable // given by correlators and the transformation @parm func. template static pair sampleEnvelope(T func) { // First we calculate the mean. double avg = 0.; for (int i = 0; i < BOOT_BINS; ++i) avg += func(i); avg /= double(BOOT_BINS); double yMax = avg; double yMin = avg; // Then we find the envelope using the mean as initial value. for (int i = 0; i < BOOT_BINS; ++i) { double yVal = func(i); if (yMin > yVal) yMin = yVal; else if (yMax < yVal) yMax = yVal; } return pair(fabs(avg - yMin), fabs(yMax - avg)); } // @brief Selection method for which sample error to use, given // in the constructor. template const pair sampleError(T func) const { if (errorMethod == VARIANCE) return sampleVariance(func); else if (errorMethod == ENVELOPE) return sampleEnvelope(func); else cout << "Error: Error method not found!" << endl; return pair(0.,0.); } // @brief Two particle integrated cn. const void cnTwoInt(Scatter2DPtr h, ECorrPtr e2) const { vector bins = e2->getBins(); vector binx = e2->getBinX(); // Assert bin size. if (binx.size() - 1 != bins.size()){ cout << "cnTwoInt: Bin size (x,y) differs!" << endl; return; } vector binPtrs; // The mean value of the cumulant. auto cn = [&] (int i) { return binPtrs[i]->mean(); }; // Error calculation. vector > yErr; for (int j = 0, N = bins.size(); j < N; ++j) { binPtrs = bins[j].getBinPtrs(); yErr.push_back(sampleError(cn)); } binPtrs = e2->getBinPtrs(); fillScatter(h, binx, cn, yErr); } // @brief Two particle integrated vn. const void vnTwoInt(Scatter2DPtr h, ECorrPtr e2) const { cnTwoInt(h, e2); nthPow(h, 0.5); } // @brief Put an event averaged correlator into a Scatter2D. // Reduces to cnTwoInt, but better with a proper name. const void corrPlot(Scatter2DPtr h, ECorrPtr e) const { cnTwoInt(h, e); } // @brief Put an event averaged correlator into Profile1Ds, one // for each bootstrapping bin. const void corrPlot(list profs, ECorrPtr e) const { vector corBins = e->getBins(); vector binx = e->getBinX(); auto ref = e->getReference(); auto refBins = ref.getBinPtrs(); // Assert bin size. if (binx.size() - 1 != corBins.size()){ cout << "corrPlot: Bin size (x,y) differs!" << endl; return; } list::iterator hItr = profs.begin(); // Loop over the boostrapped correlators. 
    // @brief Put an event-averaged correlator into Profile1Ds, one
    // for each bootstrapping bin.
    const void corrPlot(list<Profile1DPtr> profs, ECorrPtr e) const {
      vector<CorBin> corBins = e->getBins();
      vector<double> binx = e->getBinX();
      auto ref = e->getReference();
      auto refBins = ref.getBinPtrs();
      // Assert bin size.
      if (binx.size() - 1 != corBins.size()) {
        cout << "corrPlot: Bin size (x,y) differs!" << endl;
        return;
      }
      list<Profile1DPtr>::iterator hItr = profs.begin();
      // Loop over the bootstrapped correlators.
      for (size_t i = 0; i < profs.size(); ++i, ++hItr) {
        vector<YODA::ProfileBin1D> profBins;
        // Numbers for the summary distribution.
        double ne = 0., sow = 0., sow2 = 0.;
        for (size_t j = 0, N = binx.size() - 1; j < N; ++j) {
          vector<CorSingleBin*> binPtrs = corBins[j].getBinPtrs<CorSingleBin>();
          // Construct bin of the profiled quantities. We have no information
          // (and no desire to add it) of sumWX of the profile, so really
          // we should use a Dbn1D - but that does not work for Profile1Ds.
          profBins.push_back(YODA::ProfileBin1D((*hItr)->bin(j).xEdges(),
            YODA::Dbn2D(binPtrs[i]->numEntries(), binPtrs[i]->sumW(),
                        binPtrs[i]->sumW2(), 0., 0., binPtrs[i]->sumWX(), 0, 0)));
          ne += binPtrs[i]->numEntries();
          sow += binPtrs[i]->sumW();
          sow2 += binPtrs[i]->sumW2();
        }
        // Reset the bins of the profiles.
        (*hItr)->reset();
        (*hItr)->bins().clear();
        // Add our new bins.
        // The total distribution.
        (*hItr)->setTotalDbn(YODA::Dbn2D(ne, sow, sow2, 0., 0., 0., 0., 0.));
        // The bins.
        for (int j = 0, N = profBins.size(); j < N; ++j)
          (*hItr)->addBin(profBins[j]);
        // The reference flow in the underflow bin.
        (*hItr)->setUnderflow(YODA::Dbn2D(refBins[i]->numEntries(),
          refBins[i]->sumW(), refBins[i]->sumW2(), 0., 0.,
          refBins[i]->sumWX(), 0., 0.));
      } // End loop over bootstrapped correlators.
    }

    // @brief Four-particle integrated cn.
    const void cnFourInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4) const {
      auto e2bins = e2->getBins();
      auto e4bins = e4->getBins();
      auto binx = e2->getBinX();
      if (binx.size() - 1 != e2bins.size()) {
        cout << "cnFourInt: Bin size (x,y) differs!" << endl;
        return;
      }
      if (binx != e4->getBinX()) {
        cout << "Error in cnFourInt: Correlator x-binning differs!" << endl;
        return;
      }
      vector<CorBinBase*> e2binPtrs;
      vector<CorBinBase*> e4binPtrs;
      auto cn = [&] (int i) {
        double e22 = e2binPtrs[i]->mean() * e2binPtrs[i]->mean();
        return e4binPtrs[i]->mean() - 2. * e22;
      };
      // Error calculation.
      vector<pair<double, double> > yErr;
      for (int j = 0, N = e2bins.size(); j < N; ++j) {
        e2binPtrs = e2bins[j].getBinPtrs();
        e4binPtrs = e4bins[j].getBinPtrs();
        yErr.push_back(sampleError(cn));
      }
      // Put the bin ptrs back in place.
      e2binPtrs = e2->getBinPtrs();
      e4binPtrs = e4->getBinPtrs();
      fillScatter(h, binx, cn, yErr);
    }

    // @brief Four-particle integrated vn.
    const void vnFourInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4) const {
      cnFourInt(h, e2, e4);
      nthPow(h, 0.25, -1.0);
    }

    // @brief Six-particle integrated cn.
    const void cnSixInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
                        ECorrPtr e6) const {
      auto e2bins = e2->getBins();
      auto e4bins = e4->getBins();
      auto e6bins = e6->getBins();
      auto binx = e2->getBinX();
      if (binx.size() - 1 != e2bins.size()) {
        cout << "cnSixInt: Bin size (x,y) differs!" << endl;
        return;
      }
      if (binx != e4->getBinX() || binx != e6->getBinX()) {
        cout << "Error in cnSixInt: Correlator x-binning differs!" << endl;
        return;
      }
      vector<CorBinBase*> e2binPtrs;
      vector<CorBinBase*> e4binPtrs;
      vector<CorBinBase*> e6binPtrs;
      auto cn = [&] (int i) {
        double e23 = pow(e2binPtrs[i]->mean(), 3.0);
        return e6binPtrs[i]->mean() -
          9.*e2binPtrs[i]->mean()*e4binPtrs[i]->mean() + 12.*e23;
      };
      // Error calculation.
      vector<pair<double, double> > yErr;
      for (int j = 0, N = e2bins.size(); j < N; ++j) {
        e2binPtrs = e2bins[j].getBinPtrs();
        e4binPtrs = e4bins[j].getBinPtrs();
        e6binPtrs = e6bins[j].getBinPtrs();
        yErr.push_back(sampleError(cn));
      }
      // Put the bin ptrs back in place.
      e2binPtrs = e2->getBinPtrs();
      e4binPtrs = e4->getBinPtrs();
      e6binPtrs = e6->getBinPtrs();
      fillScatter(h, binx, cn, yErr);
    }

    // @brief Six-particle integrated vn.
    const void vnSixInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
                        ECorrPtr e6) const {
      cnSixInt(h, e2, e4, e6);
      nthPow(h, 1./6., 0.25);
    }
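    // Reading aid for the cumulant lambdas above and below (double angle
    // brackets denote event-averaged correlators):
    //   c{2} = <<2>>
    //   c{4} = <<4>> - 2 <<2>>^2
    //   c{6} = <<6>> - 9 <<2>><<4>> + 12 <<2>>^3
    //   c{8} = <<8>> - 16 <<6>><<2>> - 18 <<4>>^2 + 144 <<4>><<2>>^2 - 144 <<2>>^4
    // with the flow coefficients recovered via nthPow():
    //   vn{2} = c{2}^(1/2),      vn{4} = (-c{4})^(1/4),
    //   vn{6} = (c{6}/4)^(1/6),  vn{8} = (-c{8}/33)^(1/8).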
    // @brief Eight-particle integrated cn.
    const void cnEightInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
                          ECorrPtr e6, ECorrPtr e8) const {
      auto e2bins = e2->getBins();
      auto e4bins = e4->getBins();
      auto e6bins = e6->getBins();
      auto e8bins = e8->getBins();
      auto binx = e2->getBinX();
      if (binx.size() - 1 != e2bins.size()) {
        cout << "cnEightInt: Bin size (x,y) differs!" << endl;
        return;
      }
      if (binx != e4->getBinX() || binx != e6->getBinX() ||
          binx != e8->getBinX()) {
        cout << "Error in cnEightInt: Correlator x-binning differs!" << endl;
        return;
      }
      vector<CorBinBase*> e2binPtrs;
      vector<CorBinBase*> e4binPtrs;
      vector<CorBinBase*> e6binPtrs;
      vector<CorBinBase*> e8binPtrs;
      auto cn = [&] (int i) {
        double e22 = e2binPtrs[i]->mean() * e2binPtrs[i]->mean();
        double e24 = e22 * e22;
        double e42 = e4binPtrs[i]->mean() * e4binPtrs[i]->mean();
        return e8binPtrs[i]->mean() -
          16. * e6binPtrs[i]->mean() * e2binPtrs[i]->mean() -
          18. * e42 + 144. * e4binPtrs[i]->mean()*e22 - 144. * e24;
      };
      // Error calculation.
      vector<pair<double, double> > yErr;
      for (int j = 0, N = e2bins.size(); j < N; ++j) {
        e2binPtrs = e2bins[j].getBinPtrs();
        e4binPtrs = e4bins[j].getBinPtrs();
        e6binPtrs = e6bins[j].getBinPtrs();
        e8binPtrs = e8bins[j].getBinPtrs();
        yErr.push_back(sampleError(cn));
      }
      // Put the bin ptrs back in place.
      e2binPtrs = e2->getBinPtrs();
      e4binPtrs = e4->getBinPtrs();
      e6binPtrs = e6->getBinPtrs();
      e8binPtrs = e8->getBinPtrs();
      fillScatter(h, binx, cn, yErr);
    }

    // @brief Eight-particle integrated vn.
    const void vnEightInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
                          ECorrPtr e6, ECorrPtr e8) const {
      cnEightInt(h, e2, e4, e6, e8);
      nthPow(h, 1./8., -1./33.);
    }

    // @brief Two-particle differential vn.
    const void vnTwoDiff(Scatter2DPtr h, ECorrPtr e2Dif) const {
      auto e2bins = e2Dif->getBins();
      auto ref = e2Dif->getReference();
      auto binx = e2Dif->getBinX();
      if (binx.size() - 1 != e2bins.size()) {
        cout << "vnTwoDiff: Bin size (x,y) differs!" << endl;
        return;
      }
      vector<CorBinBase*> e2binPtrs;
      vector<CorBinBase*> refPtrs;
      auto vn = [&] (int i) {
        // Test reference flow.
        if (ref.mean() <= 0) return 0.;
        return e2binPtrs[i]->mean() / sqrt(ref.mean());
      };
      // We need a separate error function here, as we don't
      // iterate over the reference flow.
      auto vnerr = [&] (int i) {
        // Test reference flow.
        if (refPtrs[i]->mean() <= 0) return 0.;
        return e2binPtrs[i]->mean() / sqrt(refPtrs[i]->mean());
      };
      // Error calculation.
      vector<pair<double, double> > yErr;
      refPtrs = ref.getBinPtrs();
      for (int j = 0, N = e2bins.size(); j < N; ++j) {
        e2binPtrs = e2bins[j].getBinPtrs();
        yErr.push_back(sampleError(vnerr));
      }
      // Put the e2binPtrs back in place.
      e2binPtrs = e2Dif->getBinPtrs();
      fillScatter(h, binx, vn, yErr);
    }

    // @brief Four-particle differential vn.
    const void vnFourDiff(Scatter2DPtr h, ECorrPtr e2Dif, ECorrPtr e4Dif) const {
      auto e2bins = e2Dif->getBins();
      auto e4bins = e4Dif->getBins();
      auto ref2 = e2Dif->getReference();
      auto ref4 = e4Dif->getReference();
      auto binx = e2Dif->getBinX();
      if (binx.size() - 1 != e2bins.size()) {
        cout << "vnFourDiff: Bin size (x,y) differs!" << endl;
        return;
      }
      if (binx != e4Dif->getBinX()) {
        cout << "Error in vnFourDiff: Correlator x-binning differs!" << endl;
        return;
      }
      vector<CorBinBase*> e2binPtrs;
      vector<CorBinBase*> e4binPtrs;
      vector<CorBinBase*> ref2Ptrs;
      vector<CorBinBase*> ref4Ptrs;
      double denom = 2 * ref2.mean() * ref2.mean() - ref4.mean();
      auto vn = [&] (int i) {
        // Test denominator.
        if (denom <= 0) return 0.;
        return ((2 * ref2.mean() * e2bins[i].mean() - e4bins[i].mean()) /
          pow(denom, 0.75));
      };
      // We need a separate error function here, as we don't
      // iterate over the reference flow.
      auto vnerr = [&] (int i) {
        double denom2 = 2 * ref2Ptrs[i]->mean() * ref2Ptrs[i]->mean() -
          ref4Ptrs[i]->mean();
        // Test denominator.
        if (denom2 <= 0) return 0.;
        return ((2 * ref2Ptrs[i]->mean() * e2binPtrs[i]->mean() -
          e4binPtrs[i]->mean()) / pow(denom2, 0.75));
      };
      // Error calculation.
      vector<pair<double, double> > yErr;
      ref2Ptrs = ref2.getBinPtrs();
      ref4Ptrs = ref4.getBinPtrs();
      for (int j = 0, N = e2bins.size(); j < N; ++j) {
        e2binPtrs = e2bins[j].getBinPtrs();
        e4binPtrs = e4bins[j].getBinPtrs();
        yErr.push_back(sampleError(vnerr));
      }
      // Put the binPtrs back in place.
      e2binPtrs = e2Dif->getBinPtrs();
      e4binPtrs = e4Dif->getBinPtrs();
      fillScatter(h, binx, vn, yErr);
    }

  }; // End class CumulantAnalysis.

} // End Rivet namespace.

#endif
diff --git a/include/Rivet/Tools/Percentile.hh b/include/Rivet/Tools/Percentile.hh
--- a/include/Rivet/Tools/Percentile.hh
+++ b/include/Rivet/Tools/Percentile.hh
@@ -1,688 +1,688 @@
#ifndef PERCENTILE_HH
#define PERCENTILE_HH

#include "Rivet/Event.hh"
#include "Rivet/Projections/CentralityProjection.hh"
#include "Rivet/ProjectionApplier.hh"

namespace Rivet {

/// Forward declaration.
class Analysis;

/// @brief PercentileBase is the base class of all Percentile classes.
///
/// This base class contains all non-templated variables and
/// infrastructure needed.
class PercentileBase {
public:

  /// @brief The main constructor,
  ///
  /// requiring a pointer, @a ana, to the Analysis to which this
  /// object belongs and the name of the CentralityProjection, @a
  /// projname, to be used.
  PercentileBase(Analysis * ana, string projName)
    : _ana(ana), _projName(projName) {}

  /// @brief Default constructor.
  PercentileBase() {}

  /// @brief Initialize the PercentileBase for a new event.
  ///
  /// This will perform the assigned CentralityProjection and select
  /// out the (indices of the) internal AnalysisObjects that are to be
  /// active in this event.
  void selectBins(const Event &);

  /// @brief Helper function to check if @a x is within @a range.
  static bool inRange(double x, pair<float, float> range) {
    return x >= range.first &&
      ( x < range.second || ( x == 100.0 && x == range.second ) );
  }

  /// @brief Copy information from @a other PercentileBase.
  void copyFrom(const PercentileBase & other) {
    _ana = other._ana;
    _projName = other._projName;
    _cent = other._cent;
  }

  /// @brief Check if @a other PercentileBase is compatible with this.
  bool compatible(const PercentileBase & other) const {
    return ( _ana == other._ana &&
             _projName == other._projName &&
             _cent == other._cent );
  }

  /// @brief Return the list of centrality bins.
  ///
  /// The size of this vector is the same as the number of internal
  /// analysis objects in the subclass PercentileTBase.
  const vector< pair<float, float> > & centralities() const {
    return _cent;
  }

protected:

  /// The Analysis object to which this object is assigned.
  Analysis * _ana;

  /// The name of the CentralityProjection.
  string _projName;

  /// The list of indices of the analysis objects that are to be
  /// filled in the current event.
  vector<int> _activeBins;

  /// The list of centrality intervals, one for each included analysis
  /// object.
  vector< pair<float, float> > _cent;

};

/// @brief PercentileTBase is the base class of all Percentile classes.
///
/// This base class contains all template-dependent variables and
/// infrastructure needed for Percentile and PercentileXaxis.
template <class T>
class PercentileTBase : public PercentileBase {
public:

  /// Convenient typedef.
-  typedef typename T::Ptr TPtr;
+  typedef rivet_shared_ptr<Wrapper<T>> TPtr;
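  // Illustration (not part of the header itself): for T = YODA::Histo1D
  // the new typedef resolves to rivet_shared_ptr<Wrapper<YODA::Histo1D>>,
  // i.e. the same multiweight-aware Histo1DPtr handle that analyses book,
  // replacing the bare single-weight YODA pointer type used before.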
  /// @brief The main constructor,
  ///
  /// requiring a pointer, @a ana, to the Analysis to which this
  /// object belongs and the name of the CentralityProjection, @a
  /// projname, to be used.
  PercentileTBase(Analysis * ana, string projName)
    : PercentileBase(ana, projName), _histos() {}

  /// @brief Default constructor.
  PercentileTBase() {}

  /// @brief Empty destructor.
  ~PercentileTBase() {}

  /// @brief Add a new percentile bin.
  ///
  /// Adds an analysis object, a clone of @a temp, that should be
  /// active for events in the given centrality bin @a cent. Several
  /// analysis objects may be added depending on the number of
  /// alternative centrality definitions in the CentralityProjection
  /// @a proj. This function is common for Percentile and
  /// PercentileXaxis, but for the latter the @a cent argument should
  /// be left at its default.
-  void add(shared_ptr<T> ao, CounterPtr cnt,
+  void add(TPtr ao, CounterPtr cnt,
           pair<float, float> cent = {0.0, 100.0} ) {
    _cent.push_back(cent);
    _histos.push_back( { ao, cnt } );
  }

  /// @brief Copy the information from an @a other Percentile object.
  ///
  /// This function differs from a simple assignment as the @a other
  /// analysis objects are not copied, but supplied separately through
  /// @a tv.
  bool add(const PercentileBase & other, const vector<TPtr> & tv) {
    copyFrom(other);
    if ( tv.size() != _cent.size() ) return false;
    for ( auto t : tv )
-      _histos.push_back( { t, make_shared<YODA::Counter>() } );
+      _histos.push_back( { t, CounterPtr() } );
    return true;
  }

  /// @brief Initialize for a new event. Select which AnalysisObjects
  /// should be filled for this event. Keeps track of the number of
  /// events seen for each centrality bin and AnalysisObject.
  bool init(const Event & event) {
    selectBins(event);
    for (const auto bin : _activeBins)
-      _histos[bin].second->fill(event.weight());
+      _histos[bin].second->fill();
    return !_activeBins.empty();
  }

  /// @brief Normalize each AnalysisObject
  ///
  /// by dividing by the sum of the events seen for each centrality
  /// bin.
  void normalizePerEvent() {
    for (const auto &hist : _histos)
      if ( hist.second->numEntries() > 0 && hist.first->numEntries() > 0)
        hist.first->scaleW(1./hist.second->val());
  }

  /// Simple scaling of each AnalysisObject.
  void scale(float scale) {
    for (const auto hist : _histos)
      hist.first->scaleW(scale);
  }

  /// Execute a function for each AnalysisObject.
  void exec(function<void(pair<TPtr, CounterPtr>)> f) {
    for ( auto hist : _histos) f(hist);
  }

  /// @brief Access the underlying AnalysisObjects.
  ///
  /// The returned vector contains a pair, where the first member is
  /// the AnalysisObject and the second is a counter keeping track of
  /// the sum of event weights for which the AnalysisObject has been
  /// active.
-  const vector< pair<shared_ptr<T>, shared_ptr<YODA::Counter> > > &
+  const vector< pair<TPtr, CounterPtr> > &
  analysisObjects() const {
    return _histos;
  }

protected:

  /// The vector of pairs, where the first member is the
  /// AnalysisObject and the second is a counter keeping track of
  /// the sum of event weights for which the AnalysisObject has been
  /// active.
-  vector< pair<shared_ptr<T>, shared_ptr<YODA::Counter> > > _histos;
+  vector< pair<TPtr, CounterPtr> > _histos;

};
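// Usage sketch (illustrative only; the booking helper's name and
// arguments are assumptions, not shown in this patch):
//
//   Percentile<YODA::Histo1D> _hNch;
//   // in init():
//   _hNch = bookPercentile("CENT", {{0., 10.}, {10., 20.}, {20., 40.}});
//   // in analyze():
//   _hNch->init(event);   // selects the centrality bins for this event
//   _hNch->fill(nch);     // fills only the selected AnalysisObjects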
/// @brief The Percentile class for centrality binning.
///
/// The Percentile class automatically handles the selection of which
/// AnalysisObject(s) should be filled depending on the centrality of
/// an event. It contains a list of AnalysisObjects, one for each
/// centrality bin requested (note that these bins may be overlapping),
/// and each centrality definition is available in the assigned
/// CentralityProjection.
template <class T>
class Percentile : public PercentileTBase<T> {
public:

  /// @brief The main constructor,
  ///
  /// requiring a pointer, @a ana, to the Analysis to which this
  /// object belongs and the name of the CentralityProjection, @a
  /// projname, to be used.
  Percentile(Analysis * ana, string projName)
    : PercentileTBase<T>(ana, projName) {}

  /// @brief Default constructor.
  Percentile() {}

  /// @brief Empty destructor.
  ~Percentile() {}

  /// Needed to access members of the templated base class.
  using PercentileTBase<T>::_histos;

  /// Needed to access members of the templated base class.
  using PercentileTBase<T>::_activeBins;

  /// Fill each AnalysisObject selected in the last call to
  /// PercentileTBase::init().
  template <typename... Args>
  void fill(Args... args) {
    for (const auto bin : _activeBins) {
      _histos[bin].first->fill(args...);
    }
  }

  /// Subtract the contents from another Percentile.
  Percentile<T> &operator-=(const Percentile<T> &rhs) {
    const int nCent = _histos.size();
    for (int iCent = 0; iCent < nCent; ++iCent) {
      *_histos[iCent].first -= *rhs._histos[iCent].first;
    }
    return *this;
  }

  /// Add the contents from another Percentile.
  Percentile<T> &operator+=(const Percentile<T> &rhs) {
    const int nCent = _histos.size();
    for (int iCent = 0; iCent < nCent; ++iCent) {
      *_histos[iCent].first += *rhs._histos[iCent].first;
      /// @todo should this also add the Counter?
    }
    return *this;
  }

  /// Make this object look like a pointer.
  Percentile<T> *operator->() { return this; }

  /// Pointer-to-member operator.
  Percentile<T> &operator->*(function<void(pair<typename PercentileTBase<T>::TPtr, CounterPtr>)> f) {
    this->exec(f);
    return *this;
  }

};

/// @brief The PercentileXaxis class for centrality binning.
///
/// The PercentileXaxis class automatically handles the x-axis of an
/// AnalysisObject when the x-axis is to be the centrality of an
/// event. This could also be done by e.g. filling a Histo1D directly
/// with the result of a CentralityProjection. However, since the
/// CentralityProjection may handle several centrality definitions at
/// the same time it is reasonable to instead use PercentileXaxis,
/// which will fill one histogram for each centrality definition.
///
/// Operationally this class works like the Percentile class, but only
/// one centrality bin (0-100) is included. When fill()ed the first
/// argument is always given by the assigned CentralityProjection.
template <class T>
class PercentileXaxis : public PercentileTBase<T> {
public:

  /// @brief The main constructor,
  ///
  /// requiring a pointer, @a ana, to the Analysis to which this
  /// object belongs and the name of the CentralityProjection, @a
  /// projname, to be used.
  PercentileXaxis(Analysis * ana, string projName)
    : PercentileTBase<T>(ana, projName) {}

  /// @brief Default constructor.
  PercentileXaxis() {}

  /// @brief Empty destructor.
  ~PercentileXaxis() {}

  /// Needed to access members of the templated base class.
  using PercentileTBase<T>::_histos;

  /// Needed to access members of the templated base class.
  using PercentileTBase<T>::_activeBins;

  /// Fill each AnalysisObject selected in the last call to
  /// PercentileTBase::init().
  template <typename... Args>
  void fill(Args... args) {
    for (const auto bin : _activeBins) {
      _histos[bin].first->fill(bin, args...);
    }
  }

  /// Subtract the contents from another PercentileXaxis.
  PercentileXaxis<T> &operator-=(const PercentileXaxis<T> &rhs) {
    const int nCent = _histos.size();
    for (int iCent = 0; iCent < nCent; ++iCent) {
      *_histos[iCent].first -= *rhs._histos[iCent].first;
    }
    return *this;
  }

  /// Add the contents from another PercentileXaxis.
  PercentileXaxis<T> &operator+=(const PercentileXaxis<T> &rhs) {
    const int nCent = this->_histos.size();
    for (int iCent = 0; iCent < nCent; ++iCent) {
      *_histos[iCent].first += *rhs._histos[iCent].first;
    }
    return *this;
  }

  /// Make this object look like a pointer.
  PercentileXaxis<T> *operator->() { return this; }
  /// Pointer-to-member operator.
  PercentileXaxis<T> &operator->*(function<void(pair<typename PercentileTBase<T>::TPtr, CounterPtr>)> f) {
    this->exec(f);
    return *this;
  }

};

/// @name Combining Percentiles following the naming of functions for
/// the underlying AnalysisObjects: global operators.
//@{
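// For example (sketch, names illustrative): given two compatible
// Percentile<YODA::Histo1D> objects pa and pb,
//
//   Percentile<YODA::Histo1D> sum = add(pa, pb);  // bin-by-bin sum
//   auto ratio = divide(pa, pb);                  // a Percentile<YODA::Scatter2D>
//
// Each function below asserts compatible() and applies the corresponding
// YODA operation to the analysis object of every centrality bin in turn.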
template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
divide(const Percentile<T> numer, const Percentile<T> denom) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( numer.compatible(denom) );
  for ( int i = 0, N = numer.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(divide(*numer.analysisObjects()[i].first,
                                                 *denom.analysisObjects()[i].first)));
  ret.add(numer, scatters);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
divide(const Percentile<T> numer,
       const Percentile<typename ReferenceTraits<T>::RefT> denom) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( numer.compatible(denom) );
  for ( int i = 0, N = numer.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(divide(*numer.analysisObjects()[i].first,
                                                 *denom.analysisObjects()[i].first)));
  ret.add(numer, scatters);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
divide(const Percentile<typename ReferenceTraits<T>::RefT> numer,
       const Percentile<T> denom) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( numer.compatible(denom) );
  for ( int i = 0, N = numer.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(divide(*numer.analysisObjects()[i].first,
                                                 *denom.analysisObjects()[i].first)));
  ret.add(numer, scatters);
  return ret;
}

template <class T>
Percentile<T> add(const Percentile<T> pctla, const Percentile<T> pctlb) {
  Percentile<T> ret;
  vector<shared_ptr<T>> aos;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    aos.push_back(make_shared<T>(add(*pctla.analysisObjects()[i].first,
                                     *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, aos);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
add(const Percentile<T> pctla,
    const Percentile<typename ReferenceTraits<T>::RefT> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(add(*pctla.analysisObjects()[i].first,
                                              *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
add(const Percentile<typename ReferenceTraits<T>::RefT> pctla,
    const Percentile<T> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(add(*pctla.analysisObjects()[i].first,
                                              *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
Percentile<T> subtract(const Percentile<T> pctla, const Percentile<T> pctlb) {
  Percentile<T> ret;
  vector<shared_ptr<T>> aos;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    aos.push_back(make_shared<T>(subtract(*pctla.analysisObjects()[i].first,
                                          *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, aos);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
subtract(const Percentile<T> pctla,
         const Percentile<typename ReferenceTraits<T>::RefT> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(subtract(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
subtract(const Percentile<typename ReferenceTraits<T>::RefT> pctla,
         const Percentile<T> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(subtract(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
multiply(const Percentile<T> pctla,
         const Percentile<typename ReferenceTraits<T>::RefT> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(multiply(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
multiply(const Percentile<typename ReferenceTraits<T>::RefT> pctla,
         const Percentile<T> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  Percentile<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(multiply(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
divide(const PercentileXaxis<T> numer, const PercentileXaxis<T> denom) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( numer.compatible(denom) );
  for ( int i = 0, N = numer.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(divide(*numer.analysisObjects()[i].first,
                                                 *denom.analysisObjects()[i].first)));
  ret.add(numer, scatters);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
divide(const PercentileXaxis<T> numer,
       const PercentileXaxis<typename ReferenceTraits<T>::RefT> denom) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( numer.compatible(denom) );
  for ( int i = 0, N = numer.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(divide(*numer.analysisObjects()[i].first,
                                                 *denom.analysisObjects()[i].first)));
  ret.add(numer, scatters);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
divide(const PercentileXaxis<typename ReferenceTraits<T>::RefT> numer,
       const PercentileXaxis<T> denom) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( numer.compatible(denom) );
  for ( int i = 0, N = numer.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(divide(*numer.analysisObjects()[i].first,
                                                 *denom.analysisObjects()[i].first)));
  ret.add(numer, scatters);
  return ret;
}

template <class T>
PercentileXaxis<T> add(const PercentileXaxis<T> pctla,
                       const PercentileXaxis<T> pctlb) {
  PercentileXaxis<T> ret;
  vector<shared_ptr<T>> aos;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    aos.push_back(make_shared<T>(add(*pctla.analysisObjects()[i].first,
                                     *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, aos);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
add(const PercentileXaxis<T> pctla,
    const PercentileXaxis<typename ReferenceTraits<T>::RefT> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(add(*pctla.analysisObjects()[i].first,
                                              *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
add(const PercentileXaxis<typename ReferenceTraits<T>::RefT> pctla,
    const PercentileXaxis<T> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(add(*pctla.analysisObjects()[i].first,
                                              *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
PercentileXaxis<T> subtract(const PercentileXaxis<T> pctla,
                            const PercentileXaxis<T> pctlb) {
  PercentileXaxis<T> ret;
  vector<shared_ptr<T>> aos;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    aos.push_back(make_shared<T>(subtract(*pctla.analysisObjects()[i].first,
                                          *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, aos);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
subtract(const PercentileXaxis<T> pctla,
         const PercentileXaxis<typename ReferenceTraits<T>::RefT> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(subtract(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
subtract(const PercentileXaxis<typename ReferenceTraits<T>::RefT> pctla,
         const PercentileXaxis<T> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(subtract(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
multiply(const PercentileXaxis<T> pctla,
         const PercentileXaxis<typename ReferenceTraits<T>::RefT> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(multiply(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
multiply(const PercentileXaxis<typename ReferenceTraits<T>::RefT> pctla,
         const PercentileXaxis<T> pctlb) {
  typedef typename ReferenceTraits<T>::RefT ScatT;
  PercentileXaxis<ScatT> ret;
  vector<shared_ptr<ScatT>> scatters;
  assert( pctla.compatible(pctlb) );
  for ( int i = 0, N = pctla.analysisObjects().size(); i < N; ++i )
    scatters.push_back(make_shared<ScatT>(multiply(*pctla.analysisObjects()[i].first,
                                                   *pctlb.analysisObjects()[i].first)));
  ret.add(pctla, scatters);
  return ret;
}

template <class T>
Percentile<T> operator+(const Percentile<T> pctla, const Percentile<T> pctlb) {
  return add(pctla, pctlb);
}

template <class T>
Percentile<T> operator-(const Percentile<T> pctla, const Percentile<T> pctlb) {
  return subtract(pctla, pctlb);
}

template <class T>
Percentile<typename ReferenceTraits<T>::RefT>
operator/(const Percentile<T> numer, const Percentile<T> denom) {
  return divide(numer, denom);
}

template <class T>
PercentileXaxis<T> operator+(const PercentileXaxis<T> pctla,
                             const PercentileXaxis<T> pctlb) {
  return add(pctla, pctlb);
}

template <class T>
PercentileXaxis<T> operator-(const PercentileXaxis<T> pctla,
                             const PercentileXaxis<T> pctlb) {
  return subtract(pctla, pctlb);
}

template <class T>
PercentileXaxis<typename ReferenceTraits<T>::RefT>
operator/(const PercentileXaxis<T> numer, const PercentileXaxis<T> denom) {
  return divide(numer, denom);
}

}

#endif
diff --git a/include/Rivet/Tools/RivetYODA.hh b/include/Rivet/Tools/RivetYODA.hh
--- a/include/Rivet/Tools/RivetYODA.hh
+++ b/include/Rivet/Tools/RivetYODA.hh
@@ -1,579 +1,595 @@
#ifndef RIVET_RIVETYODA_HH
#define RIVET_RIVETYODA_HH

#include "Rivet/Config/RivetCommon.hh"
#include "YODA/AnalysisObject.h"
#include "YODA/Counter.h"
#include "YODA/Histo1D.h"
#include "YODA/Histo2D.h"
#include "YODA/Profile1D.h"
#include "YODA/Profile2D.h"
#include "YODA/Scatter1D.h"
#include "YODA/Scatter2D.h"
#include "YODA/Scatter3D.h"
"YODA/Scatter3D.h" #include #include namespace YODA { typedef std::shared_ptr AnalysisObjectPtr; typedef std::shared_ptr CounterPtr; typedef std::shared_ptr Histo1DPtr; typedef std::shared_ptr Histo2DPtr; typedef std::shared_ptr Profile1DPtr; typedef std::shared_ptr Profile2DPtr; typedef std::shared_ptr Scatter1DPtr; typedef std::shared_ptr Scatter2DPtr; typedef std::shared_ptr Scatter3DPtr; } namespace Rivet { class AnalysisObjectWrapper { public: virtual ~AnalysisObjectWrapper() {} virtual YODA::AnalysisObject* operator->() = 0; virtual YODA::AnalysisObject* operator->() const = 0; virtual const YODA::AnalysisObject & operator*() const = 0; /// @todo Rename to setActive(idx) virtual void setActiveWeightIdx(unsigned int iWeight) = 0; + /// @todo Set active object for finalize + virtual void setActiveFinalWeightIdx(unsigned int iWeight) = 0; + bool operator ==(const AnalysisObjectWrapper& p) { return (this == &p); } bool operator !=(const AnalysisObjectWrapper& p) { return (this != &p); } protected: /// @todo do we need this? // virtual void reset() = 0; }; /// @todo /// implement scatter1dptr and scatter2dptr here /// these need to be multi-weighted eventually. /* class Scatter1DPtr : public AnalysisObjectPtr { public: Scatter1DPtr() : _persistent() { } Scatter1DPtr(size_t len_of_weightvec, const YODA::Scatter1D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared(p)); } bool operator!() const { return !_persistent; } explicit operator bool() const { return bool(_persistent); } YODA::Scatter1D* operator->() { return _persistent.get(); } YODA::Scatter1D* operator->() const { return _persistent.get(); } YODA::Scatter1D & operator*() { return *_persistent; } const YODA::Scatter1D & operator*() const { return *_persistent; } protected: vector _persistent; }; class Scatter2DPtr : public AnalysisObjectPtr { public: Scatter2DPtr(size_t len_of_weightvec, const YODA::Scatter2D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared(p)); } Scatter2DPtr() : _persistent() { } bool operator!() { return !_persistent; } explicit operator bool() { return bool(_persistent); } YODA::Scatter2D* operator->() { return _persistent.get(); } YODA::Scatter2D* operator->() const { return _persistent.get(); } YODA::Scatter2D & operator*() { return *_persistent; } const YODA::Scatter2D & operator*() const { return *_persistent; } protected: vector _persistent; }; class Scatter3DPtr : public AnalysisObjectPtr { public: Scatter3DPtr(size_t len_of_weightvec, const YODA::Scatter3D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared(p)); } Scatter3DPtr() : _persistent() { } bool operator!() { return !_persistent; } explicit operator bool() { return bool(_persistent); } YODA::Scatter3D* operator->() { return _persistent.get(); } YODA::Scatter3D* operator->() const { return _persistent.get(); } YODA::Scatter3D & operator*() { return *_persistent; } const YODA::Scatter3D & operator*() const { return *_persistent; } protected: vector _persistent; }; */ class MultiweightAOWrapper : public AnalysisObjectWrapper { public: using Inner = YODA::AnalysisObject; virtual void newSubEvent() = 0; virtual void pushToPersistent(const vector >& weight) = 0; + virtual void pushToFinal(const vector >& weight) = 0; virtual YODA::AnalysisObjectPtr activeYODAPtr() const = 0; }; using Weight = double; template using Fill = pair; template using Fills = multiset>; // TODO TODO TODO // need to override the old fill method too! 
  // otherwise we can't intercept existing fill calls in analysis code
  // TODO TODO TODO

  /// Wrappers for analysis objects to store all fills unaggregated, until collapsed.
  template <class T>
  class TupleWrapper;

  template<>
  class TupleWrapper<YODA::Counter> : public YODA::Counter {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Counter>> Ptr;
    TupleWrapper(const YODA::Counter & h) : YODA::Counter(h) {}

    // todo: do we need to deal with users using fractions directly?
    void fill( double weight=1.0, double fraction=1.0 ) {
      fills_.insert( { YODA::Counter::FillType(), weight } );
    }

    void reset() { fills_.clear(); }

    const Fills<YODA::Counter> & fills() const { return fills_; }

  private:
    // x / weight pairs
    Fills<YODA::Counter> fills_;
  };

  template<>
  class TupleWrapper<YODA::Histo1D> : public YODA::Histo1D {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Histo1D>> Ptr;
    TupleWrapper(const YODA::Histo1D & h) : YODA::Histo1D(h) {}

    // todo: do we need to deal with users using fractions directly?
    void fill( double x, double weight=1.0, double fraction=1.0 ) {
      if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
      fills_.insert( { x, weight } );
    }

    void reset() { fills_.clear(); }

    const Fills<YODA::Histo1D> & fills() const { return fills_; }

  private:
    // x / weight pairs
    Fills<YODA::Histo1D> fills_;
  };

  template<>
  class TupleWrapper<YODA::Profile1D> : public YODA::Profile1D {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Profile1D>> Ptr;
    TupleWrapper(const YODA::Profile1D & h) : YODA::Profile1D(h) {}

    // todo: do we need to deal with users using fractions directly?
    void fill( double x, double y, double weight=1.0, double fraction=1.0 ) {
      if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
      if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN");
      fills_.insert( { YODA::Profile1D::FillType{x,y}, weight } );
    }

    void reset() { fills_.clear(); }

    const Fills<YODA::Profile1D> & fills() const { return fills_; }

  private:
    // x / weight pairs
    Fills<YODA::Profile1D> fills_;
  };

  template<>
  class TupleWrapper<YODA::Histo2D> : public YODA::Histo2D {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Histo2D>> Ptr;
    TupleWrapper(const YODA::Histo2D & h) : YODA::Histo2D(h) {}

    // todo: do we need to deal with users using fractions directly?
    void fill( double x, double y, double weight=1.0, double fraction=1.0 ) {
      if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
      if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN");
      fills_.insert( { YODA::Histo2D::FillType{x,y}, weight } );
    }

    void reset() { fills_.clear(); }

    const Fills<YODA::Histo2D> & fills() const { return fills_; }

  private:
    // x / weight pairs
    Fills<YODA::Histo2D> fills_;
  };

  template<>
  class TupleWrapper<YODA::Profile2D> : public YODA::Profile2D {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Profile2D>> Ptr;
    TupleWrapper(const YODA::Profile2D & h) : YODA::Profile2D(h) {}

    // todo: do we need to deal with users using fractions directly?
    void fill( double x, double y, double z, double weight=1.0, double fraction=1.0 ) {
      if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
      if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN");
      if ( std::isnan(z) ) throw YODA::RangeError("Z is NaN");
      fills_.insert( { YODA::Profile2D::FillType{x,y,z}, weight } );
    }

    void reset() { fills_.clear(); }

    const Fills<YODA::Profile2D> & fills() const { return fills_; }

  private:
    // x / weight pairs
    Fills<YODA::Profile2D> fills_;
  };

  template<>
  class TupleWrapper<YODA::Scatter1D> : public YODA::Scatter1D {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Scatter1D>> Ptr;
    TupleWrapper(const YODA::Scatter1D & h) : YODA::Scatter1D(h) {}
  };

  template<>
  class TupleWrapper<YODA::Scatter2D> : public YODA::Scatter2D {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Scatter2D>> Ptr;
    TupleWrapper(const YODA::Scatter2D & h) : YODA::Scatter2D(h) {}
  };

  template<>
  class TupleWrapper<YODA::Scatter3D> : public YODA::Scatter3D {
  public:
    typedef shared_ptr<TupleWrapper<YODA::Scatter3D>> Ptr;
    TupleWrapper(const YODA::Scatter3D & h) : YODA::Scatter3D(h) {}
  };
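  // The effect of the TupleWrapper specializations above: while a
  // sub-event is being analysed, h->fill(x, w) does not touch the bins
  // at all; it only records the pair (x, w) in fills_. Only once the
  // whole event group has been seen are the recorded fills replayed into
  // the persistent histograms, once per weight stream (see
  // pushToPersistent below). A rough picture, with hypothetical values:
  //
  //   h->fill(3.2);        // fills_ == { (3.2, 1.0) }
  //   h->fill(7.7, 0.5);   // fills_ == { (3.2, 1.0), (7.7, 0.5) }
  //   // ...collapse: each weight variation scales and commits these fills.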
  template <class T>
  class Wrapper : public MultiweightAOWrapper {
    friend class Analysis;

  public:
    using Inner = T;
    /* @todo
     * some things are not really well-defined here
     * for instance: fill() in the finalize() method and integral() in
     * the analyze() method.
     */

    Wrapper() = default;

    Wrapper(const vector<string>& weightnames, const T & p);

    ~Wrapper();

    typename T::Ptr active() const;

    /* @todo this probably needs to loop over all? */
    bool operator!() const { return !_active; } // Don't use active() here, assert will catch

    explicit operator bool() const { return static_cast<bool>(_active); } // Don't use active() here, assert will catch

    T * operator->() { return active().get(); }

    T * operator->() const { return active().get(); }

    T & operator*() { return *active(); }

    const T & operator*() const { return *active(); }

    /* @todo
     * these need to be re-thought out.
     void reset() { active()->reset(); }
     */

    /* @todo
     * these probably need to loop over all?
     * do we even want to provide equality?
     */
    /* @todo
     * how about no.
    friend bool operator==(Wrapper a, Wrapper b){
      if (a._persistent.size() != b._persistent.size())
        return false;
      for (size_t i = 0; i < a._persistent.size(); i++) {
        if (a._persistent.at(i) != b._persistent.at(i)) {
          return false;
        }
      }
      return true;
    }

    friend bool operator!=(Wrapper a, Wrapper b){
      return !(a == b);
    }

    friend bool operator<(Wrapper a, Wrapper b){
      if (a._persistent.size() >= b._persistent.size())
        return false;
      for (size_t i = 0; i < a._persistent.size(); i++) {
        if (*(a._persistent.at(i)) >= *(b._persistent.at(i))) {
          return false;
        }
      }
      return true;
    }
    */

  private:
    void setActiveWeightIdx(unsigned int iWeight) {
      _active = _persistent.at(iWeight);
    }

+    void setActiveFinalWeightIdx(unsigned int iWeight) {
+      _active = _final.at(iWeight);
+    }
+
+    /* this is for dev only---we shouldn't need this in real runs. */
    void unsetActiveWeight() { _active.reset(); }

    void newSubEvent();

    virtual YODA::AnalysisObjectPtr activeYODAPtr() const { return _active; }

    const vector<typename T::Ptr> & persistent() const { return _persistent; }

+    const vector<typename T::Ptr> & final() const { return _final; }
+
    /* to be implemented for each type */
    void pushToPersistent(const vector<std::valarray<double> >& weight);
+    void pushToFinal(const vector<std::valarray<double> >& weight);

    /* M of these, one for each weight */
    vector<typename T::Ptr> _persistent;

+    /* This is the copy of _persistent that will be passed to finalize(). */
+    vector<typename T::Ptr> _final;
+
    /* N of these, one for each event in evgroup */
    vector<typename TupleWrapper<T>::Ptr> _evgroup;

    typename T::Ptr _active;

    // do we need implicit cast?
    // operator typename T::Ptr () {
    //   return _active;
    // }

    friend class AnalysisHandler;
  };

  /// We need our own shared_ptr class, so we can dispatch -> and *
  /// all the way down to the inner YODA analysis objects.
  ///
  /// TODO: provide remaining functionality that shared_ptr has (not needed right now)
  ///
  template <typename T>
  class rivet_shared_ptr {
  public:
    typedef T value_type;

    rivet_shared_ptr() = default;

    rivet_shared_ptr(decltype(nullptr)) : _p(nullptr) {}

    /// Convenience constructor, pass through to the Wrapper constructor
    rivet_shared_ptr(const vector<string>& weightNames, const typename T::Inner & p)
      : _p( make_shared<T>(weightNames, p) ) {}

    template <typename U>
    rivet_shared_ptr(const shared_ptr<U> & p)
      : _p(p) {}

    template <typename U>
    rivet_shared_ptr(const rivet_shared_ptr<U> & p)
      : _p(p.get()) {}

    // Goes right through to the active YODA object's members
    T & operator->() { return *_p; }
    const T & operator->() const { return *_p; }

    // The active YODA object
    typename T::Inner & operator*() { return **_p; }
    const typename T::Inner & operator*() const { return **_p; }

    bool operator!() const { return !_p || !(*_p); }
    explicit operator bool() const { return _p && bool(*_p); }

    template <typename U>
    bool operator==(const rivet_shared_ptr<U> & other) const {
      return _p == other._p;
    }

    template <typename U>
    bool operator!=(const rivet_shared_ptr<U> & other) const {
      return _p != other._p;
    }

    template <typename U>
    bool operator<(const rivet_shared_ptr<U> & other) const {
      return _p < other._p;
    }

    template <typename U>
    bool operator>(const rivet_shared_ptr<U> & other) const {
      return _p > other._p;
    }

    template <typename U>
    bool operator<=(const rivet_shared_ptr<U> & other) const {
      return _p <= other._p;
    }

    template <typename U>
    bool operator>=(const rivet_shared_ptr<U> & other) const {
      return _p >= other._p;
    }

    shared_ptr<T> get() const { return _p; }

  private:
    shared_ptr<T> _p;
  };

  // every object listed here needs a virtual fill method in YODA,
  // otherwise the Tuple fakery won't work.
  using MultiweightAOPtr = rivet_shared_ptr<MultiweightAOWrapper>;

  using Histo1DPtr   = rivet_shared_ptr<Wrapper<YODA::Histo1D>>;
  using Histo2DPtr   = rivet_shared_ptr<Wrapper<YODA::Histo2D>>;
  using Profile1DPtr = rivet_shared_ptr<Wrapper<YODA::Profile1D>>;
  using Profile2DPtr = rivet_shared_ptr<Wrapper<YODA::Profile2D>>;
  using CounterPtr   = rivet_shared_ptr<Wrapper<YODA::Counter>>;
  using Scatter1DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter1D>>;
  using Scatter2DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter2D>>;
  using Scatter3DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter3D>>;

  using YODA::Counter;
  using YODA::Histo1D;
  using YODA::HistoBin1D;
  using YODA::Histo2D;
  using YODA::HistoBin2D;
  using YODA::Profile1D;
  using YODA::ProfileBin1D;
  using YODA::Profile2D;
  using YODA::ProfileBin2D;
  using YODA::Scatter1D;
  using YODA::Point1D;
  using YODA::Scatter2D;
  using YODA::Point2D;
  using YODA::Scatter3D;
  using YODA::Point3D;
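  // How the pieces fit together (sketch, not normative): a booked
  // Histo1DPtr is a rivet_shared_ptr<Wrapper<YODA::Histo1D>>, so
  //
  //   h->fill(x);   // operator-> reaches the Wrapper, whose own
  //                 // operator-> returns the *active* inner object --
  //                 // during analyze() that is a TupleWrapper recording
  //                 // the fill for later multiweight collapsing;
  //   (*h).xMin();  // operator* jumps straight to the active YODA object.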
  /// Function to get a map of all the refdata in a paper with the
  /// given @a papername.
  map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername);

  /// @todo Also provide a Scatter3D getRefData() version?

  /// Get the file system path to the reference file for this paper.
  string getDatafilePath(const string& papername);

  /// Traits class to access the type of the AnalysisObject in the
  /// reference files.
  template <typename T>
  struct ReferenceTraits {};

  template<> struct ReferenceTraits<Counter>   { typedef Counter RefT; };
  template<> struct ReferenceTraits<Scatter1D> { typedef Scatter1D RefT; };
  template<> struct ReferenceTraits<Histo1D>   { typedef Scatter2D RefT; };
  template<> struct ReferenceTraits<Profile1D> { typedef Scatter2D RefT; };
  template<> struct ReferenceTraits<Scatter2D> { typedef Scatter2D RefT; };
  template<> struct ReferenceTraits<Histo2D>   { typedef Scatter3D RefT; };
  template<> struct ReferenceTraits<Profile2D> { typedef Scatter3D RefT; };
  template<> struct ReferenceTraits<Scatter3D> { typedef Scatter3D RefT; };

  /// If @a dst and @a src both are of same subclass T, copy the
  /// contents of @a src into @a dst and return true. Otherwise return
  /// false.
  template <typename T>
  inline bool aocopy(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) {
    shared_ptr<T> tsrc = dynamic_pointer_cast<T>(src);
    if ( !tsrc ) return false;
    shared_ptr<T> tdst = dynamic_pointer_cast<T>(dst);
    if ( !tdst ) return false;
    *tdst = *tsrc;
    return true;
  }

  /// If @a dst and @a src both are of same subclass T, add the
  /// contents of @a src into @a dst and return true. Otherwise return
  /// false.
  template <typename T>
  inline bool aoadd(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) {
    shared_ptr<T> tsrc = dynamic_pointer_cast<T>(src);
    if ( !tsrc ) return false;
    shared_ptr<T> tdst = dynamic_pointer_cast<T>(dst);
    if ( !tdst ) return false;
    tsrc->scaleW(scale);
    *tdst += *tsrc;
    return true;
  }

  /// If @a dst is the same subclass as @a src, copy the contents of @a
  /// src into @a dst and return true. Otherwise return false.
  bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst);

  /// If @a dst is the same subclass as @a src, scale the contents of
  /// @a src with @a scale and add it to @a dst and return true. Otherwise
  /// return false.
  bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale);
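  // Typical use of the helpers above (sketch; copyao/addaos are defined
  // in RivetYODA.cc, not shown here): try each concrete YODA type in
  // turn, dispatching to the templates, e.g.
  //
  //   if (aocopy<YODA::Histo1D>(src, dst)) return true;
  //   if (aocopy<YODA::Profile1D>(src, dst)) return true;
  //   // ... and so on for the other types.
  //
  // aoadd<T> additionally rescales the source by `scale` before adding.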
  /// Check if two analysis objects have the same binning or, if not
  /// binned, are in other ways compatible.
  template <typename TPtr>
  inline bool bookingCompatible(TPtr a, TPtr b) {
    return a->sameBinning(*b);
  }

  inline bool bookingCompatible(CounterPtr, CounterPtr) {
    return true;
  }

  inline bool bookingCompatible(Scatter1DPtr a, Scatter1DPtr b) {
    return a->numPoints() == b->numPoints();
  }

  inline bool bookingCompatible(Scatter2DPtr a, Scatter2DPtr b) {
    return a->numPoints() == b->numPoints();
  }

  inline bool bookingCompatible(Scatter3DPtr a, Scatter3DPtr b) {
    return a->numPoints() == b->numPoints();
  }

}

#endif
diff --git a/src/Core/AnalysisHandler.cc b/src/Core/AnalysisHandler.cc
--- a/src/Core/AnalysisHandler.cc
+++ b/src/Core/AnalysisHandler.cc
@@ -1,714 +1,716 @@
// -*- C++ -*-
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisHandler.hh"
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/ParticleName.hh"
#include "Rivet/Tools/BeamConstraint.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/Beam.hh"
#include "YODA/IO.h"
#include <regex>
#include <iostream>

using std::cout;
using std::cerr;

namespace {

  inline std::vector<std::string> split(const std::string& input, const std::string& regex) {
    // passing -1 as the submatch index parameter performs splitting
    std::regex re(regex);
    std::sregex_token_iterator first{input.begin(), input.end(), re, -1}, last;
    return {first, last};
  }

}

namespace Rivet {

  AnalysisHandler::AnalysisHandler(const string& runname)
    : _runname(runname), _eventcounter("/_EVTCOUNT"),
      _xs(NAN), _xserr(NAN),
      _initialised(false), _ignoreBeams(false),
      _dumpPeriod(0), _dumping(false),
      _defaultWeightIdx(0)
  {}

  AnalysisHandler::~AnalysisHandler() {}

  Log& AnalysisHandler::getLog() const {
    return Log::getLog("Rivet.Analysis.Handler");
  }

  /// http://stackoverflow.com/questions/4654636/how-to-determine-if-a-string-is-a-number-with-c
  namespace {
    bool is_number(const std::string& s) {
      std::string::const_iterator it = s.begin();
      while (it != s.end() && std::isdigit(*it)) ++it;
      return !s.empty() && it == s.end();
    }
  }

  /// Check if any of the weight names is not a number
  bool AnalysisHandler::haveNamedWeights() const {
    bool dec = false;
    for (unsigned int i = 0; i < _weightNames.size(); ++i) {
      string s = _weightNames[i];
      if (!is_number(s)) {
        dec = true;
        break;
      }
    }
    return dec;
  }

  void AnalysisHandler::init(const GenEvent& ge) {
    if (_initialised)
      throw UserError("AnalysisHandler::init has already been called: cannot re-initialize!");

    /// @todo Should the Rivet analysis objects know about weight names?
    setRunBeams(Rivet::beams(ge));
    MSG_DEBUG("Initialising the analysis handler");
    _eventNumber = ge.event_number();

    setWeightNames(ge);
    if (haveNamedWeights())
      MSG_INFO("Using named weights");
    else
      MSG_INFO("NOT using named weights. Using first weight as nominal weight");

    _eventCounter = CounterPtr(weightNames(), Counter("_EVTCOUNT"));

    // Set the cross section based on what is reported by this event.
    if (ge.cross_section()) {
      MSG_TRACE("Getting cross section.");
      double xs = ge.cross_section()->cross_section();
      double xserr = ge.cross_section()->cross_section_error();
      setCrossSection(xs, xserr);
    }

    // Check that analyses are beam-compatible, and remove those that aren't
    const size_t num_anas_requested = analysisNames().size();
    vector<string> anamestodelete;
    for (const AnaHandle a : analyses()) {
      if (!_ignoreBeams && !a->isCompatible(beams())) {
        //MSG_DEBUG(a->name() << " requires beams " << a->requiredBeams() << " @ " << a->requiredEnergies() << " GeV");
        anamestodelete.push_back(a->name());
      }
    }
    for (const string& aname : anamestodelete) {
      MSG_WARNING("Analysis '" << aname << "' is incompatible with the provided beams: removing");
      removeAnalysis(aname);
    }
    if (num_anas_requested > 0 && analysisNames().empty()) {
      cerr << "All analyses were incompatible with the first event's beams\n"
           << "Exiting, since this probably wasn't intentional!" << endl;
      exit(1);
    }

    // Warn if any analysis' status is not unblemished
    for (const AnaHandle a : analyses()) {
      if ( a->info().preliminary() ) {
        MSG_WARNING("Analysis '" << a->name() << "' is preliminary: be careful, it may change and/or be renamed!");
      } else if ( a->info().obsolete() ) {
        MSG_WARNING("Analysis '" << a->name() << "' is obsolete: please update!");
      } else if ( a->info().unvalidated() ) {
        MSG_WARNING("Analysis '" << a->name() << "' is unvalidated: be careful, it may be broken!");
      }
    }

    // Initialize the remaining analyses
    _stage = Stage::INIT;
    for (AnaHandle a : analyses()) {
      MSG_DEBUG("Initialising analysis: " << a->name());
      try {
        // Allow projection registration in the init phase onwards
        a->_allowProjReg = true;
        a->init();
        //MSG_DEBUG("Checking consistency of analysis: " << a->name());
        //a->checkConsistency();
      } catch (const Error& err) {
        cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
        exit(1);
      }
      MSG_DEBUG("Done initialising analysis: " << a->name());
    }
    _stage = Stage::OTHER;

    _initialised = true;
    MSG_DEBUG("Analysis handler initialised");
  }

  void AnalysisHandler::analyze(const GenEvent& ge) {
    // Call init with event as template if not already initialised
    if (!_initialised) init(ge);
    assert(_initialised);

    // Ensure that beam details match those from the first event (if we're checking beams)
    if ( !_ignoreBeams ) {
      const PdgIdPair beams = Rivet::beamIds(ge);
      const double sqrts = Rivet::sqrtS(ge);
      if (!compatible(beams, _beams) || !fuzzyEquals(sqrts, sqrtS())) {
        cerr << "Event beams mismatch: " << PID::toBeamsString(beams) << " @ " << sqrts/GeV << " GeV"
             << " vs. first beams " << this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl;
        exit(1);
      }
    }
first beams " << this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl; exit(1); } } // Create the Rivet event wrapper /// @todo Filter/normalize the event here bool strip = ( getEnvParam("RIVET_STRIP_HEPMC", string("NOOOO") ) != "NOOOO" ); Event event(ge, strip); // Weights /// @todo Drop this / just report first weight when we support multiweight events _eventcounter.fill(event.weight()); MSG_DEBUG("Event #" << _eventcounter.numEntries() << " weight = " << event.weight()); // Cross-section #if defined ENABLE_HEPMC_3 if (ge.cross_section()) { //@todo HepMC3::GenCrossSection methods aren't const accessible :( RivetHepMC::GenCrossSection gcs = *(event.genEvent()->cross_section()); _xs = gcs.xsec(); _xserr = gcs.xsec_err(); } #elif defined HEPMC_HAS_CROSS_SECTION if (ge.cross_section()) { _xs = ge.cross_section()->cross_section(); _xserr = ge.cross_section()->cross_section_error(); } #endif // Won't happen for first event because _eventNumber is set in init() if (_eventNumber != ge.event_number()) { /// @todo Can we get away with not passing a matrix? MSG_TRACE("AnalysisHandler::analyze(): Pushing _eventCounter to persistent."); _eventCounter.get()->pushToPersistent(_subEventWeights); // if this is indeed a new event, push the temporary // histograms and reset for (const AnaHandle& a : _analyses) { for (auto ao : a->analysisObjects()) { MSG_TRACE("AnalysisHandler::analyze(): Pushing " << a->name() << "'s " << ao->name() << " to persistent."); ao.get()->pushToPersistent(_subEventWeights); } MSG_TRACE("AnalysisHandler::analyze(): finished pushing " << a->name() << "'s objects to persistent."); } _eventNumber = ge.event_number(); MSG_DEBUG("nominal event # " << _eventCounter.get()->_persistent[0]->numEntries()); MSG_DEBUG("nominal sum of weights: " << _eventCounter.get()->_persistent[0]->sumW()); MSG_DEBUG("Event has " << _subEventWeights.size() << " sub events."); _subEventWeights.clear(); } MSG_TRACE("starting new sub event"); _eventCounter.get()->newSubEvent(); for (const AnaHandle& a : _analyses) { for (auto ao : a->analysisObjects()) { ao.get()->newSubEvent(); } } _subEventWeights.push_back(event.weights()); MSG_DEBUG("Analyzing subevent #" << _subEventWeights.size() - 1 << "."); _eventCounter->fill(); // Run the analyses for (AnaHandle a : analyses()) { MSG_TRACE("About to run analysis " << a->name()); try { a->analyze(event); } catch (const Error& err) { cerr << "Error in " << a->name() << "::analyze method: " << err.what() << endl; exit(1); } MSG_TRACE("Finished running analysis " << a->name()); } if ( _dumpPeriod > 0 && numEvents()%_dumpPeriod == 0 ) { MSG_INFO("Dumping intermediate results to " << _dumpFile << "."); _dumping = numEvents()/_dumpPeriod; finalize(); writeData(_dumpFile); _dumping = 0; } } void AnalysisHandler::analyze(const GenEvent* ge) { if (ge == nullptr) { MSG_ERROR("AnalysisHandler received null pointer to GenEvent"); //throw Error("AnalysisHandler received null pointer to GenEvent"); } analyze(*ge); } void AnalysisHandler::finalize() { if (!_initialised) return; MSG_INFO("Finalising analyses"); // First push all analyses' objects to persistent MSG_TRACE("AnalysisHandler::finalize(): Pushing analysis objects to persistent."); _eventCounter.get()->pushToPersistent(_subEventWeights); for (const AnaHandle& a : _analyses) { for (auto ao : a->analysisObjects()) ao.get()->pushToPersistent(_subEventWeights); } // First we make copies of all analysis objects. 
  void AnalysisHandler::finalize() {
    if (!_initialised) return;
    MSG_INFO("Finalising analyses");

    // First push all analyses' objects to persistent
    MSG_TRACE("AnalysisHandler::finalize(): Pushing analysis objects to persistent.");
    _eventCounter.get()->pushToPersistent(_subEventWeights);
    for (const AnaHandle& a : _analyses) {
      for (auto ao : a->analysisObjects())
        ao.get()->pushToPersistent(_subEventWeights);
    }

    // First we make copies of all analysis objects.
    map<string, YODA::AnalysisObjectPtr> backupAOs;
    for (auto ao : getData(false, true, false) )
      backupAOs[ao->path()] = YODA::AnalysisObjectPtr(ao->newclone());

    // Now we run the (re-entrant) finalize() functions for all analyses.
    MSG_INFO("Finalising analyses");
    for (AnaHandle a : analyses()) {
      a->setCrossSection(_xs);
      try {
        if ( !_dumping || a->info().reentrant() ) a->finalize();
        else if ( _dumping == 1 )
          MSG_INFO("Skipping finalize in periodic dump of " << a->name()
                   << " as it is not declared reentrant.");
      } catch (const Error& err) {
        cerr << "Error in " << a->name() << "::finalize method: " << err.what() << endl;
        exit(1);
      }
    }

    // Now we copy all analysis objects to the list of finalized
    // ones, and restore their values to the original ones.
    _finalizedAOs.clear();
    for ( auto ao : getYodaAOs(false, false, false) )
      _finalizedAOs.push_back(YODA::AnalysisObjectPtr(ao->newclone()));
    for ( auto ao : getYodaAOs(false, true, false) ) {
      // TODO: This should be possible to do in a nicer way, with a flag etc.
      if (ao->path().find("/FINAL") != std::string::npos) continue;
      auto aoit = backupAOs.find(ao->path());
      if ( aoit == backupAOs.end() ) {
        AnaHandle ana = analysis(split(ao->path(), "/")[0]);
        if ( ana ) ana->removeAnalysisObject(ao->path());
      } else copyao(aoit->second, ao);
    }

    // Print out number of events processed
    const int nevts = numEvents();
    MSG_INFO("Processed " << nevts << " event" << (nevts != 1 ? "s" : ""));

    // // Delete analyses
    // MSG_DEBUG("Deleting analyses");
    // _analyses.clear();

    // Print out MCnet boilerplate
    cout << endl;
    cout << "The MCnet usage guidelines apply to Rivet: see http://www.montecarlonet.org/GUIDELINES" << endl;
    cout << "Please acknowledge plots made with Rivet analyses, and cite arXiv:1003.0694 (http://arxiv.org/abs/1003.0694)" << endl;
  }

  AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname,
                                                std::map<string, string> pars) {
    // Make an option handle.
    std::string parHandle = "";
    for (map<string, string>::iterator par = pars.begin(); par != pars.end(); ++par) {
      parHandle += ":";
      parHandle += par->first + "=" + par->second;
    }
    return addAnalysis(analysisname + parHandle);
  }

  AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname) {
    // Check for a duplicate analysis
    /// @todo Might we want to be able to run an analysis twice, with different params?
    ///       Requires avoiding histo tree clashes, i.e. storing the histos on the analysis objects.
    string ananame = analysisname;
    vector<string> anaopt = split(analysisname, ":");
    if ( anaopt.size() > 1 ) ananame = anaopt[0];
    AnaHandle analysis( AnalysisLoader::getAnalysis(ananame) );
    if (analysis.get() != 0) { // < Check for null analysis.
      MSG_DEBUG("Adding analysis '" << analysisname << "'");
      map<string, string> opts;
      for ( int i = 1, N = anaopt.size(); i < N; ++i ) {
        vector<string> opt = split(anaopt[i], "=");
        if ( opt.size() != 2 ) {
          MSG_WARNING("Error in option specification. Skipping analysis " << analysisname);
          return *this;
        }
        if ( !analysis->info().validOption(opt[0], opt[1]) ) {
          MSG_WARNING("Cannot set option '" << opt[0] << "' to '" << opt[1]
                      << "'. Skipping analysis " << analysisname);
          return *this;
        }
Skipping analysis " << analysisname); return *this; } opts[opt[0]] = opt[1]; } for ( auto opt: opts) { analysis->_options[opt.first] = opt.second; analysis->_optstring += ":" + opt.first + "=" + opt.second; } for (const AnaHandle& a : analyses()) { if (a->name() == analysis->name() ) { MSG_WARNING("Analysis '" << analysisname << "' already registered: skipping duplicate"); return *this; } } analysis->_analysishandler = this; _analyses[analysisname] = analysis; } else { MSG_WARNING("Analysis '" << analysisname << "' not found."); } // MSG_WARNING(_analyses.size()); // for (const AnaHandle& a : _analyses) MSG_WARNING(a->name()); return *this; } AnalysisHandler& AnalysisHandler::removeAnalysis(const string& analysisname) { MSG_DEBUG("Removing analysis '" << analysisname << "'"); if (_analyses.find(analysisname) != _analyses.end()) _analyses.erase(analysisname); // } return *this; } void AnalysisHandler::addData(const std::vector& aos) { for (const YODA::AnalysisObjectPtr ao : aos) { string path = ao->path(); if ( path.substr(0, 5) != "/RAW/" ) { _orphanedPreloads.push_back(ao); continue; } path = path.substr(4); ao->setPath(path); if (path.size() > 1) { // path > "/" try { const string ananame = ::split(path, "/")[0]; AnaHandle a = analysis(ananame); /// @todo FIXXXXX //MultiweightAOPtr mao = ????; /// @todo generate right Multiweight object from ao //a->addAnalysisObject(mao); /// @todo Need to statistically merge... } catch (const Error& e) { MSG_TRACE("Adding analysis object " << path << " to the list of orphans."); _orphanedPreloads.push_back(ao); } } } } void AnalysisHandler::stripOptions(YODA::AnalysisObjectPtr ao, const vector & delopts) const { string path = ao->path(); string ananame = split(path, "/")[0]; vector anaopts = split(ananame, ":"); for ( int i = 1, N = anaopts.size(); i < N; ++i ) for ( auto opt : delopts ) if ( opt == "*" || anaopts[i].find(opt + "=") == 0 ) path.replace(path.find(":" + anaopts[i]), (":" + anaopts[i]).length(), ""); ao->setPath(path); } + void AnalysisHandler::mergeYodas(const vector & , + const vector & , bool ) {} // void AnalysisHandler::mergeYodas(const vector & aofiles, // const vector & delopts, bool equiv) { // vector< vector > aosv; // vector xsecs; // vector xsecerrs; // vector sows; // set ananames; // _eventCounter->reset(); // // First scan all files and extract analysis objects and add the // // corresponding analyses.. 
  //   for (const string& file : aofiles ) {
  //     Scatter1DPtr xsec;
  //     CounterPtr sow;
  //     // For each file make sure that cross section and sum-of-weights
  //     // objects are present and store all RAW ones in a vector;
  //     vector<YODA::AnalysisObjectPtr> aos;
  //     try {
  //       /// @todo Use new YODA SFINAE to fill the smart ptr vector directly
  //       vector<YODA::AnalysisObject*> aos_raw;
  //       YODA::read(file, aos_raw);
  //       for (YODA::AnalysisObject* aor : aos_raw) {
  //         YODA::AnalysisObjectPtr ao(aor);
  //         if ( ao->path().substr(0, 5) != "/RAW/" ) continue;
  //         ao->setPath(ao->path().substr(4));
  //         if ( ao->path() == "/_XSEC" )
  //           xsec = dynamic_pointer_cast<Scatter1D>(ao);
  //         else if ( ao->path() == "/_EVTCOUNT" )
  //           sow = dynamic_pointer_cast<Counter>(ao);
  //         else {
  //           stripOptions(ao, delopts);
  //           string ananame = split(ao->path(), "/")[0];
  //           if ( ananames.insert(ananame).second ) addAnalysis(ananame);
  //           aos.push_back(ao);
  //         }
  //       }
  //       if ( !xsec || !sow ) {
  //         MSG_ERROR( "Error in AnalysisHandler::mergeYodas: The file " << file
  //                    << " did not contain weights and cross section info.");
  //         exit(1);
  //       }
  //       xsecs.push_back(xsec->point(0).x());
  //       sows.push_back(sow);
  //       xsecerrs.push_back(sqr(xsec->point(0).xErrAvg()));
  //       _eventCounter->operator+=(*sow); //< HAHAHAHAHA!
  //       sows.push_back(sow);
  //       aosv.push_back(aos);
  //     } catch (...) { //< YODA::ReadError&
  //       throw UserError("Unexpected error in reading file: " + file);
  //     }
  //   }

  //   // Now calculate the scale to be applied for all bins in a file
  //   // and get the common cross section and sum of weights.
  //   _xs = _xserr = 0.0;
  //   for ( int i = 0, N = sows.size(); i < N; ++i ) {
  //     double effnent = sows[i]->effNumEntries();
  //     _xs += (equiv ? effnent : 1.0) * xsecs[i];
  //     _xserr += (equiv ? sqr(effnent) : 1.0) * xsecerrs[i];
  //   }
  //   vector<double> scales(sows.size(), 1.0);
  //   if ( equiv ) {
  //     _xs /= _eventCounter.effNumEntries();
  //     _xserr = sqrt(_xserr)/_eventCounter.effNumEntries();
  //   } else {
  //     _xserr = sqrt(_xserr);
  //     for ( int i = 0, N = sows.size(); i < N; ++i )
  //       scales[i] = (_eventCounter.sumW()/sows[i]->sumW())*(xsecs[i]/_xs);
  //   }

  //   // Initialize the analyses allowing them to book analysis objects.
  //   for (AnaHandle a : _analyses) {
  //     MSG_DEBUG("Initialising analysis: " << a->name());
  //     if ( !a->info().reentrant() )
  //       MSG_WARNING("Analysis " << a->name() << " has not been validated to have "
  //                   << "a reentrant finalize method. The result is unpredictable.");
  //     try {
  //       // Allow projection registration in the init phase onwards
  //       a->_allowProjReg = true;
  //       cerr << "sqrtS " << sqrtS() << endl;
  //       a->init();
  //       //MSG_DEBUG("Checking consistency of analysis: " << a->name());
  //       //a->checkConsistency();
  //     } catch (const Error& err) {
  //       cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
  //       exit(1);
  //     }
  //     MSG_DEBUG("Done initialising analysis: " << a->name());
  //   }
  //   _initialised = true;

  //   // Get a list of all analysis objects to handle.
  //   map<string, YODA::AnalysisObjectPtr> current;
  //   for ( auto ao : getData(false, true) ) current[ao->path()] = ao;
  //   // Go through all objects to be merged and add them to current
  //   // after appropriate scaling.
  //   for ( int i = 0, N = aosv.size(); i < N; ++i )
  //     for ( auto ao : aosv[i] ) {
  //       if ( ao->path() == "/_XSEC" || ao->path() == "/_EVTCOUNT" ) continue;
  //       auto aoit = current.find(ao->path());
  //       if ( aoit == current.end() ) {
  //         MSG_WARNING("" << ao->path() << " was not properly booked.");
  //         continue;
  //       }
  //       if ( !addaos(aoit->second, ao, scales[i]) )
  //         MSG_WARNING("Cannot merge objects with path " << ao->path()
  //                     << " of type " << ao->annotation("Type"));
  //     }
  //   // Now we can simply finalize() the analyses, leaving the
  //   // controlling program to write the result out to some YODA file.
  //   finalize();
  // }


  void AnalysisHandler::readData(const string& filename) {
    vector<YODA::AnalysisObjectPtr> aos;
    try {
      /// @todo Use new YODA SFINAE to fill the smart ptr vector directly
      vector<YODA::AnalysisObject*> aos_raw;
      YODA::read(filename, aos_raw);
      for (YODA::AnalysisObject* aor : aos_raw)
        aos.push_back(YODA::AnalysisObjectPtr(aor));
    } catch (...) { //< YODA::ReadError&
      throw UserError("Unexpected error in reading file: " + filename);
    }
    if (!aos.empty()) addData(aos);
  }


  vector<MultiweightAOPtr> AnalysisHandler::getRivetAOs() const {
    vector<MultiweightAOPtr> rtn;
    for (const AnaHandle& a : analyses())
      for (const auto& ao : a->analysisObjects())
        rtn.push_back(ao);
    rtn.push_back(_eventCounter);
    rtn.push_back(_xs);
    return rtn;
  }


  vector<YODA::AnalysisObjectPtr> AnalysisHandler::getYodaAOs(bool includeorphans,
                                                              bool includetmps,
                                                              bool usefinalized) const {
    vector<YODA::AnalysisObjectPtr> rtn;
    if (usefinalized) rtn = _finalizedAOs;
    else {
      for (auto rao : getRivetAOs()) {
        // Need to set the active weight index before we can search the path
        rao.get()->setActiveWeightIdx(_defaultWeightIdx);
        // Exclude paths from final write-out if they contain a "TMP" layer (i.e. matching "/TMP/")
        if (!includetmps && rao->path().find("/TMP/") != string::npos) continue;
        for (size_t iW = 0; iW < numWeights(); iW++) {
          rao.get()->setActiveWeightIdx(iW);
          rtn.push_back(rao.get()->activeYODAPtr());
        }
      }
    }
    // Sort histograms alphanumerically by path before write-out
    sort(rtn.begin(), rtn.end(),
         [](YODA::AnalysisObjectPtr a, YODA::AnalysisObjectPtr b) {
           return a->path() < b->path();
         });
    return rtn;
  }


  vector<YODA::AnalysisObjectPtr> AnalysisHandler::getData(bool includeorphans,
                                                           bool includetmps,
                                                           bool usefinalized) const {
    return getYodaAOs(includeorphans, includetmps, usefinalized);
  }
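  // For orientation, writeData() below nominally emits each finalized object
  // plus a "/RAW"-prefixed clone of its unfinalized counterpart, with weight
  // variations tagged in brackets. Hypothetical paths:
  //
  //   /MY_ANALYSIS/d01-x01-y01          <- finalized, nominal weight
  //   /MY_ANALYSIS/d01-x01-y01[Var1]    <- finalized, variation "Var1"
  //   /RAW/MY_ANALYSIS/d01-x01-y01      <- raw fill-level object, mergeable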
  void AnalysisHandler::writeData(const string& filename) const {
    vector<YODA::AnalysisObjectPtr> out = _finalizedAOs;
    set<string> finalana;
    for ( auto ao : out ) finalana.insert(ao->path());
    out.reserve(2*out.size());
    vector<YODA::AnalysisObjectPtr> aos = getData(false, true);
    for ( auto ao : aos ) {
      ao = YODA::AnalysisObjectPtr(ao->newclone());
      ao->setPath("/RAW" + ao->path());
      out.push_back(ao);
    }
    try {
      YODA::write(filename, out.begin(), out.end());
    } catch (...) { //< YODA::WriteError&
      throw UserError("Unexpected error in writing file: " + filename);
    }
  }


  string AnalysisHandler::runName() const { return _runname; }

  size_t AnalysisHandler::numEvents() const { return _eventCounter->numEntries(); }


  std::vector<std::string> AnalysisHandler::analysisNames() const {
    std::vector<std::string> rtn;
    for (AnaHandle a : analyses()) rtn.push_back(a->name());
    return rtn;
  }


  AnalysisHandler& AnalysisHandler::addAnalyses(const std::vector<std::string>& analysisnames) {
    for (const string& aname : analysisnames) {
      //MSG_DEBUG("Adding analysis '" << aname << "'");
      addAnalysis(aname);
    }
    return *this;
  }


  AnalysisHandler& AnalysisHandler::removeAnalyses(const std::vector<std::string>& analysisnames) {
    for (const string& aname : analysisnames) removeAnalysis(aname);
    return *this;
  }


  AnalysisHandler& AnalysisHandler::setCrossSection(double xs, double xserr) {
    _xs = Scatter1DPtr(weightNames(), Scatter1D("_XSEC"));
    _eventCounter.get()->setActiveWeightIdx(_defaultWeightIdx);
    double nomwgt = sumW();
    // The cross-section of each weight variation is the nominal cross-section
    // times sumW(variation) / sumW(nominal), so the cross-section scales
    // correctly for every weight stream.
    for (size_t iW = 0; iW < numWeights(); iW++) {
      _eventCounter.get()->setActiveWeightIdx(iW);
      double s = sumW() / nomwgt;
      _xs.get()->setActiveWeightIdx(iW);
      _xs->addPoint(xs*s, xserr*s);
    }
    _eventCounter.get()->unsetActiveWeight();
    _xs.get()->unsetActiveWeight();
    return *this;
  }


  bool AnalysisHandler::hasCrossSection() const {
    return (!std::isnan(crossSection()));
  }


  AnalysisHandler& AnalysisHandler::addAnalysis(Analysis* analysis) {
    analysis->_analysishandler = this;
    // _analyses.insert(AnaHandle(analysis));
    _analyses[analysis->name()] = AnaHandle(analysis);
    return *this;
  }


  PdgIdPair AnalysisHandler::beamIds() const { return Rivet::beamIds(beams()); }

  double AnalysisHandler::sqrtS() const { return Rivet::sqrtS(beams()); }

  void AnalysisHandler::setIgnoreBeams(bool ignore) { _ignoreBeams = ignore; }

}
diff --git a/src/Tools/RivetYODA.cc b/src/Tools/RivetYODA.cc
--- a/src/Tools/RivetYODA.cc
+++ b/src/Tools/RivetYODA.cc
@@ -1,415 +1,428 @@
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Tools/RivetYODA.hh"
#include "Rivet/Tools/RivetPaths.hh"
#include "YODA/ReaderYODA.h"
#include "YODA/ReaderAIDA.h"
// use execinfo for backtrace if available
#include "DummyConfig.hh"
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif

using namespace std;

namespace Rivet {

  template <class T>
  Wrapper<T>::~Wrapper() {}


  template <class T>
  Wrapper<T>::Wrapper(const vector<string>& weightNames, const T& p) {
    for (const string& weightname : weightNames) {
      _persistent.push_back(make_shared<T>(p));
+     _final.push_back(make_shared<T>(p));
      auto obj = _persistent.back();
-     if (weightname != "")
-       obj->setPath(obj->path() + "[" + weightname + "]");
+     auto final = _final.back();
+     if (weightname != "") {
+       obj->setPath("/RAW" + obj->path() + "[" + weightname + "]");
+       final->setPath(final->path() + "[" + weightname + "]");
+     }
    }
  }


  template <class T>
  typename T::Ptr Wrapper<T>::active() const {
    if ( !_active ) {
#ifdef HAVE_BACKTRACE
      void* buffer[4];
      backtrace(buffer, 4);
      backtrace_symbols_fd(buffer, 4, 1);
#endif
      assert(false && "No active pointer set. Was this object booked in init()?");
    }
    return _active;
  }
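  // The assertion in active() fires when an analysis object is filled without
  // having been booked. A minimal sketch of the failing pattern (hypothetical
  // analysis code):
  //
  //   void init() { /* book(_h, "d01-x01-y01", 10, 0., 1.);  <- forgotten */ }
  //   void analyze(const Event&) {
  //     _h->fill(0.5); // Wrapper::active() has no pointer -> assert + backtrace
  //   }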
  template <class T>
  void Wrapper<T>::newSubEvent() {
    typename TupleWrapper<T>::Ptr tmp =
      make_shared<TupleWrapper<T>>(_persistent[0]->clone());
    tmp->reset();
    _evgroup.push_back( tmp );
    _active = _evgroup.back();
    assert(_active);
  }


  string getDatafilePath(const string& papername) {
    /// Try to find YODA, otherwise fall back to trying AIDA
    const string path1 = findAnalysisRefFile(papername + ".yoda");
    if (!path1.empty()) return path1;
    const string path2 = findAnalysisRefFile(papername + ".aida");
    if (!path2.empty()) return path2;
    throw Rivet::Error("Couldn't find ref data file '" + papername + ".yoda'" +
                       " in data path, '" + getRivetDataPath() + "', or '.'");
  }


  map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername) {
    const string datafile = getDatafilePath(papername);
    // Make an appropriate data file reader and read the data objects
    /// @todo Remove AIDA support some day...
    YODA::Reader& reader = (datafile.find(".yoda") != string::npos) ?
      YODA::ReaderYODA::create() : YODA::ReaderAIDA::create();
    vector<YODA::AnalysisObject*> aovec;
    reader.read(datafile, aovec);
    // Return value, to be populated
    map<string, YODA::AnalysisObjectPtr> rtn;
    for ( YODA::AnalysisObject* ao : aovec ) {
      YODA::AnalysisObjectPtr refdata(ao);
      if (!refdata) continue;
      const string plotpath = refdata->path();
      // Split the path at "/" and only use the last field, i.e. the histogram ID
      const size_t slashpos = plotpath.rfind("/");
      const string plotname = (slashpos+1 < plotpath.size()) ? plotpath.substr(slashpos+1) : "";
      rtn[plotname] = refdata;
    }
    return rtn;
  }

}


namespace {

  using Rivet::Fill;
  using Rivet::Fills;
  using Rivet::TupleWrapper;

  template <class T>
  double get_window_size(const typename T::Ptr& histo,
                         typename T::BinType x) {
    // The bin index we fall into
    const auto binidx = histo->binIndexAt(x);
    // Gaps, overflow and underflow don't contribute
    if ( binidx == -1 ) return 0;
    const auto& b = histo->bin(binidx);
    // If we don't have a valid neighbouring bin, use infinite width
    typename T::Bin b1(-1.0/0.0, 1.0/0.0);
    // Points in the top half compare to the upper neighbour
    if ( x > b.xMid() ) {
      size_t nextidx = binidx + 1;
      if ( nextidx < histo->bins().size() ) b1 = histo->bin(nextidx);
    } else {
      // Compare to the lower neighbour
      int nextidx = binidx - 1;
      if ( nextidx >= 0 ) b1 = histo->bin(nextidx);
    }
    // The factor 2 is arbitrary; could possibly be smaller
    return min( b.width(), b1.width() ) / 2.0;
  }


  template <class T>
  typename T::BinType fillT2binT(typename T::FillType a) { return a; }

  template <>
  YODA::Profile1D::BinType fillT2binT<YODA::Profile1D>(YODA::Profile1D::FillType a) {
    return get<0>(a);
  }

  template <>
  YODA::Profile2D::BinType fillT2binT<YODA::Profile2D>(YODA::Profile2D::FillType a) {
    return YODA::Profile2D::BinType{ get<0>(a), get<1>(a) };
  }
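  // Worked example for get_window_size(), with an assumed binning: for edges
  // {0, 1, 2, 4} and a fill at x = 1.8, the fill lands in bin [1,2) and sits
  // above the bin midpoint 1.5, so the upper neighbour [2,4) is consulted:
  //
  //   min(width([1,2)), width([2,4))) / 2 = min(1, 2) / 2 = 0.5
  //
  // i.e. each sub-event fill is smeared over a half-window of 0.5 around x.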
  template <class T>
  void commit(vector<typename T::Ptr>& persistent,
              const vector< vector<Fill<T>> >& tuple,
              const vector< valarray<double> >& weights) {
    // TODO: check whether all the x values are in the same bin anyway;
    // then no windowing would be needed.
    assert(persistent.size() == weights[0].size());
    for ( const auto& x : tuple ) {
      double maxwindow = 0.0;
      for ( const auto& xi : x ) {
        // TODO: check for NOFILL here.
        // persistent[0] has the same binning as all the other persistent objects
        double window = get_window_size<T>(persistent[0], fillT2binT<T>(xi.first));
        if ( window > maxwindow ) maxwindow = window;
      }
      const double wsize = maxwindow; // all windows have the same size
      set<double> edgeset;            // sub-window edges need to be in here!
      for ( const auto& xi : x ) {
        edgeset.insert(fillT2binT<T>(xi.first) - wsize);
        edgeset.insert(fillT2binT<T>(xi.first) + wsize);
      }
      vector< std::tuple<double, valarray<double>, double> > hfill;
      double sumf = 0.0;
      auto edgit = edgeset.begin();
      double ehi = *edgit;
      while ( ++edgit != edgeset.end() ) {
        double elo = ehi;
        ehi = *edgit;
        valarray<double> sumw(0.0, persistent.size()); // need m copies of this
        bool gap = true; // check for gaps between the sub-windows
        for ( size_t i = 0; i < x.size(); ++i ) {
          // check equals comparisons here!
          if ( fillT2binT<T>(x[i].first) + wsize >= ehi &&
               fillT2binT<T>(x[i].first) - wsize <= elo ) {
            sumw += x[i].second * weights[i];
            gap = false;
          }
        }
        if ( gap ) continue;
        hfill.push_back( make_tuple( (ehi + elo)/2.0, sumw, (ehi - elo) ) );
        sumf += ehi - elo;
      }
      for ( auto f : hfill )
        for ( size_t m = 0; m < persistent.size(); ++m )
          // Note the scaling to one single fill
          persistent[m]->fill( get<0>(f), get<1>(f)[m], get<2>(f)/sumf );
    }
  }

  template <>
  void commit<YODA::Histo2D>(vector<YODA::Histo2D::Ptr>& persistent,
                             const vector< vector<Fill<YODA::Histo2D>> >& tuple,
                             const vector< valarray<double> >& weights) {}

  template <>
  void commit<YODA::Profile2D>(vector<YODA::Profile2D::Ptr>& persistent,
                               const vector< vector<Fill<YODA::Profile2D>> >& tuple,
                               const vector< valarray<double> >& weights) {}


  template <class T>
  double distance(T a, T b) { return abs(a - b); }

  template <>
  double distance<tuple<double,double>>(tuple<double,double> a, tuple<double,double> b) {
    return Rivet::sqr(get<0>(a) - get<0>(b)) + Rivet::sqr(get<1>(a) - get<1>(b));
  }

}


namespace Rivet {

  bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) {
    for (const std::string& a : src->annotations())
      dst->setAnnotation(a, src->annotation(a));
    if ( aocopy<YODA::Counter>(src,dst) )   return true;
    if ( aocopy<YODA::Histo1D>(src,dst) )   return true;
    if ( aocopy<YODA::Histo2D>(src,dst) )   return true;
    if ( aocopy<YODA::Profile1D>(src,dst) ) return true;
    if ( aocopy<YODA::Profile2D>(src,dst) ) return true;
    if ( aocopy<YODA::Scatter1D>(src,dst) ) return true;
    if ( aocopy<YODA::Scatter2D>(src,dst) ) return true;
    if ( aocopy<YODA::Scatter3D>(src,dst) ) return true;
    return false;
  }

  bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) {
    if ( aoadd<YODA::Counter>(dst,src,scale) )   return true;
    if ( aoadd<YODA::Histo1D>(dst,src,scale) )   return true;
    if ( aoadd<YODA::Histo2D>(dst,src,scale) )   return true;
    if ( aoadd<YODA::Profile1D>(dst,src,scale) ) return true;
    if ( aoadd<YODA::Profile2D>(dst,src,scale) ) return true;
    return false;
  }

}
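// Minimal usage sketch for the type-erased helpers above (the objects are
// assumed for illustration, not taken from the surrounding code):
//
//   YODA::AnalysisObjectPtr a = make_shared<YODA::Histo1D>(10, 0., 1., "/h");
//   YODA::AnalysisObjectPtr b = make_shared<YODA::Histo1D>(10, 0., 1., "/h");
//   copyao(a, b);      // b becomes a copy of a, annotations included
//   addaos(b, a, 0.5); // b += 0.5*a, since the concrete types match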
namespace {

  /// fills is a vector of sub-events, each with an ordered set of x-values of
  /// the fills in that sub-event. NOFILL should be an "impossible" value for
  /// this histogram. Returns a vector of sub-events with an ordered vector of
  /// fills (including NOFILLs) for each sub-event.
  template <class T>
  vector< vector<Fill<T>> >
  match_fills(const vector<typename TupleWrapper<T>::Ptr>& evgroup, const Fill<T>& NOFILL) {
    vector< vector<Fill<T>> > matched;
    // First just copy subevents into vectors and find the longest vector.
    unsigned int maxfill = 0; // length of biggest vector
    int imax = 0;             // index position of biggest vector
    for ( const auto& it : evgroup ) {
      const auto& subev = it->fills();
      if ( subev.size() > maxfill ) {
        maxfill = subev.size();
        imax = matched.size();
      }
      matched.push_back(vector<Fill<T>>(subev.begin(), subev.end()));
    }
    // Now go through all subevents with missing fills.
    const vector<Fill<T>>& full = matched[imax]; // the longest one
    for ( auto& subev : matched ) {
      if ( subev.size() == maxfill ) continue;
      // Add NOFILLs to the end, then iterate from the back and shift each
      // fill backwards by swapping with NOFILLs, so that it best matches
      // the corresponding entry of the full subevent.
      while ( subev.size() < maxfill ) subev.push_back(NOFILL);
      for ( int i = maxfill - 1; i >= 0; --i ) {
        if ( subev[i] == NOFILL ) continue;
        size_t j = i;
        while ( j + 1 < maxfill && subev[j + 1] == NOFILL &&
                distance(fillT2binT<T>(subev[j].first),
                         fillT2binT<T>(full[j].first)) >
                distance(fillT2binT<T>(subev[j].first),
                         fillT2binT<T>(full[j + 1].first)) ) {
          swap(subev[j], subev[j + 1]);
          ++j;
        }
      }
    }
    // Transpose, so the outer index runs over matched fill positions.
    vector<vector<Fill<T>>> result(maxfill, vector<Fill<T>>(matched.size()));
    for (size_t i = 0; i < matched.size(); ++i)
      for (size_t j = 0; j < maxfill; ++j)
        result.at(j).at(i) = matched.at(i).at(j);
    return result;
  }

}
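// Toy illustration of match_fills() (values assumed): three sub-events with
// fills at x = {1.0, 2.0}, {1.1} and {0.9, 2.1} are padded and aligned to
//
//   {1.0, 2.0}, {1.1, NOFILL}, {0.9, 2.1}
//
// and then transposed, so that commit() receives the correlated columns
// {1.0, 1.1, 0.9} and {2.0, NOFILL, 2.1}, one weight valarray per sub-event.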
|| cmp(0.6, 0.5) = " << cs << '\n'; assert(cs == CmpState::GT); return EXIT_SUCCESS; } diff --git a/test/testNaN.cc b/test/testNaN.cc --- a/test/testNaN.cc +++ b/test/testNaN.cc @@ -1,74 +1,74 @@ #include "Rivet/AnalysisHandler.hh" #include "Rivet/Analysis.hh" #include "Rivet/Tools/RivetYODA.hh" #include #include #include using namespace std; class Test : public Rivet::Analysis { public: Test() : Analysis("Test") {} void init() { book(_h_test, "test", 50, 66.0, 116.0); } void analyze(const Rivet::Event & e) { cout << "Normal fill" << endl; _h_test->fill(90., 1.); cout << "Underflow fill" << endl; _h_test->fill(30.,1.); cout << "Overflow fill" << endl; _h_test->fill(130.,1.); cout << "Inf fill" << endl; try { _h_test->fill(numeric_limits::infinity(), 1.); - } catch (YODA::RangeError e) { + } catch (YODA::RangeError & e) { cerr << e.what() << '\n'; if ( string(e.what()) != string("X is Inf") ) throw; } cout << "NaN fill" << endl; try { _h_test->fill(numeric_limits::quiet_NaN(), 1.); - } catch (YODA::RangeError e) { + } catch (YODA::RangeError & e) { cerr << e.what() << '\n'; if ( string(e.what()) != string("X is NaN") ) throw; } } private: Rivet::Histo1DPtr _h_test; }; DECLARE_RIVET_PLUGIN(Test); int main(int argc, char* argv[]) { assert(argc > 1); Rivet::AnalysisHandler rivet; rivet.addAnalysis("Test"); std::ifstream file("testApi.hepmc"); shared_ptr reader = Rivet::HepMCUtils::makeReader(file); std::shared_ptr evt = make_shared(); double sum_of_weights = 0.0; while ( Rivet::HepMCUtils::readEvent(reader, evt) ) { // Analyse current event rivet.analyze(*evt); sum_of_weights += evt->weights()[0]; } file.close(); rivet.setCrossSection(1.0, 0.1); rivet.finalize(); rivet.writeData("NaN.aida"); return 0; }