diff --git a/analyses/pluginALICE/ALICE_2010_I880049.cc b/analyses/pluginALICE/ALICE_2010_I880049.cc --- a/analyses/pluginALICE/ALICE_2010_I880049.cc +++ b/analyses/pluginALICE/ALICE_2010_I880049.cc @@ -1,104 +1,96 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Projections/SingleValueProjection.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/AliceCommon.hh" -#include - -#define _USE_MATH_DEFINES -#include namespace Rivet { - - // Analysis + + class ALICE_2010_I880049 : public Analysis { - public: - + DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2010_I880049); - + /// Book histograms and initialise projections before the run void init() { - + // Declare centrality projection declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M", "V0M"); - + // Charged final states with |eta| < 0.5 and pT > 50 MeV const Cut& cut = Cuts::abseta < 0.5 && Cuts::pT > 50*MeV; const ChargedFinalState cfs(cut); - addProjection(cfs,"CFS"); - + declare(cfs,"CFS"); + // Trigger projections - declare(ChargedFinalState((Cuts::eta > 2.8 && Cuts::eta < 5.1) && + declare(ChargedFinalState((Cuts::eta > 2.8 && Cuts::eta < 5.1) && Cuts::pT > 0.1*GeV), "VZERO1"); declare(ChargedFinalState((Cuts::eta > -3.7 && Cuts::eta < -1.7) && Cuts::pT > 0.1*GeV), "VZERO2"); declare(ChargedFinalState(Cuts::abseta < 1. && Cuts::pT > 0.15*GeV), "SPD"); - + // Primary particles declare(ALICE::PrimaryParticles(Cuts::abseta < 5.6),"APRIM"); - + // Histograms and variables initialization - _histNchVsCentr = bookProfile1D(1, 1, 1); - _histNpartVsCentr = bookProfile1D(1, 1, 2); - + book(_histNchVsCentr, 1, 1, 1); + book(_histNpartVsCentr, 1, 1, 2); + } /// Perform the per-event analysis void analyze(const Event& event) { - + // Trigger projections - const ChargedFinalState& vz1 = + const ChargedFinalState& vz1 = applyProjection(event,"VZERO1"); - const ChargedFinalState& vz2 = + const ChargedFinalState& vz2 = applyProjection(event,"VZERO2"); - const ChargedFinalState& spd = + const ChargedFinalState& spd = applyProjection(event,"SPD"); int fwdTrig = (vz1.particles().size() > 0 ? 1 : 0); int bwdTrig = (vz2.particles().size() > 0 ? 1 : 0); int cTrig = (spd.particles().size() > 0 ? 1 : 0); - + if (fwdTrig + bwdTrig + cTrig < 2) vetoEvent; - - const CentralityProjection& centrProj = + + const CentralityProjection& centrProj = apply(event, "V0M"); double centr = centrProj(); if (centr > 80.) vetoEvent; // Calculate number of charged particles and fill histogram double nch = 0.; //primary.particles().size(); for (const auto& p : applyProjection(event,"APRIM").particles()) { if(p.abscharge() > 0) nch++; } - _histNchVsCentr->fill(centr, nch, event.weight()); - + _histNchVsCentr->fill(centr, nch); + // Attempt to extract Npart form GenEvent. 
TODO: Unclear how to handle this // in HepMC3 const HepMC::HeavyIon* hi = event.genEvent()->heavy_ion(); - if (hi && hi->is_valid()) - _histNpartVsCentr->fill(centr, hi->Npart_proj() + hi->Npart_targ(), - event.weight()); - } - - /// Normalise histograms etc., after the run - void finalize() { - + if (hi && hi->is_valid()) + _histNpartVsCentr->fill(centr, hi->Npart_proj() + hi->Npart_targ()); } + /// Normalise histograms etc., after the run + // void finalize() { } + private: - + Profile1DPtr _histNchVsCentr; Profile1DPtr _histNpartVsCentr; - + }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2010_I880049); } diff --git a/analyses/pluginALICE/ALICE_2012_I1126966.cc b/analyses/pluginALICE/ALICE_2012_I1126966.cc --- a/analyses/pluginALICE/ALICE_2012_I1126966.cc +++ b/analyses/pluginALICE/ALICE_2012_I1126966.cc @@ -1,151 +1,148 @@ //-*- C++ -*- #include "Rivet/Projections/CentralityProjection.hh" #include "Rivet/Projections/AliceCommon.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Tools/Cuts.hh" namespace Rivet { - - + + /// Pion, Kaon, and Proton Production in 0-5% /// central Pb--Pb Collisions at 2.76 TeV class ALICE_2012_I1126966 : public Analysis { public: - + /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2012_I1126966); - + /// Book histograms and initialise projections before the run void init() { // Particles of interest. - declare(ALICE::PrimaryParticles(Cuts::absrap < 0.5),"CFS"); + declare(ALICE::PrimaryParticles(Cuts::absrap < 0.5),"CFS"); // The event trigger. declare(ALICE::V0AndTrigger(), "V0-AND"); // The centrality projection. declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M", "V0M"); // Invariant pT distributions. - _histPtPi = bookHisto1D("d01-x01-y01"); //pi+ - _histPtPibar = bookHisto1D("d01-x01-y02");// pi- - _histPtKaon = bookHisto1D("d02-x01-y01"); //K+ - _histPtKaonbar = bookHisto1D("d02-x01-y02"); //K- - _histPtProton = bookHisto1D("d03-x01-y01"); //P+ - _histPtProtonbar = bookHisto1D("d03-x01-y02"); //P- + book(_histPtPi, "d01-x01-y01"); //pi+ + book(_histPtPibar, "d01-x01-y02");// pi- + book(_histPtKaon, "d02-x01-y01"); //K+ + book(_histPtKaonbar, "d02-x01-y02"); //K- + book(_histPtProton, "d03-x01-y01"); //P+ + book(_histPtProtonbar, "d03-x01-y02"); //P- // Yield histograms. - _histNpi = bookHisto1D("d04-x01-y01"); - _histNpibar = bookHisto1D("d04-x01-y02"); - _histNKaon = bookHisto1D("d04-x01-y03"); - _histNKaonbar = bookHisto1D("d04-x01-y04"); - _histNproton = bookHisto1D("d04-x01-y05"); - _histNprotonbar =bookHisto1D("d04-x01-y06"); + book(_histNpi, "d04-x01-y01"); + book(_histNpibar, "d04-x01-y02"); + book(_histNKaon, "d04-x01-y03"); + book(_histNKaonbar, "d04-x01-y04"); + book(_histNproton, "d04-x01-y05"); + book(_histNprotonbar, "d04-x01-y06"); // Sum of weights of triggered events. - sow = bookCounter("sow"); + book(sow, "sow"); + } - } - - /// Perform the per-event analysis - + + /// Perform the per-event analysis void analyze(const Event& event) { - // Event weight. - const double weight = event.weight(); // Analysis only considers 0-5% central events if (apply(event,"V0M")() > 5.0) vetoEvent; // Event trigger. if (!apply(event, "V0-AND")() ) vetoEvent; - - sow->fill(weight); + + sow->fill(); // ID particles counters for this event. int Npi=0; int Npibar=0; int NKaon=0; int NKaonbar=0; int Nproton=0; int Nprotonbar=0; - for (const auto& p : + for (const auto& p : apply(event,"CFS").particles()) { - const double pWeight = weight / p.pT() / 2. / M_PI; + const double pWeight = 1 / p.pT() / 2. 
/ M_PI; switch (p.pid()) { case 211: // pi+ Npi++; _histPtPi->fill(p.pT()/GeV, pWeight); break; case -211: //pi- Npibar++; _histPtPibar->fill(p.pT()/GeV, pWeight); break; case 2212: // proton Nproton++; _histPtProton->fill(p.pT()/GeV, pWeight); break; case -2212: // p-bar Nprotonbar++; _histPtProtonbar->fill(p.pT()/GeV, pWeight); break; case 321: // K+ NKaon++; _histPtKaon->fill(p.pT()/GeV, pWeight); break; case -321: // K- NKaonbar++; _histPtKaonbar->fill(p.pT()/GeV, pWeight); break; } } // Particle loop ends. // Fill yield histograms. - _histNpi->fill(Npi, weight); - _histNpibar->fill(Npibar, weight); - _histNKaon->fill(NKaon, weight); - _histNKaonbar->fill(NKaonbar, weight); - _histNproton->fill(Nproton, weight); - _histNprotonbar->fill(Nprotonbar, weight); + _histNpi->fill(Npi); + _histNpibar->fill(Npibar); + _histNKaon->fill(NKaon); + _histNKaonbar->fill(NKaonbar); + _histNproton->fill(Nproton); + _histNprotonbar->fill(Nprotonbar); } void finalize() { const double s = 1./sow->sumW(); _histPtPi->scaleW(s); _histPtPibar->scaleW(s); _histPtKaon->scaleW(s); _histPtKaonbar->scaleW(s); _histPtProton->scaleW(s); _histPtProtonbar->scaleW(s); _histNpi->scaleW(s); _histNpibar->scaleW(s); _histNKaon->scaleW(s); _histNKaonbar->scaleW(s); _histNproton->scaleW(s); _histNprotonbar->scaleW(s); } - + private: - + // pT histograms Histo1DPtr _histPtPi; Histo1DPtr _histPtKaon; Histo1DPtr _histPtProton; Histo1DPtr _histPtPibar; Histo1DPtr _histPtKaonbar; Histo1DPtr _histPtProtonbar; Histo1DPtr _histNpi; Histo1DPtr _histNpibar; Histo1DPtr _histNKaon; Histo1DPtr _histNKaonbar; Histo1DPtr _histNproton; Histo1DPtr _histNprotonbar; CounterPtr sow; }; - - + + // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2012_I1126966); - - + + } diff --git a/analyses/pluginALICE/ALICE_2012_I1127497.cc b/analyses/pluginALICE/ALICE_2012_I1127497.cc --- a/analyses/pluginALICE/ALICE_2012_I1127497.cc +++ b/analyses/pluginALICE/ALICE_2012_I1127497.cc @@ -1,180 +1,173 @@ // -*- C++ -*- #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Projections/SingleValueProjection.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/AliceCommon.hh" -#include - -#define _USE_MATH_DEFINES -#include namespace Rivet { - /// Analysis + class ALICE_2012_I1127497 : public Analysis { - public: - + DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2012_I1127497); - + /// Book histograms and initialise projections before the run void init() { - + // Declare centrality projection if (getOption("cent") == "REF") { _mode = 1; declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M", "V0M"); } else if (getOption("cent") == "IMP") { _mode = 2; declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M", "V0M_IMP"); } - + // Charged final states with |eta| < 0.5 and pT > 150 MeV const Cut& cut = Cuts::abseta < 0.5 && Cuts::pT > 150*MeV; const ChargedFinalState cfs(cut); - addProjection(cfs,"CFS"); - + declare(cfs,"CFS"); + // Loop over all histograms for (size_t ihist = 0; ihist < NHISTOS; ++ihist) { - - // Initialize PbPb objects - _histNch[PBPB][ihist] = bookHisto1D(ihist+1, 1, 1); - + + // Initialize PbPb objects + book(_histNch[PBPB][ihist], ihist+1, 1, 1); + std::string nameCounterPbPb = "Counter_PbPb_" + std::to_string(ihist); - _counterSOW[PBPB][ihist] = bookCounter(nameCounterPbPb, "Sum of weights counter for PbPb"); - + book(_counterSOW[PBPB][ihist], nameCounterPbPb, "Sum of weights counter for PbPb"); + std::string nameCounterNcoll = 
"Counter_Ncoll_" + std::to_string(ihist); - _counterNcoll[ihist] = bookCounter(nameCounterNcoll, "Ncoll counter for PbPb"); - - // Initialize pp objects. In principle, only one pp histogram would be needed since - // centrality does not make any difference here. However, in some cases in this analysis + book(_counterNcoll[ihist], nameCounterNcoll, "Ncoll counter for PbPb"); + + // Initialize pp objects. In principle, only one pp histogram would be needed since + // centrality does not make any difference here. However, in some cases in this analysis // the binning differ from each other, so this is easy-to-implement way to account for that. std::string namePP = _histNch[PBPB][ihist]->name() + "-pp"; - _histNch[PP][ihist] = bookHisto1D(namePP, refData(ihist+1, 1, 1)); // binning taken from ref data - + book(_histNch[PP][ihist], namePP, refData(ihist+1, 1, 1)); // binning taken from ref data + std::string nameCounterpp = "Counter_pp_" + std::to_string(ihist); - _counterSOW[PP][ihist] = bookCounter(nameCounterpp, "Sum of weights counter for pp"); - + book(_counterSOW[PP][ihist], nameCounterpp, "Sum of weights counter for pp"); + } - - // Centrality regions keeping boundaries for a certain region. + + // Centrality regions keeping boundaries for a certain region. // Note, that some regions overlap with other regions. _centrRegions.clear(); _centrRegions = {{0., 5.}, {5., 10.}, {10., 20.}, {20., 30.}, {30., 40.}, {40., 50.}, {50., 60.}, {60., 70.}, {70., 80.}, {0., 10.}, {0., 20.}, {20., 40.}, {40., 60.}, {40., 80.}, {60., 80.}}; - + } /// Perform the per-event analysis void analyze(const Event& event) { - - const double weight = event.weight(); - + // Final state particles with at least pT = 150 MeV in eta range of |eta| < 0.5 const ChargedFinalState& charged = applyProjection(event, "CFS"); Particles chargedParticles = charged.particlesByPt(); - - // Check type of event. This may be not the perfect way to check for the type of event as - // there might be some weird conditions hidden inside. For example some HepMC versions check + + // Check type of event. This may be not the perfect way to check for the type of event as + // there might be some weird conditions hidden inside. For example some HepMC versions check // if number of hard collisions is equal to 0 and assign 'false' in that case, which is usually wrong. // This might be changed in the future if (event.genEvent()->heavy_ion()) { - + // Prepare centrality projection and value const CentralityProjection& centrProj = apply(event, (_mode == 1 ? "V0M" : "V0M_IMP")); double centr = centrProj(); // Veto event for too large centralities since those are not used in the analysis at all if ((centr < 0.) || (centr > 80.)) vetoEvent; - + // Fill the right PbPb histograms and add weights based on centrality value for (size_t ihist = 0; ihist < NHISTOS; ++ihist) { if (inRange(centr, _centrRegions[ihist].first, _centrRegions[ihist].second)) { - _counterSOW[PBPB][ihist]->fill(weight); - _counterNcoll[ihist]->fill(event.genEvent()->heavy_ion()->Ncoll(), weight); - foreach (const Particle& p, chargedParticles) { + _counterSOW[PBPB][ihist]->fill(); + _counterNcoll[ihist]->fill(event.genEvent()->heavy_ion()->Ncoll()); + for (const Particle& p : chargedParticles) { float pT = p.pT()/GeV; if (pT < 50.) 
{ double pTAtBinCenter = _histNch[PBPB][ihist]->binAt(pT).xMid(); - _histNch[PBPB][ihist]->fill(pT, weight/pTAtBinCenter); + _histNch[PBPB][ihist]->fill(pT, 1/pTAtBinCenter); } } } } - + } else { - + // Fill all pp histograms and add weights for (size_t ihist = 0; ihist < NHISTOS; ++ihist) { - _counterSOW[PP][ihist]->fill(weight); - foreach (const Particle& p, chargedParticles) { + _counterSOW[PP][ihist]->fill(); + for (const Particle& p : chargedParticles) { float pT = p.pT()/GeV; if (pT < 50.) { double pTAtBinCenter = _histNch[PP][ihist]->binAt(pT).xMid(); - _histNch[PP][ihist]->fill(pT, weight/pTAtBinCenter); + _histNch[PP][ihist]->fill(pT, 1/pTAtBinCenter); } } } - + } - + } /// Normalise histograms etc., after the run void finalize() { - + // Right scaling of the histograms with their individual weights. for (size_t itype = 0; itype < EVENT_TYPES; ++itype ) { for (size_t ihist = 0; ihist < NHISTOS; ++ihist) { if (_counterSOW[itype][ihist]->sumW() > 0.) { scale(_histNch[itype][ihist], (1./_counterSOW[itype][ihist]->sumW() / 2. / M_PI)); } } } - + // Postprocessing of the histograms for (size_t ihist = 0; ihist < NHISTOS; ++ihist) { // If there are entires in histograms for both beam types if (_histNch[PP][ihist]->numEntries() > 0 && _histNch[PBPB][ihist]->numEntries() > 0) { // Initialize and fill R_AA histograms - _histRAA[ihist] = bookScatter2D(ihist+16, 1, 1); + book(_histRAA[ihist], ihist+16, 1, 1); divide(_histNch[PBPB][ihist], _histNch[PP][ihist], _histRAA[ihist]); - // Scale by Ncoll. Unfortunately some generators does not provide Ncoll (eg. JEWEL), + // Scale by Ncoll. Unfortunately some generators does not provide Ncoll (eg. JEWEL), // so the following scaling will be done only if there are entries in the counters if (_counterNcoll[ihist]->sumW() > 1e-6 && _counterSOW[PBPB][ihist]->sumW() > 1e-6) { _histRAA[ihist]->scaleY(1. 
/ (_counterNcoll[ihist]->sumW() / _counterSOW[PBPB][ihist]->sumW())); } } } - + } - + private: - + static const int NHISTOS = 15; static const int EVENT_TYPES = 2; static const int PP = 0; static const int PBPB = 1; - + Histo1DPtr _histNch[EVENT_TYPES][NHISTOS]; CounterPtr _counterSOW[EVENT_TYPES][NHISTOS]; CounterPtr _counterNcoll[NHISTOS]; - + Scatter2DPtr _histRAA[NHISTOS]; std::vector> _centrRegions; - + int _mode; - + }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2012_I1127497); } diff --git a/analyses/pluginALICE/ALICE_2012_I930312.cc b/analyses/pluginALICE/ALICE_2012_I930312.cc --- a/analyses/pluginALICE/ALICE_2012_I930312.cc +++ b/analyses/pluginALICE/ALICE_2012_I930312.cc @@ -1,335 +1,326 @@ // -*- C++ -*- #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Projections/SingleValueProjection.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/AliceCommon.hh" -#include - -#define _USE_MATH_DEFINES -#include namespace Rivet { - - /// Analysis + + class ALICE_2012_I930312 : public Analysis { - public: - + DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2012_I930312); - + /// Book histograms and initialise projections before the run void init() { // Declare centrality projection - declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", - "V0M", "V0M"); + declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M", "V0M"); - // Charged final states with |eta| < 1.0 and 8 < pT < 15 GeV/c for + // Charged final states with |eta| < 1.0 and 8 < pT < 15 GeV/c for // trigger particles - const Cut& cutTrigger = - Cuts::abseta < 1.0 && Cuts::pT > 8*GeV && Cuts::pT < 15*GeV; + const Cut& cutTrigger = Cuts::abseta < 1.0 && Cuts::pT > 8*GeV && Cuts::pT < 15*GeV; const ChargedFinalState cfsTrigger(cutTrigger); - addProjection(cfsTrigger,"CFSTrigger"); - + declare(cfsTrigger,"CFSTrigger"); + // Set limit values of pT bins pt_limits[0] = 3.; pt_limits[1] = 4.; pt_limits[2] = 6.; pt_limits[3] = 8.; pt_limits[4] = 10.; - - // Charged final states with |eta| < 1.0 and different pT bins for + + // Charged final states with |eta| < 1.0 and different pT bins for // associated particles. 
for (int ipt = 0; ipt < PT_BINS; ipt++) { - Cut mycut = Cuts::abseta < 1.0 && Cuts::pT > pt_limits[ipt]*GeV && + Cut mycut = Cuts::abseta < 1.0 && Cuts::pT > pt_limits[ipt]*GeV && Cuts::pT < pt_limits[ipt + 1]*GeV; declare(ChargedFinalState(mycut), "CFSAssoc" + std::to_string(ipt)); } - + // Create event strings event_string[0] = "pp"; event_string[1] = "central"; event_string[2] = "peripheral"; event_string[3] = "other"; - + // For each event type for (int itype = 0; itype < EVENT_TYPES; itype++) { // For each pT range for (int ipt = 0; ipt < PT_BINS; ipt++) { // Initialize yield histograms - _histYield[itype][ipt] = bookHisto1D("Yield_" + event_string[itype] - + "_" + std::to_string(ipt), 36, -0.5 * M_PI, 1.5 * M_PI, - "Associated particle per trigger particle yield", + book(_histYield[itype][ipt], "Yield_" + event_string[itype] + + "_" + std::to_string(ipt), 36, -0.5 * M_PI, 1.5 * M_PI, + "Associated particle per trigger particle yield", "$\\Delta\\eta$ (rad)", "$1 / N_{trig} dN_{assoc} / d\\Delta\\eta$ (rad$^-1$)"); - _histYieldBkgRemoved[itype][ipt] = bookHisto1D("Yield_" + + book(_histYieldBkgRemoved[itype][ipt], "Yield_" + event_string[itype] + "_nobkg_" + std::to_string(ipt), 36, -0.5*M_PI, - 1.5 * M_PI, "Associated particle per trigger particle yield no bkg", + 1.5 * M_PI, "Associated particle per trigger particle yield no bkg", "$\\Delta\\eta$ (rad)", "$1 / N_{trig} dN_{assoc} / d\\Delta\\eta$ (rad$^-1$)"); } } - + // Histogram for counting trigger particles for each event type - _histTriggerCounter = bookHisto1D("Trigger", EVENT_TYPES, 0.0, + book(_histTriggerCounter, "Trigger", EVENT_TYPES, 0.0, EVENT_TYPES, "Trigger counter", "event type", "N"); - + } /// Perform the per-event analysis void analyze(const Event& event) { - - const double weight = event.weight(); - + // Create charged final state for trigger particle - const ChargedFinalState& triggerFinalState = + const ChargedFinalState& triggerFinalState = applyProjection(event, "CFSTrigger"); Particles triggerParticles = triggerFinalState.particlesByPt(); - // Create charged final state for associated particle + // Create charged final state for associated particle ChargedFinalState associatedFinalState[PT_BINS]; Particles associatedParticles[PT_BINS]; for (int ipt = 0; ipt < PT_BINS; ipt++) { - associatedFinalState[ipt] = applyProjection(event, + associatedFinalState[ipt] = applyProjection(event, "CFSAssoc" + std::to_string(ipt)); associatedParticles[ipt] = associatedFinalState[ipt].particlesByPt(); } - + // Check event type if (event.genEvent()->heavy_ion()) { // Prepare centrality projection and value - const CentralityProjection& centrProj = + const CentralityProjection& centrProj = apply(event, "V0M"); double centr = centrProj(); - + // Set the flag for the type of the event if (centr > 0.0 && centr < 5.0) event_type = 1; // PbPb, central else if (centr > 60.0 && centr < 90.0) event_type = 2; // PbPb, peripherial else event_type = 3; // PbPb, other } else { event_type = 0; // pp } - + // Veto event if not valid event type if (event_type == 3) vetoEvent; - + // Fill trigger histogram for a proper event type _histTriggerCounter->fill(event_type, triggerParticles.size()); - + // Loop over trigger particles for (const auto& triggerParticle : triggerParticles) { // For each pt bin for (int ipt = 0; ipt < PT_BINS; ipt++) { // Loop over associated particles for (const auto& associatedParticle : associatedParticles[ipt]) { // If associated and trigger particle are not the same particles. 
if (associatedParticle != triggerParticle) { // Test trigger particle. if (triggerParticle.pt() > associatedParticle.pt()) { // Calculate delta phi in range (-0.5*PI, 1.5*PI). - double dPhi = triggerParticle.phi() - + double dPhi = triggerParticle.phi() - associatedParticle.phi(); while (dPhi > 1.5 * M_PI) { dPhi -= 2 * M_PI; } while (dPhi < -0.5 * M_PI) { dPhi += 2 * M_PI; } // Fill yield histogram for calculated delta phi - _histYield[event_type][ipt]->fill(dPhi, weight); + _histYield[event_type][ipt]->fill(dPhi); } } } } } } /// Normalise histograms etc., after the run void finalize() { - + // Check for the reentrant finalize bool pp_available = false, PbPb_available = false; reentrant_flag = false; // For each event type for (int itype = 0; itype < EVENT_TYPES; itype++) { // For each pT range for (int ipt = 0; ipt < PT_BINS; ipt++) { if (_histYield[itype][ipt]->numEntries() > 0) itype == 0 ? pp_available = true : PbPb_available = true; } } reentrant_flag = (pp_available && PbPb_available); - + // Postprocessing of the histograms if (reentrant_flag == true) { - + // Initialize IAA and ICP histograms - _histIAA[0] = bookScatter2D(1, 1, 1); - _histIAA[1] = bookScatter2D(2, 1, 1); - _histIAA[2] = bookScatter2D(5, 1, 1); + book(_histIAA[0], 1, 1, 1); + book(_histIAA[1], 2, 1, 1); + book(_histIAA[2], 5, 1, 1); - _histIAA[3] = bookScatter2D(3, 1, 1); - _histIAA[4] = bookScatter2D(4, 1, 1); - _histIAA[5] = bookScatter2D(6, 1, 1); + book(_histIAA[3], 3, 1, 1); + book(_histIAA[4], 4, 1, 1); + book(_histIAA[5], 6, 1, 1); // Variables for near and away side peak calculation double nearSide[EVENT_TYPES][PT_BINS] = { {0.0} }; double awaySide[EVENT_TYPES][PT_BINS] = { {0.0} }; - + // Variables for background error calculation double background[EVENT_TYPES][PT_BINS] = { {0.0} }; double backgroundError[EVENT_TYPES][PT_BINS] = { {0.0} }; - + // Variables for integration error calculation double scalingFactor[EVENT_TYPES] = {0.0}; double numberOfEntries[EVENT_TYPES][PT_BINS][2] = { { {0.0} } }; int numberOfBins[EVENT_TYPES][PT_BINS][2] = { { {0} } }; - + // For each event type for (int itype = 0; itype < EVENT_TYPES; itype++) { // For each pT range for (int ipt = 0; ipt < PT_BINS; ipt++) { // Check if histograms are fine - if (_histTriggerCounter->numEntries() == 0 || + if (_histTriggerCounter->numEntries() == 0 || _histYield[itype][ipt]->numEntries() == 0) { cout << "There are no entries in one of the histograms" << endl; continue; } - + // Scale yield histogram if ((_histTriggerCounter->bin(itype).sumW() != 0)) { - scalingFactor[itype] = 1. / + scalingFactor[itype] = 1. / _histTriggerCounter->bin(itype).sumW(); - scale(_histYield[itype][ipt], + scale(_histYield[itype][ipt], (1. / _histTriggerCounter->bin(itype).sumW())); } - + // Calculate background double sum = 0.0; int nbins = 0; for (unsigned int ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) { - if ((_histYield[itype][ipt]->bin(ibin).xMid() > (-0.5 * M_PI) && + if ((_histYield[itype][ipt]->bin(ibin).xMid() > (-0.5 * M_PI) && _histYield[itype][ipt]->bin(ibin).xMid() < (-0.5 * M_PI + 0.4)) || (_histYield[itype][ipt]->bin(ibin).xMid() > (0.5 * M_PI - 0.4) && _histYield[itype][ipt]->bin(ibin).xMid() < (0.5 * M_PI + 0.4)) || (_histYield[itype][ipt]->bin(ibin).xMid() > (1.5 * M_PI - 0.4) && _histYield[itype][ipt]->bin(ibin).xMid() < (1.5 * M_PI))) { sum += _histYield[itype][ipt]->bin(ibin).sumW(); nbins++; } } if (nbins == 0) { std::cout << "Failed to estimate background!" 
<< std::endl; continue; } background[itype][ipt] = sum / nbins; - + // Calculate background error sum = 0.0; nbins = 0; for (unsigned int ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) { if (_histYield[itype][ipt]->bin(ibin).xMid() > (0.5 * M_PI - 0.4) && _histYield[itype][ipt]->bin(ibin).xMid() < (0.5 * M_PI + 0.4)) { sum += (_histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]) * (_histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]); nbins++; } } backgroundError[itype][ipt] = sqrt(sum / (nbins - 1)); - + // Fill histograms with removed background for (unsigned int ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) { - _histYieldBkgRemoved[itype][ipt]->fillBin(ibin, + _histYieldBkgRemoved[itype][ipt]->fillBin(ibin, _histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]); } - + // Integrate near-side yield unsigned int lowerBin = _histYield[itype][ipt]->binIndexAt(-0.7 + 0.02); unsigned int upperBin = _histYield[itype][ipt]->binIndexAt( 0.7 - 0.02) + 1; nbins = upperBin - lowerBin; numberOfBins[itype][ipt][0] = nbins; nearSide[itype][ipt] = _histYield[itype][ipt]->integralRange(lowerBin, upperBin) - nbins * background[itype][ipt]; numberOfEntries[itype][ipt][0] = _histYield[itype][ipt]->integralRange( lowerBin, upperBin) * _histTriggerCounter->bin(itype).sumW(); - + // Integrate away-side yield lowerBin = _histYield[itype][ipt]->binIndexAt(M_PI - 0.7 + 0.02); upperBin = _histYield[itype][ipt]->binIndexAt(M_PI + 0.7 - 0.02) + 1; nbins = upperBin - lowerBin; numberOfBins[itype][ipt][1] = nbins; awaySide[itype][ipt] = _histYield[itype][ipt]->integralRange( lowerBin, upperBin) - nbins * background[itype][ipt]; - numberOfEntries[itype][ipt][1] = + numberOfEntries[itype][ipt][1] = _histYield[itype][ipt]->integralRange(lowerBin, upperBin) * _histTriggerCounter->bin(itype).sumW(); - + } } - + // Variables for IAA/ICP plots double dI = 0.0; int near = 0; int away = 1; double xval[PT_BINS] = { 3.5, 5.0, 7.0, 9.0 }; double xerr[PT_BINS] = { 0.5, 1.0, 1.0, 1.0 }; - + int types1[3] = {1, 2, 1}; int types2[3] = {0, 0, 2}; // Fill IAA/ICP plots for near side peak for (int ihist = 0; ihist < 3; ihist++) { int type1 = types1[ihist]; int type2 = types2[ihist]; for (int ipt = 0; ipt < PT_BINS; ipt++) { - dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][near] + - scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][near] * - nearSide[type1][ipt] * nearSide[type1][ipt] / (nearSide[type2][ipt] * nearSide[type2][ipt]) + + dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][near] + + scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][near] * + nearSide[type1][ipt] * nearSide[type1][ipt] / (nearSide[type2][ipt] * nearSide[type2][ipt]) + numberOfBins[type1][ipt][near] * numberOfBins[type1][ipt][near] * backgroundError[type1][ipt] * - backgroundError[type1][ipt] + numberOfBins[type2][ipt][near] * numberOfBins[type2][ipt][near] * - backgroundError[type2][ipt] * backgroundError[type2][ipt] * nearSide[type1][ipt] * + backgroundError[type1][ipt] + numberOfBins[type2][ipt][near] * numberOfBins[type2][ipt][near] * + backgroundError[type2][ipt] * backgroundError[type2][ipt] * nearSide[type1][ipt] * nearSide[type1][ipt] / (nearSide[type2][ipt] * nearSide[type2][ipt]); dI = sqrt(dI)/nearSide[type2][ipt]; _histIAA[ihist]->addPoint(xval[ipt], nearSide[type1][ipt] / nearSide[type2][ipt], xerr[ipt], dI); } } - + // Fill IAA/ICP plots for away side peak for (int ihist = 
0; ihist < 3; ihist++) { int type1 = types1[ihist]; int type2 = types2[ihist]; for (int ipt = 0; ipt < PT_BINS; ipt++) { - dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][away] + - scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][away] * - awaySide[type1][ipt] * awaySide[type1][ipt] / (awaySide[type2][ipt] * awaySide[type2][ipt]) + + dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][away] + + scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][away] * + awaySide[type1][ipt] * awaySide[type1][ipt] / (awaySide[type2][ipt] * awaySide[type2][ipt]) + numberOfBins[type1][ipt][away] * numberOfBins[type1][ipt][away] * backgroundError[type1][ipt] * backgroundError[type1][ipt] + numberOfBins[type2][ipt][away] * numberOfBins[type2][ipt][away] * - backgroundError[type2][ipt] * backgroundError[type2][ipt] * awaySide[type1][ipt] * + backgroundError[type2][ipt] * backgroundError[type2][ipt] * awaySide[type1][ipt] * awaySide[type1][ipt] / (awaySide[type2][ipt] * awaySide[type2][ipt]); dI = sqrt(dI)/awaySide[type2][ipt]; _histIAA[ihist + 3]->addPoint(xval[ipt], awaySide[type1][ipt] / awaySide[type2][ipt], xerr[ipt], dI); } } } - + } - + private: - + static const int PT_BINS = 4; static const int EVENT_TYPES = 3; Histo1DPtr _histYield[EVENT_TYPES][PT_BINS]; Histo1DPtr _histYieldBkgRemoved[EVENT_TYPES][PT_BINS]; Histo1DPtr _histTriggerCounter; Scatter2DPtr _histIAA[6]; double pt_limits[5]; int event_type; string event_string[EVENT_TYPES + 1]; bool reentrant_flag; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2012_I930312); } diff --git a/analyses/pluginALICE/ALICE_2013_I1225979.cc b/analyses/pluginALICE/ALICE_2013_I1225979.cc --- a/analyses/pluginALICE/ALICE_2013_I1225979.cc +++ b/analyses/pluginALICE/ALICE_2013_I1225979.cc @@ -1,106 +1,105 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/AliceCommon.hh" namespace Rivet { /// @brief ALICE PbPb at 2.76 TeV eta distributions. class ALICE_2013_I1225979 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2013_I1225979); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // Centrality projection. declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M","V0M"); // Projections for the 2-out-of-3 trigger. - declare(ChargedFinalState( (Cuts::eta > 2.8 && Cuts::eta < 5.1) && + declare(ChargedFinalState( (Cuts::eta > 2.8 && Cuts::eta < 5.1) && Cuts::pT > 0.1*GeV), "VZERO1"); declare(ChargedFinalState( (Cuts::eta > -3.7 && Cuts::eta < -1.7) && Cuts::pT > 0.1*GeV), "VZERO2"); declare(ChargedFinalState(Cuts::abseta < 1. && Cuts::pT > 0.15*GeV), "SPD"); // Primary particles. declare(ALICE::PrimaryParticles(Cuts::abseta < 5.6),"APRIM"); - + // The centrality bins upper bin edges. centralityBins = { 5., 10., 20., 30. }; // Centrality histograms and corresponding sow counters. for (int i = 0; i < 4; ++i) { - histEta[centralityBins[i]] = bookHisto1D(1, 1, i + 1); - sow[centralityBins[i]] = bookCounter("sow_" + toString(i)); + book(histEta[centralityBins[i]], 1, 1, i + 1); + book(sow[centralityBins[i]], "sow_" + toString(i)); } } /// Perform the per-event analysis void analyze(const Event& event) { - const double weight = event.weight(); // Trigger projections. 
- const ChargedFinalState& vz1 = + const ChargedFinalState& vz1 = applyProjection(event,"VZERO1"); - const ChargedFinalState& vz2 = + const ChargedFinalState& vz2 = applyProjection(event,"VZERO2"); - const ChargedFinalState& spd = + const ChargedFinalState& spd = applyProjection(event,"SPD"); int fwdTrig = (vz1.particles().size() > 0 ? 1 : 0); int bwdTrig = (vz2.particles().size() > 0 ? 1 : 0); int cTrig = (spd.particles().size() > 0 ? 1 : 0); - + if (fwdTrig + bwdTrig + cTrig < 2) vetoEvent; // We must have direct acces to the centrality projection. const CentralityProjection& cent = apply(event,"V0M"); double c = cent(); // Find the correct centrality histogram auto hItr = histEta.upper_bound(c); if (hItr == histEta.end()) return; // Find the correct sow. auto sItr = sow.upper_bound(c); if (sItr == sow.end()) return; - sItr->second->fill(weight); + sItr->second->fill(); // Fill the histograms. - for ( const auto& p : + for ( const auto& p : applyProjection(event,"APRIM").particles() ) - if(p.abscharge() > 0) hItr->second->fill(p.eta(), weight); + if(p.abscharge() > 0) hItr->second->fill(p.eta()); } /// Normalise histograms etc., after the run void finalize() { for (int i = 0; i < 4; ++i) histEta[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]->sumW()); - + } //@} /// @name Histograms //@{ vector centralityBins; map histEta; map sow; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2013_I1225979); } diff --git a/analyses/pluginALICE/ALICE_2014_I1243865.cc b/analyses/pluginALICE/ALICE_2014_I1243865.cc --- a/analyses/pluginALICE/ALICE_2014_I1243865.cc +++ b/analyses/pluginALICE/ALICE_2014_I1243865.cc @@ -1,134 +1,141 @@ // -*- C++ -*- #include "Rivet/Projections/CentralityProjection.hh" #include "Rivet/Projections/AliceCommon.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Tools/Cuts.hh" namespace Rivet { + + /// @brief Multi Strange Baryon production at mid rapidity in + /// 2.76 TeV Pb--Pb collisions for different centralities. class ALICE_2014_I1243865 : public Analysis { - // @brief Multi Strange Baryon production at mid rapidity in - // 2.76 TeV Pb--Pb collisions for different centralities. public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2014_I1243865); - + // Book histograms and projections etc. void init() { // Particles of interest. - declare(ALICE::PrimaryParticles(Cuts::absrap < 0.5),"CFS"); + declare(ALICE::PrimaryParticles(Cuts::absrap < 0.5),"CFS"); // The event trigger. declare(ALICE::V0AndTrigger(), "V0-AND"); // The centrality projection. declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M", "V0M"); // Xi Baryons. + _histPtXi.resize(5); + _histPtXi_bar.resize(5); + size_t ixi = 0; for (string str : {"d01-","d02-","d03-","d04-","d05-"}){ - _histPtXi.push_back(bookHisto1D(str+"x01-y01")); - _histPtXi_bar.push_back(bookHisto1D(str+"x01-y02")); + book(_histPtXi[ixi], str+"x01-y01"); + book(_histPtXi_bar[ixi], str+"x01-y02"); + ixi += 1; } - + // Omega Baryons. + size_t iom = 0; + _histPtOmega.resize(5); + _histPtOmega_bar.resize(5); for (string str : {"d06-","d07-","d08-","d09-","d10-"}){ - _histPtOmega.push_back(bookHisto1D(str+"x01-y01")); - _histPtOmega_bar.push_back(bookHisto1D(str+"x01-y02")); + book(_histPtOmega[iom], str+"x01-y01"); + book(_histPtOmega_bar[iom], str+"x01-y02"); + iom += 1; } // Sum of weights for the centrality intervals. 
+ sow.resize(_histPtOmega.size()); for (int i = 0, N = _histPtOmega.size(); i < N; ++i) { - sow.push_back(bookCounter("sow_" + toString(i))); + book(sow[i], "sow_" + toString(i)); } - _histXitoPi = bookProfile1D("d14-x01-y01"); - _histOmegatoPi = bookProfile1D("d14-x01-y02"); + book(_histXitoPi, "d14-x01-y01"); + book(_histOmegatoPi, "d14-x01-y02"); } + void analyze(const Event& event) { - // Event weight. - const double weight = event.weight(); // Event trigger. if (!apply(event, "V0-AND")() ) vetoEvent; - - // Centrality. + + // Centrality. const CentralityProjection& cent = apply(event,"V0M"); const double c = cent(); - + int centralityclass = -1; if(c > 0. && c <= 10) centralityclass = 0; if(c > 10. && c <= 20) centralityclass = 1; if(c > 20. && c <= 40) centralityclass = 2; if(c > 40. && c <= 60) centralityclass = 3; if(c > 60. && c <= 80) centralityclass = 4; if (centralityclass == -1) vetoEvent; // Fill sum of weights - sow[centralityclass]->fill(weight); + sow[centralityclass]->fill(); int nPions = 0; int nXi = 0; int nOmega = 0; - for (const auto& p : + for (const auto& p : apply(event,"CFS").particles()) { const double pT = p.pT() / GeV; switch (p.pid()){ case 211: nPions++; break; case 3312: - _histPtXi[centralityclass]->fill(pT,weight); + _histPtXi[centralityclass]->fill(pT); nXi++; break; case -3312: - _histPtXi_bar[centralityclass]->fill(pT,weight); + _histPtXi_bar[centralityclass]->fill(pT); nXi++; break; case 3334: - _histPtOmega[centralityclass]->fill(pT,weight); + _histPtOmega[centralityclass]->fill(pT); nOmega++; break; case -3334: - _histPtOmega_bar[centralityclass]->fill(pT,weight); + _histPtOmega_bar[centralityclass]->fill(pT); nOmega++; break; } } // Extract Npart form GenEvent. TODO: Unclear how to do // this in HepMC3 const HepMC::HeavyIon* hi = event.genEvent()->heavy_ion(); if (hi && nPions != 0){ const double npart = hi->Npart_proj() + hi->Npart_targ(); - if (nXi != 0) - _histXitoPi->fill(npart, double(nXi) / double(nPions), weight); - if (nOmega != 0) - _histOmegatoPi->fill(npart, double(nOmega) / double(nPions), - weight); + if (nXi != 0) + _histXitoPi->fill(npart, double(nXi) / double(nPions)); + if (nOmega != 0) + _histOmegatoPi->fill(npart, double(nOmega) / double(nPions)); } } void finalize() { - + for (int i = 0, N = _histPtOmega.size(); i < N; ++i) { const double s = 1./sow[i]->sumW(); _histPtXi[i]->scaleW(s); _histPtXi_bar[i]->scaleW(s); _histPtOmega[i]->scaleW(s); _histPtOmega_bar[i]->scaleW(s); } } private: vector _histPtXi; vector _histPtXi_bar; vector _histPtOmega; vector _histPtOmega_bar; vector sow; Profile1DPtr _histXitoPi; Profile1DPtr _histOmegatoPi; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2014_I1243865); } - diff --git a/analyses/pluginALICE/ALICE_2014_I1244523.cc b/analyses/pluginALICE/ALICE_2014_I1244523.cc --- a/analyses/pluginALICE/ALICE_2014_I1244523.cc +++ b/analyses/pluginALICE/ALICE_2014_I1244523.cc @@ -1,279 +1,278 @@ // -*- C++ -*- #include "Rivet/Projections/CentralityProjection.hh" #include "Rivet/Projections/AliceCommon.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Tools/Cuts.hh" namespace Rivet { /// @brief Identified particles in p--Pb @ 5 TeV class ALICE_2014_I1244523 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2014_I1244523); /// @name Analysis methods //@{ int profileIndex(vector cBins, double c) { int index = 100; if (c > 0 && c <= cBins[0]) return cBins.size() - 1; for (size_t i = 0; i < cBins.size() - 1; ++i) { if (c > cBins[i] && c <= cBins[i + 1]) { 
index = i; break; - } + } } // Catch low fluctuation. return max(0, int(cBins.size() - index - 2)); } void scaleHisto(Histo1DPtr h) { vector& bins = h->bins(); for (vector::iterator b = bins.begin(); b != bins.end(); ++b) { b->scaleW(1./b->width()/b->xMid()); } } /// Book histograms and initialise projections before the run void init() { // The centrality projection. declareCentrality(ALICE::V0AMultiplicity(), "ALICE_2015_PPBCentrality", "V0A", "V0A"); // Define the cuts for the analysis: // pPb Collision has a centre of mass system shift of +0.465 // They study -0.5 < yCoM < 0.0 -> -0.035 < y < 0.465 const Cut& cut = Cuts::rap < 0.035 && Cuts::rap > -0.465; //const Cut& cut = Cuts::rap > -0.035 && Cuts::rap < 0.465; const ALICE::PrimaryParticles fs(cut); - addProjection(fs,"FS"); + declare(fs,"FS"); // The event trigger. declare(ALICE::V0AndTrigger(), "V0-AND"); // The centrality bins centralityBins = {5.,10.,20.,40.,60.,80.,100.}; for (int i = 0; i < 4; ++i) { // First we book the invariant spectra. - _histPipT[centralityBins[i]] = bookHisto1D(1, 1, 1 + i); - if (i < 3) _histPipT[centralityBins[i + 4]] = bookHisto1D(2, 1, 1 + i); - _histKpT[centralityBins[i]] = bookHisto1D(3, 1, 1 + i); - if (i < 3) _histKpT[centralityBins[i + 4]] = bookHisto1D(4, 1, 1 + i); - _histK0SpT[centralityBins[i]] = bookHisto1D(5, 1, 1 + i); - if (i < 3) _histK0SpT[centralityBins[i + 4]] = bookHisto1D(6, 1, 1 + i); - _histProtonpT[centralityBins[i]] = bookHisto1D(7, 1, 1 + i); - if (i < 3) _histProtonpT[centralityBins[i + 4]] = bookHisto1D(8, 1, 1 + i); - _histLambdapT[centralityBins[i]] = bookHisto1D(9, 1, 1 + i); - if (i < 3) _histLambdapT[centralityBins[i + 4]] = bookHisto1D(10, 1, 1 + i); - // The associated sow counters. - _sow[centralityBins[i]] = bookCounter("TMP/sow" + toString(i)); - if (i < 3) _sow[centralityBins[i + 4]] = bookCounter("TMP/sow" + toString(i + 4)); + book(_histPipT[centralityBins[i]], 1, 1, 1 + i); + if (i < 3) book(_histPipT[centralityBins[i + 4]], 2, 1, 1 + i); + book(_histKpT[centralityBins[i]], 3, 1, 1 + i); + if (i < 3) book(_histKpT[centralityBins[i + 4]], 4, 1, 1 + i); + book(_histK0SpT[centralityBins[i]], 5, 1, 1 + i); + if (i < 3) book(_histK0SpT[centralityBins[i + 4]], 6, 1, 1 + i); + book(_histProtonpT[centralityBins[i]], 7, 1, 1 + i); + if (i < 3) book(_histProtonpT[centralityBins[i + 4]], 8, 1, 1 + i); + book(_histLambdapT[centralityBins[i]], 9, 1, 1 + i); + if (i < 3) book(_histLambdapT[centralityBins[i + 4]], 10, 1, 1 + i); + // The associated sow counters. + book(_sow[centralityBins[i]], "TMP/sow" + toString(i)); + if (i < 3) book(_sow[centralityBins[i + 4]], "TMP/sow" + toString(i + 4)); // Then the pi spectra going into the centrality dependent pT ratios. 
- _tmpPi4KpT[centralityBins[i]] = bookHisto1D("TMP/NPi4K" + toString(i), refData(11, 1, 1 + i)); - if (i < 3) _tmpPi4KpT[centralityBins[i + 4]] = bookHisto1D("TMP/NPi4K" + toString(i + 4), refData(12, 1, 1 + i)); - _tmpPi4PpT[centralityBins[i]] = bookHisto1D("TMP/NPi4P" + toString(i), refData(13, 1, 1 + i)); - if (i < 3) _tmpPi4PpT[centralityBins[i + 4]] = bookHisto1D("TMP/NPi4P" + toString(i + 4), refData(14, 1, 1 + i)); - _tmpK4LpT[centralityBins[i]] = bookHisto1D("TMP/NK4L" + toString(i), refData(15, 1, 1 + i)); - if (i < 3) _tmpK4LpT[centralityBins[i + 4]] = bookHisto1D("TMP/NK4L" + toString(i + 4), refData(16, 1, 1 + i)); + book(_tmpPi4KpT[centralityBins[i]], "TMP/NPi4K" + toString(i), refData(11, 1, 1 + i)); + if (i < 3) book(_tmpPi4KpT[centralityBins[i + 4]], "TMP/NPi4K" + toString(i + 4), refData(12, 1, 1 + i)); + book(_tmpPi4PpT[centralityBins[i]], "TMP/NPi4P" + toString(i), refData(13, 1, 1 + i)); + if (i < 3) book(_tmpPi4PpT[centralityBins[i + 4]], "TMP/NPi4P" + toString(i + 4), refData(14, 1, 1 + i)); + book(_tmpK4LpT[centralityBins[i]], "TMP/NK4L" + toString(i), refData(15, 1, 1 + i)); + if (i < 3) book(_tmpK4LpT[centralityBins[i + 4]], "TMP/NK4L" + toString(i + 4), refData(16, 1, 1 + i)); // Then the rest of the spectra going into the cent. dep't pT ratios. - _tmpKpT[centralityBins[i]] = bookHisto1D("TMP/NK" + toString(i), refData(11, 1, 1 + i)); - if (i < 3) _tmpKpT[centralityBins[i + 4]] = bookHisto1D("TMP/NK" + toString(i + 4), refData(12, 1, 1 + i)); - _tmpProtonpT[centralityBins[i]] = bookHisto1D("TMP/NP" + toString(i), refData(13, 1, 1 + i)); - if (i < 3) _tmpProtonpT[centralityBins[i + 4]] = bookHisto1D("TMP/NP" + toString(i + 4), refData(14, 1, 1 + i)); - _tmpLambdapT[centralityBins[i]] = bookHisto1D("TMP/NL" + toString(i), refData(15, 1, 1 + i)); - if (i < 3) _tmpLambdapT[centralityBins[i + 4]] = bookHisto1D("TMP/NL" + toString(i + 4), refData(16, 1, 1 + i)); - // Then the centrality dependent pT ratios. - _ratioKPi[centralityBins[i]] = bookScatter2D(11, 1, 1 + i, true); - if (i < 3) _ratioKPi[centralityBins[i + 4]] = bookScatter2D(12, 1, 1 + i, true); - _ratioPPi[centralityBins[i]] = bookScatter2D(13, 1, 1 + i, true); - if (i < 3) _ratioPPi[centralityBins[i + 4]] = bookScatter2D(14, 1, 1 + i, true); - _ratioLK[centralityBins[i]] = bookScatter2D(15, 1, 1 + i, true); - if (i < 3) _ratioLK[centralityBins[i + 4]] = bookScatter2D(16, 1, 1 + i, true); + book(_tmpKpT[centralityBins[i]], "TMP/NK" + toString(i), refData(11, 1, 1 + i)); + if (i < 3) book(_tmpKpT[centralityBins[i + 4]], "TMP/NK" + toString(i + 4), refData(12, 1, 1 + i)); + book(_tmpProtonpT[centralityBins[i]], "TMP/NP" + toString(i), refData(13, 1, 1 + i)); + if (i < 3) book(_tmpProtonpT[centralityBins[i + 4]], "TMP/NP" + toString(i + 4), refData(14, 1, 1 + i)); + book(_tmpLambdapT[centralityBins[i]], "TMP/NL" + toString(i), refData(15, 1, 1 + i)); + if (i < 3) book(_tmpLambdapT[centralityBins[i + 4]], "TMP/NL" + toString(i + 4), refData(16, 1, 1 + i)); + // Then the centrality dependent pT ratios. + book(_ratioKPi[centralityBins[i]], 11, 1, 1 + i, true); + if (i < 3) book(_ratioKPi[centralityBins[i + 4]], 12, 1, 1 + i, true); + book(_ratioPPi[centralityBins[i]], 13, 1, 1 + i, true); + if (i < 3) book(_ratioPPi[centralityBins[i + 4]], 14, 1, 1 + i, true); + book(_ratioLK[centralityBins[i]], 15, 1, 1 + i, true); + if (i < 3) book(_ratioLK[centralityBins[i + 4]], 16, 1, 1 + i, true); } - // Mean pT vs. multiplicity class. 
- _histLambdaMeanpT = bookProfile1D(17, 1, 1); - _histProtonMeanpT = bookProfile1D(18, 1, 1); - _histK0SMeanpT = bookProfile1D(19, 1, 1); - _histKMeanpT = bookProfile1D(20, 1, 1); - _histPiMeanpT = bookProfile1D(21, 1, 1); - + // Mean pT vs. multiplicity class. + book(_histLambdaMeanpT, 17, 1, 1); + book(_histProtonMeanpT, 18, 1, 1); + book(_histK0SMeanpT, 19, 1, 1); + book(_histKMeanpT, 20, 1, 1); + book(_histPiMeanpT, 21, 1, 1); + // Yield ratios. - _histKtoPiYield = bookScatter2D(22, 1, 1, true); - _histProtontoPiYield = bookScatter2D(22, 1, 2, true); - _histLambdatoPiYield = bookScatter2D(22, 1, 3, true); + book(_histKtoPiYield, 22, 1, 1, true); + book(_histProtontoPiYield, 22, 1, 2, true); + book(_histLambdatoPiYield, 22, 1, 3, true); - _histKYield = bookProfile1D("TMP/KY", refData(22,1,1)); - _histProtonYield = bookProfile1D("TMP/PrY",refData(22,1,2)); - _histLambdaYield = bookProfile1D("TMP/LY", refData(22,1,3)); - _histPiYield = bookProfile1D("TMP/PiY",refData(22,1,1)); - _histPi4LYield = bookProfile1D("TMP/PiLY",refData(22,1,3)); // HepData entry is wrong -- look in the paper. + book(_histKYield, "TMP/KY", refData(22,1,1)); + book(_histProtonYield, "TMP/PrY",refData(22,1,2)); + book(_histLambdaYield, "TMP/LY", refData(22,1,3)); + book(_histPiYield, "TMP/PiY",refData(22,1,1)); + book(_histPi4LYield, "TMP/PiLY",refData(22,1,3)); // HepData entry is wrong -- look in the paper. } /// Perform the per-event analysis void analyze(const Event& event) { - const double weight = event.weight(); // Event trigger. if (!apply(event, "V0-AND")() ) vetoEvent; // Centrality const CentralityProjection& cent = apply(event,"V0A"); double c = cent(); // Find the index for the profiles. int index = profileIndex(centralityBins, c); // Find the correct histograms // all the pion histos auto pi1Itr = _histPipT.upper_bound(c); // Test the first one. if (pi1Itr == _histPipT.end()) return; auto pi2Itr = _tmpPi4KpT.upper_bound(c); auto pi3Itr = _tmpPi4PpT.upper_bound(c); // Then the rest auto kItr = _histKpT.upper_bound(c); auto k0Itr = _histK0SpT.upper_bound(c); auto krItr = _tmpKpT.upper_bound(c); auto klItr = _tmpK4LpT.upper_bound(c); auto pItr = _histProtonpT.upper_bound(c); auto prItr = _tmpProtonpT.upper_bound(c); auto lItr = _histLambdapT.upper_bound(c); auto lrItr = _tmpLambdapT.upper_bound(c); // And the sow auto sowItr = _sow.upper_bound(c); - sowItr->second->fill(weight); + sowItr->second->fill(); const ALICE::PrimaryParticles& fs = apply(event,"FS"); // Count number of particles for yields. int npi = 0, nk = 0, np = 0, nlam = 0; for(auto p : fs.particles()) { const double pT = p.pT(); const int pid = abs(p.pid()); - const double nW = weight / M_PI / pT; // Dividing and multiplying by 2 because dy. + const double nW = 1 / M_PI / pT; // Dividing and multiplying by 2 because dy. 
if (pid == 211) { // pi+/- ++npi; pi1Itr->second->fill(pT, nW); - pi2Itr->second->fill(pT, weight); - pi3Itr->second->fill(pT, weight); - _histPiMeanpT->fillBin(index, pT, weight); + pi2Itr->second->fill(pT); + pi3Itr->second->fill(pT); + _histPiMeanpT->fillBin(index, pT); } else if (pid == 321) { // K +/- ++nk; kItr->second->fill(pT, nW); - krItr->second->fill(pT, weight); - _histKMeanpT->fillBin(index, pT, weight); + krItr->second->fill(pT); + _histKMeanpT->fillBin(index, pT); } else if (pid == 310) { // K0S k0Itr->second->fill(pT, nW); - klItr->second->fill(pT, weight); - _histK0SMeanpT->fillBin(index, pT, weight); + klItr->second->fill(pT); + _histK0SMeanpT->fillBin(index, pT); } else if (pid == 2212) { // p + pbar ++np; pItr->second->fill(pT, nW); - prItr->second->fill(pT, weight); - _histProtonMeanpT->fillBin(index, pT, weight); + prItr->second->fill(pT); + _histProtonMeanpT->fillBin(index, pT); } else if (pid == 3122) { // Lambda + Lambdabar ++nlam; lItr->second->fill(pT, nW); - lrItr->second->fill(pT, weight); - _histLambdaMeanpT->fillBin(index, pT, weight); + lrItr->second->fill(pT); + _histLambdaMeanpT->fillBin(index, pT); } } // Fill the yield profiles. - _histKYield->fillBin(index, double(nk), weight); - _histPi4LYield->fillBin(index, double(npi), weight); - _histProtonYield->fillBin(index, double(np), weight); - _histPiYield->fillBin(index, double(npi), weight); - _histLambdaYield->fillBin(index, double(nlam), weight); + _histKYield->fillBin(index, double(nk)); + _histPi4LYield->fillBin(index, double(npi)); + _histProtonYield->fillBin(index, double(np)); + _histPiYield->fillBin(index, double(npi)); + _histLambdaYield->fillBin(index, double(nlam)); } /// Normalise histograms etc., after the run void finalize() { - + // Loop over centrality classes. for (int i = 0; i < 7; i++){ // Normalize the spectra. _histPipT[centralityBins[i]]->scaleW(1./_sow[centralityBins[i]]->sumW()); _histKpT[centralityBins[i]]->scaleW(1./_sow[centralityBins[i]]->sumW()); _histK0SpT[centralityBins[i]]->scaleW(1./_sow[centralityBins[i]]->sumW()); _histProtonpT[centralityBins[i]]->scaleW(1./_sow[centralityBins[i]]->sumW()); _histLambdapT[centralityBins[i]]->scaleW(1./_sow[centralityBins[i]]->sumW()); // Make the pT ratios. - divide(_tmpKpT[centralityBins[i]], _tmpPi4KpT[centralityBins[i]], + divide(_tmpKpT[centralityBins[i]], _tmpPi4KpT[centralityBins[i]], _ratioKPi[centralityBins[i]]); - divide(_tmpProtonpT[centralityBins[i]], _tmpPi4PpT[centralityBins[i]], + divide(_tmpProtonpT[centralityBins[i]], _tmpPi4PpT[centralityBins[i]], _ratioPPi[centralityBins[i]]); - divide(_tmpLambdapT[centralityBins[i]], _tmpK4LpT[centralityBins[i]], + divide(_tmpLambdapT[centralityBins[i]], _tmpK4LpT[centralityBins[i]], _ratioLK[centralityBins[i]]); } divide(_histKYield, _histPiYield, _histKtoPiYield); divide(_histProtonYield, _histPiYield, _histProtontoPiYield); divide(_histLambdaYield, _histPi4LYield, _histLambdatoPiYield); } //@} private: vector centralityBins; // pT spectra (separated by multiplicity classes) map _histPipT; map _histKpT; map _histK0SpT; map _histProtonpT; map _histLambdapT; - + // Associated sum of weights. map _sow; - + // pT spectra for ratios. map _tmpPi4KpT; map _tmpPi4PpT; map _tmpK4LpT; map _tmpKpT; map _tmpProtonpT; map _tmpLambdapT; // The acual ratios. map _ratioKPi; map _ratioPPi; map _ratioLK; // Mean pT vs. 
Multiplicity Profile1DPtr _histKMeanpT; Profile1DPtr _histK0SMeanpT; Profile1DPtr _histProtonMeanpT; Profile1DPtr _histLambdaMeanpT; Profile1DPtr _histPiMeanpT; - - // Total yields + + // Total yields Profile1DPtr _histKYield; Profile1DPtr _histProtonYield; Profile1DPtr _histLambdaYield; Profile1DPtr _histPiYield; Profile1DPtr _histPi4LYield; - + // Yield ratios. Scatter2DPtr _histKtoPiYield; Scatter2DPtr _histProtontoPiYield; Scatter2DPtr _histLambdatoPiYield; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2014_I1244523); } diff --git a/analyses/pluginALICE/ALICE_2015_PBPBCentrality.cc b/analyses/pluginALICE/ALICE_2015_PBPBCentrality.cc --- a/analyses/pluginALICE/ALICE_2015_PBPBCentrality.cc +++ b/analyses/pluginALICE/ALICE_2015_PBPBCentrality.cc @@ -1,82 +1,81 @@ /** * @file ALICE_2015_PBPBCentrality.cc * @author Christian Holm Christensen * @date Wed Aug 22 15:56:23 2018 - * + * * @brief Dummy analysis for centrality calibration in Pb-Pb at 5.02TeV */ #include #include namespace Rivet { - /** + /** * Dummy analysis for centrality calibration in Pb-Pb at 5.02TeV */ class ALICE_2015_PBPBCentrality : public Analysis { public: - /** - * Constructor + /** + * Constructor */ ALICE_2015_PBPBCentrality() : Analysis("ALICE_2015_PBPBCentrality") { } - /** - * Initialize this analysis. + /** + * Initialize this analysis. */ void init() { ALICE::V0AndTrigger v0and; declare(v0and,"V0-AND"); ALICE::V0MMultiplicity v0m; declare(v0m,"V0M"); - _v0m = bookHisto1D("V0M","Forward multiplicity","V0M","Events"); - _imp = bookHisto1D("V0M_IMP",100,0,20, - "Impact parameter","b (fm)","Events"); + book(_v0m, "V0M","Forward multiplicity","V0M","Events"); + book(_imp, "V0M_IMP",100,0,20, "Impact parameter","b (fm)","Events"); } - /** + /** * Analyse a single event. * - * @param event The event + * @param event The event */ void analyze(const Event& event) { // Get and fill in the impact parameter value if the information // is valid. const HepMC::GenEvent* ge = event.genEvent(); const HepMC::HeavyIon* hi = ge->heavy_ion(); if (hi && hi->is_valid()) - _imp->fill(hi->impact_parameter(), event.weight()); - + _imp->fill(hi->impact_parameter()); + // Check if we have any hit in either V0-A or -C. If not, the // event is not selected and we get out. 
if (!apply(event,"V0-AND")()) return; - // Fill in the V0 multiplicity for this event - _v0m->fill(apply(event,"V0M")(), event.weight()); + // Fill in the V0 multiplicity for this event + _v0m->fill(apply(event,"V0M")()); } - /** + /** * Finalize this analysis */ void finalize() { _v0m->normalize(); _imp->normalize(); } /** The distribution of V0M multiplicity */ Histo1DPtr _v0m; /** The distribution of impact parameters */ Histo1DPtr _imp; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2015_PBPBCentrality); } // // EOF // diff --git a/analyses/pluginALICE/ALICE_2015_PPBCentrality.cc b/analyses/pluginALICE/ALICE_2015_PPBCentrality.cc --- a/analyses/pluginALICE/ALICE_2015_PPBCentrality.cc +++ b/analyses/pluginALICE/ALICE_2015_PPBCentrality.cc @@ -1,82 +1,79 @@ // -*- C++ -*- #include "Rivet/Projections/ImpactParameterProjection.hh" #include "Rivet/Analysis.hh" #include "Rivet/Projections/AliceCommon.hh" - namespace Rivet { /// Generic analysis looking at various distributions of final state particles class ALICE_2015_PPBCentrality : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2015_PPBCentrality); /// Book histograms and initialise projections before the run void init() { // One projection for the actual observable, and one for the // generated impact parameter. declare(ALICE::V0AMultiplicity(), "V0A"); declare(ImpactParameterProjection(), "IMP"); declare(ALICE::V0AndTrigger(), "Trigger"); // The calibration histogram: - _calib = bookHisto1D("V0A", 100, 0.0, 500.0); + book(_calib, "V0A", 100, 0.0, 500.0); // If histogram was pre-loaded, the calibration is done. _done = ( _calib->numEntries() > 0 ); // The alternative histogram based on impact parameter. Note that // it MUST be named the same as the histogram for the experimental // observable with an added _IMP suffix. - _impcalib = bookHisto1D("V0A_IMP", 400, 0.0, 20.0); - + book(_impcalib, "V0A_IMP", 400, 0.0, 20.0); } - + /// Perform the per-event analysis void analyze(const Event& event) { if ( _done ) return; - - const double weight = event.weight(); // The alternative centrality based on generated impact // parameter, assumes that the generator does not describe the // full final state, and should therefore be filled even if the // event is not triggered. - _impcalib->fill(apply(event, "IMP")(), weight); + _impcalib->fill(apply(event, "IMP")()); if ( !apply(event, "Trigger")() ) vetoEvent; - _calib->fill(apply(event, "V0A")(), weight); + _calib->fill(apply(event, "V0A")()); } - + + /// Finalize void finalize() { _calib->normalize(); _impcalib->normalize(); } private: /// The calibration histograms. Histo1DPtr _calib; Histo1DPtr _impcalib; /// Safeguard from adding to a pre-loaded histogram. bool _done; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2015_PPBCentrality); } diff --git a/analyses/pluginALICE/ALICE_2015_PPCentrality.cc b/analyses/pluginALICE/ALICE_2015_PPCentrality.cc --- a/analyses/pluginALICE/ALICE_2015_PPCentrality.cc +++ b/analyses/pluginALICE/ALICE_2015_PPCentrality.cc @@ -1,51 +1,54 @@ // -*- C++ -*- - #include "Rivet/Analysis.hh" #include "Rivet/Projections/ImpactParameterProjection.hh" #include "Rivet/Projections/AliceCommon.hh" namespace Rivet { + + class ALICE_2015_PPCentrality : public Analysis { - public: + public: DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2015_PPCentrality); - + // Book histograms, initialize projections. 
void init() { declare(ALICE::V0AndTrigger(),"V0-AND"); declare(ALICE::V0MMultiplicity(),"V0M"); - declare(ImpactParameterProjection(), "IMP"); - _v0m = bookHisto1D("V0M",100,0,200); - _imp = bookHisto1D("V0M_IMP",100,0,20); + declare(ImpactParameterProjection(), "IMP"); + book(_v0m, "V0M",100,0,200); + book(_imp, "V0M_IMP",100,0,20); _done = (_v0m->numEntries() > 0); } + + // Per-event analysis void analyze(const Event& event) { if (_done) return; - const double weight = event.weight(); + _imp->fill(apply(event,"IMP")()); - _imp->fill(apply(event,"IMP")(), weight); - // Check if we have any hit in either V0-A or -C. If not, the // event is not selected and we get out. if (!apply(event,"V0-AND")()) return; - // Fill in the V0 multiplicity for this event - _v0m->fill(apply(event,"V0M")(), event.weight()); + // Fill in the V0 multiplicity for this event + _v0m->fill(apply(event,"V0M")()); } - /** - * Finalize this analysis - */ - void finalize() - { + + + void finalize() { _v0m->normalize(); _imp->normalize(); } + Histo1DPtr _v0m; Histo1DPtr _imp; bool _done; }; + + // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2015_PPCentrality); + } diff --git a/analyses/pluginALICE/ALICE_2016_I1394676.cc b/analyses/pluginALICE/ALICE_2016_I1394676.cc --- a/analyses/pluginALICE/ALICE_2016_I1394676.cc +++ b/analyses/pluginALICE/ALICE_2016_I1394676.cc @@ -1,107 +1,106 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/AliceCommon.hh" + namespace Rivet { /// @brief ALICE PbPb at 2.76 TeV eta distributions, peripheral events. class ALICE_2016_I1394676 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2016_I1394676); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // Centrality projection. declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M","V0M"); // Projections for the 2-out-of-3 trigger. - declare(ChargedFinalState( (Cuts::eta > 2.8 && Cuts::eta < 5.1) && + declare(ChargedFinalState( (Cuts::eta > 2.8 && Cuts::eta < 5.1) && Cuts::pT > 0.1*GeV), "VZERO1"); declare(ChargedFinalState( (Cuts::eta > -3.7 && Cuts::eta < -1.7) && Cuts::pT > 0.1*GeV), "VZERO2"); - declare(ChargedFinalState(Cuts::abseta < 1. && Cuts::pT > 0.15*GeV), - "SPD"); + declare(ChargedFinalState(Cuts::abseta < 1. && Cuts::pT > 0.15*GeV), "SPD"); // Primary particles. declare(ALICE::PrimaryParticles(Cuts::abseta < 5.6),"APRIM"); - + // The centrality bins and the corresponding histograms and sow counters. centralityBins = { 40, 50, 60, 70, 80, 90 }; for (int i = 0, N = centralityBins.size(); i < N; ++i) { - histEta[centralityBins[i]] = bookHisto1D(1, 1, i + 1); - sow[centralityBins[i]] = bookCounter("sow_" + toString(i)); + book(histEta[centralityBins[i]], 1, 1, i + 1); + book(sow[centralityBins[i]], "sow_" + toString(i)); } } /// Perform the per-event analysis void analyze(const Event& event) { - const double weight = event.weight(); // Trigger projections. - const ChargedFinalState& vz1 = - applyProjection(event,"VZERO1"); - const ChargedFinalState& vz2 = - applyProjection(event,"VZERO2"); - const ChargedFinalState& spd = - applyProjection(event,"SPD"); + const ChargedFinalState& vz1 = + apply(event,"VZERO1"); + const ChargedFinalState& vz2 = + apply(event,"VZERO2"); + const ChargedFinalState& spd = + apply(event,"SPD"); int fwdTrig = (vz1.particles().size() > 0 ? 
1 : 0); int bwdTrig = (vz2.particles().size() > 0 ? 1 : 0); int cTrig = (spd.particles().size() > 0 ? 1 : 0); - + if (fwdTrig + bwdTrig + cTrig < 2) vetoEvent; const CentralityProjection& cent = apply(event,"V0M"); double c = cent(); // No centralities below 30 % if (c < 30.) return; // Find the correct centrality histogram auto hItr = histEta.upper_bound(c); if (hItr == histEta.end()) return; // Find the correct sow. auto sItr = sow.upper_bound(c); if (sItr == sow.end()) return; - sItr->second->fill(weight); + sItr->second->fill(); // Fill the histograms. - for ( const auto& p : + for ( const auto& p : applyProjection(event,"APRIM").particles() ) - if(p.abscharge() > 0) hItr->second->fill(p.eta(), weight); + if(p.abscharge() > 0) hItr->second->fill(p.eta()); } /// Normalise histograms etc., after the run void finalize() { - for (int i = 0, N = centralityBins.size(); i < N; ++i) + for (int i = 0, N = centralityBins.size(); i < N; ++i) histEta[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]->sumW()); - + } //@} /// @name Histograms //@{ vector centralityBins; map histEta; map sow; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2016_I1394676); } diff --git a/analyses/pluginALICE/ALICE_2016_I1419244.cc b/analyses/pluginALICE/ALICE_2016_I1419244.cc --- a/analyses/pluginALICE/ALICE_2016_I1419244.cc +++ b/analyses/pluginALICE/ALICE_2016_I1419244.cc @@ -1,273 +1,272 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Tools/Correlators.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/AliceCommon.hh" -#include "YODA/Scatter2D.h" namespace Rivet { + /// @brief Multiparticle azimuthal correlations PbPb 5TeV. class ALICE_2016_I1419244 : public CumulantAnalysis { public: /// Constructor - ALICE_2016_I1419244() : CumulantAnalysis("ALICE_2016_I1419244"){ + ALICE_2016_I1419244() + : CumulantAnalysis("ALICE_2016_I1419244") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - + // Initialise and register projections // Declare the trigger projection. declare(ALICE::V0AndTrigger(),"V0-AND"); // Centrality projection. - declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", - "V0M","V0M"); - + declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M","V0M"); + // The full central charged final state. const ChargedFinalState& cfs = ChargedFinalState(Cuts::abseta < 0.8 && Cuts::pT > 0.2*GeV && Cuts::pT < 5.0*GeV); declare(cfs, "CFS"); // The positive eta side used for rapidity gap. - const ChargedFinalState& cfsp = ChargedFinalState(Cuts::eta > 0.5 && + const ChargedFinalState& cfsp = ChargedFinalState(Cuts::eta > 0.5 && Cuts::eta < 0.8 && Cuts::pT > 0.2*GeV && Cuts::pT < 5.0*GeV); declare(cfsp, "CFSP"); // ..negative ditto. - const ChargedFinalState& cfsn = ChargedFinalState(Cuts::eta < -0.5 && + const ChargedFinalState& cfsn = ChargedFinalState(Cuts::eta < -0.5 && Cuts::eta > -0.8 && Cuts::pT > 0.2*GeV && Cuts::pT < 5.0*GeV); declare(cfsn, "CFSN"); - // Book histograms before booking the correlators + // Book histograms before booking the correlators // to have access to bin edges. 
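// (Hedged aside on the ordering constraint stated above: in this
// CumulantAnalysis API the event-averaged correlator takes its binning
// from an already-booked scatter, so the pattern is
//
//   book(h_v22gap, 1, 1, 1, true);                          // edges from ref data
//   ec22gap = bookECorrelatorGap<2,2>("ec22gap", h_v22gap); // reuses those edges
//
// which is why all the scatters below are booked first.)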
- h_v22gap = bookScatter2D(1, 1, 1, true); - h_v24 = bookScatter2D(1, 1, 2, true); - h_v26 = bookScatter2D(1, 1, 3, true); - h_v28 = bookScatter2D(1, 1, 4, true); - h_v32gap = bookScatter2D(2, 1, 1, true); - h_v42gap = bookScatter2D(2, 1, 2, true); - h_v22gappT = bookScatter2D(8, 1, 1, true); - h_v32gappT = bookScatter2D(8, 1, 2, true); - h_v42gappT = bookScatter2D(8, 1, 3, true); - h_v24pT10 = bookScatter2D(9, 1, 1, true); - h_v24pT20 = bookScatter2D(9, 1, 2, true); - h_v24pT30 = bookScatter2D(9, 1, 3, true); - - h_c22gap = bookScatter2D(h_v22gap,"/" + name() + "/c22gap"); - h_c24 = bookScatter2D(h_v24,"/" + name() + "/c24"); - h_c26 = bookScatter2D(h_v26,"/" + name() + "/c26"); - h_c28 = bookScatter2D(h_v28,"/" + name() + "/c28"); - h_c32gap = bookScatter2D(h_v32gap,"/" + name() + "/c32gap"); - h_c42gap = bookScatter2D(h_v42gap,"/" + name() + "/c42gap"); + book(h_v22gap, 1, 1, 1, true); + book(h_v24, 1, 1, 2, true); + book(h_v26, 1, 1, 3, true); + book(h_v28, 1, 1, 4, true); + book(h_v32gap, 2, 1, 1, true); + book(h_v42gap, 2, 1, 2, true); + book(h_v22gappT, 8, 1, 1, true); + book(h_v32gappT, 8, 1, 2, true); + book(h_v42gappT, 8, 1, 3, true); + book(h_v24pT10, 9, 1, 1, true); + book(h_v24pT20, 9, 1, 2, true); + book(h_v24pT30, 9, 1, 3, true); - h_ec22gap = bookScatter2D(h_v22gap,"/" + name() + "/ec22gap"); - h_ec22 = bookScatter2D(h_v24,"/" + name() + "/ec22"); - h_ec24 = bookScatter2D(h_v24,"/" + name() + "/ec24"); - h_ec26 = bookScatter2D(h_v26,"/" + name() + "/ec26"); - h_ec28 = bookScatter2D(h_v28,"/" + name() + "/ec28"); + book(h_c22gap, h_v22gap,"/" + name() + "/c22gap"); + book(h_c24, h_v24,"/" + name() + "/c24"); + book(h_c26, h_v26,"/" + name() + "/c26"); + book(h_c28, h_v28,"/" + name() + "/c28"); + book(h_c32gap, h_v32gap,"/" + name() + "/c32gap"); + book(h_c42gap, h_v42gap,"/" + name() + "/c42gap"); - + book(h_ec22gap, h_v22gap,"/" + name() + "/ec22gap"); + book(h_ec22, h_v24,"/" + name() + "/ec22"); + book(h_ec24, h_v24,"/" + name() + "/ec24"); + book(h_ec26, h_v26,"/" + name() + "/ec26"); + book(h_ec28, h_v28,"/" + name() + "/ec28"); + + // Corresponding event averaged correlators. // Integrated, with gap. ec22gap = bookECorrelatorGap<2,2>("ec22gap",h_v22gap); ec32gap = bookECorrelatorGap<3,2>("ec32gap",h_v32gap); ec42gap = bookECorrelatorGap<4,2>("ec42gap",h_v42gap); - + // Integrated, no gap. ec22 = bookECorrelator<2,2>("ec22",h_v24); ec24 = bookECorrelator<2,4>("ec24",h_v24); ec26 = bookECorrelator<2,6>("ec26",h_v26); ec28 = bookECorrelator<2,8>("ec28",h_v28); - - // pT differential, no gap, three centralities. + + // pT differential, no gap, three centralities. ec22pT10 = bookECorrelator<2,2>("ec22pT10",h_v24pT10); ec24pT10 = bookECorrelator<2,4>("ec24pT10",h_v24pT10); ec22pT20 = bookECorrelator<2,2>("ec22pT20",h_v24pT20); ec24pT20 = bookECorrelator<2,4>("ec24pT20",h_v24pT20); - + ec22pT30 = bookECorrelator<2,2>("ec22pT30",h_v24pT30); ec24pT30 = bookECorrelator<2,4>("ec24pT30",h_v24pT30); // pT differential, with gap, 30-40% centrality. ec22gappT = bookECorrelatorGap<2,2>("ec22gappT",h_v22gappT); ec32gappT = bookECorrelatorGap<3,2>("ec32gappT",h_v32gappT); ec42gappT = bookECorrelatorGap<4,2>("ec42gappT",h_v42gappT); pair max = getMaxValues(); // Declare correlator projections. 
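// (A hedged reading of getMaxValues() just above: it scans every booked
// ECorrelator and returns the largest harmonic n and particle number p
// any of them needs -- with <4,2> and <2,8> correlators booked here that
// would be {4, 8} -- so a single Correlators projection can cache all the
// Q-vector powers required by the fills in analyze().)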
declare(Correlators(cfs, max.first, max.second, h_v22gappT), "Correlators"); declare(Correlators(cfsp, max.first, max.second, h_v22gappT), "CorrelatorsPos"); declare(Correlators(cfsn, max.first, max.second, h_v22gappT), "CorrelatorsNeg"); } + /// Perform the per-event analysis void analyze(const Event& event) { // Event trigger. if (!apply(event, "V0-AND")() ) vetoEvent; - const double w = event.weight(); - // The centrality projection. - const CentralityProjection& centProj = + const CentralityProjection& centProj = apply(event,"V0M"); // The centrality. const double cent = centProj(); // The correlators projections. const Correlators& c = applyProjection(event,"Correlators"); - const Correlators& cp = + const Correlators& cp = applyProjection(event,"CorrelatorsPos"); - const Correlators& cn = + const Correlators& cn = applyProjection(event,"CorrelatorsNeg"); - - ec22gap->fill(cent, cp, cn, w); - ec32gap->fill(cent, cp, cn, w); - ec42gap->fill(cent, cp, cn, w); - - ec22->fill(cent, c, w); - ec24->fill(cent, c, w); - ec26->fill(cent, c, w); - ec28->fill(cent, c, w); - if (cent < 10.) ec22pT10->fill(c, w), ec24pT10->fill(c, w); - else if (cent < 20.) ec22pT20->fill(c, w), ec24pT20->fill(c, w); - else if (cent < 30.) ec22pT30->fill(c, w), ec24pT30->fill(c, w); + ec22gap->fill(cent, cp, cn); + ec32gap->fill(cent, cp, cn); + ec42gap->fill(cent, cp, cn); + + ec22->fill(cent, c); + ec24->fill(cent, c); + ec26->fill(cent, c); + ec28->fill(cent, c); + + if (cent < 10.) ec22pT10->fill(c), ec24pT10->fill(c); + else if (cent < 20.) ec22pT20->fill(c), ec24pT20->fill(c); + else if (cent < 30.) ec22pT30->fill(c), ec24pT30->fill(c); else if (cent < 40.) { - ec22gappT->fill(cp, cn, w); - ec32gappT->fill(cp, cn, w); - ec42gappT->fill(cp, cn, w); + ec22gappT->fill(cp, cn); + ec32gappT->fill(cp, cn); + ec42gappT->fill(cp, cn); } } /// Normalise histograms etc., after the run void finalize() { - // Correlators must be streamed + // Correlators must be streamed // in order to run reentrant finalize. stream(); // Filling test histos. cnTwoInt(h_c22gap, ec22gap); cnTwoInt(h_c32gap, ec32gap); cnTwoInt(h_c42gap, ec42gap); cnFourInt(h_c24, ec22, ec24); cnSixInt(h_c26, ec22, ec24, ec26); cnEightInt(h_c28, ec22, ec24, ec26, ec28); corrPlot(h_ec22gap, ec22gap); corrPlot(h_ec22, ec22); corrPlot(h_ec24, ec24); corrPlot(h_ec26, ec26); corrPlot(h_ec28, ec28); vnTwoInt(h_v22gap, ec22gap); vnTwoInt(h_v32gap, ec32gap); vnTwoInt(h_v42gap, ec42gap); vnFourInt(h_v24, ec22, ec24); vnSixInt(h_v26, ec22, ec24, ec26); vnEightInt(h_v28, ec22, ec24, ec26, ec28); vnTwoDiff(h_v22gappT, ec22gappT); vnTwoDiff(h_v32gappT, ec32gappT); - vnTwoDiff(h_v42gappT, ec42gappT); + vnTwoDiff(h_v42gappT, ec42gappT); vnFourDiff(h_v24pT10, ec22pT10, ec24pT10); vnFourDiff(h_v24pT20, ec22pT20, ec24pT20); vnFourDiff(h_v24pT30, ec22pT30, ec24pT30); } //@} /// @name Histograms //@{ // The integrated centrality dependent v2{n}. Scatter2DPtr h_v22gap; Scatter2DPtr h_v24; Scatter2DPtr h_v26; Scatter2DPtr h_v28; // The integrated, centrality dependent v3,4{2} gapped. Scatter2DPtr h_v32gap; Scatter2DPtr h_v42gap; // The pT differential, v2{2} gapped for 30-40% centrality Scatter2DPtr h_v22gappT; // ...v3{2} ditto. Scatter2DPtr h_v32gappT; // ... v4{2} ditto. 
Scatter2DPtr h_v42gappT; // The pT differential, centrality dependent v2{4} Scatter2DPtr h_v24pT10; Scatter2DPtr h_v24pT20; Scatter2DPtr h_v24pT30; // Test histograms -- cumulants Scatter2DPtr h_c22gap; Scatter2DPtr h_c24; Scatter2DPtr h_c26; Scatter2DPtr h_c28; Scatter2DPtr h_c32gap; Scatter2DPtr h_c42gap; // Test histograms -- correlators. Scatter2DPtr h_ec22gap; Scatter2DPtr h_ec22; Scatter2DPtr h_ec24; Scatter2DPtr h_ec26; Scatter2DPtr h_ec28; // The all event averaged correlators. // Integrated with gap. ECorrPtr ec22gap; ECorrPtr ec32gap; ECorrPtr ec42gap; // Integrated without gap. ECorrPtr ec22; ECorrPtr ec24; ECorrPtr ec26; ECorrPtr ec28; - + // pT dependent, 2 particle, gapped, 30-40% centrality ECorrPtr ec22gappT; ECorrPtr ec32gappT; ECorrPtr ec42gappT; // pT dependent, 4 particle, three centralities. ECorrPtr ec22pT10; ECorrPtr ec24pT10; ECorrPtr ec22pT20; ECorrPtr ec24pT20; - + ECorrPtr ec22pT30; ECorrPtr ec24pT30; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2016_I1419244); } diff --git a/analyses/pluginALICE/ALICE_2016_I1471838.cc b/analyses/pluginALICE/ALICE_2016_I1471838.cc --- a/analyses/pluginALICE/ALICE_2016_I1471838.cc +++ b/analyses/pluginALICE/ALICE_2016_I1471838.cc @@ -1,196 +1,192 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/CentralityProjection.hh" #include "Rivet/Projections/AliceCommon.hh" #include "Rivet/Tools/AliceCommon.hh" + namespace Rivet { /// @brief Strangeness enhancement in pp 7 TeV by ALICE. class ALICE_2016_I1471838 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2016_I1471838); int profileIndex(vector cBins, double c) { int index = 100; if (c > 0 && c <= cBins[0]) return cBins.size() - 1; for (size_t i = 0; i < cBins.size() - 1; ++i) { if (c > cBins[i] && c <= cBins[i + 1]) { index = i; break; - } + } } return max(0, int(cBins.size() - index - 2)); } /// Book histograms and initialise projections before the run void init() { // Centrality projection. 
- declareCentrality(ALICE::V0MMultiplicity(), + declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PPCentrality","V0M","V0M"); // Central primary particles declare(ChargedFinalState(Cuts::abseta < 1.0),"PP"); declare(ALICE::PrimaryParticles(Cuts::absrap < 0.5),"PPy"); centralityBins = {1.,5.,10.,15.,20., 30., 40., 50., 70., 100.}; centralityBinsOmega = {5.,15.,30.,50.,100.}; // Book histograms for (int i = 0; i < 10; ++i) { - K0SpT[centralityBins[9-i]] = bookHisto1D(i+1,1,1); - LambdapT[centralityBins[9-i]] = bookHisto1D(i+11,1,1); - XipT[centralityBins[9-i]] = bookHisto1D(i+21,1,1); - sow[centralityBins[9-i]] = bookCounter("sow_" + toString(i)); + book(K0SpT[centralityBins[9-i]], i+1,1,1); + book(LambdapT[centralityBins[9-i]], i+11,1,1); + book(XipT[centralityBins[9-i]], i+21,1,1); + book(sow[centralityBins[9-i]], "sow_" + toString(i)); } for (int i = 0; i < 5; ++i) { - OmegapT[centralityBinsOmega[4-i]] = bookHisto1D(i+31,1,1); - sowOmega[centralityBinsOmega[4-i]] = bookCounter("sowO_" + toString(i)); + book(OmegapT[centralityBinsOmega[4-i]], i+31,1,1); + book(sowOmega[centralityBinsOmega[4-i]], "sowO_" + toString(i)); } - piYield = bookProfile1D(40,1,1); - pYield = bookProfile1D(41,1,1); - kYield = bookProfile1D(42,1,1); - lambdaYield = bookProfile1D(43,1,1); - xiYield = bookProfile1D(44,1,1); - omegaYield = bookProfile1D(45,1,1); - piRebinned = shared_ptr(omegaYield->newclone()); - piRebinned->setTitle("piRebinned"); - piRebinned->setPath("/" + name() + "/piRebinned"); - addAnalysisObject(piRebinned); + book(piYield, 40,1,1); + book(pYield, 41,1,1); + book(kYield, 42,1,1); + book(lambdaYield, 43,1,1); + book(xiYield, 44,1,1); + book(omegaYield, 45,1,1); + book(piRebinned, "/piRebinned", refData(45,1,1)); + // Make the ratios + book(kpi, 36, 1, 1, true); + book(ppi, 47, 1, 1, true); + book(lpi, 37, 1, 1, true); + book(xpi, 38, 1, 1, true); + book(opi, 39, 1, 1, true); + book(lk, 46, 1, 1, true); } /// Perform the per-event analysis void analyze(const Event& event) { if (apply(event,"PP").particles().size() < 1) vetoEvent; const ALICE::PrimaryParticles& prim = apply(event,"PPy"); - const double weight = event.weight(); const CentralityProjection& cent = apply(event,"V0M"); double c = cent(); // Find the correct histograms auto kptItr = K0SpT.upper_bound(c); if (kptItr == K0SpT.end()) return; auto lptItr = LambdapT.upper_bound(c); if (lptItr == LambdapT.end()) return; auto xptItr = XipT.upper_bound(c); if (xptItr == XipT.end()) return; auto optItr = OmegapT.upper_bound(c); if (optItr == OmegapT.end()) return; // Fill the sow. auto sowItr = sow.upper_bound(c); if (sowItr == sow.end()) return; auto sowOmegaItr = sowOmega.upper_bound(c); if (sowOmegaItr == sowOmega.end()) return; - sowItr->second->fill(weight); - sowOmegaItr->second->fill(weight); + sowItr->second->fill(); + sowOmegaItr->second->fill(); // Fill the pt histograms and count yields. int npi = 0, npr = 0, nk = 0; int nla = 0, nxi = 0, nom = 0; for (auto p : prim.particles()) { const double pT = p.pT(); const int pid = abs(p.pid()); if (pid == 211) ++npi; else if (pid == 2212) ++npr; else if (pid == 310) { - kptItr->second->fill(pT, weight); + kptItr->second->fill(pT); ++nk; } else if (pid == 3122) { - lptItr->second->fill(pT, weight); + lptItr->second->fill(pT); ++nla; } else if (pid == 3312) { - xptItr->second->fill(pT, weight); + xptItr->second->fill(pT); ++nxi; } else if (pid == 3334) { - optItr->second->fill(pT, weight); + optItr->second->fill(pT); ++nom; } } // Fill the profiles of yields. 
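// (Worked example of profileIndex() defined above, using the bins from
// init(): with cBins = {1,5,10,15,20,30,40,50,70,100} and c = 12 the loop
// stops at i = 2 since 10 < 12 <= 15, giving max(0, 10 - 2 - 2) = 6;
// c = 0.5 hits the early return and gives 9, while c = 80 gives 0. So the
// profile axis runs from the most peripheral class in bin 0 to the most
// central in bin 9.)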
int index = profileIndex(centralityBins,c); - piYield->fillBin(index, double(npi), weight); - pYield->fillBin(index, double(npr), weight); - kYield->fillBin(index, double(nk), weight); - lambdaYield->fillBin(index, double(nla), weight); - xiYield->fillBin(index, double(nxi), weight); + piYield->fillBin(index, double(npi)); + pYield->fillBin(index, double(npr)); + kYield->fillBin(index, double(nk)); + lambdaYield->fillBin(index, double(nla)); + xiYield->fillBin(index, double(nxi)); index = profileIndex(centralityBinsOmega, c); - omegaYield->fillBin(index, double(nom), weight); - piRebinned->fillBin(index,double(npi),weight); + omegaYield->fillBin(index, double(nom)); + piRebinned->fillBin(index,double(npi)); } /// Normalise histograms etc., after the run void finalize() { // Normalize the spectra for (int i = 0; i < 10; ++i) { K0SpT[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]->sumW()); XipT[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]->sumW()); LambdapT[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]->sumW()); } for (int i = 0; i < 5; ++i) { - OmegapT[centralityBinsOmega[i]]->scaleW(1./sowOmega[centralityBinsOmega[i]]->sumW()); + OmegapT[centralityBinsOmega[i]]->scaleW(1./sowOmega[centralityBinsOmega[i]]->sumW()); } - // Make the ratios - kpi = bookScatter2D(36, 1, 1, true); - ppi = bookScatter2D(47, 1, 1, true); - lpi = bookScatter2D(37, 1, 1, true); - xpi = bookScatter2D(38, 1, 1, true); - opi = bookScatter2D(39, 1, 1, true); - lk = bookScatter2D(46, 1, 1, true); divide(kYield, piYield, kpi); kpi->scaleY(2.); divide(pYield, piYield, ppi); divide(lambdaYield, piYield, lpi); divide(xiYield, piYield, xpi); divide(omegaYield, piRebinned, opi); divide(lambdaYield, kYield, lk); lk->scaleY(0.5); - } //@} /// @name Histograms //@{ // Histograms ordered in centrality classes vector centralityBins; vector centralityBinsOmega; // pT spectra map K0SpT; map LambdapT; map XipT; map OmegapT; map sow; map sowOmega; // Total yields Profile1DPtr piYield; Profile1DPtr pYield; Profile1DPtr kYield; Profile1DPtr lambdaYield; Profile1DPtr xiYield; Profile1DPtr omegaYield; Profile1DPtr piRebinned; // Ratios Scatter2DPtr kpi; Scatter2DPtr ppi; Scatter2DPtr lpi; Scatter2DPtr xpi; Scatter2DPtr opi; Scatter2DPtr lk; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2016_I1471838); } diff --git a/analyses/pluginALICE/ALICE_2016_I1507090.cc b/analyses/pluginALICE/ALICE_2016_I1507090.cc --- a/analyses/pluginALICE/ALICE_2016_I1507090.cc +++ b/analyses/pluginALICE/ALICE_2016_I1507090.cc @@ -1,105 +1,104 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/AliceCommon.hh" namespace Rivet { /// @brief ALICE PbPb at 5.02 TeV eta distributions. class ALICE_2016_I1507090 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2016_I1507090); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // Centrality projection. declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M","V0M"); // Projections for the 2-out-of-3 trigger. - declare(ChargedFinalState( (Cuts::eta > 2.8 && Cuts::eta < 5.1) && + declare(ChargedFinalState( (Cuts::eta > 2.8 && Cuts::eta < 5.1) && Cuts::pT > 0.1*GeV), "VZERO1"); declare(ChargedFinalState( (Cuts::eta > -3.7 && Cuts::eta < -1.7) && Cuts::pT > 0.1*GeV), "VZERO2"); declare(ChargedFinalState(Cuts::abseta < 1. 
&& Cuts::pT > 0.15*GeV), "SPD"); // Primary particles. declare(ALICE::PrimaryParticles(Cuts::abseta < 5.6),"APRIM"); - + // The centrality bins and the corresponding histograms. centralityBins = { 5, 10, 20, 30, 40, 50, 60, 70, 80, 90 }; for (int i = 0, N = centralityBins.size(); i < N; ++i) { - histEta[centralityBins[i]] = bookHisto1D(1, 1, i + 1); - sow[centralityBins[i]] = bookCounter("sow_" + toString(i)); + book(histEta[centralityBins[i]], 1, 1, i + 1); + book(sow[centralityBins[i]], "sow_" + toString(i)); } } /// Perform the per-event analysis void analyze(const Event& event) { - const double weight = event.weight(); // Trigger projections. - const ChargedFinalState& vz1 = + const ChargedFinalState& vz1 = applyProjection(event,"VZERO1"); - const ChargedFinalState& vz2 = + const ChargedFinalState& vz2 = applyProjection(event,"VZERO2"); - const ChargedFinalState& spd = + const ChargedFinalState& spd = applyProjection(event,"SPD"); int fwdTrig = (vz1.particles().size() > 0 ? 1 : 0); int bwdTrig = (vz2.particles().size() > 0 ? 1 : 0); int cTrig = (spd.particles().size() > 0 ? 1 : 0); - + if (fwdTrig + bwdTrig + cTrig < 2) vetoEvent; const CentralityProjection& cent = apply(event,"V0M"); double c = cent(); // Find the correct centrality histogram auto hItr = histEta.upper_bound(c); if (hItr == histEta.end()) return; // Find the correct sow. auto sItr = sow.upper_bound(c); if (sItr == sow.end()) return; - sItr->second->fill(weight); - + sItr->second->fill(); + // Fill the histograms. - for ( const auto& p : + for ( const auto& p : applyProjection(event,"APRIM").particles() ) - if(p.abscharge() > 0) hItr->second->fill(p.eta(), weight); + if(p.abscharge() > 0) hItr->second->fill(p.eta()); } /// Normalise histograms etc., after the run void finalize() { for (int i = 0, N = centralityBins.size(); i < N; ++i) histEta[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]->sumW()); - + } //@} /// @name Histograms //@{ vector centralityBins; map histEta; map sow; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2016_I1507090); } diff --git a/analyses/pluginALICE/ALICE_2016_I1507157.cc b/analyses/pluginALICE/ALICE_2016_I1507157.cc --- a/analyses/pluginALICE/ALICE_2016_I1507157.cc +++ b/analyses/pluginALICE/ALICE_2016_I1507157.cc @@ -1,202 +1,201 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/AliceCommon.hh" #include "Rivet/Projections/PrimaryParticles.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/EventMixingFinalState.hh" namespace Rivet { /// @brief Correlations of identified particles in pp. /// Also showcasing use of EventMixingFinalState. - class ALICE_2016_I1507157 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2016_I1507157); /// @name Analysis methods //@{ - + /// @brief Calculate angular distance between particles. double phaseDif(double a1, double a2){ double dif = a1 - a2; while (dif < -M_PI/2) dif += 2*M_PI; while (dif > 3*M_PI/2) dif -= 2*M_PI; return dif; } /// Book histograms and initialise projections before the run void init() { double etamax = 0.8; - double pTmin = 0.5; // GeV - + double pTmin = 0.5; // GeV + // Trigger declare(ALICE::V0AndTrigger(), "V0-AND"); // Charged tracks used to manage the mixing observable. ChargedFinalState cfsMult(Cuts::abseta < etamax); - addProjection(cfsMult, "CFSMult"); - + declare(cfsMult, "CFSMult"); + // Primary particles. 
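// (Aside on phaseDif() above, as a hedged note: it folds the angular
// difference into [-pi/2, 3*pi/2), the usual plotting window that puts
// the near-side peak at dPhi = 0 and the away-side peak at dPhi = pi;
// e.g. phaseDif(0.1, 6.2) = 0.1 - 6.2 + 2*pi ~ 0.18 rather than -6.1.)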
- PrimaryParticles pp({Rivet::PID::PIPLUS, Rivet::PID::KPLUS, - Rivet::PID::K0S, Rivet::PID::K0L, Rivet::PID::PROTON, + PrimaryParticles pp({Rivet::PID::PIPLUS, Rivet::PID::KPLUS, + Rivet::PID::K0S, Rivet::PID::K0L, Rivet::PID::PROTON, Rivet::PID::NEUTRON, Rivet::PID::LAMBDA, Rivet::PID::SIGMAMINUS, - Rivet::PID::SIGMAPLUS, Rivet::PID::XIMINUS, Rivet::PID::XI0, + Rivet::PID::SIGMAPLUS, Rivet::PID::XIMINUS, Rivet::PID::XI0, Rivet::PID::OMEGAMINUS},Cuts::abseta < etamax && Cuts::pT > pTmin*GeV); - addProjection(pp,"APRIM"); + declare(pp,"APRIM"); // The event mixing projection declare(EventMixingFinalState(cfsMult, pp, 5, 0, 100, 10),"EVM"); // The particle pairs. pid = {{211, -211}, {321, -321}, {2212, -2212}, {3122, -3122}, {211, 211}, {321, 321}, {2212, 2212}, {3122, 3122}, {2212, 3122}, {2212, -3122}}; // The associated histograms in the data file. vector refdata = {"d04-x01-y01","d04-x01-y02","d04-x01-y03", "d06-x01-y02","d05-x01-y01","d05-x01-y02","d05-x01-y03","d06-x01-y01", "d01-x01-y02","d02-x01-y02"}; + ratio.resize(refdata.size()); + signal.resize(refdata.size()); + background.resize(refdata.size()); for (int i = 0, N = refdata.size(); i < N; ++i) { // The ratio plots. - ratio.push_back(bookScatter2D(refdata[i], true)); - // Signal and mixed background. - signal.push_back(bookHisto1D("/TMP/" + refdata[i] + - "-s", *ratio[i], refdata[i] + "-s")); - background.push_back(bookHisto1D("/TMP/" + refdata[i] + - "-b", *ratio[i], refdata[i] + "-b")); + book(ratio[i], refdata[i], true); + // Signal and mixed background. + book(signal[i], "/TMP/" + refdata[i] + "-s", *ratio[i], refdata[i] + "-s"); + book(background[i], "/TMP/" + refdata[i] + "-b", *ratio[i], refdata[i] + "-b"); // Number of signal and mixed pairs. - nsp.push_back(0.); + nsp.push_back(0.); nmp.push_back(0.); - } + } } /// Perform the per-event analysis void analyze(const Event& event) { - const double weight = event.weight(); - + // Triggering if (!apply(event, "V0-AND")()) return; // The projections const PrimaryParticles& pp = applyProjection(event,"APRIM"); - const EventMixingFinalState& evm = + const EventMixingFinalState& evm = applyProjection(event, "EVM"); // Get all mixing events vector mixEvents = evm.getMixingEvents(); - + // If there are not enough events to mix, don't fill any histograms if(mixEvents.size() == 0) return; // Make a vector of mixed event particles vector mixParticles; size_t pSize = 0; for(size_t i = 0; i < mixEvents.size(); ++i) pSize+=mixEvents[i].size(); mixParticles.reserve(pSize); for(size_t i = 0; i < mixEvents.size(); ++i) - mixParticles.insert(mixParticles.end(), mixEvents[i].begin(), + mixParticles.insert(mixParticles.end(), mixEvents[i].begin(), mixEvents[i].end()); - + // Shuffle the particles in the mixing events random_shuffle(mixParticles.begin(), mixParticles.end()); - + for(const Particle& p1 : pp.particles()) { // Start by doing the signal distributions for(const Particle& p2 : pp.particles()) { if(p1 == p2) continue; double dEta = abs(p1.eta() - p2.eta()); double dPhi = phaseDif(p1.phi(), p2.phi()); if(dEta < 1.3) { for (int i = 0, N = pid.size(); i < N; ++i) { int pid1 = pid[i].first; int pid2 = pid[i].second; bool samesign = (pid1 * pid2 > 0); - if (samesign && ((pid1 == p1.pid() && pid2 == p2.pid()) || + if (samesign && ((pid1 == p1.pid() && pid2 == p2.pid()) || (pid1 == -p1.pid() && pid2 == -p2.pid()))) { - signal[i]->fill(dPhi, weight); + signal[i]->fill(dPhi); nsp[i] += 1.0; } if (!samesign && abs(pid1) == abs(pid2) && pid1 == p1.pid() && pid2 == p2.pid()) { - 
signal[i]->fill(dPhi, weight); + signal[i]->fill(dPhi); nsp[i] += 1.0; } - if (!samesign && abs(pid1) != abs(pid2) && + if (!samesign && abs(pid1) != abs(pid2) && ( (pid1 == p1.pid() && pid2 == p2.pid()) || (pid2 == p1.pid() && pid1 == p2.pid()) ) ) { - signal[i]->fill(dPhi, weight); + signal[i]->fill(dPhi); nsp[i] += 1.0; } } } } // Then do the background distribution for(const Particle& pMix : mixParticles){ double dEta = abs(p1.eta() - pMix.eta()); double dPhi = phaseDif(p1.phi(), pMix.phi()); if(dEta < 1.3) { for (int i = 0, N = pid.size(); i < N; ++i) { int pid1 = pid[i].first; int pid2 = pid[i].second; bool samesign = (pid1 * pid2 > 0); - if (samesign && ((pid1 == p1.pid() && pid2 == pMix.pid()) || + if (samesign && ((pid1 == p1.pid() && pid2 == pMix.pid()) || (pid1 == -p1.pid() && pid2 == -pMix.pid()))) { - background[i]->fill(dPhi, weight); + background[i]->fill(dPhi); nmp[i] += 1.0; } if (!samesign && abs(pid1) == abs(pid2) && pid1 == p1.pid() && pid2 == pMix.pid()) { - background[i]->fill(dPhi, weight); + background[i]->fill(dPhi); nmp[i] += 1.0; } - if (!samesign && abs(pid1) != abs(pid2) && + if (!samesign && abs(pid1) != abs(pid2) && ( (pid1 == p1.pid() && pid2 == pMix.pid()) || (pid2 == p1.pid() && pid1 == pMix.pid()) ) ) { - background[i]->fill(dPhi, weight); + background[i]->fill(dPhi); nmp[i] += 1.0; } } } } } } /// Normalise histograms etc., after the run void finalize() { for (int i = 0, N = pid.size(); i < N; ++i) { double sc = nmp[i] / nsp[i]; - signal[i]->scaleW(sc); - divide(signal[i],background[i],ratio[i]); + signal[i]->scaleW(sc); + divide(signal[i],background[i],ratio[i]); } } //@} /// @name Histograms //@{ vector > pid; vector signal; vector background; vector ratio; vector nsp; vector nmp; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2016_I1507157); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1230812.cc b/analyses/pluginATLAS/ATLAS_2013_I1230812.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1230812.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1230812.cc @@ -1,296 +1,294 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" namespace Rivet { /// Z + jets in pp at 7 TeV (combined channel / base class) /// @note This base class contains a "mode" variable for combined, e, and mu channel derived classes class ATLAS_2013_I1230812 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2013_I1230812); //@} /// Book histograms and initialise projections before the run void init() { // Get options from the new option system _mode = 0; if ( getOption("LMODE") == "EL" ) _mode = 1; if ( getOption("LMODE") == "MU" ) _mode = 2; // Determine the e/mu decay channels used (NB Prompt leptons only). /// @todo Note that Zs are accepted with any rapidity: the cuts are on the e/mu: is this correct? Cut pt20 = Cuts::pT >= 20*GeV; Cut eta_e = _mode? Cuts::abseta < 1.37 || Cuts::absetaIn(1.52, 2.47) : Cuts::abseta < 2.5; Cut eta_m = _mode? 
Cuts::abseta < 2.4 : Cuts::abseta < 2.5; ZFinder zfinder_el(FinalState(eta_e), pt20, PID::ELECTRON, 66*GeV, 116*GeV); ZFinder zfinder_mu(FinalState(eta_m), pt20, PID::MUON, 66*GeV, 116*GeV); declare(zfinder_el, "zfinder_el"); declare(zfinder_mu, "zfinder_mu"); // Define veto FS in order to prevent Z-decay products entering the jet algorithm VetoedFinalState had_fs; had_fs.addVetoOnThisFinalState(getProjection("zfinder_el")); had_fs.addVetoOnThisFinalState(getProjection("zfinder_mu")); - FastJets jets(had_fs, FastJets::ANTIKT, 0.4); - jets.useInvisibles(true); + FastJets jets(had_fs, FastJets::ANTIKT, 0.4, JetAlg::Muons::ALL, JetAlg::Invisibles::ALL); declare(jets, "jets"); book(_h_njet_incl , 1, 1, _mode+1); book(_h_njet_incl_ratio , 2, 1, _mode+1, true); book(_h_njet_excl , 3, 1, _mode+1); book(_h_njet_excl_ratio , 4, 1, _mode+1, true); book(_h_njet_excl_pt150 , 5, 1, _mode+1); book(_h_njet_excl_pt150_ratio , 6, 1, _mode+1, true); book(_h_njet_excl_vbf , 7, 1, _mode+1); book(_h_njet_excl_vbf_ratio , 8, 1, _mode+1, true); book(_h_ptlead , 9, 1, _mode+1); book(_h_ptseclead , 10, 1, _mode+1); book(_h_ptthirdlead , 11, 1, _mode+1); book(_h_ptfourthlead , 12, 1, _mode+1); book(_h_ptlead_excl , 13, 1, _mode+1); book(_h_pt_ratio , 14, 1, _mode+1); book(_h_pt_z , 15, 1, _mode+1); book(_h_pt_z_excl , 16, 1, _mode+1); book(_h_ylead , 17, 1, _mode+1); book(_h_yseclead , 18, 1, _mode+1); book(_h_ythirdlead , 19, 1, _mode+1); book(_h_yfourthlead , 20, 1, _mode+1); book(_h_deltay , 21, 1, _mode+1); book(_h_mass , 22, 1, _mode+1); book(_h_deltaphi , 23, 1, _mode+1); book(_h_deltaR , 24, 1, _mode+1); book(_h_ptthirdlead_vbf , 25, 1, _mode+1); book(_h_ythirdlead_vbf , 26, 1, _mode+1); book(_h_ht , 27, 1, _mode+1); book(_h_st , 28, 1, _mode+1); } /// Perform the per-event analysis void analyze(const Event& event) { FourMomentum z, lp, lm; const ZFinder& zfinder_el = apply(event, "zfinder_el"); const ZFinder& zfinder_mu = apply(event, "zfinder_mu"); bool e_ok = zfinder_el.constituents().size() == 2 && zfinder_mu.constituents().size() ==0; bool m_ok = zfinder_el.constituents().size() == 0 && zfinder_mu.constituents().size() ==2; if (_mode == 0 && !e_ok && !m_ok ) vetoEvent; if (_mode == 1 && !e_ok) vetoEvent; if (_mode == 2 && !m_ok) vetoEvent; if (zfinder_el.constituents().size() == 2) { z = zfinder_el.boson().momentum(); lp = zfinder_el.constituents()[0].momentum(); lm = zfinder_el.constituents()[1].momentum(); } else if (zfinder_mu.constituents().size() == 2) { z = zfinder_mu.boson().momentum(); lp = zfinder_mu.constituents()[0].momentum(); lm = zfinder_mu.constituents()[1].momentum(); } else vetoEvent; if (deltaR(lp, lm) < 0.2) vetoEvent; Jets jets = apply(event, "jets").jetsByPt(Cuts::pT > 30*GeV && Cuts::absrap < 4.4); ifilter_discard(jets, deltaRLess(lp, 0.5)); ifilter_discard(jets, deltaRLess(lm, 0.5)); // Fill jet multiplicities for (size_t ijet = 0; ijet <= jets.size(); ++ijet) { _h_njet_incl->fill(ijet); } _h_njet_excl->fill(jets.size()); // Require at least one jet if (jets.size() >= 1) { // Leading jet histos const double ptlead = jets[0].pT()/GeV; const double yabslead = fabs(jets[0].rapidity()); const double ptz = z.pT()/GeV; _h_ptlead->fill(ptlead); _h_ylead ->fill(yabslead); _h_pt_z ->fill(ptz); // Fill jet multiplicities if (ptlead > 150) _h_njet_excl_pt150->fill(jets.size()); // Loop over selected jets, fill inclusive distributions double st = 0; double ht = lp.pT()/GeV + lm.pT()/GeV; for (size_t ijet = 0; ijet < jets.size(); ++ijet) { ht += jets[ijet].pT()/GeV; st += jets[ijet].pT()/GeV; 
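// (Note: ht was seeded with the two lepton pTs before this loop, so HT
// sums leptons plus jets while ST sums jet pT only.)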
} _h_ht->fill(ht); _h_st->fill(st); // Require exactly one jet if (jets.size() == 1) { _h_ptlead_excl->fill(ptlead); _h_pt_z_excl ->fill(ptz); } } // Require at least two jets if (jets.size() >= 2) { // Second jet histos const double ptlead = jets[0].pT()/GeV; const double pt2ndlead = jets[1].pT()/GeV; const double ptratio = pt2ndlead/ptlead; const double yabs2ndlead = fabs(jets[1].rapidity()); _h_ptseclead->fill(pt2ndlead); _h_yseclead->fill( yabs2ndlead); _h_pt_ratio->fill( ptratio); // Dijet histos const double deltaphi = fabs(deltaPhi(jets[1], jets[0])); const double deltarap = fabs(jets[0].rapidity() - jets[1].rapidity()) ; const double deltar = fabs(deltaR(jets[0], jets[1], RAPIDITY)); const double mass = (jets[0].momentum() + jets[1].momentum()).mass()/GeV; _h_mass->fill( mass); _h_deltay->fill( deltarap); _h_deltaphi->fill(deltaphi); _h_deltaR->fill( deltar); if (mass > 350 && deltarap > 3) _h_njet_excl_vbf->fill(jets.size()); } // Require at least three jets if (jets.size() >= 3) { // Third jet histos const double pt3rdlead = jets[2].pT()/GeV; const double yabs3rdlead = fabs(jets[2].rapidity()); _h_ptthirdlead->fill(pt3rdlead); _h_ythirdlead->fill( yabs3rdlead); //Histos after VBF preselection const double deltarap = fabs(jets[0].rapidity() - jets[1].rapidity()) ; const double mass = (jets[0].momentum() + jets[1].momentum()).mass(); if (mass > 350 && deltarap > 3) { _h_ptthirdlead_vbf->fill(pt3rdlead); _h_ythirdlead_vbf->fill( yabs3rdlead); } } // Require at least four jets if (jets.size() >= 4) { // Fourth jet histos const double pt4thlead = jets[3].pT()/GeV; const double yabs4thlead = fabs(jets[3].rapidity()); _h_ptfourthlead->fill(pt4thlead); _h_yfourthlead->fill( yabs4thlead); } } /// @name Ratio calculator util functions //@{ /// Calculate the efficiency error, being careful about div-by-zero double err_incl(const HistoBin1D &M, const HistoBin1D &N, bool hasWeights) { double r = safediv(M.sumW(), N.sumW()); if (hasWeights) { // use F. James's approximation for weighted events return sqrt( safediv((1 - 2 * r) * M.sumW2() + r * r * N.sumW2(), N.sumW() * N.sumW()) ); } return sqrt( safediv(r * (1 - r), N.sumW()) ); } /// Calculate the ratio error, being careful about div-by-zero double err_excl(const HistoBin1D &A, const HistoBin1D &B) { double r = safediv(A.sumW(), B.sumW()); double dAsquared = safediv(A.sumW2(), A.sumW() * A.sumW()); // squared relative error of A double dBsquared = safediv(B.sumW2(), B.sumW() * B.sumW()); // squared relative error of B return r * sqrt(dAsquared + dBsquared); } //@} void finalize() { bool hasWeights = _h_njet_incl->effNumEntries() != _h_njet_incl->numEntries(); for (size_t i = 0; i < 6; ++i) { _h_njet_incl_ratio->point(i).setY(safediv(_h_njet_incl->bin(i + 1).sumW(), _h_njet_incl->bin(i).sumW()), err_incl(_h_njet_incl->bin(i + 1), _h_njet_incl->bin(i), hasWeights)); _h_njet_excl_ratio->point(i).setY(safediv(_h_njet_excl->bin(i + 1).sumW(), _h_njet_excl->bin(i).sumW()), err_excl(_h_njet_excl->bin(i + 1), _h_njet_excl->bin(i))); if (i >= 1) { _h_njet_excl_pt150_ratio->point(i - 1).setY(safediv(_h_njet_excl_pt150->bin(i).sumW(), _h_njet_excl_pt150->bin(i - 1).sumW()), err_excl(_h_njet_excl_pt150->bin(i), _h_njet_excl_pt150->bin(i - 1))); if (i >= 2) { _h_njet_excl_vbf_ratio->point(i - 2).setY(safediv(_h_njet_excl_vbf->bin(i).sumW(), _h_njet_excl_vbf->bin(i - 1).sumW()), err_excl(_h_njet_excl_vbf->bin(i), _h_njet_excl_vbf->bin(i - 1))); } } } double sf = _mode? 
1.0 : 0.5; const double xs = sf * crossSectionPerEvent()/picobarn; scale({_h_njet_incl, _h_njet_excl, _h_njet_excl_pt150, _h_njet_excl_vbf}, xs); scale({_h_ptlead, _h_ptseclead, _h_ptthirdlead, _h_ptfourthlead, _h_ptlead_excl}, xs); scale({_h_pt_ratio, _h_pt_z, _h_pt_z_excl}, xs); scale({_h_ylead, _h_yseclead, _h_ythirdlead, _h_yfourthlead}, xs); scale({_h_deltay, _h_mass, _h_deltaphi, _h_deltaR}, xs); scale({_h_ptthirdlead_vbf, _h_ythirdlead_vbf}, xs); scale({_h_ht, _h_st}, xs); } //@} protected: size_t _mode; private: Scatter2DPtr _h_njet_incl_ratio; Scatter2DPtr _h_njet_excl_ratio; Scatter2DPtr _h_njet_excl_pt150_ratio; Scatter2DPtr _h_njet_excl_vbf_ratio; Histo1DPtr _h_njet_incl; Histo1DPtr _h_njet_excl; Histo1DPtr _h_njet_excl_pt150; Histo1DPtr _h_njet_excl_vbf; Histo1DPtr _h_ptlead; Histo1DPtr _h_ptseclead; Histo1DPtr _h_ptthirdlead; Histo1DPtr _h_ptfourthlead; Histo1DPtr _h_ptlead_excl; Histo1DPtr _h_pt_ratio; Histo1DPtr _h_pt_z; Histo1DPtr _h_pt_z_excl; Histo1DPtr _h_ylead; Histo1DPtr _h_yseclead; Histo1DPtr _h_ythirdlead; Histo1DPtr _h_yfourthlead; Histo1DPtr _h_deltay; Histo1DPtr _h_mass; Histo1DPtr _h_deltaphi; Histo1DPtr _h_deltaR; Histo1DPtr _h_ptthirdlead_vbf; Histo1DPtr _h_ythirdlead_vbf; Histo1DPtr _h_ht; Histo1DPtr _h_st; }; DECLARE_RIVET_PLUGIN(ATLAS_2013_I1230812); } - diff --git a/analyses/pluginATLAS/ATLAS_2017_I1517194.cc b/analyses/pluginATLAS/ATLAS_2017_I1517194.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1517194.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1517194.cc @@ -1,230 +1,230 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { class ATLAS_2017_I1517194 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor ///@brief: Electroweak Wjj production at 8 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1517194); //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Get options from the new option system _mode = 0; if ( getOption("LMODE") == "EL" ) _mode = 0; if ( getOption("LMODE") == "MU" ) _mode = 1; const FinalState fs; // W Selection WFinder wfinder(fs, Cuts::rap < 2.5 && Cuts::pT >= 25*GeV, _mode? 
PID::MUON : PID::ELECTRON, 0*GeV, 13*TeV, 0*GeV, 0.1); declare(wfinder, "WFinder"); FastJets jets( wfinder.remainingFinalState(), FastJets::ANTIKT, 0.4); jets.useMuons(JetAlg::DECAY_MUONS); jets.useInvisibles(JetAlg::ALL_INVISIBLES); declare(jets, "Jets_w"); MissingMomentum missmom(FinalState(Cuts::eta < 5.0)); - addProjection(missmom, "mm"); + declare(missmom, "mm"); const vector phase_spaces = { "highmass15", "antiLC", "signal10", "highmass10", "inclusive", "highmass20", "antiLCantiJC", "antiJC", "signal", }; const vector variables = { "dijetmass", "dijetpt", "dphi12", "dy12", "j1pt", "JC", "LC", "ngapjets" }; size_t hepdataid = 10; for (size_t group = 0; group < 4; ++group) { for ( size_t ps=0; ps < phase_spaces.size(); ++ps ) { for ( size_t var=0; var < variables.size(); ++var ) { if (group < 2) { if ((ps == 0 || ps == 2 || ps == 3 || ps == 5) && var == 0) continue; if ((ps == 1 || ps == 2 || ps > 5) && var > 4) continue; if (group == 1 && ps == 7 && var == 3) continue; } else { if (ps == 1 || ps == 4 || ps > 5) continue; if ((ps == 0 || ps == 5) && var < 2) continue; if (ps == 2 && var > 4) continue; if ((ps == 0 || ps == 3 || ps == 5) && var == 5) continue; if (group == 2) { if ((ps == 0 || ps == 5) && var == 3) continue; if (ps == 3 && var == 1) continue; } else { if ((ps == 0 || ps == 3 || ps == 5) && var == 6) continue; if ((ps == 2 || ps == 3) && var < 2) continue; } } ++hepdataid; string label = variables[var]+"_"+phase_spaces[ps]; //string pre = group > 1? "ew_" : ""; //std::cout << "rivet -- " << pre << label << suff << " " << hepdataid << std::endl; string suff = group % 2? "" : "_norm"; if (group > 1) _hists["ew_" + label + suff] = bookHisto1D(hepdataid, 1, 1); else _hists[label + suff] = bookHisto1D(hepdataid, 1, 1); } } } } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); FourMomentum boson, lepton, neutrino; const WFinder& wfinder = apply(event, "WFinder"); const FastJets& jetpro = apply(event, "Jets_w"); const MissingMomentum& missmom = apply(event, "mm"); if ( wfinder.bosons().size() != 1 ) { vetoEvent; } boson = wfinder.bosons().front().momentum(); lepton = wfinder.constituentLeptons().front().momentum(); neutrino = wfinder.constituentNeutrinos().front().momentum(); vector jets; for (const Jet& jet : jetpro.jetsByPt(30*GeV)) { if ( fabs(jet.momentum().rapidity()) > 4.4 ) continue; if ( fabs(deltaR(jet, lepton)) < 0.3 ) continue; jets.push_back(jet.momentum()); } if (jets.size() < 2) vetoEvent; double mT = sqrt( 2.0*lepton.pT()*neutrino.Et()*( 1.0-cos( lepton.phi()-neutrino.phi() ) ) ); double DeltaRap = fabs( jets[0].rapidity()-jets[1].rapidity() ); double dijet_mass = FourMomentum(jets[0]+jets[1]).mass(); size_t nojets = jets.size(); if (missmom.vectorEt().mod() < 25*GeV) vetoEvent; if (jets[0].pT() < 80*GeV) vetoEvent; if (jets[1].pT() < 60*GeV) vetoEvent; if (dijet_mass < 500*GeV) vetoEvent; if (DeltaRap < 2.0) vetoEvent; if (mT < 40*GeV) vetoEvent; // By now, the event has passed all VBF cuts double DijetPt = FourMomentum(jets[0]+jets[1]).pT(); double dphi = fabs(jets[0].phi() - jets[1].phi()); double DeltaPhi = ( dphi<=pi ) ? dphi/pi : (2.*pi-dphi)/pi; double jet_1_rap = jets[0].rapidity(); double jet_2_rap = jets[1].rapidity(); // Njets in Gap Control Regions info int njetsingap = 0; bool firstIsForward = jet_1_rap > jet_2_rap; int jF = (firstIsForward) ? 0 : 1; // sets most positive jet to be forward index int jB = (firstIsForward) ? 
1 : 0; // sets most negative jet to be backward index for (size_t j = 2; j < nojets; ++j) { if ( (jets[j].rapidity() < jets[jF].rapidity()) && (jets[j].rapidity() > jets[jB].rapidity()) ) njetsingap++; } // Third+ Jet Centrality Cut (Vetoes any event that has a jet between raps of the two leading jets) bool passJC = false; std::vector JCvals; JCvals.clear(); if ( nojets < 3) passJC = true; else if ( nojets > 2 ) { passJC = true; for (size_t j = 2; j < nojets; ++j) { double jet_3_rap = jets[j].rapidity(); double jet3gap = fabs(( jet_3_rap - ((jet_1_rap + jet_2_rap)/2.0))/(jet_1_rap - jet_2_rap)); JCvals.push_back(jet3gap); if ( jet3gap < 0.4 ) { passJC = false; } } } double lepton_rap = lepton.rapidity(); double lep_cent = fabs((lepton_rap - ((jet_1_rap + jet_2_rap)/2.0) )/(jet_1_rap - jet_2_rap)); bool passLC = (lep_cent < 0.4); map phaseSpaces; phaseSpaces["inclusive"] = true; phaseSpaces["highmass10"] = (dijet_mass>1000.0*GeV); phaseSpaces["highmass15"] = (dijet_mass>1500.0*GeV); phaseSpaces["highmass20"] = (dijet_mass>2000.0*GeV); phaseSpaces["antiLC"] = ( !passLC && passJC ); phaseSpaces["antiJC"] = ( passLC && !passJC ); phaseSpaces["antiLCantiJC"] = ( !passLC && !passJC ); phaseSpaces["signal"] = ( passLC && passJC ); phaseSpaces["signal10"] = ( (dijet_mass>1000.0*GeV) && passLC && passJC ); for (const auto& ps : phaseSpaces) { if (!ps.second) continue; fillHisto("dijetmass_"+ps.first, dijet_mass, weight); fillHisto("dijetpt_"+ps.first, DijetPt, weight); fillHisto("dy12_"+ps.first, fabs(jet_1_rap-jet_2_rap), weight); fillHisto("dphi12_"+ps.first, DeltaPhi, weight); fillHisto("j1pt_"+ps.first, jets[0].pT(), weight); if (ps.first == "inclusive" || ps.first.find("highmass") != string::npos) { fillHisto("LC_"+ps.first, lep_cent, weight); fillHisto("ngapjets_"+ps.first, njetsingap, weight); for (auto& jc : JCvals) { fillHisto("JC_"+ps.first, jc, weight); } } } } /// Normalise histograms etc., after the run void finalize() { double factor = crossSection()/sumOfWeights()/femtobarn; // refData is in fb for (const auto& key_hist : _hists) { scale(key_hist.second, factor); if (key_hist.first.find("_norm") != string::npos) normalize(key_hist.second); } } //@} void fillHisto(const string& label, const double value, const double weight) { if (_hists.find(label) != _hists.end()) { // QCD+EW, absolute _hists[label]->fill(value, weight); } if (_hists.find(label + "_norm") != _hists.end()) { // QCD+EW, normalised _hists[label + "_norm"]->fill(value, weight); } if (_hists.find("ew_" + label) != _hists.end()) { // EW-only, absolute _hists["ew_" + label]->fill(value, weight); } if (_hists.find("ew_" + label + "_norm") != _hists.end()) { // EW-only, normalised _hists["ew_" + label + "_norm"]->fill(value, weight); } } protected: size_t _mode; private: map _hists; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1517194); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1614149.cc b/analyses/pluginATLAS/ATLAS_2017_I1614149.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1614149.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1614149.cc @@ -1,359 +1,359 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Tools/fjcontrib/Njettiness.hh" #include "Rivet/Tools/fjcontrib/Nsubjettiness.hh" #include
"Rivet/Tools/fjcontrib/NjettinessPlugin.hh" namespace Rivet { class ATLAS_2017_I1614149 : public Analysis { public: /// Constructor ///@brief: Resolved and boosted ttbar l+jets cross sections at 13 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1614149); void init() { // Eta ranges Cut eta_full = (Cuts::abseta < 5.0); Cut lep_cuts = (Cuts::abseta < 2.5) && (Cuts::pT > 25*GeV); // All final state particles FinalState fs(eta_full); IdentifiedFinalState all_photons(fs); all_photons.acceptIdPair(PID::PHOTON); // Get photons to dress leptons IdentifiedFinalState ph_id(fs); ph_id.acceptIdPair(PID::PHOTON); // Projection to find the electrons IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState photons(ph_id); photons.acceptTauDecays(true); declare(photons, "photons"); PromptFinalState electrons(el_id); electrons.acceptTauDecays(true); DressedLeptons dressedelectrons(photons, electrons, 0.1, lep_cuts); declare(dressedelectrons, "elecs"); DressedLeptons ewdressedelectrons(all_photons, electrons, 0.1, eta_full); // Projection to find the muons IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); muons.acceptTauDecays(true); DressedLeptons dressedmuons(photons, muons, 0.1, lep_cuts); declare(dressedmuons, "muons"); DressedLeptons ewdressedmuons(all_photons, muons, 0.1, eta_full); // Projection to find MET declare(MissingMomentum(fs), "MET"); // remove prompt neutrinos from jet clustering IdentifiedFinalState nu_id(fs); nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(true); // Jet clustering. VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(ewdressedelectrons); vfs.addVetoOnThisFinalState(ewdressedmuons); vfs.addVetoOnThisFinalState(neutrinos); FastJets jets(vfs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); - addProjection(jets, "jets"); + declare(jets, "jets"); // Addition of the large-R jets VetoedFinalState vfs1(fs); vfs1.addVetoOnThisFinalState(neutrinos); FastJets fjets(vfs1, FastJets::ANTIKT, 1.); fjets.useInvisibles(JetAlg::Invisibles::NONE); fjets.useMuons(JetAlg::Muons::NONE); - addProjection(fjets, "fjets"); + declare(fjets, "fjets"); bookHists("top_pt_res", 15); bookHists("top_absrap_res", 17); bookHists("ttbar_pt_res", 19); bookHists("ttbar_absrap_res", 21); bookHists("ttbar_m_res", 23); bookHists("top_pt_boost", 25); bookHists("top_absrap_boost", 27); } void analyze(const Event& event) { // Get the selected objects, using the projections. 
vector electrons = apply(event, "elecs").dressedLeptons(); vector muons = apply(event, "muons").dressedLeptons(); const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.5); const PseudoJets& all_fjets = apply(event, "fjets").pseudoJetsByPt(); // get MET const Vector3 met = apply(event, "MET").vectorMPT(); Jets bjets, lightjets; for (const Jet& jet : jets) { bool b_tagged = jet.bTags(Cuts::pT > 5*GeV).size(); if ( b_tagged && bjets.size() < 2) bjets += jet; else lightjets += jet; } // Implementing large-R jets definition // trim the jets PseudoJets trimmed_fatJets; float Rfilt = 0.2; float pt_fraction_min = 0.05; fastjet::Filter trimmer(fastjet::JetDefinition(fastjet::kt_algorithm, Rfilt), fastjet::SelectorPtFractionMin(pt_fraction_min)); for (PseudoJet pjet : all_fjets) trimmed_fatJets += trimmer(pjet); trimmed_fatJets = fastjet::sorted_by_pt(trimmed_fatJets); PseudoJets trimmed_jets; for (unsigned int i = 0; i < trimmed_fatJets.size(); ++i) { FourMomentum tj_mom = momentum(trimmed_fatJets[i]); if (tj_mom.pt() <= 300*GeV) continue; if (tj_mom.abseta() >= 2.0) continue; trimmed_jets.push_back(trimmed_fatJets[i]); } bool single_electron = (electrons.size() == 1) && (muons.empty()); bool single_muon = (muons.size() == 1) && (electrons.empty()); DressedLepton *lepton = NULL; if (single_electron) lepton = &electrons[0]; else if (single_muon) lepton = &muons[0]; if (!single_electron && !single_muon) vetoEvent; bool pass_resolved = true; bool num_b_tagged_jets = (bjets.size() == 2); if (!num_b_tagged_jets) pass_resolved = false; if (jets.size() < 4) pass_resolved = false; bool pass_boosted = true; int fatJetIndex = -1; bool passTopTag = false; bool passDphi = false; bool passAddJet = false; bool goodLepJet = false; bool lepbtag = false; bool hadbtag = false; vector lepJetIndex; vector jet_farFromHadTopJetCandidate; if (met.mod() < 20*GeV) pass_boosted = false; if (pass_boosted) { double transmass = _mT(lepton->momentum(), met); if (transmass + met.mod() < 60*GeV) pass_boosted = false; } if (pass_boosted) { if (trimmed_jets.size() >= 1) { for (unsigned int j = 0; j < trimmed_jets.size(); ++j) { if (momentum(trimmed_jets.at(j)).mass() > 100*GeV && momentum(trimmed_jets.at(j)).pt() > 300*GeV && momentum(trimmed_jets.at(j)).pt() < 1500*GeV && fabs(momentum(trimmed_jets.at(j)).eta()) < 2.)
{ passTopTag = true; fatJetIndex = j; break; } } } } if(!passTopTag && fatJetIndex == -1) pass_boosted = false; if (pass_boosted) { double dPhi_fatjet = deltaPhi(lepton->phi(), momentum(trimmed_jets.at(fatJetIndex)).phi()); double dPhi_fatjet_lep_cut = 1.0; //2.3 if (dPhi_fatjet > dPhi_fatjet_lep_cut ) { passDphi = true; } } if (!passDphi) pass_boosted = false; if (bjets.empty()) pass_boosted = false; if (pass_boosted) { for (unsigned int sj = 0; sj < jets.size(); ++sj) { double dR = deltaR(jets.at(sj).momentum(), momentum(trimmed_jets.at(fatJetIndex))); if(dR > 1.5) { passAddJet = true; jet_farFromHadTopJetCandidate.push_back(sj); } } } if (!passAddJet) pass_boosted = false; if (pass_boosted) { for (int ltj : jet_farFromHadTopJetCandidate) { double dR_jet_lep = deltaR(jets.at(ltj).momentum(), lepton->momentum()); double dR_jet_lep_cut = 2.0; //1.5 if (dR_jet_lep < dR_jet_lep_cut) { lepJetIndex.push_back(ltj); goodLepJet = true; } } } if(!goodLepJet) pass_boosted = false; if (pass_boosted) { for (int lepj : lepJetIndex) { lepbtag = jets.at(lepj).bTags(Cuts::pT > 5*GeV).size(); if (lepbtag) break; } } double dR_fatBjet_cut = 1.0; if (pass_boosted) { for (const Jet& bjet : bjets) { hadbtag |= deltaR(momentum(trimmed_jets.at(fatJetIndex)), bjet) < dR_fatBjet_cut; } } if (!(lepbtag || hadbtag)) pass_boosted = false; FourMomentum pbjet1; //Momentum of bjet1 FourMomentum pbjet2; //Momentum of bjet2 int Wj1index = -1, Wj2index = -1; if (pass_resolved) { if ( deltaR(bjets[0], *lepton) <= deltaR(bjets[1], *lepton) ) { pbjet1 = bjets[0].momentum(); pbjet2 = bjets[1].momentum(); } else { pbjet1 = bjets[1].momentum(); pbjet2 = bjets[0].momentum(); } double bestWmass = 1000.0*TeV; double mWPDG = 80.399*GeV; for (unsigned int i = 0; i < (lightjets.size() - 1); ++i) { for (unsigned int j = i + 1; j < lightjets.size(); ++j) { double wmass = (lightjets[i].momentum() + lightjets[j].momentum()).mass(); if (fabs(wmass - mWPDG) < fabs(bestWmass - mWPDG)) { bestWmass = wmass; Wj1index = i; Wj2index = j; } } } FourMomentum pjet1 = lightjets[Wj1index].momentum(); FourMomentum pjet2 = lightjets[Wj2index].momentum(); // compute hadronic W boson FourMomentum pWhadron = pjet1 + pjet2; double pz = computeneutrinoz(lepton->momentum(), met); FourMomentum ppseudoneutrino( sqrt(sqr(met.x()) + sqr(met.y()) + sqr(pz)), met.x(), met.y(), pz); //compute leptonic, hadronic, combined pseudo-top FourMomentum ppseudotoplepton = lepton->momentum() + ppseudoneutrino + pbjet1; FourMomentum ppseudotophadron = pbjet2 + pWhadron; FourMomentum pttbar = ppseudotoplepton + ppseudotophadron; fillHists("top_pt_res", ppseudotophadron.pt()/GeV); fillHists("top_absrap_res", ppseudotophadron.absrap()); fillHists("ttbar_pt_res", pttbar.pt()/GeV); fillHists("ttbar_absrap_res", pttbar.absrap()); fillHists("ttbar_m_res", pttbar.mass()/GeV); } if (pass_boosted) { // Boosted selection double hadtop_pt = momentum(trimmed_jets.at(fatJetIndex)).pt() / GeV; double hadtop_absrap = momentum(trimmed_jets.at(fatJetIndex)).absrap(); fillHists("top_pt_boost", hadtop_pt); fillHists("top_absrap_boost", hadtop_absrap); } } void finalize() { // Normalize to cross-section const double sf = (crossSection() / sumOfWeights()); for (HistoMap::value_type& hist : _h) { scale(hist.second, sf); if (hist.first.find("_norm") != string::npos) normalize(hist.second); } } void bookHists(std::string name, unsigned int index) { book(_h[name], index, 1, 1); book(_h[name + "_norm"], index + 1, 1, 1); } void fillHists(std::string name, double value) { _h[name]->fill(value);
_h[name + "_norm"]->fill(value); } double _mT(const FourMomentum &l, const Vector3 &met) const { return sqrt(2.0 * l.pT() * met.mod() * (1 - cos(deltaPhi(l, met))) ); } double tau32(const fastjet::PseudoJet &jet, double jet_rad) const { double alpha = 1.0; fjcontrib::Nsubjettiness::NormalizedCutoffMeasure normalized_measure(alpha, jet_rad, 1000000); // WTA definition // Nsubjettiness::OnePass_WTA_KT_Axes wta_kt_axes; // as in JetSubStructure recommendations fjcontrib::Nsubjettiness::KT_Axes kt_axes; /// NsubjettinessRatio uses the results from Nsubjettiness to calculate the ratio /// tau_N/tau_M, where N and M are specified by the user. The ratio of different tau values /// is often used in analyses, so this class is helpful to streamline code. fjcontrib::Nsubjettiness::NsubjettinessRatio tau32_kt(3, 2, kt_axes, normalized_measure); double tau32 = tau32_kt.result(jet); return tau32; } double computeneutrinoz(const FourMomentum& lepton, const Vector3 &met) const { // computing z component of neutrino momentum given lepton and met double pzneutrino; double m_W = 80.399; // in GeV, given in the paper double k = (( sqr( m_W ) - sqr( lepton.mass() ) ) / 2 ) + (lepton.px() * met.x() + lepton.py() * met.y()); double a = sqr ( lepton.E() ) - sqr ( lepton.pz() ); double b = -2*k*lepton.pz(); double c = sqr( lepton.E() ) * sqr( met.mod() ) - sqr( k ); double discriminant = sqr(b) - 4 * a * c; double quad[2] = { (- b - sqrt(discriminant)) / (2 * a), (- b + sqrt(discriminant)) / (2 * a) }; // two possible quadratic solutions if (discriminant < 0) pzneutrino = - b / (2 * a); // if the discriminant is negative else { // if the discriminant is greater than or equal to zero, take the solution with smallest absolute value double absquad[2]; for (int n=0; n<2; ++n) absquad[n] = fabs(quad[n]); if (absquad[0] < absquad[1]) pzneutrino = quad[0]; else pzneutrino = quad[1]; } return pzneutrino; } private: /// @name Objects that are used by the event selection decisions typedef map HistoMap; HistoMap _h; }; DECLARE_RIVET_PLUGIN(ATLAS_2017_I1614149); } diff --git a/analyses/pluginCMS/CMS_2017_I1471287.cc b/analyses/pluginCMS/CMS_2017_I1471287.cc --- a/analyses/pluginCMS/CMS_2017_I1471287.cc +++ b/analyses/pluginCMS/CMS_2017_I1471287.cc @@ -1,261 +1,261 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/PrimaryParticles.hh" #include "Rivet/Tools/Correlators.hh" namespace Rivet { /// @brief Two- and multi-particle azimuthal correlations of charged particles, K0S and Lambda vs. multiplicity class CMS_2017_I1471287 : public CumulantAnalysis { public: /// Constructor CMS_2017_I1471287() : CumulantAnalysis("CMS_2017_I1471287") { - + } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // A projection for charged tracks to manage centrality, corresponding // to CMS offline tracks. ChargedFinalState cfsMult(Cuts::abseta < 2.4 && Cuts::pT > 0.4*GeV); - addProjection(cfsMult, "CFSMult"); - + declare(cfsMult, "CFSMult"); + // The positive eta side used for rapidity gap, integrated. - const ChargedFinalState& cfsp = ChargedFinalState(Cuts::eta > 1.0 && + const ChargedFinalState& cfsp = ChargedFinalState(Cuts::eta > 1.0 && Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 3.0*GeV); declare(cfsp, "CFSP"); // ..negative ditto.
- const ChargedFinalState& cfsn = ChargedFinalState(Cuts::eta < -1.0 && + const ChargedFinalState& cfsn = ChargedFinalState(Cuts::eta < -1.0 && Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 3.0*GeV); declare(cfsn, "CFSN"); // The positive eta side used for rapidity gap, differential, charged particles. - const ChargedFinalState& cfsppT = ChargedFinalState(Cuts::eta > 1.0 && + const ChargedFinalState& cfsppT = ChargedFinalState(Cuts::eta > 1.0 && Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); declare(cfsppT, "CFSPPT"); // ..negative ditto. - const ChargedFinalState& cfsnpT = ChargedFinalState(Cuts::eta < -1.0 && + const ChargedFinalState& cfsnpT = ChargedFinalState(Cuts::eta < -1.0 && Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); declare(cfsnpT, "CFSNPT"); // The positive eta side used for rapidity gap, differential, Kaons. - const PrimaryParticles& kfsppT = PrimaryParticles({310},Cuts::eta > 1.0 && + const PrimaryParticles& kfsppT = PrimaryParticles({310},Cuts::eta > 1.0 && Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); declare(kfsppT, "KFSP"); // ..negative ditto. - const PrimaryParticles& kfsnpT = PrimaryParticles({310},Cuts::eta < -1.0 && + const PrimaryParticles& kfsnpT = PrimaryParticles({310},Cuts::eta < -1.0 && Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); declare(kfsnpT, "KFSN"); // The positive eta side used for rapidity gap, differential, Lambda. - const PrimaryParticles& lfsppT = PrimaryParticles({3122},Cuts::eta > 1.0 && + const PrimaryParticles& lfsppT = PrimaryParticles({3122},Cuts::eta > 1.0 && Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); declare(lfsppT, "LFSP"); // ..negative ditto. - const PrimaryParticles& lfsnpT = PrimaryParticles({3122},Cuts::eta < -1.0 && + const PrimaryParticles& lfsnpT = PrimaryParticles({3122},Cuts::eta < -1.0 && Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); declare(lfsnpT, "LFSN"); - + // v22 |delta eta| > 2 (fig 4a) h_v22 = bookScatter2D(1, 1, 1, true); // v32 |delta eta| > 2 (fig 4b) h_v32 = bookScatter2D(3, 1, 1, true); // v22(pT) high mult., high pT (fig 6a) h_v22pT = bookScatter2D(11, 1, 1, true); // v22(pT) charged low mult. (fig. 7a) h_v22pTh = bookScatter2D(17, 1, 1, true); // v22(pT) K0S low mult. (fig. 7a) h_v22pTK = bookScatter2D(18, 1, 1, true); // v22(pT) Lambda low mult. (fig. 7a) h_v22pTL = bookScatter2D(19, 1, 1, true); // v22(pT) K0S high mult. (fig. 7b) h_v22pTKc = bookScatter2D(21, 1, 1, true); // v22(pT) Lambda high mult. (fig. 7b) h_v22pTLc = bookScatter2D(22, 1, 1, true); // c24 (fig. 9a) h_c24 = bookScatter2D(28, 1, 1, true); // c26 (fig. 9b) h_c26 = bookScatter2D(31, 1, 1, true); // Corresponding event averaged correlators. ec22 = bookECorrelatorGap<2,2>("ec22",h_v22); ec32 = bookECorrelatorGap<3,2>("ec32",h_v32); // ... pT binned ec22pT = bookECorrelatorGap<2,2>("ec22pT",h_v22pT); ec22pTh = bookECorrelatorGap<2,2>("ec22pTh",h_v22pTh); ec22pTK = bookECorrelatorGap<2,2>("ec22pTK",h_v22pTK); ec22pTL = bookECorrelatorGap<2,2>("ec22pTL",h_v22pTL); ec22pTKc = bookECorrelatorGap<2,2>("ec22pTKc",h_v22pTKc); ec22pTLc = bookECorrelatorGap<2,2>("ec22pTLc",h_v22pTLc); // Maximal N and P for the gapped. - pair max = getMaxValues(); - + pair max = getMaxValues(); + // For the four particle cumulant. ec22_4 = bookECorrelator<2,2>("ec22_4",h_c24); ec24_4 = bookECorrelator<2,4>("ec24_4",h_c24); // For the six particle cumulant. 
ec22_6 = bookECorrelator<2,2>("ec22_6",h_c26); ec24_6 = bookECorrelator<2,4>("ec24_6",h_c26); ec26_6 = bookECorrelator<2,6>("ec26_6",h_c26); // Maximal N and P for the higher orders. pair maxH = getMaxValues(); // Declare correlator projections. // For integrated. declare(Correlators(cfsMult, maxH.first, maxH.second),"CH"); // ... gapped declare(Correlators(cfsp, max.first, max.second),"CPos"); declare(Correlators(cfsn, max.first, max.second),"CNeg"); // For pT differential, charged particles, low multiplicity. declare(Correlators(cfsppT, max.first, max.second, h_v22pTh),"CPosLowPT"); declare(Correlators(cfsnpT, max.first, max.second, h_v22pTh),"CNegLowPT"); // For pT differential, charged particles, high multiplicity. declare(Correlators(cfsppT, max.first, max.second, h_v22pT),"CPosHighPT"); declare(Correlators(cfsnpT, max.first, max.second, h_v22pT),"CNegHighPT"); - + // For pT differential, kaons. low multiplicity. declare(Correlators(kfsppT, max.first, max.second, h_v22pTK),"CPosLowPTK"); declare(Correlators(kfsnpT, max.first, max.second, h_v22pTK),"CNegLowPTK"); - + // For pT differential, kaons. high multiplicity. declare(Correlators(kfsppT, max.first, max.second, h_v22pTKc),"CPosHighPTK"); declare(Correlators(kfsnpT, max.first, max.second, h_v22pTKc),"CNegHighPTK"); - + // For pT differential, lambda. low multiplicity. declare(Correlators(lfsppT, max.first, max.second, h_v22pTL),"CPosLowPTL"); declare(Correlators(lfsnpT, max.first, max.second, h_v22pTL),"CNegLowPTL"); - + // For pT differential, lambda. high multiplicity. declare(Correlators(lfsppT, max.first, max.second, h_v22pTLc),"CPosHighPTL"); declare(Correlators(lfsnpT, max.first, max.second, h_v22pTLc),"CNegHighPTL"); - + } /// Perform the per-event analysis void analyze(const Event& event) { const double w = event.weight(); const double nTrk = apply(event, "CFSMult").particles().size(); if (nTrk < 10) vetoEvent; // The correlators. const Correlators& ch = apply(event, "CH"); const Correlators& cp = apply(event, "CPos"); const Correlators& cn = apply(event, "CNeg"); const Correlators& cpLow = apply(event, "CPosLowPT"); const Correlators& cnLow = apply(event, "CNegLowPT"); const Correlators& cpHigh = apply(event, "CPosHighPT"); const Correlators& cnHigh = apply(event, "CNegHighPT"); const Correlators& cpLowK = apply(event, "CPosLowPTK"); const Correlators& cnLowK = apply(event, "CNegLowPTK"); const Correlators& cpHighK = apply(event, "CPosHighPTK"); const Correlators& cnHighK = apply(event, "CNegHighPTK"); - + const Correlators& cpLowL = apply(event, "CPosLowPTL"); const Correlators& cnLowL = apply(event, "CNegLowPTL"); const Correlators& cpHighL = apply(event, "CPosHighPTL"); const Correlators& cnHighL = apply(event, "CNegHighPTL"); ec22->fill(nTrk, cp, cn, w); ec32->fill(nTrk, cp, cn, w); ec22_4->fill(nTrk, ch, w); ec24_4->fill(nTrk, ch, w); ec22_6->fill(nTrk, ch, w); ec24_6->fill(nTrk, ch, w); ec26_6->fill(nTrk, ch, w); if (nTrk < 20) { ec22pTh->fill(cpLow, cnLow, w); ec22pTK->fill(cpLowK, cnLowK, w); ec22pTL->fill(cpLowL, cnLowL, w); } else if(nTrk >= 105 && nTrk < 150) ec22pT->fill(cpHigh, cnHigh, w); ec22pTKc->fill(cpHighK, cnHighK, w); ec22pTLc->fill(cpHighL, cnHighL, w); } /// Normalise histograms etc., after the run void finalize() { - // Correlators must be streamed + // Correlators must be streamed // in order to run reentrant finalize. 
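      // For orientation, the standard cumulant relations evaluated by the
      // helpers below (with <<k>> the event-averaged k-particle correlator):
      //   c_n{2} = <<2>>,  v_n{2} = sqrt(c_n{2}),
      //   c_n{4} = <<4>> - 2<<2>>^2,
      //   c_n{6} = <<6>> - 9<<2>><<4>> + 12<<2>>^3.
      // This is why the four-particle cumulant needs the (2,2) and (2,4)
      // correlators, the six-particle one additionally the (2,6), and why
      // the two-particle results are built from the eta-gapped cp/cn pairs
      // to suppress nonflow correlations.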
stream(); cnTwoInt(h_v22, ec22); cnTwoInt(h_v32, ec32); vnTwoDiff(h_v22pT, ec22pT); vnTwoDiff(h_v22pTh, ec22pTh); cnFourInt(h_c24, ec22_4, ec24_4); cnSixInt(h_c26, ec22_6, ec24_6, ec26_6); // Set correct reference flow for pid flow. ec22pTK->setReference(ec22pTh->getReference()); vnTwoDiff(h_v22pTK, ec22pTK); ec22pTL->setReference(ec22pTh->getReference()); vnTwoDiff(h_v22pTL, ec22pTL); ec22pTKc->setReference(ec22pT->getReference()); vnTwoDiff(h_v22pTKc, ec22pTKc); ec22pTLc->setReference(ec22pT->getReference()); vnTwoDiff(h_v22pTLc, ec22pTLc); } //@} Scatter2DPtr h_v22; Scatter2DPtr h_v32; Scatter2DPtr h_v22pT; Scatter2DPtr h_v22pTh; Scatter2DPtr h_v22pTK; Scatter2DPtr h_v22pTL; Scatter2DPtr h_v22pTKc; Scatter2DPtr h_v22pTLc; Scatter2DPtr h_c24; Scatter2DPtr h_c26; ECorrPtr ec22; ECorrPtr ec32; ECorrPtr ec22_4; ECorrPtr ec24_4; ECorrPtr ec22_6; ECorrPtr ec24_6; ECorrPtr ec26_6; ECorrPtr ec22pT; ECorrPtr ec22pTh; ECorrPtr ec22pTK; ECorrPtr ec22pTL; ECorrPtr ec22pTKc; ECorrPtr ec22pTLc; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1471287); } diff --git a/analyses/pluginCMS/CMS_2018_I1653948.cc b/analyses/pluginCMS/CMS_2018_I1653948.cc --- a/analyses/pluginCMS/CMS_2018_I1653948.cc +++ b/analyses/pluginCMS/CMS_2018_I1653948.cc @@ -1,88 +1,88 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { class CMS_2018_I1653948 : public Analysis { public: CMS_2018_I1653948() : Analysis("CMS_2018_I1653948"), _xi_hf_cut(1E-6), _xi_castor_cut(1E-7) { } /// Book projections and histograms void init() { - addProjection(FinalState(),"FS"); + declare(FinalState(),"FS"); _h_xsec = bookHisto1D(1, 1, 1); } /// Analyze each event void analyze(const Event& event) { const double weight = event.weight(); const FinalState& fs = applyProjection(event, "FS"); if (fs.size() < 3) vetoEvent; // veto on elastic events const ParticleVector particlesByRapidity = fs.particles(cmpMomByRap); const size_t num_particles = particlesByRapidity.size(); vector gaps; vector midpoints; for (size_t ip = 1; ip < num_particles; ++ip) { const Particle& p1 = particlesByRapidity[ip-1]; const Particle& p2 = particlesByRapidity[ip]; const double gap = p2.momentum().rapidity() - p1.momentum().rapidity(); const double mid = (p2.momentum().rapidity() + p1.momentum().rapidity()) / 2.; gaps.push_back(gap); midpoints.push_back(mid); } int imid = std::distance(gaps.begin(), max_element(gaps.begin(), gaps.end())); double gapcenter = midpoints[imid]; FourMomentum MxFourVector(0.,0.,0.,0.); FourMomentum MyFourVector(0.,0.,0.,0.); for (const Particle& p : fs.particles(cmpMomByEta)) { if (p.momentum().rapidity() < gapcenter) { MxFourVector += p.momentum(); } else { MyFourVector += p.momentum(); } } double Mx = MxFourVector.mass(); double My = MyFourVector.mass(); double xix = (Mx * Mx) / (sqrtS()/GeV * sqrtS()/GeV); double xiy = (My * My) / (sqrtS()/GeV * sqrtS()/GeV); double xi = max(xix, xiy); if (xi > _xi_hf_cut) _h_xsec->fill(0.5, weight); if (xix > _xi_castor_cut || xiy > _xi_hf_cut) _h_xsec->fill(1.5, weight); } /// Normalizations, etc. 
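    // A recap of analyze() above: it implements the usual rapidity-gap-based
    // reconstruction of diffractive masses. The final state is ordered in
    // rapidity, split at the centre of its largest gap into systems X and Y,
    // and
    //   xi_X = M_X^2 / s,  xi_Y = M_Y^2 / s,  xi = max(xi_X, xi_Y),
    // with xi then compared against the HF (1e-6) and CASTOR (1e-7)
    // acceptance thresholds set in the constructor.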
void finalize() { scale(_h_xsec, crossSection()/millibarn/sumOfWeights()); } private: Histo1DPtr _h_xsec; double _xi_hf_cut; double _xi_castor_cut; }; DECLARE_RIVET_PLUGIN(CMS_2018_I1653948); } diff --git a/analyses/pluginCMS/CMS_2018_I1680318.cc b/analyses/pluginCMS/CMS_2018_I1680318.cc --- a/analyses/pluginCMS/CMS_2018_I1680318.cc +++ b/analyses/pluginCMS/CMS_2018_I1680318.cc @@ -1,222 +1,222 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class CMS_2018_I1680318 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2018_I1680318); /// Book histograms and initialise projections before the run void init() { // Cuts MinEnergy = 5.0; // Particle's energy cut in the forward region [GeV] EtaForwardMin = 3.0; EtaForwardMax = 5.0; EtaCentralCut = 2.4; MinParticlePt = 0.5; // [GeV] // Initialise and register projections const FinalState fsa(-1.0*EtaForwardMax, EtaForwardMax, 0.0000*GeV); - addProjection(fsa, "FSA"); + declare(fsa, "FSA"); const ChargedFinalState cfs(-1.0*EtaCentralCut, EtaCentralCut, MinParticlePt*GeV); - addProjection(cfs, "CFS"); + declare(cfs, "CFS"); // Event counters _num_evts_noCuts = 0; _num_evts_after_cuts_or = 0; _num_evts_after_cuts_and = 0; _num_evts_after_cuts_xor = 0; _num_evts_after_cuts_xorm = 0; _num_evts_after_cuts_xorp = 0; // Histograms _hist_dNch_all_dEta_OR = bookHisto1D(1,1,1); _hist_dNch_all_dEta_AND = bookHisto1D(1,2,1); _hist_dNch_all_dEta_XOR = bookHisto1D(1,3,1); _hist_dNch_all_dEta_XORpm = bookHisto1D(1,4,1); _hist_dNch_all_dpt_OR = bookHisto1D(2,1,1); _hist_dNch_all_dpt_AND = bookHisto1D(2,2,1); _hist_dNch_all_dpt_XOR = bookHisto1D(2,3,1); _hist_dNch_leading_dpt_OR = bookHisto1D(3,1,1); _hist_dNch_leading_dpt_AND = bookHisto1D(3,2,1); _hist_dNch_leading_dpt_XOR = bookHisto1D(3,3,1); _hist_integrated_leading_pt_OR = bookHisto1D(4,1,1); _hist_integrated_leading_pt_AND = bookHisto1D(4,2,1); _hist_integrated_leading_pt_XOR = bookHisto1D(4,3,1); _hist_dNev_all_dM_OR = bookHisto1D(5,1,1); _hist_dNev_all_dM_AND = bookHisto1D(5,2,1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); const ChargedFinalState& charged = applyProjection(event, "CFS"); const FinalState& fsa = applyProjection(event, "FSA"); bool activity_plus_side = false, activity_minus_side = false; for (const Particle& p : fsa.particles()) { if ( p.energy() >= MinEnergy ) { if ( inRange(p.eta(), EtaForwardMin, EtaForwardMax) ) activity_plus_side = true; if ( inRange(p.eta(), -1.0*EtaForwardMax, -1.0*EtaForwardMin) ) activity_minus_side = true; } // If activity already found in both sides, // then there is no point in keep going the loop if (activity_plus_side && activity_minus_side) break; } // Event selections const bool cutsor = ( activity_plus_side || activity_minus_side); const bool cutsand = ( activity_plus_side && activity_minus_side); const bool cutsxor = ((activity_plus_side && !activity_minus_side) || (!activity_plus_side && activity_minus_side)); const bool cutsxorm = (!activity_plus_side && activity_minus_side); const bool cutsxorp = ( activity_plus_side && !activity_minus_side); _num_evts_noCuts += weight; if ( charged.size() >= 1 ) { if (cutsor) _num_evts_after_cuts_or += weight; if (cutsand) _num_evts_after_cuts_and += weight; if (cutsxor) _num_evts_after_cuts_xor += weight; if (cutsxorm) _num_evts_after_cuts_xorm += weight; if (cutsxorp) _num_evts_after_cuts_xorp += weight; } // Loop over 
charged particles double leading_pt = 0; for (const Particle& p : charged.particles()) { // Find the leading-pt particle of the event if (p.pT() > leading_pt) leading_pt = p.pT(); // Filling histograms if (cutsor) _hist_dNch_all_dEta_OR -> fill(p.eta(), weight); if (cutsand) _hist_dNch_all_dEta_AND -> fill(p.eta(), weight); if (cutsxor) _hist_dNch_all_dEta_XOR -> fill(p.eta(), weight); //Average xorm & xorp if (cutsxorm) _hist_dNch_all_dEta_XORpm -> fill(p.eta(), weight); if (cutsxorp) _hist_dNch_all_dEta_XORpm -> fill(-1.0*p.eta(), weight); if (cutsor) _hist_dNch_all_dpt_OR -> fill(p.pT(), weight); if (cutsand) _hist_dNch_all_dpt_AND -> fill(p.pT(), weight); if (cutsxor) _hist_dNch_all_dpt_XOR -> fill(p.pT(), weight); } // Filling multiplicity histograms if ( charged.size() >= 1 ) { if (cutsor) _hist_dNev_all_dM_OR -> fill(charged.size(), weight); if (cutsand) _hist_dNev_all_dM_AND -> fill(charged.size(), weight); } // Filling leading-pt histograms if (cutsor) _hist_dNch_leading_dpt_OR -> fill(leading_pt, weight); if (cutsand) _hist_dNch_leading_dpt_AND -> fill(leading_pt, weight); if (cutsxor) _hist_dNch_leading_dpt_XOR -> fill(leading_pt, weight); // Integrating leading-pt histograms for (size_t i = 0 ; i < _hist_integrated_leading_pt_OR->numBins() ; ++i) { double binlimitlow_t = _hist_integrated_leading_pt_OR->bin(i).xMin(), weightbw_t = _hist_integrated_leading_pt_OR->bin(i).xWidth()*weight, xbin_t = _hist_integrated_leading_pt_OR->bin(i).xMid(); if (leading_pt > binlimitlow_t) { if (cutsor) _hist_integrated_leading_pt_OR -> fill(xbin_t, weightbw_t); if (cutsand) _hist_integrated_leading_pt_AND -> fill(xbin_t, weightbw_t); if (cutsxor) _hist_integrated_leading_pt_XOR -> fill(xbin_t, weightbw_t); } } } /// Normalise histograms etc., after the run void finalize() { MSG_INFO("Number of selected events: " << endl << "\t All = " << _num_evts_noCuts << endl << "\t Inelastic = " << _num_evts_after_cuts_or << endl << "\t NSD = " << _num_evts_after_cuts_and << endl << "\t Xor = " << _num_evts_after_cuts_xor << endl << "\t Xorm = " << _num_evts_after_cuts_xorm << endl << "\t Xorp = " << _num_evts_after_cuts_xorp); scale(_hist_dNch_all_dEta_OR, 1./_num_evts_after_cuts_or); scale(_hist_dNch_all_dEta_AND, 1./_num_evts_after_cuts_and); scale(_hist_dNch_all_dEta_XOR, 1./_num_evts_after_cuts_xor); scale(_hist_dNch_all_dEta_XORpm, 1./(_num_evts_after_cuts_xorm + _num_evts_after_cuts_xorp)); scale(_hist_dNch_all_dpt_OR, 1./_num_evts_after_cuts_or); scale(_hist_dNch_all_dpt_AND, 1./_num_evts_after_cuts_and); scale(_hist_dNch_all_dpt_XOR, 1./_num_evts_after_cuts_xor); scale(_hist_dNch_leading_dpt_OR, 1./_num_evts_after_cuts_or); scale(_hist_dNch_leading_dpt_AND, 1./_num_evts_after_cuts_and); scale(_hist_dNch_leading_dpt_XOR, 1./_num_evts_after_cuts_xor); scale(_hist_integrated_leading_pt_OR, 1./_num_evts_after_cuts_or); scale(_hist_integrated_leading_pt_AND, 1./_num_evts_after_cuts_and); scale(_hist_integrated_leading_pt_XOR, 1./_num_evts_after_cuts_xor); scale(_hist_dNev_all_dM_OR, 1./_num_evts_after_cuts_or); scale(_hist_dNev_all_dM_AND, 1./_num_evts_after_cuts_and); } private: // Cuts double MinEnergy, EtaForwardMin, EtaForwardMax, EtaCentralCut, MinParticlePt; // Counters double _num_evts_noCuts, _num_evts_after_cuts_and, _num_evts_after_cuts_or, _num_evts_after_cuts_xor, _num_evts_after_cuts_xorp, _num_evts_after_cuts_xorm; // Histograms Histo1DPtr _hist_dNch_all_dEta_AND, _hist_dNch_all_dEta_OR, _hist_dNch_all_dEta_XOR, _hist_dNch_all_dEta_XORpm; Histo1DPtr _hist_dNch_all_dpt_AND, 
_hist_dNch_all_dpt_OR, _hist_dNch_all_dpt_XOR; Histo1DPtr _hist_dNch_leading_dpt_AND, _hist_dNch_leading_dpt_OR, _hist_dNch_leading_dpt_XOR; Histo1DPtr _hist_integrated_leading_pt_AND, _hist_integrated_leading_pt_OR, _hist_integrated_leading_pt_XOR; Histo1DPtr _hist_dNev_all_dM_AND, _hist_dNev_all_dM_OR; }; DECLARE_RIVET_PLUGIN(CMS_2018_I1680318); } diff --git a/analyses/pluginCMS/CMS_2018_I1682495.cc b/analyses/pluginCMS/CMS_2018_I1682495.cc --- a/analyses/pluginCMS/CMS_2018_I1682495.cc +++ b/analyses/pluginCMS/CMS_2018_I1682495.cc @@ -1,157 +1,157 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Tools/fjcontrib/SoftDrop.hh" namespace Rivet { // Soft-drop and ungroomed jet mass measurement class CMS_2018_I1682495 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor CMS_2018_I1682495() : Analysis("CMS_2018_I1682495"), _softdrop(fjcontrib::SoftDrop(0, 0.1, 0.8) ) // parameters are beta, zcut, R0 { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // define a projection that keeps all the particles up to |eta|=5 const FinalState fs(Cuts::abseta < 5.); // use FastJet, anti-kt(R=0.8) to do the clustering - addProjection(FastJets(fs, FastJets::ANTIKT, 0.8), "JetsAK8"); + declare(FastJets(fs, FastJets::ANTIKT, 0.8), "JetsAK8"); // Histograms for (size_t i = 0; i < N_PT_BINS_dj; ++i ) { _h_ungroomedJetMass_dj[i][0] = bookHisto1D(i+1+0*N_PT_BINS_dj, 1, 1); // Ungroomed mass, absolute _h_sdJetMass_dj[i][0] = bookHisto1D(i+1+1*N_PT_BINS_dj, 1, 1); // Groomed mass, absolute _h_ungroomedJetMass_dj[i][1] = bookHisto1D(i+1+2*N_PT_BINS_dj, 1, 1); // Ungroomed mass, normalized _h_sdJetMass_dj[i][1] = bookHisto1D(i+1+3*N_PT_BINS_dj, 1, 1); // Groomed mass, normalized } } // Find the pT histogram bin index for value pt (in GeV), to hack a 2D histogram equivalent /// @todo Use a YODA axis/finder alg when available size_t findPtBin(double ptJ) { const double ptBins_dj[N_PT_BINS_dj+1] = { 200., 260., 350., 460., 550., 650., 760., 900, 1000, 1100, 1200, 1300, 13000}; for (size_t ibin = 0; ibin < N_PT_BINS_dj; ++ibin) { if (inRange(ptJ, ptBins_dj[ibin], ptBins_dj[ibin+1])) return ibin; } return N_PT_BINS_dj; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); // Look at events with >= 2 jets auto jetsAK8 = applyProjection(event, "JetsAK8").jetsByPt(Cuts::pT > 200*GeV and Cuts::abseta < 2.4); if (jetsAK8.size() < 2) vetoEvent; // Get the leading two jets const fastjet::PseudoJet& j0 = jetsAK8[0].pseudojet(); const fastjet::PseudoJet& j1 = jetsAK8[1].pseudojet(); // Calculate delta phi and the pt asymmetry double deltaPhi = Rivet::deltaPhi( j0.phi(), j1.phi() ); double ptasym = (j0.pt() - j1.pt()) / (j0.pt() + j1.pt()); if (deltaPhi < 2.0 ) vetoEvent; if (ptasym > 0.3) vetoEvent; // Find the appropriate pT bins and fill the histogram const size_t njetBin0 = findPtBin(j0.pt()/GeV); const size_t njetBin1 = findPtBin(j1.pt()/GeV); if (njetBin0 < N_PT_BINS_dj && njetBin1 < N_PT_BINS_dj) { for ( size_t jbin = 0; jbin < N_CATEGORIES; jbin++ ){ _h_ungroomedJetMass_dj[njetBin0][jbin]->fill(j0.m()/GeV, weight); _h_ungroomedJetMass_dj[njetBin1][jbin]->fill(j1.m()/GeV, weight); } } // Now run the substructure algs... fastjet::PseudoJet sd0 = _softdrop(j0); fastjet::PseudoJet sd1 = _softdrop(j1); // ... 
and repeat if (njetBin0 < N_PT_BINS_dj && njetBin1 < N_PT_BINS_dj) { for ( size_t jbin = 0; jbin < N_CATEGORIES; jbin++ ){ _h_sdJetMass_dj[njetBin0][jbin]->fill(sd0.m()/GeV, weight); _h_sdJetMass_dj[njetBin1][jbin]->fill(sd1.m()/GeV, weight); } } } /// Normalise histograms etc., after the run void finalize() { // Normalize the normalized cross section histograms to unity, for (size_t i = 0; i < N_PT_BINS_dj; ++i) { normalize(_h_ungroomedJetMass_dj[i][1]); normalize(_h_sdJetMass_dj[i][1]); } // Normalize the absolute cross section histograms to xs * lumi. for (size_t i = 0; i < N_PT_BINS_dj; ++i) { scale(_h_ungroomedJetMass_dj[i][0], crossSection()/picobarn / sumOfWeights()); scale(_h_sdJetMass_dj[i][0], crossSection()/picobarn / sumOfWeights()); } } //@} private: /// @name FastJet grooming tools (configured in constructor init list) //@{ const fjcontrib::SoftDrop _softdrop; //@} /// @name Histograms //@{ enum { PT_200_260_dj=0, PT_260_350_dj, PT_350_460_dj, PT_460_550_dj, PT_550_650_dj, PT_650_760_dj, PT_760_900_dj, PT_900_1000_dj, PT_1000_1100_dj, PT_1100_1200_dj, PT_1200_1300_dj, PT_1300_Inf_dj, N_PT_BINS_dj } BINS_dj; static const int N_CATEGORIES=2; Histo1DPtr _h_ungroomedJet0pt, _h_ungroomedJet1pt; Histo1DPtr _h_sdJet0pt, _h_sdJet1pt; // Here, store both the absolute (index 0) and normalized (index 1) cross sections. Histo1DPtr _h_ungroomedJetMass_dj[N_PT_BINS_dj][2]; Histo1DPtr _h_sdJetMass_dj[N_PT_BINS_dj][2]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2018_I1682495); } diff --git a/analyses/pluginCMS/CMS_2018_I1690148.cc b/analyses/pluginCMS/CMS_2018_I1690148.cc --- a/analyses/pluginCMS/CMS_2018_I1690148.cc +++ b/analyses/pluginCMS/CMS_2018_I1690148.cc @@ -1,453 +1,453 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedLeptons.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Math/MatrixN.hh" #include "Rivet/Tools/fjcontrib/Nsubjettiness.hh" #include "Rivet/Tools/fjcontrib/EnergyCorrelator.hh" namespace Rivet { /// Measurement of jet substructure observables in $t\bar{t}$ events from $pp$ collisions at 13~TeV class CMS_2018_I1690148 : public Analysis { public: enum Reconstruction { CHARGED=0, ALL=1 }; enum Observable { MULT=0, PTDS=1, GA_LHA=2, GA_WIDTH=3, GA_THRUST=4, ECC=5, ZG=6, ZGDR=7, NSD=8, TAU21=9, TAU32=10, TAU43=11, C1_00=12, C1_02=13, C1_05=14, C1_10=15, C1_20=16, C2_00=17, C2_02=18, C2_05=19, C2_10=20, C2_20=21, C3_00=22, C3_02=23, C3_05=24, C3_10=25, C3_20=26, M2_B1=27, N2_B1=28, N3_B1=29, M2_B2=30, N2_B2=31, N3_B2=32 }; enum Flavor { INCL=0, BOTTOM=1, QUARK=2, GLUON=3 }; /// Minimal constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2018_I1690148); /// @name Analysis methods //@{ /// Set up projections and book histograms void init() { // Cuts particle_cut = (Cuts::abseta < 5.0 && Cuts::pT > 0.*GeV); lepton_cut = (Cuts::abseta < 2.4 && Cuts::pT > 15.*GeV); jet_cut = (Cuts::abseta < 2.4 && Cuts::pT > 30.*GeV); // Generic final state FinalState fs(particle_cut); // Dressed leptons ChargedLeptons charged_leptons(fs); IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); PromptFinalState prompt_leptons(charged_leptons); prompt_leptons.acceptMuonDecays(true); 
prompt_leptons.acceptTauDecays(true); PromptFinalState prompt_photons(photons); prompt_photons.acceptMuonDecays(true); prompt_photons.acceptTauDecays(true); // NB. useDecayPhotons=true allows for photons with tau ancestor; photons from hadrons are vetoed by the PromptFinalState; DressedLeptons dressed_leptons(prompt_photons, prompt_leptons, 0.1, lepton_cut, true); - addProjection(dressed_leptons, "DressedLeptons"); + declare(dressed_leptons, "DressedLeptons"); // Projection for jets VetoedFinalState fsForJets(fs); fsForJets.addVetoOnThisFinalState(dressed_leptons); - addProjection(FastJets(fsForJets, FastJets::ANTIKT, 0.4, - JetAlg::ALL_MUONS, JetAlg::NO_INVISIBLES), "Jets"); + declare(FastJets(fsForJets, FastJets::ANTIKT, 0.4, + JetAlg::ALL_MUONS, JetAlg::NO_INVISIBLES), "Jets"); // Booking of histograms int d = 0; for (int r = 0; r < 2; ++r) { // reconstruction (charged, all) for (int o = 0; o < 33; ++o) { // observable d += 1; for (int f = 0; f < 4; ++f) { // flavor char buffer [20]; sprintf(buffer, "d%02d-x01-y%02d", d, f+1); _h[r][o][f] = bookHisto1D(buffer); } } } } void analyze(const Event& event) { const double weight = event.weight(); // select ttbar -> lepton+jets const vector& leptons = applyProjection(event, "DressedLeptons").dressedLeptons(); int nsel_leptons = 0; for (const DressedLepton& lepton : leptons) { if (lepton.pt() > 26.) nsel_leptons += 1; else vetoEvent; // found veto lepton } if (nsel_leptons != 1) vetoEvent; const Jets all_jets = applyProjection(event, "Jets").jetsByPt(jet_cut); if (all_jets.size() < 4) vetoEvent; // categorize jets int nsel_bjets = 0; int nsel_wjets = 0; Jets jets[4]; for (const Jet& jet : all_jets) { // check for jet-lepton overlap -> do not consider for selection if (deltaR(jet, leptons[0]) < 0.4) continue; bool overlap = false; bool w_jet = false; for (const Jet& jet2 : all_jets) { if (jet.momentum() == jet2.momentum()) continue; // check for jet-jet overlap -> do not consider for analysis if (deltaR(jet, jet2) < 0.8) overlap = true; // check for W candidate if (jet.bTagged() or jet2.bTagged()) continue; FourMomentum w_cand = jet.momentum() + jet2.momentum(); if (abs(w_cand.mass() - 80.4) < 15.) w_jet = true; } // count jets for event selection if (jet.bTagged()) nsel_bjets += 1; if (w_jet) nsel_wjets += 1; // jets for analysis if (jet.abseta() > 2. 
or overlap) continue; if (jet.bTagged()) { jets[BOTTOM].push_back(jet); } else if (w_jet) { jets[QUARK].push_back(jet); } else { jets[GLUON].push_back(jet); } } if (nsel_bjets != 2) vetoEvent; if (nsel_wjets < 2) vetoEvent; // substructure analysis // no loop over incl jets -> more loc but faster for (int f = 1; f < 4; ++f) { for (const Jet& jet : jets[f]) { // apply cuts on constituents vector particles[2]; for (const Particle& p : jet.particles(Cuts::pT > 1.*GeV)) { particles[ALL].push_back( PseudoJet(p.px(), p.py(), p.pz(), p.energy()) ); if (p.charge3() != 0) particles[CHARGED].push_back( PseudoJet(p.px(), p.py(), p.pz(), p.energy()) ); } if (particles[CHARGED].size() == 0) continue; // recluster with C/A and anti-kt+WTA PseudoJet ca_jet[2]; JetDefinition ca_def(fastjet::cambridge_algorithm, fastjet::JetDefinition::max_allowable_R); ClusterSequence ca_charged(particles[CHARGED], ca_def); ClusterSequence ca_all(particles[ALL], ca_def); ca_jet[CHARGED] = ca_charged.exclusive_jets(1)[0]; ca_jet[ALL] = ca_all.exclusive_jets(1)[0]; PseudoJet akwta_jet[2]; JetDefinition akwta_def(fastjet::antikt_algorithm, fastjet::JetDefinition::max_allowable_R, fastjet::RecombinationScheme::WTA_pt_scheme); ClusterSequence akwta_charged(particles[CHARGED], akwta_def); ClusterSequence akwta_all(particles[ALL], akwta_def); akwta_jet[CHARGED] = akwta_charged.exclusive_jets(1)[0]; akwta_jet[ALL] = akwta_all.exclusive_jets(1)[0]; // calculate observables for (int r = 0; r < 2; ++r) { int mult = akwta_jet[r].constituents().size(); // generalized angularities _h[r][MULT][INCL]->fill(mult, weight); _h[r][MULT][f]->fill(mult, weight); if (mult > 1) { double ptds = getPtDs(akwta_jet[r]); double ga_lha = calcGA(0.5, 1., akwta_jet[r]); double ga_width = calcGA(1., 1., akwta_jet[r]); double ga_thrust = calcGA(2., 1., akwta_jet[r]); _h[r][PTDS][INCL]->fill(ptds, weight); _h[r][PTDS][f]->fill(ptds, weight); _h[r][GA_LHA][INCL]->fill(ga_lha, weight); _h[r][GA_LHA][f]->fill(ga_lha, weight); _h[r][GA_WIDTH][INCL]->fill(ga_width, weight); _h[r][GA_WIDTH][f]->fill(ga_width, weight); _h[r][GA_THRUST][INCL]->fill(ga_thrust, weight); _h[r][GA_THRUST][f]->fill(ga_thrust, weight); } // eccentricity if (mult > 3) { double ecc = getEcc(akwta_jet[r]); _h[r][ECC][INCL]->fill(ecc, weight); _h[r][ECC][f]->fill(ecc, weight); } // N-subjettiness if (mult > 2) { double tau21 = getTau(2, 1, ca_jet[r]); _h[r][TAU21][INCL]->fill(tau21, weight); _h[r][TAU21][f]->fill(tau21, weight); } if (mult > 3) { double tau32 = getTau(3, 2, ca_jet[r]); _h[r][TAU32][INCL]->fill(tau32, weight); _h[r][TAU32][f]->fill(tau32, weight); } if (mult > 4) { double tau43 = getTau(4, 3, ca_jet[r]); _h[r][TAU43][INCL]->fill(tau43, weight); _h[r][TAU43][f]->fill(tau43, weight); } // soft drop if (mult > 1) { vector sd_results = getZg(ca_jet[r]); if (sd_results[0] > 0.) 
{ _h[r][ZG][INCL]->fill(sd_results[0], weight); _h[r][ZG][f]->fill(sd_results[0], weight); _h[r][ZGDR][INCL]->fill(sd_results[1], weight); _h[r][ZGDR][f]->fill(sd_results[1], weight); } } int nsd = getNSD(0.007, -1., ca_jet[r]); _h[r][NSD][INCL]->fill(nsd, weight); _h[r][NSD][f]->fill(nsd, weight); // C-series energy correlation ratios if (mult > 1) { double cn_00 = getC(1, 0.0, ca_jet[r]); double cn_02 = getC(1, 0.2, ca_jet[r]); double cn_05 = getC(1, 0.5, ca_jet[r]); double cn_10 = getC(1, 1.0, ca_jet[r]); double cn_20 = getC(1, 2.0, ca_jet[r]); _h[r][C1_00][INCL]->fill(cn_00, weight); _h[r][C1_02][INCL]->fill(cn_02, weight); _h[r][C1_05][INCL]->fill(cn_05, weight); _h[r][C1_10][INCL]->fill(cn_10, weight); _h[r][C1_20][INCL]->fill(cn_20, weight); _h[r][C1_00][f]->fill(cn_00, weight); _h[r][C1_02][f]->fill(cn_02, weight); _h[r][C1_05][f]->fill(cn_05, weight); _h[r][C1_10][f]->fill(cn_10, weight); _h[r][C1_20][f]->fill(cn_20, weight); } if (mult > 2) { double cn_00 = getC(2, 0.0, ca_jet[r]); double cn_02 = getC(2, 0.2, ca_jet[r]); double cn_05 = getC(2, 0.5, ca_jet[r]); double cn_10 = getC(2, 1.0, ca_jet[r]); double cn_20 = getC(2, 2.0, ca_jet[r]); _h[r][C2_00][INCL]->fill(cn_00, weight); _h[r][C2_02][INCL]->fill(cn_02, weight); _h[r][C2_05][INCL]->fill(cn_05, weight); _h[r][C2_10][INCL]->fill(cn_10, weight); _h[r][C2_20][INCL]->fill(cn_20, weight); _h[r][C2_00][f]->fill(cn_00, weight); _h[r][C2_02][f]->fill(cn_02, weight); _h[r][C2_05][f]->fill(cn_05, weight); _h[r][C2_10][f]->fill(cn_10, weight); _h[r][C2_20][f]->fill(cn_20, weight); } if (mult > 3) { double cn_00 = getC(3, 0.0, ca_jet[r]); double cn_02 = getC(3, 0.2, ca_jet[r]); double cn_05 = getC(3, 0.5, ca_jet[r]); double cn_10 = getC(3, 1.0, ca_jet[r]); double cn_20 = getC(3, 2.0, ca_jet[r]); _h[r][C3_00][INCL]->fill(cn_00, weight); _h[r][C3_02][INCL]->fill(cn_02, weight); _h[r][C3_05][INCL]->fill(cn_05, weight); _h[r][C3_10][INCL]->fill(cn_10, weight); _h[r][C3_20][INCL]->fill(cn_20, weight); _h[r][C3_00][f]->fill(cn_00, weight); _h[r][C3_02][f]->fill(cn_02, weight); _h[r][C3_05][f]->fill(cn_05, weight); _h[r][C3_10][f]->fill(cn_10, weight); _h[r][C3_20][f]->fill(cn_20, weight); } // M/N-series energy correlation ratios if (mult > 2) { double m2_b1 = getM(2, 1., ca_jet[r]); double m2_b2 = getM(2, 2., ca_jet[r]); double n2_b1 = getN(2, 1., ca_jet[r]); double n2_b2 = getN(2, 2., ca_jet[r]); _h[r][M2_B1][INCL]->fill(m2_b1, weight); _h[r][M2_B2][INCL]->fill(m2_b2, weight); _h[r][N2_B1][INCL]->fill(n2_b1, weight); _h[r][N2_B2][INCL]->fill(n2_b2, weight); _h[r][M2_B1][f]->fill(m2_b1, weight); _h[r][M2_B2][f]->fill(m2_b2, weight); _h[r][N2_B1][f]->fill(n2_b1, weight); _h[r][N2_B2][f]->fill(n2_b2, weight); } if (mult > 3) { double n3_b1 = getN(3, 1., ca_jet[r]); double n3_b2 = getN(3, 2., ca_jet[r]); _h[r][N3_B1][INCL]->fill(n3_b1, weight); _h[r][N3_B2][INCL]->fill(n3_b2, weight); _h[r][N3_B1][f]->fill(n3_b1, weight); _h[r][N3_B2][f]->fill(n3_b2, weight); } } } } } void finalize() { for (int r = 0; r < 2; ++r) { // reconstruction (charged, all) for (int o = 0; o < 33; ++o) { // observable for (int f = 0; f < 4; ++f) { // flavor normalize(_h[r][o][f], 1.0, false); } } } } //@} private: double deltaR(PseudoJet j1, PseudoJet j2) { double deta = j1.eta() - j2.eta(); double dphi = j1.delta_phi_to(j2); return sqrt(deta*deta + dphi*dphi); } double getPtDs(PseudoJet jet) { double mult = jet.constituents().size(); double sumpt = 0.; // would be jet.pt() in WTA scheme but better keep it generic double sumpt2 = 0.; for (auto p : jet.constituents()) { 
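        // Accumulate the scalar pt sum and the sum of squared pt over the
        // constituents: ptd below is (sum pt^2)/(sum pt)^2, i.e. the squared
        // p_T D fragmentation observable, and the return value rescales it so
        // that a jet with n equal-pt constituents gives exactly zero (the
        // max(0., ...) only guards against rounding just below zero).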
sumpt += p.pt(); sumpt2 += pow(p.pt(), 2); } double ptd = sumpt2/pow(sumpt,2); return max(0., sqrt((ptd-1./mult) * mult/(mult-1.))); } double calcGA(double beta, double kappa, PseudoJet jet) { double sumpt = 0.; for (const auto& p : jet.constituents()) { sumpt += p.pt(); } double ga = 0.; for (auto p : jet.constituents()) { ga += pow(p.pt()/sumpt, kappa) * pow(deltaR(jet, p)/0.4, beta); } return ga; } double getEcc(PseudoJet jet) { // Covariance matrix Matrix<2> M; for (const auto& p : jet.constituents()) { Matrix<2> MPart; MPart.set(0, 0, (p.eta() - jet.eta()) * (p.eta() - jet.eta())); MPart.set(0, 1, (p.eta() - jet.eta()) * mapAngleMPiToPi(p.phi() - jet.phi())); MPart.set(1, 0, mapAngleMPiToPi(p.phi() - jet.phi()) * (p.eta() - jet.eta())); MPart.set(1, 1, mapAngleMPiToPi(p.phi() - jet.phi()) * mapAngleMPiToPi(p.phi() - jet.phi())); M += MPart * p.e(); } // Calculate eccentricity from eigenvalues // Check that the matrix is symmetric. const bool isSymm = M.isSymm(); if (!isSymm) { MSG_ERROR("Error: energy tensor not symmetric:"); MSG_ERROR("[0,1] vs. [1,0]: " << M.get(0,1) << ", " << M.get(1,0)); } // If not symmetric, something's wrong (we made sure the error msg appeared first). assert(isSymm); const double a = M.get(0,0); const double b = M.get(1,1); const double c = M.get(1,0); const double l1 = 0.5*(a+b+sqrt( (a-b)*(a-b) + 4 *c*c)); const double l2 = 0.5*(a+b-sqrt( (a-b)*(a-b) + 4 *c*c)); return 1. - l2/l1; } double getTau(int N, int M, PseudoJet jet) { fjcontrib::Nsubjettiness::NsubjettinessRatio tau_ratio(N, M, fjcontrib::Nsubjettiness::OnePass_WTA_KT_Axes(), fjcontrib::Nsubjettiness::NormalizedMeasure(1.0, 0.4)); return tau_ratio(jet); } vector getZg(PseudoJet jet) { PseudoJet jet0 = jet; PseudoJet jet1, jet2; double zg = 0.; while (zg < 0.1 && jet0.has_parents(jet1, jet2)) { zg = jet2.pt()/jet0.pt(); jet0 = jet1; } if (zg < 0.1) return {-1., -1.}; return {zg, jet1.delta_R(jet2)}; } int getNSD(double zcut, double beta, PseudoJet jet) { PseudoJet jet0 = jet; PseudoJet jet1, jet2; int nsd = 0.; double zg = 0.; while (jet0.has_parents(jet1, jet2)) { zg = jet2.pt()/jet0.pt(); if (zg > zcut * pow(jet1.delta_R(jet2)/0.4, beta)) nsd += 1; jet0 = jet1; } return nsd; } double getC(int N, double beta, PseudoJet jet) { fjcontrib::EnergyCorrelatorDoubleRatio C(N, beta); return C(jet); } double getM(int N, double beta, PseudoJet jet) { fjcontrib::EnergyCorrelatorMseries CM(N, beta); return CM(jet); } double getN(int N, double beta, PseudoJet jet) { fjcontrib::EnergyCorrelatorNseries CN(N, beta); return CN(jet); } private: // @name Histogram data members //@{ Cut particle_cut, lepton_cut, jet_cut; Histo1DPtr _h[2][33][4]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2018_I1690148); } diff --git a/include/Rivet/Projections/AliceCommon.hh b/include/Rivet/Projections/AliceCommon.hh --- a/include/Rivet/Projections/AliceCommon.hh +++ b/include/Rivet/Projections/AliceCommon.hh @@ -1,428 +1,419 @@ -/** - * @file AliceCommon.hh - * @author Christian Holm Christensen - * @date Fri Aug 24 09:50:33 2018 - * - * @brief Common AlICE specific projections - * @ingroup alice_rivet - */ #ifndef PROJECTIONS_ALICECOMMON_HH #define PROJECTIONS_ALICECOMMON_HH #include #include #include #include #include namespace Rivet { namespace ALICE { - /** + + + /** * Template for ALICE V0 multiplicity projection. 
Which - * acceptance to look in depends on the template argument @a MODE: + * acceptance to look in depends on the template argument @a MODE: * * - @c MODE=-1 Check the V0-C acceptance (@f$-3.7<\eta<-1.7@f$) * - @c MODE=+1 Check the V0-A acceptance (@f$+2.8<\eta<+5.1@f$) * - @c MODE=0 Check both V0-A and -C acceptances (sum) - * * * @ingroup alice_rivet + * @author Christian Holm Christensen */ template class V0Multiplicity : public SingleValueProjection { public: - /** - * Constructor + /** + * Constructor */ V0Multiplicity() : SingleValueProjection() { setName("ALICE::V0Multiplicity"); Cut cut; if (MODE < 0) cut = V0Cacceptance; else if (MODE > 0) cut = V0Aacceptance; else cut = (V0Aacceptance || V0Cacceptance); // Declare our projection. Note, the cuts stipulate charged // particles, so we just use a final state (rather than // charged-final state) projection here. const FinalState fs(cut); this->declare(fs, "FinalState"); } - /** - * Destructor + /** + * Destructor */ virtual ~V0Multiplicity() {} - /** + /** * Do the projection. Sums number of charged final state * particles within the acceptances of the specified V0 * sub-detectors. - * - * @param e Event to project from + * + * @param e Event to project from */ virtual void project(const Event& e) { clear(); set(apply(e,"FinalState").particles().size()); } - /** - * Clone this projection - * - * @return New wrapped pointer to object of this class + /** + * Clone this projection + * + * @return New wrapped pointer to object of this class */ virtual std::unique_ptr clone() const { return std::unique_ptr(new V0Multiplicity(*this)); } - /** - * Compare to another projection + /** + * Compare to another projection * - * @param p Projection to compare against + * @param p Projection to compare against */ - virtual int compare(const Projection& p) const + virtual CmpState compare(const Projection& p) const { return dynamic_cast*>(&p) ? - EQUIVALENT : UNDEFINED; - } + CmpState::EQ : CmpState::NEQ; + } }; //---------------------------------------------------------------- - /** - * Convinience type definition + /** + * Convenience type definition * * @ingroup alice_rivet */ typedef V0Multiplicity<-1> V0AMultiplicity; - /** - * Convinience type definition + /** + * Convinience type definition * * @ingroup alice_rivet */ typedef V0Multiplicity<+1> V0CMultiplicity; - /** - * Convinience type definition + /** + * Convinience type definition * * @ingroup alice_rivet */ typedef V0Multiplicity<0> V0MMultiplicity; //---------------------------------------------------------------- - /** + /** * Template for ALICE CL multiplicity projection. Which - * acceptance to look in depends on the template argument @a INNER: + * acceptance to look in depends on the template argument @a INNER: * * - @c INNER=true Check the inner SPD layer - * - @c INNER=false Check the outer SPD layer + * - @c INNER=false Check the outer SPD layer * * @ingroup alice_rivet - * + * */ template class CLMultiplicity : public SingleValueProjection { public: - /** - * Constructor + /** + * Constructor */ CLMultiplicity() : SingleValueProjection() { setName("ALICE::CLMultiplicity"); Cut cut; if (INNER) cut = CL0acceptance; else cut = CL1acceptance; // Declare our projection. Note, the cuts stipulate charged // particles, so we just use a final state (rather than // charged-final state) projection here. const FinalState fs(cut); this->declare(fs, "FinalState"); } - /** - * Destructor + /** + * Destructor */ virtual ~CLMultiplicity() {} - /** + /** * Do the projection. 
Sums number of charged final state * particles within the acceptances of the specified CL * sub-detectors. - * - * @param e Event to project from + * + * @param e Event to project from */ virtual void project(const Event& e) { clear(); set(apply(e,"FinalState").particles().size()); } - /** - * Clone this projection - * - * @return New wrapped pointer to object of this class + /** + * Clone this projection + * + * @return New wrapped pointer to object of this class */ virtual std::unique_ptr clone() const { return std::unique_ptr(new CLMultiplicity(*this)); } - /** - * Compare to another projection + /** + * Compare to another projection * - * @param p Projection to compare against + * @param p Projection to compare against */ - virtual int compare(const Projection& p) const + virtual CmpState compare(const Projection& p) const { return dynamic_cast*>(&p) ? - EQUIVALENT : UNDEFINED; - } + CmpState::EQ : CmpState::NEQ; + } }; //---------------------------------------------------------------- - /** - * Convinience type definition + /** + * Convinience type definition * * @ingroup alice_rivet */ typedef CLMultiplicity CL0Multiplicity; - /** - * Convinience type definition + /** + * Convinience type definition * * @ingroup alice_rivet */ typedef CLMultiplicity CL1Multiplicity; //================================================================ - /** - * A template of ALICE V0-based triggers. - * + /** + * A template of ALICE V0-based triggers. + * * - @c MODE=-1 Check in the V0-C acceptance (@f$-3.7<\eta<-1.7@f$) * - @c MODE=+1 Check in the V0-A acceptance (@f$+2.8<\eta<+5.1@f$) * - @c MODE=0 Check in both V0-A and -C acceptances (V0-OR) * * @ingroup alice_rivet */ template class V0Trigger : public TriggerProjection { public: - /** - * Constructor + /** + * Constructor */ V0Trigger() : TriggerProjection() { setName("ALICE::V0Trigger"); // Declare our projection. Note, the cuts stipulate charged // particles, so we just use a final state (rather than // charged-final state) projection here. const V0Multiplicity fs; this->declare(fs, "FinalState"); } - /** - * Destructor + /** + * Destructor */ virtual ~V0Trigger() {} - /** + /** * Do the projection. Checks if the number of projected * particles is larger than 0 - * - * @param e Event to project from + * + * @param e Event to project from */ virtual void project(const Event& e) { fail(); // Assume failure if (apply>(e,"FinalState")() > 0) pass(); } - /** - * Clone this projection - * - * @return New wrapped pointer to object of this class + /** + * Clone this projection + * + * @return New wrapped pointer to object of this class */ virtual std::unique_ptr clone() const { return std::unique_ptr(new V0Trigger(*this)); } - /** - * Compare to projections. - * - * @param p Projection to compare to. - * + /** + * Compare to projections. + * + * @param p Projection to compare to. + * * @return true (EQUIVALENT) if the projection @a p is of the same * type as this. */ - virtual int compare(const Projection& p) const + virtual CmpState compare(const Projection& p) const { return dynamic_cast*>(&p) ? 
- EQUIVALENT : UNDEFINED; + CmpState::EQ : CmpState::NEQ; } }; //---------------------------------------------------------------- - /** - * Convinience type definition + /** + * Convinience type definition * * @ingroup alice_rivet */ // typedef V0Trigger<-1> V0ATrigger; using V0ATrigger = V0Trigger<-1>; - /** - * Convinience type definition + /** + * Convinience type definition * * @ingroup alice_rivet */ // typedef V0Trigger<+1> V0CTrigger; using V0CTrigger = V0Trigger<+1>; - /** - * Convinience type definition + /** + * Convinience type definition * * @ingroup alice_rivet */ // typedef V0Trigger<0> V0OrTrigger; using V0OrTrigger = V0Trigger<0>; //---------------------------------------------------------------- - /** + /** * A trigger projetion for the ALICE V0-AND (a.k.a. CINT7) * requirement. */ class V0AndTrigger : public TriggerProjection { public: - /** - * Constructor + /** + * Constructor */ V0AndTrigger() : TriggerProjection() { const V0ATrigger v0a; const V0CTrigger v0c; this->declare(v0a, "V0A"); this->declare(v0c, "V0C"); } - /** - * Destructor + /** + * Destructor */ virtual ~V0AndTrigger() {} - /** + /** * Do the projection. Checks if the numbers of projected * particles on both sides, are larger than 0 - * - * @param e Event to project from + * + * @param e Event to project from */ virtual void project(const Event& e) { fail(); // Assume failure if (apply(e,"V0A")() && apply(e,"V0C")()) pass(); } - /** - * Compare to projections. - * - * @param p Projection to compare to. - * + /** + * Compare to projections. + * + * @param p Projection to compare to. + * * @return true (EQUIVALENT) if the projection @a p is of the same * type as this. */ - virtual int compare(const Projection& p) const + virtual CmpState compare(const Projection& p) const { return dynamic_cast(&p) ? - EQUIVALENT : UNDEFINED; + CmpState::EQ : CmpState::NEQ; } - /** - * Clone this projection - * - * @return New wrapped pointer to object of this class + /** + * Clone this projection + * + * @return New wrapped pointer to object of this class */ virtual std::unique_ptr clone() const { return std::unique_ptr(new V0AndTrigger(*this)); } }; - - + + //================================================================ - /** - * Standard ALICE primary particle definition. - * also - * Primary particle definition according to public note + /** + * Standard ALICE primary particle definition. + * also + * Primary particle definition according to public note * ALICE-PUBLIC-2017-005 * * @ingroup alice_rivet */ class PrimaryParticles : public Rivet::PrimaryParticles { public: PrimaryParticles(const Cut& c=Cuts::open()) : Rivet::PrimaryParticles({},c) {} - /** - * Compare to projections. - * - * @param p Projection to compare to. - * + /** + * Compare to projections. + * + * @param p Projection to compare to. + * * @return true (EQUIVALENT) if the projection @a p is of the same * type as this, if the cuts are equal, and that the list of PDG * IDs are the same. 
*/ - virtual int compare(const Projection& p) const + virtual CmpState compare(const Projection& p) const { const PrimaryParticles* o = dynamic_cast(&p); - if (_cuts != o->_cuts) return UNDEFINED; + if (_cuts != o->_cuts) return CmpState::NEQ; return mkPCmp(*o,"PrimaryParticles"); } - /** - * Clone this projection - * - * @return New wrapped pointer to object of this class + /** + * Clone this projection + * + * @return New wrapped pointer to object of this class */ virtual std::unique_ptr clone() const { return std::unique_ptr(new PrimaryParticles(*this)); } protected: - /** + /** * Check PDG ID of particle @a p is in the list of accepted * primaries. * * @param p Particle to investigate. * * @return true if the particle PDG ID is in the list of known * primary PDG IDs. * * @note We explicitly override this to allow for nuclei, and we * explicitly check for a specific set of particles (and * anti-particles). This means we do not use the base class * list of particles. Therefore, we also need to override the * compare method. */ bool isPrimaryPID(const HepMC::GenParticle* p) const { - int pdg = PID::abspid(p->pdg_id()); - // Check for nuclus + int pdg = abs(p->pdg_id()); + // Check for nuclus if (pdg > 1000000000) return true; - + switch (pdg) { case Rivet::PID::MUON: case Rivet::PID::ELECTRON: case Rivet::PID::GAMMA: case Rivet::PID::PIPLUS: case Rivet::PID::KPLUS: case Rivet::PID::K0S: case Rivet::PID::K0L: case Rivet::PID::PROTON: case Rivet::PID::NEUTRON: case Rivet::PID::LAMBDA: case Rivet::PID::SIGMAMINUS: case Rivet::PID::SIGMAPLUS: case Rivet::PID::XIMINUS: case Rivet::PID::XI0: case Rivet::PID::OMEGAMINUS: case Rivet::PID::NU_E: case Rivet::PID::NU_MU: case Rivet::PID::NU_TAU: return true; } return false; } }; } } #endif // // EOF // - - - diff --git a/include/Rivet/Projections/EventMixingFinalState.hh b/include/Rivet/Projections/EventMixingFinalState.hh --- a/include/Rivet/Projections/EventMixingFinalState.hh +++ b/include/Rivet/Projections/EventMixingFinalState.hh @@ -1,99 +1,104 @@ // -*- C++ -*- #ifndef RIVET_EventMixingFinalState_HH #define RIVET_EventMixingFinalState_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/ParticleFinder.hh" #include #include - namespace Rivet { - // @brief Projects out an event mixed of several events, given - // a mixing observable (eg. number of final state particles), - // defining what should qualify as a mixable event. - // Binning in the mixing observable is defined in the constructor, - // as is the number of events one wants to mix with. - // The protected method calculateMixingObs() can be overloaded - // in derived classes, to define other mixing observables, eg. - // centrality or something even more elaborate. - // - // !!!DISCLAIMER!!! The projection makes no attempt at correct handling - // of event weights - ie. what one should use as event weight for several - // mixed events. Proceed with caution if you do not use an MC with - // unit weights. - // - // @author Christian Bierlich + using std::deque; - typedef map > MixMap; + + /// Projects out an event mixed of several events, given + /// a mixing observable (eg. number of final state particles), + /// defining what should qualify as a mixable event. + /// Binning in the mixing observable is defined in the constructor, + /// as is the number of events one wants to mix with. + /// The protected method calculateMixingObs() can be overloaded + /// in derived classes, to define other mixing observables, eg. + /// centrality or something even more elaborate. 
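+  /// A short usage sketch (hypothetical values; mixing 10 events, with the
+  /// mixing observable binned in steps of 10 between 0 and 200):
+  ///
+  ///   declare(EventMixingFinalState(fs, fs, 10, 0., 200., 10.), "MIXED");
+  ///   // ...and in analyze():
+  ///   for (const Particles& mixed :
+  ///        apply<EventMixingFinalState>(event, "MIXED").getMixingEvents()) { ... }
+  ///
+  /// A derived projection could instead bin in, say, forward multiplicity by
+  /// overriding calculateMixingObs(); note mObs is currently private, so it
+  /// would have to be made protected (or given a setter) for that to work.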
+  ///
+  /// @warning !!!DISCLAIMER!!! The projection makes no attempt at correct handling
+  /// of event weights - ie. what one should use as event weight for several
+  /// mixed events. Proceed with caution if you do not use an MC with
+  /// unit weights.
+  ///
+  /// @author Christian Bierlich
+  ///
+  typedef map<double, deque<Particles> > MixMap;
  class EventMixingFinalState : public Projection {
  public:
    // Constructor
    EventMixingFinalState(const ParticleFinder& fsp, const ParticleFinder& mix,
      size_t nMixIn, double oMin, double oMax, double deltao)
      : nMix(nMixIn) {
      setName("EventMixingFinalState");
-      addProjection(fsp,"FS");
-      addProjection(mix,"MIX");
+      declare(fsp, "FS");
+      declare(mix, "MIX");
      MSG_WARNING("EventMixingFinalState is a naive implementation, not currently " <<
-        "validated. Use with caution.");
+        "validated. Use with caution.");
      // Set up the map for mixing events
-      for(double o = oMin; o < oMax; o+=deltao )
+      for (double o = oMin; o < oMax; o+=deltao)
        mixEvents[o] = deque<Particles>();
-    }
+

    // Clone on the heap
    DEFAULT_RIVET_PROJ_CLONE(EventMixingFinalState);

    // Return a vector of mixing events.
    vector<Particles> getMixingEvents() const {
      MixMap::const_iterator mixItr = mixEvents.lower_bound(mObs);
      if (mixItr == mixEvents.end() || mixItr->second.size() < nMix + 1)
        return vector<Particles>();
      return vector<Particles>(mixItr->second.begin(), mixItr->second.end() - 1);
    }
-
+
  protected:

    // Calculate mixing observable.
    // Can be overloaded to define more elaborate observables.
    virtual void calculateMixingObs(const Particles& parts) {
      mObs = parts.size();
    }
-
+
    /// Perform the projection on the Event
    void project(const Event& e) {
      const Particles parts = applyProjection<ParticleFinder>(e, "FS").particles();
      calculateMixingObs(parts);
      MixMap::iterator mixItr = mixEvents.lower_bound(mObs);
      if (mixItr == mixEvents.end()) {
        // We are out of bounds.
        MSG_DEBUG("Mixing observable out of bounds.");
        return;
      }
      const Particles mix = applyProjection<ParticleFinder>(e, "MIX").particles();
-
+
      mixItr->second.push_back(mix);
      if (mixItr->second.size() > nMix + 1)
        mixItr->second.pop_front();
    }

    /// Compare with other projections
-    int compare(const Projection& p) const {
-      return mkNamedPCmp(p,"FS");
+    CmpState compare(const Projection& p) const {
+      return mkNamedPCmp(p, "FS");
    }
-
+
  private:
    // The number of events to mix with
    size_t nMix;
    // The mixing observable of the current event
    double mObs;
    // The event map
    MixMap mixEvents;
  };
+
+}
+
#endif
diff --git a/include/Rivet/Projections/PrimaryParticles.hh b/include/Rivet/Projections/PrimaryParticles.hh
--- a/include/Rivet/Projections/PrimaryParticles.hh
+++ b/include/Rivet/Projections/PrimaryParticles.hh
@@ -1,142 +1,139 @@
// -*- C++ -*-
-/**
- * @file PrimaryParticles.hh
- * @author Christian Holm Christensen
- * @date Mon Aug 27 08:46:34 2018
- * @brief Primary particle definition based on PDG code.
- */
#ifndef RIVET_PrimaryParticles_HH
#define RIVET_PrimaryParticles_HH
-#include <Rivet/Projections/ParticleFinder.hh>
-#include <Rivet/Tools/Cuts.hh>
+#include "Rivet/Projections/ParticleFinder.hh"
+#include "Rivet/Tools/Cuts.hh"

-namespace Rivet
-{
+namespace Rivet {
+
+  /// @brief Project out primary particles according to definition.
+  ///
  /// A Rivet projection that mimics an experimental primary particle
  /// definition by projecting out according to particle id.
-  /// The projection can be further specialized to accomodate
+  /// The projection can be further specialized to accommodate
  /// specific experimental definitions.
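+  ///
+  /// A usage sketch (illustrative cut and PDG codes): select primary
+  /// charged pions at mid-rapidity with
+  ///
+  ///   declare(PrimaryParticles({211, -211}, Cuts::absrap < 0.5), "PP");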
- // - class PrimaryParticles : public ParticleFinder - { + /// + /// @author Christian Holm Christensen + class PrimaryParticles : public ParticleFinder { public: - /** - * Constructor - * - * @param cuts Normal particle cuts - * @param pdgIds List of PDG IDs which are considered primary. + /** + * Constructor + * + * @param cuts Normal particle cuts + * @param pdgIds List of PDG IDs which are considered primary. * * @todo Instead of using a real vector use an initializer list - * more flexible. Also, do not provide a default for the PDG IDs * (even if empty) as this will force the user to actively select * which codes are primary. */ PrimaryParticles(std::initializer_list pdgIds, const Cut& c=Cuts::open()) : ParticleFinder(c), _pdgIds(pdgIds) { setName("PrimaryParticles"); } + // Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(PrimaryParticles); - + /** - * Copy constructor + * Copy constructor * - * @param other Other object to copy from + * @param other Other object to copy from */ - PrimaryParticles(const PrimaryParticles& other) : + PrimaryParticles(const PrimaryParticles& other) : ParticleFinder(other), _pdgIds(other._pdgIds) { } - /** - * Compare to projections. - * - * @param p Projection to compare to. - * + /** + * Compare to projections. + * + * @param p Projection to compare to. + * * @return true (EQUIVALENT) if the projection @a p is of the same * type as this, if the cuts are equal, and that the list of PDG * IDs are the same. */ - virtual int compare(const Projection& p) const + virtual CmpState compare(const Projection& p) const { const PrimaryParticles* other = dynamic_cast(&p); - if (!other) return UNDEFINED; - if (_cuts != other->_cuts || _pdgIds != other->_pdgIds) return UNDEFINED; - return EQUIVALENT; - + if (!other) return CmpState::NEQ; + if (_cuts != other->_cuts || _pdgIds != other->_pdgIds) return CmpState::NEQ; + return CmpState::EQ; + } protected: /** * Do the projection. * - * @param e Event to project from + * @param e Event to project from */ virtual void project(const Event& e); - /** + /** * @{ - * @name Internally used member functions + * @name Internally used member functions */ - /** - * Check if the particle is a priamry. + /** + * Check if the particle is a priamry. * - * @param p Pointer to a HepMC particle + * @param p Pointer to a HepMC particle * - * @return true if the particle @a p is considered primary + * @return true if the particle @a p is considered primary */ virtual bool isPrimary(const HepMC::GenParticle* p) const; /** * Check if the particle should be ignored by the status code of * the particle. */ virtual bool isIgnored(const HepMC::GenParticle* p) const; - /** + /** * Check PDG ID of particle @a p is in the list of accepted * primaries. * * @param p Particle to investigate. * * @return true if the particle PDG ID is in the list of known * primary PDG IDs. */ virtual bool isPrimaryPID(const HepMC::GenParticle* p) const; /* * Check if a particle @a p has decayed. * - * @param p Pointer to HepMC particle + * @param p Pointer to HepMC particle * * @return true if the particle has decayed according to the * status flag of the particle @a p */ virtual bool hasDecayed(const HepMC::GenParticle* p) const; /** * Check if a particle is a beam (remnant) particle. 
* - * @param p Particle to check + * @param p Particle to check * - * @return true if the particle @a p is a (remnant) beam particle + * @return true if the particle @a p is a (remnant) beam particle */ virtual bool isBeam(const HepMC::GenParticle* p) const; /* * Get the immediate ancestor of a particle. - * - * @param p Particle for which to get the immediate ancestor * - * @return Pointer to immediate ancestor or null if there's no ancestor. + * @param p Particle for which to get the immediate ancestor + * + * @return Pointer to immediate ancestor or null if there's no ancestor. */ const HepMC::GenParticle* ancestor(const HepMC::GenParticle* p) const; /* * Get the immediate ancestor of a particle, which is @e not an * ignored particle. - * - * @param p Particle for which to get the immediate ancestor * - * @return Pointer to immediate ancestor or null if there's no ancestor. + * @param p Particle for which to get the immediate ancestor + * + * @return Pointer to immediate ancestor or null if there's no ancestor. */ const HepMC::GenParticle* ancestor(const HepMC::GenParticle* p, bool) const; /** Particle types to test for */ std::vector _pdgIds; }; } #endif diff --git a/include/Rivet/Projections/TriggerProjection.hh b/include/Rivet/Projections/TriggerProjection.hh --- a/include/Rivet/Projections/TriggerProjection.hh +++ b/include/Rivet/Projections/TriggerProjection.hh @@ -1,68 +1,67 @@ // -*- C++ -*- #ifndef RIVET_TRIGGERPROJECTION_HH #define RIVET_TRIGGERPROJECTION_HH #include "Rivet/Projection.hh" namespace Rivet { /** @brief Base class for projections returning a bool corresponding to a trigger. @author Leif Lönnblad Project an event down to a single true or false value accessible through the operator() function, where true means that the event has passed some trigger criterion. */ class TriggerProjection: public Projection { public: /// The default constructor. TriggerProjection() : _passed(true) { setName("TriggerProjection"); } virtual ~TriggerProjection() {} - + /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(TriggerProjection); /// Return true if the event has passed some trigger or selection /// criteria. bool operator()() const { return _passed; } protected: virtual void project(const Event& e) { pass(); } /// Indicate that the event has passed the trigger. void pass() { _passed = true; } /// Compare projections - virtual int compare(const Projection&) const { - return 0; + virtual CmpState compare(const Projection&) const { + return CmpState::EQ; } /// Indicate that the event has failed the trigger. void fail() { _passed = false; } private: bool _passed; }; } #endif - diff --git a/include/Rivet/Projections/VetoedFinalState.hh b/include/Rivet/Projections/VetoedFinalState.hh --- a/include/Rivet/Projections/VetoedFinalState.hh +++ b/include/Rivet/Projections/VetoedFinalState.hh @@ -1,212 +1,236 @@ // -*- C++ -*- #ifndef RIVET_VetoedFinalState_HH #define RIVET_VetoedFinalState_HH #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief FS modifier to exclude classes of particles from the final state. class VetoedFinalState : public FinalState { public: /// @name Constructors //@{ - /// Default constructor. - VetoedFinalState(const Cut& c=Cuts::open()) { - setName("VetoedFinalState"); - declare(FinalState(c), "FS"); - } - /// Constructor with specific FinalState. 
diff --git a/include/Rivet/Projections/VetoedFinalState.hh b/include/Rivet/Projections/VetoedFinalState.hh
--- a/include/Rivet/Projections/VetoedFinalState.hh
+++ b/include/Rivet/Projections/VetoedFinalState.hh
@@ -1,212 +1,236 @@
 // -*- C++ -*-
 #ifndef RIVET_VetoedFinalState_HH
 #define RIVET_VetoedFinalState_HH

 #include "Rivet/Projections/FinalState.hh"

 namespace Rivet {

   /// @brief FS modifier to exclude classes of particles from the final state.
   class VetoedFinalState : public FinalState {
   public:

     /// @name Constructors
     //@{

-    /// Default constructor.
-    VetoedFinalState(const Cut& c=Cuts::open()) {
-      setName("VetoedFinalState");
-      declare(FinalState(c), "FS");
-    }

-    /// Constructor with specific FinalState.
-    VetoedFinalState(const FinalState& fsp)
+    /// Constructor with a specific FinalState and a cuts list to veto
+    VetoedFinalState(const FinalState& fsp, const vector<Cut>& cuts)
+      : FinalState(), _vetoCuts(cuts)
     {
       setName("VetoedFinalState");
       declare(fsp, "FS");
     }

     /// Constructor with a specific FinalState and a single cut to veto
     VetoedFinalState(const FinalState& fsp, const Cut& cut)
       : VetoedFinalState(fsp, vector<Cut>{cut})
     { }

     /// Constructor with a default FinalState and a cuts list to veto
     VetoedFinalState(const vector<Cut>& cuts)
       : VetoedFinalState(FinalState(), cuts)
     { }

     /// Constructor with a default FinalState and a single cut to veto
     VetoedFinalState(const Cut& cut)
       : VetoedFinalState(FinalState(), vector<Cut>{cut})
     { }

     /// Constructor with a specific FinalState and a PID list to veto
     VetoedFinalState(const FinalState& fsp, const vector<PdgId>& vetopids)
       : VetoedFinalState(fsp, {})
     {
-      setName("VetoedFinalState");
-      declare(FinalState(), "FS");
+      _vetoCuts.reserve(vetopids.size());
+      for (PdgId pid : vetopids) addVeto(pid);
     }

+    /// Constructor with a specific FinalState and a PID to veto
+    VetoedFinalState(const FinalState& fsp, PdgId vetopid)
+      : VetoedFinalState(fsp, vector<Cut>{Cuts::pid == vetopid})
+    { }
+
+    /// Constructor with a default FinalState and a PID list to veto
+    VetoedFinalState(const vector<PdgId>& vetopids)
+      : VetoedFinalState(FinalState(), {})
+    {
+      _vetoCuts.reserve(vetopids.size());
+      for (PdgId pid : vetopids) addVeto(pid);
+    }
+
+    /// Constructor with a default FinalState and a PID to veto
+    VetoedFinalState(PdgId vetopid)
+      : VetoedFinalState(FinalState(), vector<Cut>{Cuts::pid == vetopid})
+    { }
+
+    /// Constructor with specific FinalState but no cuts
+    VetoedFinalState(const FinalState& fsp)
+      : VetoedFinalState(fsp, vector<Cut>())
+    { }
+
+    /// Default constructor with default FinalState and no cuts
+    VetoedFinalState()
+      : VetoedFinalState(FinalState(), vector<Cut>())
+    { }
+
     /// You can add a map of ID plus a pair containing \f$ p_{Tmin} \f$ and
     /// \f$ p_{Tmax} \f$ -- these define the range of particles to be vetoed.
     //DEPRECATED("Prefer constructors using Cut arguments")
     VetoedFinalState(const map<PdgId, pair<double, double>>& vetocodes)
       : VetoedFinalState(FinalState(), {})
     {
       for (const auto& it : vetocodes) {
         addVeto(it.first, Cuts::pT > it.second.first && Cuts::pT < it.second.second);
       }
     }

     /// Clone on the heap.
     DEFAULT_RIVET_PROJ_CLONE(VetoedFinalState);

     //@}

     /// Get the list of particle IDs and \f$ p_T \f$ ranges to veto.
     const vector<Cut>& vetoDetails() const {
       return _vetoCuts;
     }
     //using vetos = vetoDetails;

     /// Add a particle selection to be vetoed from the final state
     VetoedFinalState& addVeto(const Cut& cut) {
       _vetoCuts.push_back(cut);
       return *this;
     }

     /// Add a particle selection to be vetoed from the final state
     VetoedFinalState& addVeto(PdgId pid, const Cut& cut=Cuts::OPEN) {
       _vetoCuts.push_back(Cuts::pid == pid && cut);
       return *this;
     }

     /// Add a particle/antiparticle selection to be vetoed from the final state
     VetoedFinalState& addVetoPair(PdgId pid, const Cut& cut=Cuts::OPEN) {
       _vetoCuts.push_back(Cuts::abspid == pid && cut);
       return *this;
     }

     /// @brief Add a particle ID and \f$ p_T \f$ range to veto
     ///
     /// Particles with \f$ p_T \f$ IN the given range will be rejected.
-    VetoedFinalState& addVetoDetail(PdgId pid, double ptmin, double ptmax=numeric_limits<double>::max()) {
+    VetoedFinalState& addVetoDetail(PdgId pid, double ptmin, double ptmax=std::numeric_limits<double>::max()) {
       return addVeto(pid, Cuts::ptIn(ptmin, ptmax));
     }
     //const auto addVeto = addVetoDetail;

     /// @brief Add a particle/antiparticle pair to veto in a given \f$ p_T \f$ range
     ///
     /// Given a single ID, both the particle and its conjugate antiparticle will
     /// be rejected if their \f$ p_T \f$ is IN the given range.
-    VetoedFinalState& addVetoPairDetail(PdgId pid, double ptmin, double ptmax=numeric_limits<double>::max()) {
+    VetoedFinalState& addVetoPairDetail(PdgId pid, double ptmin, double ptmax=std::numeric_limits<double>::max()) {
       return addVetoPair(pid, Cuts::ptIn(ptmin, ptmax));
     }
     //using addVetoPair = addVetoPairDetail;

     /// @brief Add a particle ID to veto (all \f$ p_T \f$ range will be vetoed)
     VetoedFinalState& addVetoId(PdgId pid) {
       return addVeto(pid);
     }
     //using addVeto = addVetoId;

     /// @brief Add a particle/antiparticle pair to veto
     ///
     /// Given a single ID, both the particle and its corresponding antiparticle
     /// (for all \f$ p_T \f$ values) will be vetoed.
     VetoedFinalState& addVetoPairId(PdgId pid) {
       return addVetoPair(pid);
     }
     //using addVetoPair = addVetoPairId;

     /// Set the list of particle selections to veto
     VetoedFinalState& setVetoDetails(const vector<Cut>& cuts) {
       _vetoCuts = cuts;
       return *this;
     }
     //const auto setVetos = setVetoDetails;

     /// Veto all neutrinos (convenience method)
     VetoedFinalState& vetoNeutrinos() {
       addVetoPairId(PID::NU_E);
       addVetoPairId(PID::NU_MU);
       addVetoPairId(PID::NU_TAU);
       return *this;
     }

     /// Add a veto on composite masses within a given width.
     /// The composite mass is composed of nProducts decay products.
     /// @todo Might we want to specify a range of PDG IDs for the decay products?
     VetoedFinalState& addCompositeMassVeto(double mass, double width, int nProducts=2) {
       const double halfWidth = 0.5*width;
       pair<double, double> massRange(mass-halfWidth, mass+halfWidth);
       _compositeVetoes.insert(make_pair(nProducts, massRange));
       _nCompositeDecays.insert(nProducts);
       return *this;
     }

     /// Veto the decay products of particle with PDG code @a pid
     /// @todo Need HepMC to sort themselves out and keep vector bosons from
     ///   the hard vtx in the event record before this will work reliably for all PDG IDs
     VetoedFinalState& addDecayProductsVeto(PdgId pid) {
       _parentVetoes.insert(pid);
       return *this;
     }

     /// Veto particles from a supplied final state
     VetoedFinalState& addVetoOnThisFinalState(const ParticleFinder& fs) {
       const string name = "FS_" + to_str(_vetofsnames.size());
-      addProjection(fs, name);
+      declare(fs, name);
       _vetofsnames.insert(name);
       return *this;
     }

     /// Clear the list of particle IDs and ranges to veto.
     VetoedFinalState& reset() {
       _vetoCuts.clear();
       return *this;
     }

     /// Apply the projection on the supplied event.
     void project(const Event& e);

     /// Compare projections.
     CmpState compare(const Projection& p) const;

   private:

     /// The veto cuts
     vector<Cut> _vetoCuts;

     /// Composite particle masses to veto
     /// @todo Also generalise to use Cuts
     multimap<int, pair<double, double>> _compositeVetoes;
     set<int> _nCompositeDecays;

     /// Set of decaying particle IDs to veto
     set<PdgId> _parentVetoes;

     /// Set of final states to be vetoed
     set<string> _vetofsnames;

   };

 }

 #endif
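For orientation, here is a hedged usage sketch of the new Cut-based veto API; the cut values and the projection name "VFS" are arbitrary examples, not taken from this patch. Start from a broad final state, veto soft photons with a Cut, and remove all neutrinos with the convenience method:

    // In an analysis' init(); illustrative values only.
    FinalState fs(Cuts::abseta < 4.9);
    VetoedFinalState vfs(fs, Cuts::pid == PID::PHOTON && Cuts::pT < 1*GeV);
    vfs.vetoNeutrinos();
    declare(vfs, "VFS");

    // Later, in analyze():
    // const Particles& parts = apply<VetoedFinalState>(event, "VFS").particles();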
diff --git a/include/Rivet/Tools/AtlasCommon.hh b/include/Rivet/Tools/AtlasCommon.hh
--- a/include/Rivet/Tools/AtlasCommon.hh
+++ b/include/Rivet/Tools/AtlasCommon.hh
@@ -1,120 +1,124 @@
 // -*- C++ -*-
 #ifndef RIVET_ATLAS_COMMON_HH
 #define RIVET_ATLAS_COMMON_HH

 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/ChargedFinalState.hh"
 #include "Rivet/Projections/SingleValueProjection.hh"
 #include "Rivet/Projections/TriggerProjection.hh"

 namespace Rivet {

   /// Common projections for ATLAS trigger conditions and centrality.
-  namespace ATLAS {
-/// Centrality projection for pPb collisions (one-sided)
+  namespace ATLAS {
+
+
+    /// Centrality projection for pPb collisions (one-sided)
     class SumET_PB_Centrality: public SingleValueProjection {

     public:

       /// Constructor.
       SumET_PB_Centrality() {
         declare(FinalState(Cuts::eta < -3.2 && Cuts::eta > -4.9 &&
                            Cuts::pT > 0.1*GeV), "SumET_PB_Centrality");
       }

       /// Clone on the heap.
       DEFAULT_RIVET_PROJ_CLONE(SumET_PB_Centrality);

     protected:

       /// Perform the projection on the Event
       void project(const Event& e) {
         clear();
         const FinalState& fsfwd = apply<FinalState>(e, "SumET_PB_Centrality");
         double estimate = 0.0;
         for ( const Particle& p : fsfwd.particles() ) {
           estimate += p.Et();
         }
         set(estimate);
       }
-
+
       /// Compare projections
-      int compare(const Projection& p) const {
+      CmpState compare(const Projection& p) const {
         return mkNamedPCmp(p, "SumET_PB_Centrality");
       }

     };
-
+
+
     /// Centrality projection for PbPb collisions (two-sided)
     class SumET_PBPB_Centrality: public SingleValueProjection {

     public:

       /// Constructor.
       SumET_PBPB_Centrality() {
         declare(FinalState(Cuts::abseta > 3.2 && Cuts::abseta < 4.9 &&
                            Cuts::pT > 0.1*GeV), "SumET_PBPB_Centrality");
       }

       /// Clone on the heap.
       DEFAULT_RIVET_PROJ_CLONE(SumET_PBPB_Centrality);

     protected:

       /// Perform the projection on the Event
       void project(const Event& e) {
         clear();
         const FinalState& fsfwd = apply<FinalState>(e, "SumET_PBPB_Centrality");
         double estimate = 0.0;
         for ( const Particle& p : fsfwd.particles() ) {
           estimate += p.Et();
         }
         set(estimate);
       }
-
+
       /// Compare projections
-      int compare(const Projection& p) const {
+      CmpState compare(const Projection& p) const {
         return mkNamedPCmp(p, "SumET_PBPB_Centrality");
       }

     };
-
+
     /// ATLAS min-bias trigger conditions.
     class MinBiasTrigger: public TriggerProjection {

     public:

       /// Constructor.
       MinBiasTrigger() {
         declare(ChargedFinalState(Cuts::eta > 2.09 && Cuts::eta < 3.84 &&
                                   Cuts::pT > 0.1*GeV), "MBB");
         declare(ChargedFinalState(Cuts::eta < -2.09 && Cuts::eta > -3.84 &&
                                   Cuts::pT > 0.1*GeV), "MBF");
       }

       /// Clone on the heap.
       DEFAULT_RIVET_PROJ_CLONE(MinBiasTrigger);

     protected:

       /// Perform the projection on the Event
       void project(const Event& event) {
         pass();
         if ( applyProjection<ChargedFinalState>(event,"MBF").particles().empty() ||
              applyProjection<ChargedFinalState>(event,"MBB").particles().empty() )
           fail();
       }
-
+
       /// Compare projections
-      int compare(const Projection& p) const {
+      CmpState compare(const Projection& p) const {
         return mkNamedPCmp(p, "MBF") || mkNamedPCmp(p, "MBB");
       }

     };
-
+
+
   }
 }

 #endif
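A hedged sketch of how these ATLAS helpers are meant to be wired into an analysis; the projection names "TRIG" and "CENT" are hypothetical, and the operator() retrieval of the SingleValueProjection estimate is assumed from the interface above:

    // In init():
    declare(ATLAS::MinBiasTrigger(), "TRIG");
    declare(ATLAS::SumET_PBPB_Centrality(), "CENT");

    // In analyze():
    if (!apply<ATLAS::MinBiasTrigger>(event, "TRIG")()) vetoEvent;
    const double sumEt = apply<ATLAS::SumET_PBPB_Centrality>(event, "CENT")();
    // ... classify the event's centrality by sumEt ...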
diff --git a/src/Projections/DressedLeptons.cc b/src/Projections/DressedLeptons.cc
--- a/src/Projections/DressedLeptons.cc
+++ b/src/Projections/DressedLeptons.cc
@@ -1,181 +1,181 @@
 // -*- C++ -*-
 #include "Rivet/Projections/DressedLeptons.hh"
 #include "Rivet/Projections/PromptFinalState.hh"
 #include "Rivet/Projections/MergedFinalState.hh"
 #include "Rivet/Projections/FastJets.hh"

 namespace Rivet {

   // On DressedLepton helper class
   //{

   DressedLepton::DressedLepton(const Particle& dlepton)
     : Particle(dlepton)
   {
     setConstituents({{dlepton}}); //< bare lepton is first constituent
   }

   DressedLepton::DressedLepton(const Particle& lepton, const Particles& photons, bool momsum)
     : Particle(lepton.pid(), lepton.momentum())
   {
     setConstituents({{lepton}}); //< bare lepton is first constituent
     addConstituents(photons, momsum);
   }

   void DressedLepton::addPhoton(const Particle& p, bool momsum) {
     if (p.pid() != PID::PHOTON)
       throw Error("Clustering a non-photon on to a DressedLepton:"+to_string(p.pid()));
     addConstituent(p, momsum);
   }

   const Particle& DressedLepton::bareLepton() const {
     const Particle& l = constituents().front();
     if (!l.isChargedLepton())
       throw Error("First constituent of a DressedLepton is not a bare lepton: oops");
     return l;
   }

   //}


   // Separate-FS version
   DressedLeptons::DressedLeptons(const FinalState& photons, const FinalState& bareleptons,
                                  double dRmax, const Cut& cut,
                                  bool useDecayPhotons, bool useJetClustering)
     : FinalState(cut),
       _dRmax(dRmax), _fromDecay(useDecayPhotons), _useJetClustering(useJetClustering)
   {
     setName("DressedLeptons");

     // Find photons -- specialising to prompt photons if decay photons are to be vetoed
     IdentifiedFinalState photonfs(photons, PID::PHOTON);
     if (_fromDecay) {
-      addProjection(photonfs, "Photons");
+      declare(photonfs, "Photons");
     } else {
-      addProjection(PromptFinalState(photonfs), "Photons");
+      declare(PromptFinalState(photonfs), "Photons");
     }

     // Find bare leptons
     IdentifiedFinalState leptonfs(bareleptons);
     leptonfs.acceptIdPairs({PID::ELECTRON, PID::MUON, PID::TAU}); //< hmm, no final-state taus, so is this useful?
-    addProjection(leptonfs, "Leptons");
+    declare(leptonfs, "Leptons");

     // Set up FJ clustering option
     if (_useJetClustering) {
       MergedFinalState mergedfs(photonfs, leptonfs);
       FastJets leptonjets(mergedfs, FastJets::ANTIKT, dRmax);
-      addProjection(leptonjets, "LeptonJets");
+      declare(leptonjets, "LeptonJets");
     }
   }


   // Single-FS version
   DressedLeptons::DressedLeptons(const FinalState& barefs,
                                  double dRmax, const Cut& cut,
                                  bool useDecayPhotons, bool useJetClustering)
     : DressedLeptons(barefs, barefs, dRmax, cut, useDecayPhotons, useJetClustering)
   { }


   CmpState DressedLeptons::compare(const Projection& p) const {
     // Compare the two as final states (for pT and eta cuts)
     const DressedLeptons& other = dynamic_cast<const DressedLeptons&>(p);
     CmpState fscmp = FinalState::compare(other);
     if (fscmp != CmpState::EQ) return fscmp;

     const PCmp phcmp = mkNamedPCmp(p, "Photons");
     if (phcmp != CmpState::EQ) return phcmp;

     const PCmp sigcmp = mkNamedPCmp(p, "Leptons");
     if (sigcmp != CmpState::EQ) return sigcmp;

     return (cmp(_dRmax, other._dRmax) ||
             cmp(_fromDecay, other._fromDecay) ||
             cmp(_useJetClustering, other._useJetClustering));
   }


   void DressedLeptons::project(const Event& e) {
     _theParticles.clear();

     // Get bare leptons
     const FinalState& signal = apply<FinalState>(e, "Leptons");
     Particles bareleptons = signal.particles();
     if (bareleptons.empty()) return;

     // Initialise DL collection with bare leptons
     vector<Particle> allClusteredLeptons;
     allClusteredLeptons.reserve(bareleptons.size());

     // If the radius is 0 or negative, don't even attempt to cluster
     if (_useJetClustering) {
       if (_dRmax <= 0) {
         for (const Particle& bl : bareleptons) {
           Particle dl(bl.pid(), bl.momentum());
           dl.setConstituents({bl});
           allClusteredLeptons += dl;
         }
       } else {
         const Jets& lepjets = apply<FastJets>(e, "LeptonJets").jets();
         for (const Jet& lepjet : lepjets) {
           const Particles leps = sortByPt(lepjet.particles(isChargedLepton));
           if (leps.empty()) continue;
           Particles constituents = {leps[0]}; //< note no dressing for subleading leptons
           Particle dl(leps[0].pid(), leps[0].momentum());
           constituents += lepjet.particles(isPhoton);
           dl.setConstituents(constituents);
           allClusteredLeptons += dl;
         }
       }
     } else {
       for (const Particle& bl : bareleptons) {
         Particle dl(bl.pid(), bl.momentum());
         dl.setConstituents({bl});
         allClusteredLeptons += dl;
       }
       if (_dRmax > 0) {
         // Match each photon to its closest charged lepton within the dR cone
         const FinalState& photons = applyProjection<FinalState>(e, "Photons");
         for (const Particle& photon : photons.particles()) {
           // Ignore photon if it's from a hadron/tau decay and we're avoiding those
-          /// @todo Can remove via the PromptFinalState conversion above?
-          if (!_fromDecay && photon.fromDecay()) continue;
+          /// @todo Already removed via the PromptFinalState conversion above?
+          if (!_fromDecay && !photon.isDirect()) continue;
           const FourMomentum& p_P = photon.momentum();
           double dRmin = _dRmax;
           int idx = -1;
           for (size_t i = 0; i < bareleptons.size(); ++i) {
             const Particle& bl = bareleptons[i];
             // Only cluster photons around *charged* signal particles
             if (bl.charge3() == 0) continue;
             // Find the closest lepton
             double dR = deltaR(bl, p_P);
             if (dR < dRmin) {
               dRmin = dR;
               idx = i;
             }
           }
           if (idx > -1) allClusteredLeptons[idx].addConstituent(photon, true);
         }
       }
     }

     // Fill the canonical particles collection with the composite DL Particles
     for (const Particle& lepton : allClusteredLeptons) {
       const bool acc = accept(lepton);
       MSG_TRACE("Clustered lepton " << lepton
                 << " with constituents = " << lepton.constituents()
                 << ", cut-pass = " << std::boolalpha << acc);
       if (acc) _theParticles.push_back(lepton);
     }
     MSG_DEBUG("#dressed leptons = " << allClusteredLeptons.size()
               << " -> " << _theParticles.size() << " after cuts");
   }

 }
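A hedged sketch of the typical dressing workflow driven by the separate-FS constructor above; the cut values and the "DressedLeps" name are illustrative only:

    // In init(): dress prompt electrons/muons with photons in a dR = 0.1 cone.
    FinalState photons(Cuts::abspid == PID::PHOTON);
    FinalState bareleps(Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON);
    DressedLeptons dressed(photons, bareleps, 0.1,
                           Cuts::abseta < 2.5 && Cuts::pT > 20*GeV,
                           false,   // useDecayPhotons: false -> PromptFinalState filtering
                           false);  // useJetClustering: false -> nearest-lepton dR matching
    declare(dressed, "DressedLeps");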
diff --git a/src/Projections/PrimaryParticles.cc b/src/Projections/PrimaryParticles.cc
--- a/src/Projections/PrimaryParticles.cc
+++ b/src/Projections/PrimaryParticles.cc
@@ -1,91 +1,93 @@
 /**
  * @file   PrimaryParticles.cc
  * @author Christian Holm Christensen
  * @date   Mon Aug 27 09:06:03 2018
- *
- * @brief Primary particle definition based on PDG IDs.
+ *
+ * @brief Primary particle definition based on PDG IDs.
  */
 #include
 #include
 #include
 #include
 #include
 #include
 #include

 namespace Rivet {

+
   void PrimaryParticles::project(const Event& e) {
     _theParticles.clear(); // Clear cache
-    bool open = _cuts == Cuts::open();
+    bool open = _cuts == Cuts::open();
     for (auto p : Rivet::particles(e.genEvent())) {
-      if (isPrimary(p) && (open || _cuts->accept(Particle(p))))
+      if (isPrimary(p) && (open || _cuts->accept(Particle(p))))
         _theParticles.push_back(Particle(*p));
     }
   }

   bool PrimaryParticles::isPrimary(const HepMC::GenParticle* p) const {
     if (isIgnored(p)) return false;
     if (!isPrimaryPID(p)) return false;
-    // Loop back over ancestors that are _not_ ignored
+    // Loop back over ancestors that are _not_ ignored
     const HepMC::GenParticle* m = p;
     while ((m = ancestor(m,true))) {
       if (isBeam(m)) return true;
       if (isPrimaryPID(m)) return false;
       if (!hasDecayed(m)) return false;
     }
     return true;
   }

+
   bool PrimaryParticles::isIgnored(const HepMC::GenParticle* p) const {
     return (p->status()==0 || (p->status()>=11 && p->status()<=200));
   }
-
+
   bool PrimaryParticles::isPrimaryPID(const HepMC::GenParticle* p) const {
-    int thisPID = PID::abspid(p->pdg_id());
+    int thisPID = abs(p->pdg_id());
     for (const auto pid : _pdgIds)
       if (thisPID == pid) return true;
     return false;
   }

   bool PrimaryParticles::isBeam(const HepMC::GenParticle* p) const {
     // Pythia6 uses 3 for initial state
-    return p && (p->status() == 3 || p->status() == 4);
+    return p && (p->status() == 3 || p->status() == 4);
   }

   bool PrimaryParticles::hasDecayed(const HepMC::GenParticle* p) const {
     return p && p->status() == 2;
   }

   const HepMC::GenParticle*
   PrimaryParticles::ancestor(const HepMC::GenParticle* p) const {
     const HepMC::GenVertex* vtx = p->production_vertex();
     if (!vtx) return 0;
-
+
     HepMC::GenVertex::particles_in_const_iterator i = vtx->particles_in_const_begin();
     if (i == vtx->particles_in_const_end()) return 0;
     return *i;
   }
-
+
   const HepMC::GenParticle*
   PrimaryParticles::ancestor(const HepMC::GenParticle* p, bool) const {
     const HepMC::GenParticle* m = p;
     do {
       m = ancestor(m);
     } while (m && isIgnored(m));
     return m;
   }
 }
 //
 // EOF
 //
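To make the ancestor-walking definition concrete, here is a hedged usage sketch, not part of this patch: select primary charged pions, kaons and protons at mid-rapidity. Since isPrimaryPID() compares absolute PDG IDs, listing the positive codes also accepts the antiparticles; the projection name "PRIM" is arbitrary.

    // In init(); PDG codes 211, 321, 2212 = pi+, K+, p.
    declare(PrimaryParticles({211, 321, 2212}, Cuts::absrap < 0.5), "PRIM");

    // In analyze():
    // const Particles& prim = apply<PrimaryParticles>(event, "PRIM").particles();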
diff --git a/src/Projections/VetoedFinalState.cc b/src/Projections/VetoedFinalState.cc
--- a/src/Projections/VetoedFinalState.cc
+++ b/src/Projections/VetoedFinalState.cc
@@ -1,124 +1,125 @@
 // -*- C++ -*-
 #include "Rivet/Projections/VetoedFinalState.hh"

 namespace Rivet {

   CmpState VetoedFinalState::compare(const Projection& p) const {
     const PCmp fscmp = mkNamedPCmp(p, "FS");
-    if (fscmp != CmpState::EQ) return fscmp;
+    if (fscmp != CmpState::EQ) return CmpState::NEQ;

     /// @todo We can do better than this...
-    if (_vetofsnames.size() != 0) return CmpState::UNDEF;
+    if (_vetofsnames.size() != 0) return CmpState::NEQ;

     const VetoedFinalState& other = dynamic_cast<const VetoedFinalState&>(p);
     return \
       cmp(_vetoCuts, other._vetoCuts) ||
       cmp(_compositeVetoes, other._compositeVetoes) ||
       cmp(_parentVetoes, other._parentVetoes);
   }

   void VetoedFinalState::project(const Event& e) {
     const FinalState& fs = applyProjection<FinalState>(e, "FS");
     _theParticles.clear();
     _theParticles.reserve(fs.particles().size());

     // Veto by cut
     if (_vetoCuts.empty()) {
       _theParticles = fs.particles();
     } else {
       // Test every particle against the codes
       for (const Particle& p : fs.particles()) {
         bool match = false;
         for (const Cut& c : _vetoCuts) {
           if (c->accept(p)) {
             match = true;
             break;
           }
         }
         if (!match) {
           MSG_TRACE("Storing particle " << p);
           _theParticles.push_back(p);
         } else {
           MSG_TRACE("Vetoing particle " << p);
         }
       }
     }

     // Composite particle mass vetoing
     /// @todo YUCK! Clean up...
     set<Particles::iterator> toErase;
     for (auto nIt = _nCompositeDecays.begin();
          nIt != _nCompositeDecays.end() && !_theParticles.empty(); ++nIt) {
       map<set<Particles::iterator>, FourMomentum> oldMasses;
       map<set<Particles::iterator>, FourMomentum> newMasses;
       set<Particles::iterator> start;
       start.insert(_theParticles.begin());
       oldMasses.insert(pair<set<Particles::iterator>, FourMomentum>(start, _theParticles.begin()->momentum()));

       for (int nParts = 1; nParts != *nIt; ++nParts) {
         for (auto mIt = oldMasses.begin(); mIt != oldMasses.end(); ++mIt) {
           Particles::iterator pStart = *(mIt->first.rbegin());
           for (auto pIt = pStart + 1; pIt != _theParticles.end(); ++pIt) {
             FourMomentum cMom = mIt->second + pIt->momentum();
             set<Particles::iterator> pList(mIt->first);
             pList.insert(pIt);
             newMasses[pList] = cMom;
           }
         }
         oldMasses = newMasses;
         newMasses.clear();
       }

       for (auto mIt = oldMasses.begin(); mIt != oldMasses.end(); ++mIt) {
         double mass2 = mIt->second.mass2();
         if (mass2 >= 0.0) {
           double mass = sqrt(mass2);
           for (auto cIt = _compositeVetoes.lower_bound(*nIt);
                cIt != _compositeVetoes.upper_bound(*nIt); ++cIt) {
             auto massRange = cIt->second;
             if (mass < massRange.second && mass > massRange.first) {
               for (auto lIt = mIt->first.begin(); lIt != mIt->first.end(); ++lIt) {
                 toErase.insert(*lIt);
               }
             }
           }
         }
       }
     }
     for (auto p = toErase.rbegin(); p != toErase.rend(); ++p) {
       _theParticles.erase(*p);
     }

     // Remove particles whose parents match entries in the parent veto PDG ID codes list
     /// @todo There must be a nice way to do this -- an STL algorithm (or we provide a nicer wrapper)
     for (PdgId vetoid : _parentVetoes) {
       for (Particles::iterator ip = _theParticles.begin(); ip != _theParticles.end(); ++ip) {
         const GenVertex* startVtx = ip->genParticle()->production_vertex();
         if (startVtx == NULL) continue;
         // Loop over parents and test their IDs
         /// @todo Could use any() here?
         for (const GenParticle* parent : Rivet::particles(startVtx, HepMC::ancestors)) {
           if (vetoid == parent->pdg_id()) {
             ip = _theParticles.erase(ip);
             --ip; //< Erase this _theParticles entry
             break;
           }
         }
       }
     }

+    // Finally veto on the registered FSes
     for (const string& ifs : _vetofsnames) {
       const ParticleFinder& vfs = applyProjection<ParticleFinder>(e, ifs);
       const Particles& pvetos = vfs.rawParticles();
       ifilter_discard(_theParticles, [&](const Particle& pcheck) {
         if (pcheck.genParticle() == nullptr) return false;
         for (const Particle& pveto : pvetos) {
           if (pveto.genParticle() == nullptr) continue;
           if (pveto.genParticle() == pcheck.genParticle()) {
             MSG_TRACE("Vetoing: " << pcheck);
             return true;
           }
         }
         return false;
       });
     }

     MSG_DEBUG("FS vetoing from #particles = " << fs.size() << " -> " << _theParticles.size());
   }

 }
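As a hedged illustration of the composite-mass veto exercised by the code above (the numbers are arbitrary examples, not from this patch): remove every particle that pairs with one other particle to give an invariant mass inside a 10 GeV window around 91 GeV, e.g. to excise Z boson decay candidates.

    VetoedFinalState vfs{FinalState(Cuts::abseta < 5)};
    vfs.addCompositeMassVeto(91*GeV, 10*GeV, 2); // veto 2-body masses in [86, 96] GeV
    declare(vfs, "NoZWindow");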
diff --git a/src/Tools/RivetYODA.cc b/src/Tools/RivetYODA.cc
--- a/src/Tools/RivetYODA.cc
+++ b/src/Tools/RivetYODA.cc
@@ -1,407 +1,415 @@
 #include "Rivet/Config/RivetCommon.hh"
 #include "Rivet/Tools/RivetYODA.hh"
 #include "Rivet/Tools/RivetPaths.hh"
 #include "YODA/ReaderYODA.h"
 #include "YODA/ReaderAIDA.h"

 // use execinfo for backtrace if available
 #include "DummyConfig.hh"
 #ifdef HAVE_EXECINFO_H
 #include <execinfo.h>
 #endif

 using namespace std;

 namespace Rivet {

   template <class T>
   Wrapper<T>::~Wrapper() {}

   template <class T>
   Wrapper<T>::Wrapper(const vector<string>& weightNames, const T& p) {
     for (const string& weightname : weightNames) {
       _persistent.push_back(make_shared<T>(p));
       auto obj = _persistent.back();
       if (weightname != "")
         obj->setPath(obj->path() + "[" + weightname + "]");
     }
   }

   template <class T>
   typename T::Ptr Wrapper<T>::active() const {
     if ( !_active ) {
 #ifdef HAVE_BACKTRACE
       void* buffer[4];
       backtrace(buffer, 4);
       backtrace_symbols_fd(buffer, 4, 1);
 #endif
       assert(false && "No active pointer set. Was this object booked in init()?");
     }
     return _active;
   }

   template <class T>
   void Wrapper<T>::newSubEvent() {
     typename TupleWrapper<T>::Ptr tmp = make_shared<TupleWrapper<T>>(_persistent[0]->clone());
     tmp->reset();
     _evgroup.push_back( tmp );
     _active = _evgroup.back();
     assert(_active);
   }

   string getDatafilePath(const string& papername) {
     /// Try to find YODA otherwise fall back to try AIDA
     const string path1 = findAnalysisRefFile(papername + ".yoda");
     if (!path1.empty()) return path1;
     const string path2 = findAnalysisRefFile(papername + ".aida");
     if (!path2.empty()) return path2;
     throw Rivet::Error("Couldn't find ref data file '" + papername + ".yoda" +
                        "' in data path, '" + getRivetDataPath() + "', or '.'");
   }

   map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername) {
     const string datafile = getDatafilePath(papername);

     // Make an appropriate data file reader and read the data objects
     /// @todo Remove AIDA support some day...
     YODA::Reader& reader = (datafile.find(".yoda") != string::npos) ? \
       YODA::ReaderYODA::create() : YODA::ReaderAIDA::create();
     vector<YODA::AnalysisObject*> aovec;
     reader.read(datafile, aovec);

     // Return value, to be populated
     map<string, YODA::AnalysisObjectPtr> rtn;
     for ( YODA::AnalysisObject* ao : aovec ) {
       YODA::AnalysisObjectPtr refdata(ao);
       if (!refdata) continue;
       const string plotpath = refdata->path();
       // Split path at "/" and only return the last field, i.e. the histogram ID
       const size_t slashpos = plotpath.rfind("/");
       const string plotname = (slashpos+1 < plotpath.size()) ?
         plotpath.substr(slashpos+1) : "";
       rtn[plotname] = refdata;
     }
     return rtn;
   }

+
+}
+
+namespace {

   using Rivet::Fill;
   using Rivet::Fills;
   using Rivet::TupleWrapper;

   template <class T>
   double get_window_size(const typename T::Ptr& histo,
                          typename T::BinType x) {
     // the bin index we fall in
     const auto binidx = histo->binIndexAt(x);
     // gaps, overflow, underflow don't contribute
     if ( binidx == -1 )
       return 0;

     const auto& b = histo->bin(binidx);

     // if we don't have a valid neighbouring bin,
     // we use infinite width
     typename T::Bin b1(-1.0/0.0, 1.0/0.0);

     // points in the top half compare to the upper neighbour
     if ( x > b.xMid() ) {
       size_t nextidx = binidx + 1;
       if ( nextidx < histo->bins().size() )
         b1 = histo->bin(nextidx);
     }
     else { // compare to the lower neighbour
       int nextidx = binidx - 1;
       if ( nextidx >= 0 )
         b1 = histo->bin(nextidx);
     }
     // the factor 2 is arbitrary, could poss. be smaller
     return min( b.width(), b1.width() ) / 2.0;
   }

   template <class T>
   typename T::BinType fillT2binT(typename T::FillType a) {
     return a;
   }

   template <>
   YODA::Profile1D::BinType fillT2binT<YODA::Profile1D>(YODA::Profile1D::FillType a) {
     return get<0>(a);
   }

   template <>
   YODA::Profile2D::BinType fillT2binT<YODA::Profile2D>(YODA::Profile2D::FillType a) {
     return YODA::Profile2D::BinType{ get<0>(a), get<1>(a) };
   }

   template <class T>
   void commit(vector<typename T::Ptr>& persistent,
               const vector< vector<Fill<T>> >& tuple,
               const vector<valarray<double>>& weights ) {
     // TODO check if all the xs are in the same bin anyway!
     // Then no windowing needed

     assert(persistent.size() == weights[0].size());
     for ( const auto& x : tuple ) {
       double maxwindow = 0.0;
       for ( const auto& xi : x ) {
         // TODO check for NOFILL here
         // persistent[0] has the same binning as all the other persistent objects
         double window = get_window_size<T>(persistent[0], fillT2binT<T>(xi.first));
         if ( window > maxwindow )
           maxwindow = window;
       }
       const double wsize = maxwindow;
       // all windows have same size

       set<double> edgeset;
       // bin edges need to be in here!
       for ( const auto& xi : x ) {
         edgeset.insert(fillT2binT<T>(xi.first) - wsize);
         edgeset.insert(fillT2binT<T>(xi.first) + wsize);
       }

       vector< std::tuple<double, valarray<double>, double> > hfill;
       double sumf = 0.0;
       auto edgit = edgeset.begin();
       double ehi = *edgit;
       while ( ++edgit != edgeset.end() ) {
         double elo = ehi;
         ehi = *edgit;
         valarray<double> sumw(0.0, persistent.size()); // need m copies of this
         bool gap = true; // Check for gaps between the sub-windows.
         for ( size_t i = 0; i < x.size(); ++i ) {
           // check equals comparisons here!
           if ( fillT2binT<T>(x[i].first) + wsize >= ehi &&
                fillT2binT<T>(x[i].first) - wsize <= elo ) {
             sumw += x[i].second * weights[i];
             gap = false;
           }
         }
         if ( gap ) continue;
         hfill.push_back( make_tuple( (ehi + elo)/2.0, sumw, (ehi - elo) ) );
         sumf += ehi - elo;
       }
       for ( auto f : hfill )
         for ( size_t m = 0; m < persistent.size(); ++m )
           persistent[m]->fill( get<0>(f), get<1>(f)[m], get<2>(f)/sumf );
           // Note the scaling to one single fill
     }
+  }

   template<>
   void commit<YODA::Histo2D>(vector<YODA::Histo2D::Ptr>& persistent,
                              const vector< vector<Fill<YODA::Histo2D>> >& tuple,
                              const vector<valarray<double>>& weights) {}

   template<>
   void commit<YODA::Profile2D>(vector<YODA::Profile2D::Ptr>& persistent,
                                const vector< vector<Fill<YODA::Profile2D>> >& tuple,
                                const vector<valarray<double>>& weights) {}

   template <class T>
   double distance(T a, T b) {
     return abs(a - b);
   }

   template <>
   double distance<tuple<double,double>>(tuple<double,double> a, tuple<double,double> b) {
     return Rivet::sqr(get<0>(a) - get<0>(b)) + Rivet::sqr(get<1>(a) - get<1>(b));
   }

-
-  bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) {
-    for (const std::string& a : src->annotations())
-      dst->setAnnotation(a, src->annotation(a));
-    if ( aocopy<YODA::Counter>(src,dst) ) return true;
-    if ( aocopy<YODA::Histo1D>(src,dst) ) return true;
-    if ( aocopy<YODA::Histo2D>(src,dst) ) return true;
-    if ( aocopy<YODA::Profile1D>(src,dst) ) return true;
-    if ( aocopy<YODA::Profile2D>(src,dst) ) return true;
-    if ( aocopy<YODA::Scatter1D>(src,dst) ) return true;
-    if ( aocopy<YODA::Scatter2D>(src,dst) ) return true;
-    if ( aocopy<YODA::Scatter3D>(src,dst) ) return true;
-    return false;
   }
-  bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) {
-    if ( aoadd<YODA::Counter>(dst,src,scale) ) return true;
-    if ( aoadd<YODA::Histo1D>(dst,src,scale) ) return true;
-    if ( aoadd<YODA::Histo2D>(dst,src,scale) ) return true;
-    if ( aoadd<YODA::Profile1D>(dst,src,scale) ) return true;
-    if ( aoadd<YODA::Profile2D>(dst,src,scale) ) return true;
-    return false;
+
+  namespace Rivet {
+
+    bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) {
+      for (const std::string& a : src->annotations())
+        dst->setAnnotation(a, src->annotation(a));
+      if ( aocopy<YODA::Counter>(src,dst) ) return true;
+      if ( aocopy<YODA::Histo1D>(src,dst) ) return true;
+      if ( aocopy<YODA::Histo2D>(src,dst) ) return true;
+      if ( aocopy<YODA::Profile1D>(src,dst) ) return true;
+      if ( aocopy<YODA::Profile2D>(src,dst) ) return true;
+      if ( aocopy<YODA::Scatter1D>(src,dst) ) return true;
+      if ( aocopy<YODA::Scatter2D>(src,dst) ) return true;
+      if ( aocopy<YODA::Scatter3D>(src,dst) ) return true;
+      return false;
+    }
+
+    bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) {
+      if ( aoadd<YODA::Counter>(dst,src,scale) ) return true;
+      if ( aoadd<YODA::Histo1D>(dst,src,scale) ) return true;
+      if ( aoadd<YODA::Histo2D>(dst,src,scale) ) return true;
+      if ( aoadd<YODA::Profile1D>(dst,src,scale) ) return true;
+      if ( aoadd<YODA::Profile2D>(dst,src,scale) ) return true;
+      return false;
+    }
+  }

-}
-
-
-
+namespace {

   /// fills is a vector of sub-event with an ordered set of x-values of
   /// the fills in each sub-event. NOFILL should be an "impossible"
   /// value for this histogram. Returns a vector of sub-events with
   /// an ordered vector of fills (including NOFILLs) for each sub-event.
   template <class T>
   vector< vector<Fill<T>> >
   match_fills(const vector<typename TupleWrapper<T>::Ptr>& evgroup, const Fill<T>& NOFILL) {
     vector< vector<Fill<T>> > matched;
     // First just copy subevents into vectors and find the longest vector.
     unsigned int maxfill = 0; // length of biggest vector
     int imax = 0; // index position of biggest vector
     for ( const auto& it : evgroup ) {
       const auto& subev = it->fills();
       if ( subev.size() > maxfill ) {
         maxfill = subev.size();
         imax = matched.size();
       }
       matched.push_back(vector<Fill<T>>(subev.begin(), subev.end()));
     }
     // Now, go through all subevents with missing fills.
     const vector<Fill<T>>& full = matched[imax]; // the longest one
     for ( auto& subev : matched ) {
       if ( subev.size() == maxfill ) continue;

       // Add NOFILLs to the end;
       while ( subev.size() < maxfill )
         subev.push_back(NOFILL);

       // Iterate from the back and shift all fill values backwards by
       // swapping with NOFILLs so that they better match the full
       // subevent.
       for ( int i = maxfill - 1; i >= 0; --i ) {
         if ( subev[i] == NOFILL ) continue;
         size_t j = i;
         while ( j + 1 < maxfill && subev[j + 1] == NOFILL &&
                 distance(fillT2binT<T>(subev[j].first),
                          fillT2binT<T>(full[j].first))
                 >
                 distance(fillT2binT<T>(subev[j].first),
                          fillT2binT<T>(full[j + 1].first)) ) {
           swap(subev[j], subev[j + 1]);
           ++j;
         }
       }
     }
     // transpose
     vector<vector<Fill<T>>> result(maxfill, vector<Fill<T>>(matched.size()));
     for (size_t i = 0; i < matched.size(); ++i)
       for (size_t j = 0; j < maxfill; ++j)
         result.at(j).at(i) = matched.at(i).at(j);
     return result;
   }
-
+}

 namespace Rivet {

   template <class T>
   void Wrapper<T>::pushToPersistent(const vector<valarray<double>>& weight) {
     assert( _evgroup.size() == weight.size() );

     // have we had subevents at all?
     const bool have_subevents = _evgroup.size() > 1;
     if ( ! have_subevents ) {
       // simple replay of all tuple entries
       // each recorded fill is inserted into all persistent weightname histos
       for ( size_t m = 0; m < _persistent.size(); ++m )
         for ( const auto& f : _evgroup[0]->fills() )
           _persistent[m]->fill( f.first, f.second * weight[0][m] );
     } else {
       // outer index is subevent, inner index is jets in the event
       vector<vector<Fill<T>>> linedUpXs = match_fills<T>(_evgroup, {typename T::FillType(), 0.0});
       commit<T>( _persistent, linedUpXs, weight );
     }
     _evgroup.clear();
     _active.reset();
   }

   template <>
   void Wrapper<YODA::Counter>::pushToPersistent(const vector<valarray<double>>& weight) {
     for ( size_t m = 0; m < _persistent.size(); ++m ) {
       for ( size_t n = 0; n < _evgroup.size(); ++n ) {
         for ( const auto& f : _evgroup[n]->fills() ) {
           _persistent[m]->fill( f.second * weight[n][m] );
         }
       }
     }
     _evgroup.clear();
     _active.reset();
   }

   template <>
   void Wrapper<YODA::Scatter1D>::pushToPersistent(const vector<valarray<double>>& weight) {
     _evgroup.clear();
     _active.reset();
   }

   template <>
   void Wrapper<YODA::Scatter2D>::pushToPersistent(const vector<valarray<double>>& weight) {
     _evgroup.clear();
     _active.reset();
   }

   template <>
   void Wrapper<YODA::Scatter3D>::pushToPersistent(const vector<valarray<double>>& weight) {
     _evgroup.clear();
     _active.reset();
   }

   // explicitly instantiate all wrappers
   template class Wrapper<YODA::Histo1D>;
   template class Wrapper<YODA::Histo2D>;
   template class Wrapper<YODA::Profile1D>;
   template class Wrapper<YODA::Profile2D>;
   template class Wrapper<YODA::Counter>;
   template class Wrapper<YODA::Scatter1D>;
   template class Wrapper<YODA::Scatter2D>;
   template class Wrapper<YODA::Scatter3D>;

 }
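The window heuristic used by get_window_size()/commit() above can be illustrated outside Rivet. The following is a standalone sketch, not Rivet code: given sorted bin edges and a fill position x, the window half-width is half the smaller of the containing bin's width and the width of the neighbour on the side x leans towards. All names here (windowHalfWidth, edges) are hypothetical.

    #include <algorithm>
    #include <cassert>
    #include <limits>
    #include <vector>

    // Half-width of the fill-matching window around x, or 0 for under/overflow.
    double windowHalfWidth(const std::vector<double>& edges, double x) {
      // Locate the containing bin [edges[i], edges[i+1])
      auto it = std::upper_bound(edges.begin(), edges.end(), x);
      if (it == edges.begin() || it == edges.end()) return 0; // outside the axis
      const size_t i = (it - edges.begin()) - 1;
      const double lo = edges[i], hi = edges[i+1];
      const double width = hi - lo;
      // Neighbouring bin on the side x leans towards; infinite if there is none
      double nbrWidth = std::numeric_limits<double>::infinity();
      if (x > 0.5*(lo + hi)) {
        if (i + 2 < edges.size()) nbrWidth = edges[i+2] - hi;
      } else {
        if (i >= 1) nbrWidth = lo - edges[i-1];
      }
      // Factor 2 mirrors the "arbitrary" halving in get_window_size()
      return std::min(width, nbrWidth) / 2.0;
    }

    int main() {
      const std::vector<double> edges = {0.0, 1.0, 3.0, 6.0};
      assert(windowHalfWidth(edges, 0.9) == 0.5); // min(1, 2)/2: own bin is narrower
      assert(windowHalfWidth(edges, 2.9) == 1.0); // min(2, 3)/2: own bin is narrower
      assert(windowHalfWidth(edges, 9.0) == 0.0); // overflow contributes nothing
    }

In commit(), each sub-event fill then spans [x - w, x + w]; overlapping spans from different sub-events are merged on a common edge set, and each resulting slice is filled with its width fraction so that the whole window integrates to one single fill.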