diff --git a/analyses/pluginATLAS/ATLAS_2011_I926145.cc b/analyses/pluginATLAS/ATLAS_2011_I926145.cc --- a/analyses/pluginATLAS/ATLAS_2011_I926145.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I926145.cc @@ -1,137 +1,137 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief Measurement of electron and muon differential cross section from heavy flavour production /// /// Lepton cross sections differential in pT /// /// @author Paul Bell, Holger Schulz class ATLAS_2011_I926145 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2011_I926145); /// Book histograms and initialise projections before the run void init() { // Electrons and muons Cut cuts = (Cuts::abseta < 1.37 || Cuts::absetaIn(1.52, 2.00)) && Cuts::pT > 7*GeV; IdentifiedFinalState elecs(cuts, {PID::ELECTRON, PID::POSITRON}); declare(elecs, "elecs"); IdentifiedFinalState muons(Cuts::abseta < 2 && Cuts::pT > 7*GeV, {PID::MUON, PID::ANTIMUON}); declare(muons, "muons"); IdentifiedFinalState muons_full(Cuts::abseta < 2.5 && Cuts::pT > 4*GeV, {PID::MUON, PID::ANTIMUON}); declare(muons_full, "muons_full"); Cut cut20 = Cuts::abseta < 2.0; Cut cut25 = Cuts::abseta < 2.5; const FinalState fs20(cut20); const FinalState fs25(cut25); /// @todo Bare Zs ... - ZFinder zfinder_e(fs20, cut20, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::NOCLUSTER); + ZFinder zfinder_e(fs20, cut20, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NONE); declare(zfinder_e, "ZFinder_e"); - ZFinder zfinder_mu(fs20, cut20, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::NOCLUSTER); + ZFinder zfinder_mu(fs20, cut20, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NONE); declare(zfinder_mu, "ZFinder_mu"); - ZFinder zfinder_mufull(fs25, cut25, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::NOCLUSTER); + ZFinder zfinder_mufull(fs25, cut25, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NONE); declare(zfinder_mufull, "ZFinder_mufull"); /// @todo ... but dressed Ws? 
WFinder wfinder_e(fs20, cut20, PID::ELECTRON, 60.0*GeV, 100.0*GeV, 25.0*GeV, 0.2); declare(wfinder_e, "WFinder_e"); WFinder wfinder_mu(fs20, cut20, PID::MUON, 60.0*GeV, 100.0*GeV, 25.0*GeV, 0.2); declare(wfinder_mu, "WFinder_mu"); WFinder wfinder_mufull(fs25, cut25, PID::MUON, 60.0*GeV, 100.0*GeV, 25.0*GeV, 0.2); declare(wfinder_mufull, "WFinder_mufull"); // Book histograms book(_histPt_elecs ,1 ,1 ,1); book(_histPt_muons ,2 ,1 ,1); book(_histPt_muons_full ,3 ,1 ,1); } /// Perform the per-event analysis void analyze(const Event& event) { // Veto event if no lepton is present const FinalState& elecs = apply(event, "elecs"); const FinalState& muons = apply(event, "muons"); const FinalState& muons_full = apply(event, "muons_full"); if (elecs.empty() && muons.empty() && muons_full.empty()) vetoEvent; // Z veto const ZFinder& zfinder_e = apply(event, "ZFinder_e"); const ZFinder& zfinder_mu = apply(event, "ZFinder_mu"); const ZFinder& zfinder_mufull = apply(event, "ZFinder_mufull"); if (zfinder_e.bosons().size() > 0 || zfinder_mu.bosons().size() > 0 || zfinder_mufull.bosons().size() > 0) { MSG_DEBUG("Num elec Z-bosons found: " << zfinder_e.bosons().size()); MSG_DEBUG("Num muon Z-bosons found: " << zfinder_mu.bosons().size()); MSG_DEBUG("Num muon Z-bosons found (|eta|<2.5): " << zfinder_mufull.bosons().size()); vetoEvent; } // W veto const WFinder& wfinder_e = apply(event, "WFinder_e"); const WFinder& wfinder_mu = apply(event, "WFinder_mu"); const WFinder& wfinder_mufull = apply(event, "WFinder_mufull"); if (wfinder_e.bosons().size() > 0 || wfinder_mu.bosons().size() > 0 || wfinder_mufull.bosons().size() > 0) { MSG_DEBUG("Num elec W-bosons found: " << wfinder_e.bosons().size()); MSG_DEBUG("Num muon W-bosons found: " << wfinder_mu.bosons().size()); MSG_DEBUG("Num muon W-bosons found (|eta|<2.5): " << wfinder_mufull.bosons().size()); vetoEvent; } // Electron histogram if (elecs.size() > 0) { for (const Particle& ele : elecs.particles()) { if (ele.pT() < 26.0*GeV) _histPt_elecs->fill(ele.pT()*GeV); } } // Muon histogram if (muons.size() > 0) { for (const Particle& muo : muons.particles()) { if (muo.pT() < 26.0*GeV) _histPt_muons->fill(muo.pT()*GeV); } } // Muon full histogram if (muons_full.size() > 0) { for (const Particle& muo : muons_full.particles()) { if (muo.pT() < 100.0*GeV) _histPt_muons_full->fill(muo.pT()*GeV); } } } /// Normalise histograms etc., after the run void finalize() { scale(_histPt_elecs, crossSection()/nanobarn/sumOfWeights()); scale(_histPt_muons, crossSection()/nanobarn/sumOfWeights()); scale(_histPt_muons_full, crossSection()/nanobarn/sumOfWeights()); } private: /// @name Histograms //@{ Histo1DPtr _histPt_elecs, _histPt_muons, _histPt_muons_full; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I926145); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc b/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc --- a/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc @@ -1,147 +1,147 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { class ATLAS_2011_I928289_W : public Analysis { public: /// Constructor ATLAS_2011_I928289_W() : Analysis("ATLAS_2011_I928289_W") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { ///Initialise and register projections here FinalState fs; Cut cut = (Cuts::pT >= 20*GeV); - WFinder wfinder_el_bare( fs, cut, PID::ELECTRON, 
40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); - WFinder wfinder_el_dressed(fs, cut, PID::ELECTRON, 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); - WFinder wfinder_mu_bare (fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); - WFinder wfinder_mu_dressed(fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder wfinder_el_bare( fs, cut, PID::ELECTRON, 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_el_dressed(fs, cut, PID::ELECTRON, 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_mu_bare (fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_mu_dressed(fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder_el_bare , "WFinder_el_bare"); declare(wfinder_el_dressed, "WFinder_el_dressed"); declare(wfinder_mu_bare , "WFinder_mu_bare"); declare(wfinder_mu_dressed, "WFinder_mu_dressed"); /// Book histograms here book(_h_Wminus_lepton_eta_el_bare ,3, 1, 1); book(_h_Wminus_lepton_eta_el_dressed ,3, 1, 2); book(_h_Wminus_lepton_eta_mu_bare ,3, 1, 3); book(_h_Wminus_lepton_eta_mu_dressed ,3, 1, 4); book(_h_Wplus_lepton_eta_el_bare ,5, 1, 1); book(_h_Wplus_lepton_eta_el_dressed ,5, 1, 2); book(_h_Wplus_lepton_eta_mu_bare ,5, 1, 3); book(_h_Wplus_lepton_eta_mu_dressed ,5, 1, 4); book(_h_W_asym_eta_el_bare ,7, 1, 1); book(_h_W_asym_eta_el_dressed ,7, 1, 2); book(_h_W_asym_eta_mu_bare ,7, 1, 3); book(_h_W_asym_eta_mu_dressed ,7, 1, 4); } /// Perform the per-event analysis void analyze(const Event& event) { const WFinder& wfinder_el_bare = apply(event, "WFinder_el_bare"); const WFinder& wfinder_el_dressed = apply(event, "WFinder_el_dressed"); const WFinder& wfinder_mu_bare = apply(event, "WFinder_mu_bare"); const WFinder& wfinder_mu_dressed = apply(event, "WFinder_mu_dressed"); fillPlots1D(wfinder_el_bare , _h_Wplus_lepton_eta_el_bare , _h_Wminus_lepton_eta_el_bare); fillPlots1D(wfinder_el_dressed, _h_Wplus_lepton_eta_el_dressed, _h_Wminus_lepton_eta_el_dressed); fillPlots1D(wfinder_mu_bare , _h_Wplus_lepton_eta_mu_bare , _h_Wminus_lepton_eta_mu_bare); fillPlots1D(wfinder_mu_dressed, _h_Wplus_lepton_eta_mu_dressed, _h_Wminus_lepton_eta_mu_dressed); } void fillPlots1D(const WFinder& wfinder, Histo1DPtr hist_plus, Histo1DPtr hist_minus) { if (wfinder.bosons().size() != 1) return; const Particle l = wfinder.constituentLeptons()[0]; const FourMomentum miss = wfinder.constituentNeutrinos()[0]; if (l.pT() > 20*GeV && miss.Et() > 25*GeV && wfinder.mT() > 40*GeV) (l.charge3() > 0 ? 
hist_plus : hist_minus)->fill(l.abseta()); } /// Normalise histograms etc., after the run void finalize() { // Construct asymmetry: (dsig+/deta - dsig-/deta) / (dsig+/deta + dsig-/deta) divide(*_h_Wplus_lepton_eta_el_bare - *_h_Wminus_lepton_eta_el_bare, *_h_Wplus_lepton_eta_el_bare + *_h_Wminus_lepton_eta_el_bare, _h_W_asym_eta_el_bare); divide(*_h_Wplus_lepton_eta_el_dressed - *_h_Wminus_lepton_eta_el_dressed, *_h_Wplus_lepton_eta_el_dressed + *_h_Wminus_lepton_eta_el_dressed, _h_W_asym_eta_el_dressed); divide(*_h_Wplus_lepton_eta_mu_bare - *_h_Wminus_lepton_eta_mu_bare, *_h_Wplus_lepton_eta_mu_bare + *_h_Wminus_lepton_eta_mu_bare, _h_W_asym_eta_mu_bare); divide(*_h_Wplus_lepton_eta_mu_dressed - *_h_Wminus_lepton_eta_mu_dressed, *_h_Wplus_lepton_eta_mu_dressed + *_h_Wminus_lepton_eta_mu_dressed, _h_W_asym_eta_mu_dressed); // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_DEBUG( "Cross-section/pb : " << xs_pb ); MSG_DEBUG( "Sum of weights : " << sumw ); MSG_DEBUG( "nEvents : " << numEvents() ); /// Normalise, scale and otherwise manipulate histograms here const double sf = 0.5 * xs_pb / sumw; // 0.5 accounts for rapidity bin width scale(_h_Wminus_lepton_eta_el_bare , sf); scale(_h_Wminus_lepton_eta_el_dressed, sf); scale(_h_Wminus_lepton_eta_mu_bare , sf); scale(_h_Wminus_lepton_eta_mu_dressed, sf); scale(_h_Wplus_lepton_eta_el_bare , sf); scale(_h_Wplus_lepton_eta_el_dressed , sf); scale(_h_Wplus_lepton_eta_mu_bare , sf); scale(_h_Wplus_lepton_eta_mu_dressed , sf); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_Wminus_lepton_eta_el_bare; Histo1DPtr _h_Wminus_lepton_eta_el_dressed; Histo1DPtr _h_Wminus_lepton_eta_mu_bare; Histo1DPtr _h_Wminus_lepton_eta_mu_dressed; Histo1DPtr _h_Wplus_lepton_eta_el_bare; Histo1DPtr _h_Wplus_lepton_eta_el_dressed; Histo1DPtr _h_Wplus_lepton_eta_mu_bare; Histo1DPtr _h_Wplus_lepton_eta_mu_dressed; Scatter2DPtr _h_W_asym_eta_el_bare; Scatter2DPtr _h_W_asym_eta_el_dressed; Scatter2DPtr _h_W_asym_eta_mu_bare; Scatter2DPtr _h_W_asym_eta_mu_dressed; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I928289_W); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I928289_Z.cc b/analyses/pluginATLAS/ATLAS_2011_I928289_Z.cc --- a/analyses/pluginATLAS/ATLAS_2011_I928289_Z.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I928289_Z.cc @@ -1,110 +1,110 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class ATLAS_2011_I928289_Z : public Analysis { public: /// Constructor ATLAS_2011_I928289_Z() : Analysis("ATLAS_2011_I928289_Z") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; Cut cut = (Cuts::pT >= 20.0*GeV); - ZFinder zfinder_ee_bare( fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); - ZFinder zfinder_ee_dressed(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); - ZFinder zfinder_mm_bare( fs, cut, PID::MUON , 66.0*GeV, 116.0*GeV, 0.0, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); - ZFinder zfinder_mm_dressed(fs, cut, PID::MUON , 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); + ZFinder zfinder_ee_bare( fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); + ZFinder zfinder_ee_dressed(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, 
ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); + ZFinder zfinder_mm_bare( fs, cut, PID::MUON , 66.0*GeV, 116.0*GeV, 0.0, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); + ZFinder zfinder_mm_dressed(fs, cut, PID::MUON , 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder_ee_bare , "ZFinder_ee_bare" ); declare(zfinder_ee_dressed, "ZFinder_ee_dressed"); declare(zfinder_mm_bare , "ZFinder_mm_bare" ); declare(zfinder_mm_dressed, "ZFinder_mm_dressed"); // y(Z) cross-section dependence book(_h_Z_y_ee_bare ,1, 1, 1); book(_h_Z_y_ee_dressed ,1, 1, 2); book(_h_Z_y_mm_bare ,1, 1, 3); book(_h_Z_y_mm_dressed ,1, 1, 4); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder_ee_bare = apply(event, "ZFinder_ee_bare" ); const ZFinder& zfinder_ee_dressed = apply(event, "ZFinder_ee_dressed"); const ZFinder& zfinder_mm_bare = apply(event, "ZFinder_mm_bare" ); const ZFinder& zfinder_mm_dressed = apply(event, "ZFinder_mm_dressed"); fillPlots1D(zfinder_ee_bare , _h_Z_y_ee_bare); fillPlots1D(zfinder_ee_dressed, _h_Z_y_ee_dressed); fillPlots1D(zfinder_mm_bare , _h_Z_y_mm_bare); fillPlots1D(zfinder_mm_dressed, _h_Z_y_mm_dressed); } void fillPlots1D(const ZFinder& zfinder, Histo1DPtr hist) { if (zfinder.bosons().size() != 1) return; const FourMomentum zmom = zfinder.bosons()[0].momentum(); hist->fill(zmom.absrap()); } /// Normalise histograms etc., after the run void finalize() { // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_DEBUG("Cross-Section/pb: " << xs_pb ); MSG_DEBUG("Sum of weights : " << sumw ); MSG_DEBUG("nEvents : " << numEvents()); // Normalise, scale and otherwise manipulate histograms here const double sf(0.5 * xs_pb / sumw); // 0.5 accounts for rapidity bin width scale(_h_Z_y_ee_bare , sf); scale(_h_Z_y_ee_dressed, sf); scale(_h_Z_y_mm_bare , sf); scale(_h_Z_y_mm_dressed, sf); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_Z_y_ee_bare; Histo1DPtr _h_Z_y_ee_dressed; Histo1DPtr _h_Z_y_mm_bare; Histo1DPtr _h_Z_y_mm_dressed; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I928289_Z); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I929691.cc b/analyses/pluginATLAS/ATLAS_2011_I929691.cc --- a/analyses/pluginATLAS/ATLAS_2011_I929691.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I929691.cc @@ -1,102 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// Jet fragmentation at 7 TeV class ATLAS_2011_I929691 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2011_I929691); /// Initialisation void init() { const FinalState fs(Cuts::abseta < 2.0); - FastJets antikt_06_jets(fs, FastJets::ANTIKT, 0.6, JetAlg::NO_MUONS, JetAlg::NO_INVISIBLES); + FastJets antikt_06_jets(fs, FastJets::ANTIKT, 0.6, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); declare(antikt_06_jets, "jets"); ChargedFinalState tracks(Cuts::pT > 0.5*GeV && Cuts::abseta < 2.0); declare(tracks, "tracks"); // Set up the histograms (each element is a binning in jet pT) for (size_t i = 0; i < 10; i++) { book(_p_F_z[i] , i+ 1, 1, 1); book(_p_rho_r[i] , i+11, 1, 1); book(_p_f_pTrel[i], i+21, 1, 1); } } // Per-event analysis void analyze(const Event& event) { const Jets alljets = apply(event, "jets").jetsByPt(Cuts::absrap < 1.2); const Particles& tracks = apply(event, 
"tracks").particlesByPt(); for (size_t i = 0; i < 10; ++i) { const Jets jets = filter_select(alljets, Cuts::pT > bedges[i] && Cuts::pT < bedges[i+1]); const int n_jets = jets.size(); if (n_jets == 0) continue; // First... count the tracks Histo1D h_ntracks_z(*_p_F_z[i]), h_ntracks_r(*_p_rho_r[i]), h_ntracks_pTrel(*_p_f_pTrel[i]); for (const Jet& j : jets) { for (const Particle& p : tracks) { const double dr = deltaR(j, p, RAPIDITY); if (dr > 0.6) continue; // The paper uses pseudorapidity, but this is a requirement for filling the histogram h_ntracks_z.fill(z(j, p), 1.0/n_jets); h_ntracks_r.fill(dr, 1.0/n_jets); h_ntracks_pTrel.fill(pTrel(j, p), 1.0/n_jets); } } // Then... calculate the observable and fill the profiles for (const HistoBin1D& b : h_ntracks_z.bins()) _p_F_z[i]->fill(b.xMid(), b.height()); for (const HistoBin1D& b : h_ntracks_r.bins()) _p_rho_r[i]->fill(b.xMid(), b.area()/annulus_area(b.xMin(), b.xMax())); for (const HistoBin1D& b : h_ntracks_pTrel.bins()) _p_f_pTrel[i]->fill(b.xMid(), b.height()); } } double z (const Jet& jet, const Particle& ch) { return dot(jet.p3(), ch.p3()) / jet.p3().mod2(); } double pTrel (const Jet& jet, const Particle& ch) { return (ch.p3().cross(jet.p3())).mod()/(jet.p3().mod()); } // To calculate the area of the annulus in an r bin double annulus_area(double r1, double r2) { return M_PI*(sqr(r2) - sqr(r1)); } private: Profile1DPtr _p_F_z[10], _p_rho_r[10], _p_f_pTrel[10]; const vector bedges = { 25., 40., 60., 80., 110., 160., 210., 260., 310., 400., 500. }; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I929691); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I945498.cc b/analyses/pluginATLAS/ATLAS_2011_I945498.cc --- a/analyses/pluginATLAS/ATLAS_2011_I945498.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I945498.cc @@ -1,303 +1,303 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { /// ATLAS Z+jets in pp at 7 TeV class ATLAS_2011_I945498 : public Analysis { public: /// Constructor ATLAS_2011_I945498() : Analysis("ATLAS_2011_I945498") { } /// Book histograms and initialise projections before the run void init() { // Variable initialisation _isZeeSample = false; _isZmmSample = false; for (size_t chn = 0; chn < 3; ++chn) { book(weights_nj0[chn], "weights_nj0_" + to_str(chn)); book(weights_nj1[chn], "weights_nj1_" + to_str(chn)); book(weights_nj2[chn], "weights_nj2_" + to_str(chn)); book(weights_nj3[chn], "weights_nj3_" + to_str(chn)); book(weights_nj4[chn], "weights_nj4_" + to_str(chn)); } // Set up projections FinalState fs; - ZFinder zfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_mu, "ZFinder_mu"); Cut cuts = (Cuts::abseta < 1.37 || Cuts::absetaIn(1.52, 2.47)) && Cuts::pT > 20*GeV; - ZFinder zfinder_el(fs, cuts, PID::ELECTRON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_el(fs, cuts, PID::ELECTRON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_el, "ZFinder_el"); Cut cuts25_20 = Cuts::abseta < 2.5 && Cuts::pT > 20*GeV; // For combined cross-sections (combined phase space + 
dressed level) - ZFinder zfinder_comb_mu(fs, cuts25_20, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_comb_mu(fs, cuts25_20, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_comb_mu, "ZFinder_comb_mu"); - ZFinder zfinder_comb_el(fs, cuts25_20, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_comb_el(fs, cuts25_20, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_comb_el, "ZFinder_comb_el"); // Define veto FS in order to prevent Z-decay products entering the jet algorithm VetoedFinalState remfs; remfs.addVetoOnThisFinalState(zfinder_el); remfs.addVetoOnThisFinalState(zfinder_mu); VetoedFinalState remfs_comb; remfs_comb.addVetoOnThisFinalState(zfinder_comb_el); remfs_comb.addVetoOnThisFinalState(zfinder_comb_mu); FastJets jets(remfs, FastJets::ANTIKT, 0.4); jets.useInvisibles(); declare(jets, "jets"); FastJets jets_comb(remfs_comb, FastJets::ANTIKT, 0.4); jets_comb.useInvisibles(); declare(jets_comb, "jets_comb"); // 0=el, 1=mu, 2=comb for (size_t chn = 0; chn < 3; ++chn) { book(_h_njet_incl[chn] ,1, 1, chn+1); book(_h_njet_ratio[chn] ,2, 1, chn+1); book(_h_ptjet[chn] ,3, 1, chn+1); book(_h_ptlead[chn] ,4, 1, chn+1); book(_h_ptseclead[chn] ,5, 1, chn+1); book(_h_yjet[chn] ,6, 1, chn+1); book(_h_ylead[chn] ,7, 1, chn+1); book(_h_yseclead[chn] ,8, 1, chn+1); book(_h_mass[chn] ,9, 1, chn+1); book(_h_deltay[chn] ,10, 1, chn+1); book(_h_deltaphi[chn] ,11, 1, chn+1); book(_h_deltaR[chn] ,12, 1, chn+1); } } // Jet selection criteria universal for electron and muon channel /// @todo Replace with a Cut passed to jetsByPt Jets selectJets(const ZFinder* zf, const FastJets* allJets) { const FourMomentum l1 = zf->constituents()[0].momentum(); const FourMomentum l2 = zf->constituents()[1].momentum(); Jets jets; for (const Jet& jet : allJets->jetsByPt(30*GeV)) { const FourMomentum jmom = jet.momentum(); if (jmom.absrap() < 4.4 && deltaR(l1, jmom) > 0.5 && deltaR(l2, jmom) > 0.5) { jets.push_back(jet); } } return jets; } /// Perform the per-event analysis void analyze(const Event& event) { vector zfs; zfs.push_back(& (apply(event, "ZFinder_el"))); zfs.push_back(& (apply(event, "ZFinder_mu"))); zfs.push_back(& (apply(event, "ZFinder_comb_el"))); zfs.push_back(& (apply(event, "ZFinder_comb_mu"))); vector fjs; fjs.push_back(& (apply(event, "jets"))); fjs.push_back(& (apply(event, "jets_comb"))); // Determine what kind of MC sample this is const bool isZee = (zfs[0]->bosons().size() == 1) || (zfs[2]->bosons().size() == 1); const bool isZmm = (zfs[1]->bosons().size() == 1) || (zfs[3]->bosons().size() == 1); if (isZee) _isZeeSample = true; if (isZmm) _isZmmSample = true; // Require exactly one electronic or muonic Z-decay in the event bool isZeemm = ( (zfs[0]->bosons().size() == 1 && zfs[1]->bosons().size() != 1) || (zfs[1]->bosons().size() == 1 && zfs[0]->bosons().size() != 1) ); bool isZcomb = ( (zfs[2]->bosons().size() == 1 && zfs[3]->bosons().size() != 1) || (zfs[3]->bosons().size() == 1 && zfs[2]->bosons().size() != 1) ); if (!isZeemm && !isZcomb) vetoEvent; vector zfIDs; vector fjIDs; if (isZeemm) { int chn = zfs[0]->bosons().size() == 1 ? 0 : 1; zfIDs.push_back(chn); fjIDs.push_back(0); } if (isZcomb) { int chn = zfs[2]->bosons().size() == 1 ? 
2 : 3; zfIDs.push_back(chn); fjIDs.push_back(1); } for (size_t izf = 0; izf < zfIDs.size(); ++izf) { int zfID = zfIDs[izf]; int fjID = fjIDs[izf]; int chn = zfID; if (zfID == 2 || zfID == 3) chn = 2; Jets jets = selectJets(zfs[zfID], fjs[fjID]); switch (jets.size()) { case 0: weights_nj0[chn]->fill(); break; case 1: weights_nj0[chn]->fill(); weights_nj1[chn]->fill(); break; case 2: weights_nj0[chn]->fill(); weights_nj1[chn]->fill(); weights_nj2[chn]->fill(); break; case 3: weights_nj0[chn]->fill(); weights_nj1[chn]->fill(); weights_nj2[chn]->fill(); weights_nj3[chn]->fill(); break; default: // >= 4 weights_nj0[chn]->fill(); weights_nj1[chn]->fill(); weights_nj2[chn]->fill(); weights_nj3[chn]->fill(); weights_nj4[chn]->fill(); } // Require at least one jet if (jets.empty()) continue; // Fill jet multiplicities for (size_t ijet = 1; ijet <= jets.size(); ++ijet) { _h_njet_incl[chn]->fill(ijet); } // Loop over selected jets, fill inclusive jet distributions for (size_t ijet = 0; ijet < jets.size(); ++ijet) { _h_ptjet[chn]->fill(jets[ijet].pT()/GeV); _h_yjet [chn]->fill(fabs(jets[ijet].rapidity())); } // Leading jet histos const double ptlead = jets[0].pT()/GeV; const double yabslead = fabs(jets[0].rapidity()); _h_ptlead[chn]->fill(ptlead); _h_ylead [chn]->fill(yabslead); if (jets.size() >= 2) { // Second jet histos const double pt2ndlead = jets[1].pT()/GeV; const double yabs2ndlead = fabs(jets[1].rapidity()); _h_ptseclead[chn] ->fill(pt2ndlead); _h_yseclead [chn] ->fill(yabs2ndlead); // Dijet histos const double deltaphi = fabs(deltaPhi(jets[1], jets[0])); const double deltarap = fabs(jets[0].rapidity() - jets[1].rapidity()) ; const double deltar = fabs(deltaR(jets[0], jets[1], RAPIDITY)); const double mass = (jets[0].momentum() + jets[1].momentum()).mass(); _h_mass [chn] ->fill(mass/GeV); _h_deltay [chn] ->fill(deltarap); _h_deltaphi[chn] ->fill(deltaphi); _h_deltaR [chn] ->fill(deltar); } } } /// @name Ratio calculator util functions //@{ /// Calculate the ratio, being careful about div-by-zero double ratio(double a, double b) { return (b != 0) ? a/b : 0; } /// Calculate the ratio error, being careful about div-by-zero double ratio_err(double a, double b) { return (b != 0) ? sqrt(a/sqr(b) + sqr(a)/(b*b*b)) : 0; } //@} void finalize() { // Fill ratio histograms for (size_t chn = 0; chn < 3; ++chn) { _h_njet_ratio[chn]->addPoint(1, ratio(weights_nj1[chn]->val(), weights_nj0[chn]->val()), 0.5, ratio_err(weights_nj1[chn]->val(), weights_nj0[chn]->val())); _h_njet_ratio[chn]->addPoint(2, ratio(weights_nj2[chn]->val(), weights_nj1[chn]->val()), 0.5, ratio_err(weights_nj2[chn]->val(), weights_nj1[chn]->val())); _h_njet_ratio[chn]->addPoint(3, ratio(weights_nj3[chn]->val(), weights_nj2[chn]->val()), 0.5, ratio_err(weights_nj3[chn]->val(), weights_nj2[chn]->val())); _h_njet_ratio[chn]->addPoint(4, ratio(weights_nj4[chn]->val(), weights_nj3[chn]->val()), 0.5, ratio_err(weights_nj4[chn]->val(), weights_nj3[chn]->val())); } // Scale other histos for (size_t chn = 0; chn < 3; ++chn) { // For ee and mumu channels: normalize to Njet inclusive cross-section double xs = (chn == 2) ? crossSectionPerEvent()/picobarn : 1 / weights_nj0[chn]->val(); // For inclusive MC sample(ee/mmu channels together) we want the single-lepton-flavor xsec if (_isZeeSample && _isZmmSample) xs /= 2; // Special case histogram: always not normalized scale(_h_njet_incl[chn], (chn < 2) ? 
crossSectionPerEvent()/picobarn : xs); scale(_h_ptjet[chn] , xs); scale(_h_ptlead[chn] , xs); scale(_h_ptseclead[chn], xs); scale(_h_yjet[chn] , xs); scale(_h_ylead[chn] , xs); scale(_h_yseclead[chn] , xs); scale(_h_deltaphi[chn] , xs); scale(_h_deltay[chn] , xs); scale(_h_deltaR[chn] , xs); scale(_h_mass[chn] , xs); } } //@} private: bool _isZeeSample; bool _isZmmSample; CounterPtr weights_nj0[3]; CounterPtr weights_nj1[3]; CounterPtr weights_nj2[3]; CounterPtr weights_nj3[3]; CounterPtr weights_nj4[3]; Scatter2DPtr _h_njet_ratio[3]; Histo1DPtr _h_njet_incl[3]; Histo1DPtr _h_ptjet[3]; Histo1DPtr _h_ptlead[3]; Histo1DPtr _h_ptseclead[3]; Histo1DPtr _h_yjet[3]; Histo1DPtr _h_ylead[3]; Histo1DPtr _h_yseclead[3]; Histo1DPtr _h_deltaphi[3]; Histo1DPtr _h_deltay[3]; Histo1DPtr _h_deltaR[3]; Histo1DPtr _h_mass[3]; }; DECLARE_RIVET_PLUGIN(ATLAS_2011_I945498); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I954993.cc b/analyses/pluginATLAS/ATLAS_2011_I954993.cc --- a/analyses/pluginATLAS/ATLAS_2011_I954993.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I954993.cc @@ -1,117 +1,117 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief WZ fiducial cross-section measurement class ATLAS_2011_I954993 : public Analysis { public: /// Default constructor ATLAS_2011_I954993() : Analysis("ATLAS_2011_I954993") { } /// @name Analysis methods //@{ /// Projection and histogram setup void init() { FinalState fs; Cut cuts = Cuts::abseta < 2.5 && Cuts::pT > 15*GeV; - ZFinder zfinder_e(fs, cuts, PID::ELECTRON, 81.1876*GeV, 101.1876*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_e(fs, cuts, PID::ELECTRON, 81.1876*GeV, 101.1876*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_e, "ZFinder_e"); - ZFinder zfinder_mu(fs, cuts, PID::MUON, 81.1876*GeV, 101.1876*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_mu(fs, cuts, PID::MUON, 81.1876*GeV, 101.1876*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_mu, "ZFinder_mu"); VetoedFinalState weinput; weinput.addVetoOnThisFinalState(zfinder_e); - WFinder wfinder_e(weinput, cuts, PID::ELECTRON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::CLUSTERNODECAY); + WFinder wfinder_e(weinput, cuts, PID::ELECTRON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::ClusterPhotons::NODECAY); declare(wfinder_e, "WFinder_e"); VetoedFinalState wminput; wminput.addVetoOnThisFinalState(zfinder_mu); - WFinder wfinder_mu(wminput,cuts, PID::MUON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::CLUSTERNODECAY); + WFinder wfinder_mu(wminput,cuts, PID::MUON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::ClusterPhotons::NODECAY); declare(wfinder_mu, "WFinder_mu"); // Histograms book(_h_fiducial ,1,1,1); } /// Do the analysis void analyze(const Event& e) { const ZFinder& zfinder_e = apply(e, "ZFinder_e"); const ZFinder& zfinder_mu = apply(e, "ZFinder_mu"); const WFinder& wfinder_e = apply(e, "WFinder_e"); const WFinder& wfinder_mu = apply(e, "WFinder_mu"); // Looking for a Z, exit if not found if (zfinder_e.bosons().size() != 1 && zfinder_mu.bosons().size() != 1) { MSG_DEBUG("No Z boson found, vetoing event"); vetoEvent; } // Looking for a W, exit if not found if (wfinder_e.bosons().size()!= 1 && wfinder_mu.bosons().size() != 1) { MSG_DEBUG("No W boson found, vetoing event"); vetoEvent; } // If we find a W, make fiducial acceptance cuts and exit if not found if (wfinder_e.bosons().size() == 1) { const FourMomentum We = wfinder_e.constituentLeptons()[0]; const 
FourMomentum Wenu = wfinder_e.constituentNeutrinos()[0]; const double mT = wfinder_e.mT(); if (Wenu.pT() < 25*GeV || We.pT() < 20*GeV || mT < 20*GeV) { MSG_DEBUG("Wnu pT = " << Wenu.pT()/GeV << " GeV, Wl pT = " << We.pT()/GeV << " GeV, mT = " << mT/GeV << " GeV"); vetoEvent; } } else if (wfinder_mu.bosons().size() == 1) { const FourMomentum Wmu = wfinder_mu.constituentLeptons()[0]; const FourMomentum Wmunu = wfinder_mu.constituentNeutrinos()[0]; const double mT = wfinder_mu.mT(); if (Wmunu.pT() < 25*GeV || Wmu.pT() < 20*GeV || mT < 20*GeV) { MSG_DEBUG("Wnu pT = " << Wmunu.pT()/GeV << ", Wl pT = " << Wmu.pT()/GeV << " GeV, mT = " << mT/GeV << " GeV"); vetoEvent; } } else { MSG_DEBUG("No W boson found: vetoing event"); vetoEvent; } // Update the fiducial cross-section histogram _h_fiducial->fill(7000); } /// Finalize void finalize() { scale(_h_fiducial, crossSection()/femtobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_fiducial; //@} }; //// The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I954993); } diff --git a/analyses/pluginATLAS/ATLAS_2011_S9131140.cc b/analyses/pluginATLAS/ATLAS_2011_S9131140.cc --- a/analyses/pluginATLAS/ATLAS_2011_S9131140.cc +++ b/analyses/pluginATLAS/ATLAS_2011_S9131140.cc @@ -1,109 +1,109 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief ATLAS Z pT in Drell-Yan events at 7 TeV /// @author Elena Yatsenko, Judith Katzy class ATLAS_2011_S9131140 : public Analysis { public: /// Constructor ATLAS_2011_S9131140() : Analysis("ATLAS_2011_S9131140") { } /// @name Analysis methods //@{ void init() { // Set up projections FinalState fs; Cut cut = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; - ZFinder zfinder_dressed_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_dressed_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_dressed_el, "ZFinder_dressed_el"); - ZFinder zfinder_bare_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::NOCLUSTER); + ZFinder zfinder_bare_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::ClusterPhotons::NONE); declare(zfinder_bare_el, "ZFinder_bare_el"); - ZFinder zfinder_dressed_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_dressed_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_dressed_mu, "ZFinder_dressed_mu"); - ZFinder zfinder_bare_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::NOCLUSTER); + ZFinder zfinder_bare_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::ClusterPhotons::NONE); declare(zfinder_bare_mu, "ZFinder_bare_mu"); // Book histograms book(_hist_zpt_el_dressed ,1, 1, 2); // electron "dressed" book(_hist_zpt_el_bare ,1, 1, 3); // electron "bare" book(_hist_zpt_mu_dressed ,2, 1, 2); // muon "dressed" book(_hist_zpt_mu_bare ,2, 1, 3); // muon "bare" book(_sumw_el_bare, "_sumw_el_bare"); book(_sumw_el_dressed, "_sumw_el_dressed"); book(_sumw_mu_bare, "_sumw_mu_bare"); book(_sumw_mu_dressed, "_sumw_mu_dressed"); } /// Do the analysis void analyze(const Event& evt) { const ZFinder& zfinder_dressed_el = apply(evt, "ZFinder_dressed_el"); if (!zfinder_dressed_el.bosons().empty()) { _sumw_el_dressed->fill(); const FourMomentum pZ = zfinder_dressed_el.bosons()[0].momentum(); _hist_zpt_el_dressed->fill(pZ.pT()/GeV); } const ZFinder& zfinder_bare_el = apply(evt, "ZFinder_bare_el"); if 
(!zfinder_bare_el.bosons().empty()) { _sumw_el_bare->fill(); const FourMomentum pZ = zfinder_bare_el.bosons()[0].momentum(); _hist_zpt_el_bare->fill(pZ.pT()/GeV); } const ZFinder& zfinder_dressed_mu = apply(evt, "ZFinder_dressed_mu"); if (!zfinder_dressed_mu.bosons().empty()) { _sumw_mu_dressed->fill(); const FourMomentum pZ = zfinder_dressed_mu.bosons()[0].momentum(); _hist_zpt_mu_dressed->fill(pZ.pT()/GeV); } const ZFinder& zfinder_bare_mu = apply(evt, "ZFinder_bare_mu"); if (!zfinder_bare_mu.bosons().empty()) { _sumw_mu_bare->fill(); const FourMomentum pZ = zfinder_bare_mu.bosons()[0].momentum(); _hist_zpt_mu_bare->fill(pZ.pT()/GeV); } } void finalize() { if (_sumw_el_dressed->val() != 0) scale(_hist_zpt_el_dressed, 1/ *_sumw_el_dressed); if (_sumw_el_bare->val() != 0) scale(_hist_zpt_el_bare, 1/ *_sumw_el_bare); if (_sumw_mu_dressed->val() != 0) scale(_hist_zpt_mu_dressed, 1/ *_sumw_mu_dressed); if (_sumw_mu_bare->val() != 0) scale(_hist_zpt_mu_bare, 1/ *_sumw_mu_bare); } //@} private: CounterPtr _sumw_el_bare, _sumw_el_dressed; CounterPtr _sumw_mu_bare, _sumw_mu_dressed; Histo1DPtr _hist_zpt_el_dressed; Histo1DPtr _hist_zpt_el_bare; Histo1DPtr _hist_zpt_mu_dressed; Histo1DPtr _hist_zpt_mu_bare; }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_S9131140); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1204784.cc b/analyses/pluginATLAS/ATLAS_2012_I1204784.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1204784.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1204784.cc @@ -1,146 +1,146 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Tools/BinnedHistogram.hh" namespace Rivet { /// ATLAS Z phi* measurement class ATLAS_2012_I1204784 : public Analysis { public: /// Constructor ATLAS_2012_I1204784() : Analysis("ATLAS_2012_I1204784") { } public: /// Book histograms and initialise projections before the run void init() { FinalState fs; Cut cuts = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; - ZFinder zfinder_dressed_el(fs, cuts, PID::ELECTRON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_dressed_el(fs, cuts, PID::ELECTRON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_dressed_el, "ZFinder_dressed_el"); - ZFinder zfinder_bare_el(fs, cuts, PID::ELECTRON, 66*GeV, 116*GeV, 0.0, ZFinder::NOCLUSTER); + ZFinder zfinder_bare_el(fs, cuts, PID::ELECTRON, 66*GeV, 116*GeV, 0.0, ZFinder::ClusterPhotons::NONE); declare(zfinder_bare_el, "ZFinder_bare_el"); - ZFinder zfinder_dressed_mu(fs, cuts, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_dressed_mu(fs, cuts, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_dressed_mu, "ZFinder_dressed_mu"); - ZFinder zfinder_bare_mu(fs, cuts, PID::MUON, 66*GeV, 116*GeV, 0.0, ZFinder::NOCLUSTER); + ZFinder zfinder_bare_mu(fs, cuts, PID::MUON, 66*GeV, 116*GeV, 0.0, ZFinder::ClusterPhotons::NONE); declare(zfinder_bare_mu, "ZFinder_bare_mu"); // Book histograms // Single-differential plots book(_hist_zphistar_el_bare ,1, 1, 1); book(_hist_zphistar_mu_bare ,1, 1, 2); book(_hist_zphistar_el_dressed ,2, 1, 1); book(_hist_zphistar_mu_dressed ,2, 1, 2); // Double-differential plots {Histo1DPtr tmp; _h_phistar_el_bare.add(0.0, 0.8, book(tmp, 3, 1, 1));} {Histo1DPtr tmp; _h_phistar_el_bare.add(0.8, 1.6, book(tmp, 3, 1, 2));} {Histo1DPtr tmp; _h_phistar_el_bare.add(1.6, 10.0, book(tmp, 3, 1, 3));} {Histo1DPtr tmp; _h_phistar_el_dressed.add(0.0, 0.8, book(tmp, 3, 2, 1));} {Histo1DPtr tmp; _h_phistar_el_dressed.add(0.8, 
1.6, book(tmp, 3, 2, 2));} {Histo1DPtr tmp; _h_phistar_el_dressed.add(1.6, 10.0, book(tmp, 3, 2, 3));} {Histo1DPtr tmp; _h_phistar_mu_bare.add(0.0, 0.8, book(tmp, 4, 1, 1));} {Histo1DPtr tmp; _h_phistar_mu_bare.add(0.8, 1.6, book(tmp, 4, 1, 2));} {Histo1DPtr tmp; _h_phistar_mu_bare.add(1.6, 10.0, book(tmp, 4, 1, 3));} {Histo1DPtr tmp; _h_phistar_mu_dressed.add(0.0, 0.8, book(tmp, 4, 2, 1));} {Histo1DPtr tmp; _h_phistar_mu_dressed.add(0.8, 1.6, book(tmp, 4, 2, 2));} {Histo1DPtr tmp; _h_phistar_mu_dressed.add(1.6, 10.0, book(tmp, 4, 2, 3));} } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; const ZFinder& zfinder_dressed_el = apply(event, "ZFinder_dressed_el"); const ZFinder& zfinder_bare_el = apply(event, "ZFinder_bare_el"); const ZFinder& zfinder_dressed_mu = apply(event, "ZFinder_dressed_mu"); const ZFinder& zfinder_bare_mu = apply(event, "ZFinder_bare_mu"); fillPlots(zfinder_dressed_el, _hist_zphistar_el_dressed, _h_phistar_el_dressed, weight); fillPlots(zfinder_bare_el, _hist_zphistar_el_bare, _h_phistar_el_bare, weight); fillPlots(zfinder_dressed_mu, _hist_zphistar_mu_dressed, _h_phistar_mu_dressed, weight); fillPlots(zfinder_bare_mu, _hist_zphistar_mu_bare, _h_phistar_mu_bare, weight); } void fillPlots(const ZFinder& zfind, Histo1DPtr hist, BinnedHistogram& binnedHist, double weight) { if (zfind.bosons().size() != 1) return; Particles leptons = sortBy(zfind.constituents(), cmpMomByPt); const FourMomentum lminus = leptons[0].charge() < 0 ? leptons[0].momentum() : leptons[1].momentum(); const FourMomentum lplus = leptons[0].charge() < 0 ? leptons[1].momentum() : leptons[0].momentum(); const double phi_acop = M_PI - deltaPhi(lminus, lplus); const double costhetastar = tanh((lminus.eta()-lplus.eta())/2.0); const double sin2thetastar = (costhetastar <= 1) ? 
1.0 - sqr(costhetastar) : 0; const double phistar = tan(phi_acop/2.0) * sqrt(sin2thetastar); hist->fill(phistar, weight); binnedHist.fill(zfind.bosons()[0].absrap(), phistar, weight); } /// Normalise histograms etc., after the run void finalize() { normalize(_hist_zphistar_el_dressed); normalize(_hist_zphistar_el_bare); normalize(_hist_zphistar_mu_dressed); normalize(_hist_zphistar_mu_bare); for (Histo1DPtr hist : _h_phistar_mu_dressed.histos()) { normalize(hist); } for (Histo1DPtr hist : _h_phistar_mu_bare.histos()) { normalize(hist); } for (Histo1DPtr hist : _h_phistar_el_bare.histos()) { normalize(hist); } for (Histo1DPtr hist : _h_phistar_el_dressed.histos()) { normalize(hist); } } //@} private: BinnedHistogram _h_phistar_mu_dressed; BinnedHistogram _h_phistar_mu_bare; BinnedHistogram _h_phistar_el_dressed; BinnedHistogram _h_phistar_el_bare; Histo1DPtr _hist_zphistar_el_dressed; Histo1DPtr _hist_zphistar_el_bare; Histo1DPtr _hist_zphistar_mu_dressed; Histo1DPtr _hist_zphistar_mu_bare; Histo1DPtr _hist_zphistar_el_bare_1; Histo1DPtr _hist_zphistar_el_bare_2; Histo1DPtr _hist_zphistar_el_bare_3; Histo1DPtr _hist_zphistar_el_dressed_1; Histo1DPtr _hist_zphistar_el_dressed_2; Histo1DPtr _hist_zphistar_el_dressed_3; Histo1DPtr _hist_zphistar_mu_bare_1; Histo1DPtr _hist_zphistar_mu_bare_2; Histo1DPtr _hist_zphistar_mu_bare_3; Histo1DPtr _hist_zphistar_mu_dressed_1; Histo1DPtr _hist_zphistar_mu_dressed_2; Histo1DPtr _hist_zphistar_mu_dressed_3; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1204784); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1216670.cc b/analyses/pluginATLAS/ATLAS_2013_I1216670.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1216670.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1216670.cc @@ -1,120 +1,120 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief MPI sensitive di-jet balance variables for W->ejj or W->mujj events. 
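/// For reference, a short sketch of the two balance variables as they are computed in
/// analyze() below: the dijet transverse-momentum balance
/// \f$ \Delta_{\mathrm{jets}} = |\vec{p}_T^{\,j_1} + \vec{p}_T^{\,j_2}| \f$
/// and its normalised form
/// \f$ \Delta_{\mathrm{jets}}^{\mathrm{norm}} = \Delta_{\mathrm{jets}} / (p_T^{j_1} + p_T^{j_2}) \f$,
/// i.e. the magnitude of the vector sum of the two jet transverse momenta, and that magnitude
/// divided by their scalar sum.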
class ATLAS_2013_I1216670 : public Analysis { public: /// @name Constructor ATLAS_2013_I1216670() : Analysis("ATLAS_2013_I1216670") { } /// @name Analysis methods //@{ /// Book histograms, set up projections for W and jets void init() { book(_h_delta_jets_n ,1, 1, 1); book(_h_delta_jets ,2, 1, 1); FinalState fs; Cut cuts = Cuts::abseta < 2.5 && Cuts::pT >= 20*GeV; - WFinder w_e_finder(fs, cuts, PID::ELECTRON, 40*GeV, MAXDOUBLE, 0.0*GeV, 0.0, WFinder::CLUSTERNODECAY, - WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder w_e_finder(fs, cuts, PID::ELECTRON, 40*GeV, MAXDOUBLE, 0.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, + WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(w_e_finder, "W_E_FINDER"); - WFinder w_mu_finder(fs, cuts, PID::MUON, 40*GeV, MAXDOUBLE, 0.0*GeV, 0.0, WFinder::CLUSTERNODECAY, - WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder w_mu_finder(fs, cuts, PID::MUON, 40*GeV, MAXDOUBLE, 0.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, + WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(w_mu_finder, "W_MU_FINDER"); VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("W_E_FINDER")); jet_fs.addVetoOnThisFinalState(getProjection("W_MU_FINDER")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); declare(jets, "JETS"); } /// Do the analysis void analyze(const Event &e) { const WFinder& w_e_finder = apply(e, "W_E_FINDER" ); const WFinder& w_mu_finder = apply(e, "W_MU_FINDER"); Particle lepton, neutrino; Jets all_jets, jets; // Find exactly 1 W->e or W->mu boson if(w_e_finder.bosons().size() == 1 && w_mu_finder.bosons().size() == 0) { MSG_DEBUG(" Event identified as W->e nu."); if( !(w_e_finder.mT() > 40*GeV && w_e_finder.constituentNeutrino().Et() > 25.0*GeV) ) vetoEvent; lepton = w_e_finder.constituentLepton(); } else if(w_mu_finder.bosons().size() == 1 && w_e_finder.bosons().size() == 0) { MSG_DEBUG(" Event identified as W->mu nu."); if( !(w_mu_finder.mT() > 40*GeV && w_mu_finder.constituentNeutrino().Et() > 25.0*GeV) ) vetoEvent; lepton = w_mu_finder.constituentLepton(); } else { MSG_DEBUG(" No W found passing cuts."); vetoEvent; } all_jets = apply(e, "JETS").jetsByPt(Cuts::pt>20.0*GeV && Cuts::absrap<2.8); // Remove jets DeltaR < 0.5 from W lepton for(Jets::iterator it = all_jets.begin(); it != all_jets.end(); ++it) { double distance = deltaR( lepton, (*it) ); if(distance < 0.5) { MSG_DEBUG(" Veto jet DeltaR " << distance << " from W lepton"); } else { jets.push_back(*it); } } // Exactly two jets required if( jets.size() != 2 ) vetoEvent; // Calculate analysis quantities from the two jets double delta_jets = (jets.front().momentum() + jets.back().momentum()).pT(); double total_pt = jets.front().momentum().pT() + jets.back().momentum().pT(); double delta_jets_n = delta_jets / total_pt; _h_delta_jets->fill( delta_jets); // Jet pT balance _h_delta_jets_n->fill( delta_jets_n); // Jet pT balance, normalised by scalar dijet pT } /// Finalize void finalize() { // Data is normalised to 0.03 and 3 normalize(_h_delta_jets_n, 0.03); normalize(_h_delta_jets , 3.0 ); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_delta_jets_n; Histo1DPtr _h_delta_jets; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1216670); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc b/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc @@ -1,198 +1,198 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" 
#include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { class ATLAS_2013_I1217863_W : public Analysis { public: /// Constructor ATLAS_2013_I1217863_W(string name="ATLAS_2013_I1217863_W") : Analysis(name) { // the electron mode is used by default _mode = 1; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; declare(fs, "FS"); Cut cuts = Cuts::abseta < 2.47 && Cuts::pT > 25*GeV; // W finder for electrons and muons WFinder wf(fs, cuts, _mode==3? PID::MUON : PID::ELECTRON, 0.0*GeV, 1000.0*GeV, 35.0*GeV, 0.1, - WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wf, "WF"); // leading photon LeadingParticlesFinalState photonfs(FinalState(Cuts::abseta < 2.37 && Cuts::pT > 15*GeV)); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("WF")); jet_fs.addVetoOnThisFinalState(getProjection("LeadingPhoton")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); declare(jets, "Jets"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "isolatedFS"); // Book histograms book(_hist_EgammaT_incl , 7, 1, _mode); // dSigma / dE^gamma_T for Njet >= 0 book(_hist_EgammaT_excl , 8, 1, _mode); // dSigma / dE^gamma_T for Njet = 0 book(_hist_Njet_EgammaT15 ,15, 1, _mode); // dSigma / dNjet for E^gamma_T > 15 book(_hist_Njet_EgammaT60 ,16, 1, _mode); // dSigma / dNjet for E^gamma_T > 60 book(_hist_mWgammaT ,19, 1, _mode); // dSigma / dm^{Wgamma}_T } /// Perform the per-event analysis void analyze(const Event& event) { // retrieve leading photon Particles photons = apply(event, "LeadingPhoton").particles(); if (photons.size() != 1) vetoEvent; const Particle& leadingPhoton = photons[0]; if (leadingPhoton.Et() < 15.0*GeV) vetoEvent; if (leadingPhoton.abseta() > 2.37) vetoEvent; // check photon isolation double coneEnergy(0.0); Particles fs = apply(event, "isolatedFS").particles(); for (const Particle& p : fs) { if ( deltaR(leadingPhoton, p) < 0.4 ) coneEnergy += p.E(); } if ( coneEnergy / leadingPhoton.E() >= 0.5 ) vetoEvent; // retrieve W boson candidate const WFinder& wf = apply(event, "WF"); if ( wf.bosons().size() != 1 ) vetoEvent; // only one W boson candidate //const Particle& Wboson = wf.boson(); // retrieve constituent neutrino const Particle& neutrino = wf.constituentNeutrino(); if ( !(neutrino.pT() > 35.0*GeV) ) vetoEvent; // retrieve constituent lepton const Particle& lepton = wf.constituentLepton(); if ( !(lepton.pT() > 25.0*GeV && lepton.abseta() < 2.47) ) vetoEvent; // check photon-lepton overlap if ( !(deltaR(leadingPhoton, lepton) > 0.7) ) vetoEvent; // count jets const FastJets& jetfs = apply(event, "Jets"); Jets jets = jetfs.jets(cmpMomByEt); int goodJets = 0; for (const Jet& j : jets) { if ( !(j.Et() > 30.0*GeV) ) break; if ( (j.abseta() < 4.4) && \ (deltaR(leadingPhoton, j) > 0.3) && \ (deltaR(lepton, j) > 0.3) ) ++goodJets; } double Njets = double(goodJets) + 0.5; double photonEt = leadingPhoton.Et()*GeV; const FourMomentum& lep_gamma = lepton.momentum() + leadingPhoton.momentum(); double term1 = sqrt(lep_gamma.mass2() + lep_gamma.pT2()) + neutrino.Et(); double term2 = (lep_gamma + 
neutrino.momentum()).pT2(); double mWgammaT = sqrt(term1 * term1 - term2) * GeV; _hist_EgammaT_incl->fill(photonEt); _hist_Njet_EgammaT15->fill(Njets); if ( !goodJets ) _hist_EgammaT_excl->fill(photonEt); if (photonEt > 40.0*GeV) { _hist_mWgammaT->fill(mWgammaT); if (photonEt > 60.0*GeV) _hist_Njet_EgammaT60->fill(Njets); } } /// Normalise histograms etc., after the run void finalize() { const double xs_fb = crossSection()/femtobarn; const double sumw = sumOfWeights(); const double sf = xs_fb / sumw; scale(_hist_EgammaT_excl, sf); scale(_hist_EgammaT_incl, sf); normalize(_hist_Njet_EgammaT15); normalize(_hist_Njet_EgammaT60); normalize(_hist_mWgammaT); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _hist_EgammaT_incl; Histo1DPtr _hist_EgammaT_excl; Histo1DPtr _hist_Njet_EgammaT15; Histo1DPtr _hist_Njet_EgammaT60; Histo1DPtr _hist_mWgammaT; //@} }; class ATLAS_2013_I1217863_W_EL : public ATLAS_2013_I1217863_W { public: ATLAS_2013_I1217863_W_EL() : ATLAS_2013_I1217863_W("ATLAS_2013_I1217863_W_EL") { _mode = 2; } }; class ATLAS_2013_I1217863_W_MU : public ATLAS_2013_I1217863_W { public: ATLAS_2013_I1217863_W_MU() : ATLAS_2013_I1217863_W("ATLAS_2013_I1217863_W_MU") { _mode = 3; } }; DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_W); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_W_EL); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_W_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc b/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc @@ -1,193 +1,193 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { class ATLAS_2013_I1217863_Z : public Analysis { public: /// Constructor ATLAS_2013_I1217863_Z(string name="ATLAS_2013_I1217863_Z") : Analysis(name) { // the electron mode is used by default _mode = 1; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; declare(fs, "FS"); Cut cuts = Cuts::abseta < 2.47 && Cuts::pT > 25*GeV; // Z finder - ZFinder zf(fs, cuts, _mode==3? PID::MUON : PID::ELECTRON, 40.0*GeV, 1000.0*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); + ZFinder zf(fs, cuts, _mode==3? 
PID::MUON : PID::ELECTRON, 40.0*GeV, 1000.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zf, "ZF"); // leading photon LeadingParticlesFinalState photonfs(FinalState(Cuts::abseta < 2.37 && Cuts::pT > 15*GeV)); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("ZF")); jet_fs.addVetoOnThisFinalState(getProjection("LeadingPhoton")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); declare(jets, "Jets"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "isolatedFS"); // Book histograms book(_hist_EgammaT_incl ,11, 1, _mode); // dSigma / dE^gamma_T for Njet >= 0 book(_hist_EgammaT_excl ,12, 1, _mode); // dSigma / dE^gamma_T for Njet = 0 book(_hist_Njet_EgammaT15 ,17, 1, _mode); // dSigma / dNjet for E^gamma_T >= 15 book(_hist_Njet_EgammaT60 ,18, 1, _mode); // dSigma / dNjet for E^gamma_T >= 60 book(_hist_mZgamma ,20, 1, _mode); // dSigma / dm^{Zgamma} } /// Perform the per-event analysis void analyze(const Event& event) { // retrieve leading photon Particles photons = apply(event, "LeadingPhoton").particles(); if (photons.size() != 1) vetoEvent; const Particle& leadingPhoton = photons[0]; if (leadingPhoton.Et() < 15.0*GeV) vetoEvent; if (leadingPhoton.abseta() > 2.37) vetoEvent; // check photon isolation double coneEnergy(0.0); Particles fs = apply(event, "isolatedFS").particles(); for (const Particle& p : fs) { if ( deltaR(leadingPhoton, p) < 0.4 ) coneEnergy += p.E(); } if (coneEnergy / leadingPhoton.E() >= 0.5 ) vetoEvent; // retrieve Z boson candidate const ZFinder& zf = apply(event, "ZF"); if ( zf.bosons().size() != 1 ) vetoEvent; // only one Z boson candidate const Particle& Zboson = zf.boson(); if ( !(Zboson.mass() > 40.0*GeV) ) vetoEvent; // check charge of constituent leptons const ParticleVector& leptons = zf.constituents(); if (leptons.size() != 2 || leptons[0].charge() * leptons[1].charge() > 0.) 
vetoEvent; // check photon-lepton overlap for (const Particle& p : leptons) { if ( !(p.pT() > 25.0*GeV && p.abseta() < 2.47 && deltaR(leadingPhoton, p) > 0.7) ) vetoEvent; } // count jets const FastJets& jetfs = apply(event, "Jets"); Jets jets = jetfs.jets(cmpMomByEt); int goodJets = 0; for (const Jet& j : jets) { if ( !(j.Et() > 30.0*GeV) ) break; if ( (j.abseta() < 4.4) && \ (deltaR(leadingPhoton, j) > 0.3) && \ (deltaR(leptons[0], j) > 0.3) && \ (deltaR(leptons[1], j) > 0.3) ) ++goodJets; } double Njets = double(goodJets) + 0.5; double photonEt = leadingPhoton.Et()*GeV; double mZgamma = (Zboson.momentum() + leadingPhoton.momentum()).mass() * GeV; _hist_EgammaT_incl->fill(photonEt); _hist_Njet_EgammaT15->fill(Njets); if ( !goodJets ) _hist_EgammaT_excl->fill(photonEt); if (photonEt >= 40.0*GeV) { _hist_mZgamma->fill(mZgamma); if (photonEt >= 60.0*GeV) _hist_Njet_EgammaT60->fill(Njets); } } /// Normalise histograms etc., after the run void finalize() { const double xs_fb = crossSection()/femtobarn; const double sumw = sumOfWeights(); const double sf = xs_fb / sumw; scale(_hist_EgammaT_excl, sf); scale(_hist_EgammaT_incl, sf); normalize(_hist_Njet_EgammaT15); normalize(_hist_Njet_EgammaT60); normalize(_hist_mZgamma); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _hist_EgammaT_incl; Histo1DPtr _hist_EgammaT_excl; Histo1DPtr _hist_Njet_EgammaT15; Histo1DPtr _hist_Njet_EgammaT60; Histo1DPtr _hist_mZgamma; //@} }; class ATLAS_2013_I1217863_Z_EL : public ATLAS_2013_I1217863_Z { public: ATLAS_2013_I1217863_Z_EL() : ATLAS_2013_I1217863_Z("ATLAS_2013_I1217863_Z_EL") { _mode = 2; } }; class ATLAS_2013_I1217863_Z_MU : public ATLAS_2013_I1217863_Z { public: ATLAS_2013_I1217863_Z_MU() : ATLAS_2013_I1217863_Z("ATLAS_2013_I1217863_Z_MU") { _mode = 3; } }; DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_Z); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_Z_EL); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_Z_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1219109.cc b/analyses/pluginATLAS/ATLAS_2013_I1219109.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1219109.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1219109.cc @@ -1,157 +1,157 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" namespace Rivet { /// @brief ATLAS W+b measurement class ATLAS_2013_I1219109: public Analysis { public: ATLAS_2013_I1219109(string name = "ATLAS_2013_I1219109") : Analysis(name) { // the electron mode is used by default _mode = 1; } void init() { FinalState fs; declare(fs, "FinalState"); Cut cuts = Cuts::abseta < 2.5 && Cuts::pT >= 25*GeV; // W finder for electrons and muons WFinder wf(fs, cuts, _mode==3? 
PID::MUON : PID::ELECTRON, 0.0*GeV, MAXDOUBLE, 0.0, 0.1, - WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wf, "WF"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("WF")); FastJets fj(jet_fs, FastJets::ANTIKT, 0.4); fj.useInvisibles(); declare(fj, "Jets"); declare(HeavyHadrons(Cuts::abseta < 2.5 && Cuts::pT > 5*GeV), "BHadrons"); // book histograms book(_njet ,1, 1, _mode); // dSigma / dNjet book(_jet1_bPt ,2, 1, _mode); // dSigma / dBjetPt for Njet = 1 book(_jet2_bPt ,2, 2, _mode); // dSigma / dBjetPt for Njet = 2 } void analyze(const Event& event) { // retrieve W boson candidate const WFinder& wf = apply(event, "WF"); if( wf.bosons().size() != 1 ) vetoEvent; // only one W boson candidate if( !(wf.mT() > 60.0*GeV) ) vetoEvent; //const Particle& Wboson = wf.boson(); // retrieve constituent neutrino const Particle& neutrino = wf.constituentNeutrino(); if( !(neutrino.pT() > 25.0*GeV) ) vetoEvent; // retrieve constituent lepton const Particle& lepton = wf.constituentLepton(); // count good jets, check if good jet contains B hadron const Particles& bHadrons = apply(event, "BHadrons").bHadrons(); const Jets& jets = apply(event, "Jets").jetsByPt(25*GeV); int goodjets = 0, bjets = 0; double bPt = 0.; for(const Jet& j : jets) { if( (j.abseta() < 2.1) && (deltaR(lepton, j) > 0.5) ) { // this jet passes the selection! ++goodjets; // j.bTagged() uses ghost association which is // more elegant, but not what has been used in // this analysis originally, will match B had- // rons in eta-phi space instead for(const Particle& b : bHadrons) { if( deltaR(j, b) < 0.3 ) { // jet matched to B hadron! if(!bPt) bPt = j.pT() * GeV; // leading b-jet pT ++bjets; // count number of b-jets break; } } } } if( goodjets > 2 ) vetoEvent; // at most two jets if( !bjets ) vetoEvent; // at least one of them b-tagged double njets = double(goodjets); double ncomb = 3.0; _njet->fill(njets); _njet->fill(ncomb); if( goodjets == 1) _jet1_bPt->fill(bPt); else if(goodjets == 2) _jet2_bPt->fill(bPt); } void finalize() { // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_INFO("Cross-Section/pb: " << xs_pb ); MSG_INFO("Sum of weights : " << sumw ); MSG_INFO("nEvents : " << numEvents()); const double sf(xs_pb / sumw); scale(_njet, sf); scale(_jet1_bPt, sf); scale(_jet2_bPt, sf); } protected: size_t _mode; private: Histo1DPtr _njet; Histo1DPtr _jet1_bPt; Histo1DPtr _jet2_bPt; //bool _isMuon; }; class ATLAS_2013_I1219109_EL : public ATLAS_2013_I1219109 { public: ATLAS_2013_I1219109_EL() : ATLAS_2013_I1219109("ATLAS_2013_I1219109_EL") { _mode = 2; } }; class ATLAS_2013_I1219109_MU : public ATLAS_2013_I1219109 { public: ATLAS_2013_I1219109_MU() : ATLAS_2013_I1219109("ATLAS_2013_I1219109_MU") { _mode = 3; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1219109); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1219109_EL); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1219109_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1282447.cc b/analyses/pluginATLAS/ATLAS_2014_I1282447.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1282447.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1282447.cc @@ -1,595 +1,595 @@ // -*- C++ -*- // ATLAS W+c analysis ////////////////////////////////////////////////////////////////////////// /* Description of rivet analysis ATLAS_2014_I1282447 W+c production This rivet routine implements the ATLAS W+c analysis. 
Apart from those histograms, which are described and published on HEP Data, some helper histograms are also defined:

   d02-x01-y01, d02-x01-y02 and d08-x01-y01 are ratios; the numerator ("_plus") and denominator ("_minus")
   histograms are also given, so that the ratios can be reconstructed if need be (e.g. when running on
   separate samples).

   d05 and d06 are ratios over inclusive W production. The routine has to be run on a sample for inclusive
   W production in order to make sure the denominator ("_winc") is correctly filled.

   The ratios can be constructed using the following sample code (python divideWCharm.py):

   import yoda
   hists_wc   = yoda.read("Rivet_Wc.yoda")
   hists_winc = yoda.read("Rivet_Winc.yoda")

   ## division histograms --> ONLY for different plus minus runs
   # (merge before using yodamerge Rivet_plus.yoda Rivet_minus.yoda > Rivet_Wc.yoda)
   d02y01_plus  = hists_wc["/ATLAS_2014_I1282447/d02-x01-y01_plus"]
   d02y01_minus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y01_minus"]
   ratio_d02y01 = d02y01_plus.divide(d02y01_minus)
   ratio_d02y01.path = "/ATLAS_2014_I1282447/d02-x01-y01"
   d02y02_plus  = hists_wc["/ATLAS_2014_I1282447/d02-x01-y02_plus"]
   d02y02_minus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y02_minus"]
   ratio_d02y02 = d02y02_plus.divide(d02y02_minus)
   ratio_d02y02.path = "/ATLAS_2014_I1282447/d02-x01-y02"
   d08y01_plus  = hists_wc["/ATLAS_2014_I1282447/d08-x01-y01_plus"]
   d08y01_minus = hists_wc["/ATLAS_2014_I1282447/d08-x01-y01_minus"]
   ratio_d08y01 = d08y01_plus.divide(d08y01_minus)
   ratio_d08y01.path = "/ATLAS_2014_I1282447/d08-x01-y01"

   # inclusive cross section
   h_winc  = hists_winc["/ATLAS_2014_I1282447/d05-x01-y01"]
   h_d     = hists_wc["/ATLAS_2014_I1282447/d01-x01-y02"]
   h_dstar = hists_wc["/ATLAS_2014_I1282447/d01-x01-y03"]
   ratio_wd = h_d.divide(h_winc)
   ratio_wd.path = "/ATLAS_2014_I1282447/d05-x01-y02"
   ratio_wdstar = h_dstar.divide(h_winc)
   ratio_wdstar.path = "/ATLAS_2014_I1282447/d05-x01-y03"

   # pT differential
   h_winc_plus    = hists_winc["/ATLAS_2014_I1282447/d06-x01-y01_winc"]
   h_winc_minus   = hists_winc["/ATLAS_2014_I1282447/d06-x01-y02_winc"]
   h_wd_plus      = hists_wc["/ATLAS_2014_I1282447/d06-x01-y01_wplus"]
   h_wd_minus     = hists_wc["/ATLAS_2014_I1282447/d06-x01-y02_wminus"]
   h_wdstar_plus  = hists_wc["/ATLAS_2014_I1282447/d06-x01-y03_wplus"]
   h_wdstar_minus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y04_wminus"]
   ratio_wd_plus = h_wd_plus.divide(h_winc_plus)
   ratio_wd_plus.path = "/ATLAS_2014_I1282447/d06-x01-y01"
   ratio_wd_minus = h_wd_minus.divide(h_winc_minus)
   ratio_wd_minus.path = "/ATLAS_2014_I1282447/d06-x01-y02"
   ratio_wdstar_plus = h_wdstar_plus.divide(h_winc_plus)
   ratio_wdstar_plus.path = "/ATLAS_2014_I1282447/d06-x01-y03"
   ratio_wdstar_minus = h_wdstar_minus.divide(h_winc_minus)
   ratio_wdstar_minus.path = "/ATLAS_2014_I1282447/d06-x01-y04"

   ## copy other histograms for plotting
   d01x01y01 = hists_wc["/ATLAS_2014_I1282447/d01-x01-y01"]
   d01x01y01.path = "/ATLAS_2014_I1282447/d01-x01-y01"
   d01x01y02 = hists_wc["/ATLAS_2014_I1282447/d01-x01-y02"]
   d01x01y02.path = "/ATLAS_2014_I1282447/d01-x01-y02"
   d01x01y03 = hists_wc["/ATLAS_2014_I1282447/d01-x01-y03"]
   d01x01y03.path = "/ATLAS_2014_I1282447/d01-x01-y03"
   d03x01y01 = hists_wc["/ATLAS_2014_I1282447/d03-x01-y01"]
   d03x01y01.path =
"/ATLAS_2014_I1282447/d03-x01-y01" d03x01y02= hists_wc["/ATLAS_2014_I1282447/d03-x01-y02"] d03x01y02.path = "/ATLAS_2014_I1282447/d03-x01-y02" d04x01y01= hists_wc["/ATLAS_2014_I1282447/d04-x01-y01"] d04x01y01.path = "/ATLAS_2014_I1282447/d04-x01-y01" d04x01y02= hists_wc["/ATLAS_2014_I1282447/d04-x01-y02"] d04x01y02.path = "/ATLAS_2014_I1282447/d04-x01-y02" d04x01y03= hists_wc["/ATLAS_2014_I1282447/d04-x01-y03"] d04x01y03.path = "/ATLAS_2014_I1282447/d04-x01-y03" d04x01y04= hists_wc["/ATLAS_2014_I1282447/d04-x01-y04"] d04x01y04.path = "/ATLAS_2014_I1282447/d04-x01-y04" d07x01y01= hists_wc["/ATLAS_2014_I1282447/d07-x01-y01"] d07x01y01.path = "/ATLAS_2014_I1282447/d07-x01-y01" yoda.write([ratio_d02y01,ratio_d02y02,ratio_d08y01, ratio_wd ,ratio_wdstar,ratio_wd_plus,ratio_wd_minus ,ratio_wdstar_plus,ratio_wdstar_minus,d01x01y01,d01x01y02,d01x01y03,d03x01y01,d03x01y02,d04x01y01,d04x01y02,d04x01y03,d04x01y04,d07x01y01],"validation.yoda") */ ////////////////////////////////////////////////////////////////////////// #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class ATLAS_2014_I1282447 : public Analysis { public: /// Constructor ATLAS_2014_I1282447() : Analysis("ATLAS_2014_I1282447") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// @todo Initialise and register projections here UnstableFinalState fs; Cut cuts = Cuts::etaIn(-2.5, 2.5) & (Cuts::pT > 20*GeV); /// should use sample WITHOUT QED radiation off the electron - WFinder wfinder_born_el(fs, cuts, PID::ELECTRON, 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::CLUSTERALL, WFinder::TRACK); + WFinder wfinder_born_el(fs, cuts, PID::ELECTRON, 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::ClusterPhotons::ALL, WFinder::AddPhotons::YES); declare(wfinder_born_el, "WFinder_born_el"); - WFinder wfinder_born_mu(fs, cuts, PID::MUON , 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::CLUSTERALL, WFinder::TRACK); + WFinder wfinder_born_mu(fs, cuts, PID::MUON , 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::ClusterPhotons::ALL, WFinder::AddPhotons::YES); declare(wfinder_born_mu, "WFinder_born_mu"); // all hadrons that could be coming from a charm decay -- // -- for safety, use region -3.5 - 3.5 declare(UnstableFinalState(Cuts::abseta <3.5), "hadrons"); // Input for the jets: no neutrinos, no muons, and no electron which passed the electron cuts // also: NO electron, muon or tau (needed due to ATLAS jet truth reconstruction feature) VetoedFinalState veto; veto.addVetoOnThisFinalState(wfinder_born_el); veto.addVetoOnThisFinalState(wfinder_born_mu); veto.addVetoPairId(PID::ELECTRON); veto.addVetoPairId(PID::MUON); veto.addVetoPairId(PID::TAU); FastJets jets(veto, FastJets::ANTIKT, 0.4); declare(jets, "jets"); // Book histograms // charge separated integrated cross sections book(_hist_wcjet_charge ,"d01-x01-y01"); book(_hist_wd_charge ,"d01-x01-y02"); book(_hist_wdstar_charge ,"d01-x01-y03"); // charge integrated total cross sections book(_hist_wcjet_ratio,"d02-x01-y01"); book(_hist_wd_ratio ,"d02-x01-y02"); book(_hist_wcjet_minus ,"d02-x01-y01_minus"); book(_hist_wd_minus ,"d02-x01-y02_minus"); book(_hist_wcjet_plus ,"d02-x01-y01_plus"); book(_hist_wd_plus ,"d02-x01-y02_plus"); // eta distributions book(_hist_wplus_wcjet_eta_lep ,"d03-x01-y01"); book(_hist_wminus_wcjet_eta_lep ,"d03-x01-y02"); 
book(_hist_wplus_wdminus_eta_lep ,"d04-x01-y01"); book(_hist_wminus_wdplus_eta_lep ,"d04-x01-y02"); book(_hist_wplus_wdstar_eta_lep ,"d04-x01-y03"); book(_hist_wminus_wdstar_eta_lep ,"d04-x01-y04"); // ratio of cross section (WD over W inclusive) // postprocess! book(_hist_w_inc ,"d05-x01-y01"); book(_hist_wd_winc_ratio ,"d05-x01-y02"); book(_hist_wdstar_winc_ratio,"d05-x01-y03"); // ratio of cross section (WD over W inclusive -- function of pT of D meson) book(_hist_wplusd_wplusinc_pt_ratio ,"d06-x01-y01"); book(_hist_wminusd_wminusinc_pt_ratio ,"d06-x01-y02"); book(_hist_wplusdstar_wplusinc_pt_ratio ,"d06-x01-y03"); book(_hist_wminusdstar_wminusinc_pt_ratio,"d06-x01-y04"); // could use for postprocessing! book(_hist_wplusd_wplusinc_pt ,"d06-x01-y01_wplus"); book(_hist_wminusd_wminusinc_pt ,"d06-x01-y02_wminus"); book(_hist_wplusdstar_wplusinc_pt ,"d06-x01-y03_wplus"); book(_hist_wminusdstar_wminusinc_pt ,"d06-x01-y04_wminus"); book(_hist_wplus_winc ,"d06-x01-y01_winc"); book(_hist_wminus_winc ,"d06-x01-y02_winc"); // jet multiplicity of charge integrated W+cjet cross section (+0 or +1 jet in addition to the charm jet) book(_hist_wcjet_jets ,"d07-x01-y01"); // jet multiplicity of W+cjet cross section ratio (+0 or +1 jet in addition to the charm jet) book(_hist_wcjet_jets_ratio ,"d08-x01-y01"); book(_hist_wcjet_jets_plus ,"d08-x01-y01_plus"); book(_hist_wcjet_jets_minus ,"d08-x01-y01_minus"); } /// Perform the per-event analysis void analyze(const Event& event) { double charge_weight = 0; // account for OS/SS events int lepton_charge = 0; double lepton_eta = 0.; /// Find leptons const WFinder& wfinder_born_el = apply(event, "WFinder_born_el"); const WFinder& wfinder_born_mu = apply(event, "WFinder_born_mu"); if (wfinder_born_el.empty() && wfinder_born_mu.empty()) { MSG_DEBUG("No W bosons found"); vetoEvent; } bool keepevent = false; //check electrons if (!wfinder_born_el.empty()) { const FourMomentum nu = wfinder_born_el.constituentNeutrinos()[0]; if (wfinder_born_el.mT() > 40*GeV && nu.pT() > 25*GeV) { keepevent = true; lepton_charge = wfinder_born_el.constituentLeptons()[0].charge(); lepton_eta = fabs(wfinder_born_el.constituentLeptons()[0].pseudorapidity()); } } //check muons if (!wfinder_born_mu.empty()) { const FourMomentum nu = wfinder_born_mu.constituentNeutrinos()[0]; if (wfinder_born_mu.mT() > 40*GeV && nu.pT() > 25*GeV) { keepevent = true; lepton_charge = wfinder_born_mu.constituentLeptons()[0].charge(); lepton_eta = fabs(wfinder_born_mu.constituentLeptons()[0].pseudorapidity()); } } if (!keepevent) { MSG_DEBUG("Event does not pass mT and MET cuts"); vetoEvent; } if (lepton_charge > 0) { _hist_wplus_winc->fill(10.); _hist_wplus_winc->fill(16.); _hist_wplus_winc->fill(30.); _hist_wplus_winc->fill(60.); _hist_w_inc->fill(+1); } else if (lepton_charge < 0) { _hist_wminus_winc->fill(10.); _hist_wminus_winc->fill(16.); _hist_wminus_winc->fill(30.); _hist_wminus_winc->fill(60.); _hist_w_inc->fill(-1); } // Find hadrons in the event const UnstableFinalState& fs = apply(event, "hadrons"); /// FIND Different channels // 1: wcjet // get jets const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT>25.0*GeV && Cuts::abseta<2.5); // loop over jets to select jets used to match to charm Jets js; int matched_charmHadron = 0; double charm_charge = 0.; int njets = 0; int nj = 0; bool mat_jet = false; double ptcharm = 0; if (matched_charmHadron > -1) { for (const Jet& j : jets) { mat_jet = false; njets += 1; for (const Particle& p : fs.particles()) { /// @todo Avoid touching HepMC! 
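// Charm-jet matching: keep weakly-decaying charm hadrons (pT > 5 GeV, not from b decays, no further
// charmed children) and match them to the jet within dR < 0.3; the PDG id of the highest-pT matched
// hadron is recorded to decide the OS/SS charge weight below.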
const GenParticle* part = p.genParticle(); if (p.hasCharm()) { //if(isFromBDecay(p)) continue; if (p.fromBottom()) continue; if (p.pT() < 5*GeV ) continue; if (hasCharmedChildren(part)) continue; if (deltaR(p, j) < 0.3) { mat_jet = true; if (p.pT() > ptcharm) { charm_charge = part->pdg_id(); ptcharm = p.pT(); } } } } if (mat_jet) nj++; } if (charm_charge * lepton_charge > 0) charge_weight = -1; else charge_weight = +1; if (nj == 1) { if (lepton_charge > 0) { _hist_wcjet_charge ->fill( 1, charge_weight); _hist_wcjet_plus ->fill( 0, charge_weight); _hist_wplus_wcjet_eta_lep ->fill(lepton_eta, charge_weight); _hist_wcjet_jets_plus ->fill(njets-1 , charge_weight); } else if (lepton_charge < 0) { _hist_wcjet_charge ->fill( -1, charge_weight); _hist_wcjet_minus ->fill( 0, charge_weight); _hist_wminus_wcjet_eta_lep->fill(lepton_eta, charge_weight); _hist_wcjet_jets_minus ->fill(njets-1 , charge_weight); } _hist_wcjet_jets->fill(njets-1, charge_weight); } } // // 1/2: w+d(*) meson for (const Particle& p : fs.particles()) { /// @todo Avoid touching HepMC! const GenParticle* part = p.genParticle(); if (p.pT() < 8*GeV) continue; if (fabs(p.eta()) > 2.2) continue; // W+D if (abs(part->pdg_id()) == 411) { if (lepton_charge * part->pdg_id() > 0) charge_weight = -1; else charge_weight = +1; // fill histos if (lepton_charge > 0) { _hist_wd_charge ->fill( 1, charge_weight); _hist_wd_plus ->fill( 0, charge_weight); _hist_wplus_wdminus_eta_lep->fill(lepton_eta, charge_weight); _hist_wplusd_wplusinc_pt ->fill( p.pT(), charge_weight); } else if (lepton_charge < 0) { _hist_wd_charge ->fill( -1, charge_weight); _hist_wd_minus ->fill( 0, charge_weight); _hist_wminus_wdplus_eta_lep->fill(lepton_eta, charge_weight); _hist_wminusd_wminusinc_pt ->fill(p.pT() , charge_weight); } } // W+Dstar if ( abs(part->pdg_id()) == 413 ) { if (lepton_charge*part->pdg_id() > 0) charge_weight = -1; else charge_weight = +1; if (lepton_charge > 0) { _hist_wdstar_charge->fill(+1, charge_weight); _hist_wd_plus->fill( 0, charge_weight); _hist_wplus_wdstar_eta_lep->fill( lepton_eta, charge_weight); _hist_wplusdstar_wplusinc_pt->fill( p.pT(), charge_weight); } else if (lepton_charge < 0) { _hist_wdstar_charge->fill(-1, charge_weight); _hist_wd_minus->fill(0, charge_weight); _hist_wminus_wdstar_eta_lep->fill(lepton_eta, charge_weight); _hist_wminusdstar_wminusinc_pt->fill(p.pT(), charge_weight); } } } } /// Normalise histograms etc., after the run void finalize() { const double sf = crossSection() / sumOfWeights(); // norm to cross section // d01 scale(_hist_wcjet_charge, sf); scale(_hist_wd_charge, sf); scale(_hist_wdstar_charge, sf); //d02 scale(_hist_wcjet_plus, sf); scale(_hist_wcjet_minus, sf); scale(_hist_wd_plus, sf); scale(_hist_wd_minus, sf); divide(_hist_wcjet_plus, _hist_wcjet_minus, _hist_wcjet_ratio); divide(_hist_wd_plus, _hist_wd_minus, _hist_wd_ratio ); //d03 scale(_hist_wplus_wcjet_eta_lep, sf); scale(_hist_wminus_wcjet_eta_lep, sf); //d04 scale(_hist_wplus_wdminus_eta_lep, crossSection()/sumOfWeights()); scale(_hist_wminus_wdplus_eta_lep, crossSection()/sumOfWeights()); scale(_hist_wplus_wdstar_eta_lep , crossSection()/sumOfWeights()); scale(_hist_wminus_wdstar_eta_lep, crossSection()/sumOfWeights()); //d05 scale(_hist_w_inc, 0.01 * sf); // in percent --> /100 divide(_hist_wd_charge, _hist_w_inc, _hist_wd_winc_ratio ); divide(_hist_wdstar_charge, _hist_w_inc, _hist_wdstar_winc_ratio); //d06, in percentage! 
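// Scale the W+D(*) pT spectra to the cross-section and the inclusive-W denominators to per-cent
// (factor 0.01), then build the d06 ratio scatters with divide().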
scale(_hist_wplusd_wplusinc_pt, sf); scale(_hist_wminusd_wminusinc_pt, sf); scale(_hist_wplusdstar_wplusinc_pt, sf); scale(_hist_wminusdstar_wminusinc_pt, sf); scale(_hist_wplus_winc, 0.01 * sf); // in percent --> /100 scale(_hist_wminus_winc, 0.01 * sf); // in percent --> /100 divide(_hist_wplusd_wplusinc_pt, _hist_wplus_winc , _hist_wplusd_wplusinc_pt_ratio ); divide(_hist_wminusd_wminusinc_pt, _hist_wminus_winc, _hist_wminusd_wminusinc_pt_ratio ); divide(_hist_wplusdstar_wplusinc_pt, _hist_wplus_winc , _hist_wplusdstar_wplusinc_pt_ratio ); divide(_hist_wminusdstar_wminusinc_pt, _hist_wminus_winc, _hist_wminusdstar_wminusinc_pt_ratio); //d07 scale(_hist_wcjet_jets, sf); //d08 scale(_hist_wcjet_jets_minus, sf); scale(_hist_wcjet_jets_plus, sf); divide(_hist_wcjet_jets_plus, _hist_wcjet_jets_minus , _hist_wcjet_jets_ratio); } //@} private: // Data members like post-cuts event weight counters go here // Check whether particle comes from b-decay /// @todo Use built-in method and avoid HepMC bool isFromBDecay(const Particle& p) { bool isfromB = false; if (p.genParticle() == nullptr) return false; const GenParticle* part = p.genParticle(); const GenVertex* ivtx = const_cast(part->production_vertex()); while (ivtx) { if (ivtx->particles_in_size() < 1) { isfromB = false; break; } const HepMC::GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin(); part = (*iPart_invtx); if (!part) { isfromB = false; break; } isfromB = PID::hasBottom(part->pdg_id()); if (isfromB == true) break; ivtx = const_cast(part->production_vertex()); if ( part->pdg_id() == 2212 || !ivtx ) break; // reached beam } return isfromB; } // Check whether particle has charmed children /// @todo Use built-in method and avoid HepMC! bool hasCharmedChildren(const GenParticle *part) { bool hasCharmedChild = false; if (part == nullptr) return false; const GenVertex* ivtx = const_cast(part->end_vertex()); if (ivtx == nullptr) return false; // if (ivtx->particles_out_size() < 2) return false; HepMC::GenVertex::particles_out_const_iterator iPart_invtx = ivtx->particles_out_const_begin(); HepMC::GenVertex::particles_out_const_iterator end_invtx = ivtx->particles_out_const_end(); for ( ; iPart_invtx != end_invtx; iPart_invtx++ ) { const GenParticle* p2 = (*iPart_invtx); if (p2 == part) continue; hasCharmedChild = PID::hasCharm(p2->pdg_id()); if (hasCharmedChild == true) break; hasCharmedChild = hasCharmedChildren(p2); if (hasCharmedChild == true) break; } return hasCharmedChild; } private: /// @name Histograms //@{ //d01-x01- Histo1DPtr _hist_wcjet_charge; Histo1DPtr _hist_wd_charge; Histo1DPtr _hist_wdstar_charge; //d02-x01- Scatter2DPtr _hist_wcjet_ratio; Scatter2DPtr _hist_wd_ratio; Histo1DPtr _hist_wcjet_plus; Histo1DPtr _hist_wd_plus; Histo1DPtr _hist_wcjet_minus; Histo1DPtr _hist_wd_minus; //d03-x01- Histo1DPtr _hist_wplus_wcjet_eta_lep; Histo1DPtr _hist_wminus_wcjet_eta_lep; //d04-x01- Histo1DPtr _hist_wplus_wdminus_eta_lep; Histo1DPtr _hist_wminus_wdplus_eta_lep; //d05-x01- Histo1DPtr _hist_wplus_wdstar_eta_lep; Histo1DPtr _hist_wminus_wdstar_eta_lep; // postprocessing histos //d05-x01 Histo1DPtr _hist_w_inc; Scatter2DPtr _hist_wd_winc_ratio; Scatter2DPtr _hist_wdstar_winc_ratio; //d06-x01 Histo1DPtr _hist_wplus_winc; Histo1DPtr _hist_wminus_winc; Scatter2DPtr _hist_wplusd_wplusinc_pt_ratio; Scatter2DPtr _hist_wminusd_wminusinc_pt_ratio; Scatter2DPtr _hist_wplusdstar_wplusinc_pt_ratio; Scatter2DPtr _hist_wminusdstar_wminusinc_pt_ratio; Histo1DPtr _hist_wplusd_wplusinc_pt ; Histo1DPtr 
_hist_wminusd_wminusinc_pt; Histo1DPtr _hist_wplusdstar_wplusinc_pt; Histo1DPtr _hist_wminusdstar_wminusinc_pt; // d07-x01 Histo1DPtr _hist_wcjet_jets ; //d08-x01 Scatter2DPtr _hist_wcjet_jets_ratio ; Histo1DPtr _hist_wcjet_jets_plus ; Histo1DPtr _hist_wcjet_jets_minus; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1282447); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1306294.cc b/analyses/pluginATLAS/ATLAS_2014_I1306294.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1306294.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1306294.cc @@ -1,230 +1,230 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class ATLAS_2014_I1306294 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructors ATLAS_2014_I1306294(std::string name="ATLAS_2014_I1306294") : Analysis(name) { _mode = 1; } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; Cut cuts = Cuts::etaIn(-2.5,2.5) & (Cuts::pT > 20.0*GeV); - ZFinder zfinder(fs, cuts, _mode==1? PID::ELECTRON : PID::MUON, 76.0*GeV, 106.0*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); + ZFinder zfinder(fs, cuts, _mode==1? PID::ELECTRON : PID::MUON, 76.0*GeV, 106.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder, "ZFinder"); //FastJets jetpro1( getProjection("ZFinder").remainingFinalState(), FastJets::ANTIKT, 0.4); VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("ZFinder")); FastJets jetpro1(jet_fs, FastJets::ANTIKT, 0.4); jetpro1.useInvisibles(); declare(jetpro1, "AntiKtJets04"); declare(HeavyHadrons(), "BHadrons"); //Histograms with data binning book(_h_bjet_Pt , 3, 1, 1); book(_h_bjet_Y , 5, 1, 1); book(_h_bjet_Yboost , 7, 1, 1); book(_h_bjet_DY20 , 9, 1, 1); book(_h_bjet_ZdPhi20 ,11, 1, 1); book(_h_bjet_ZdR20 ,13, 1, 1); book(_h_bjet_ZPt ,15, 1, 1); book(_h_bjet_ZY ,17, 1, 1); book(_h_2bjet_dR ,21, 1, 1); book(_h_2bjet_Mbb ,23, 1, 1); book(_h_2bjet_ZPt ,25, 1, 1); book(_h_2bjet_ZY ,27, 1, 1); } //========================================================================================== /// Perform the per-event analysis void analyze(const Event& e) { //--------------------------- // -- check we have a Z: const ZFinder& zfinder = apply(e, "ZFinder"); if(zfinder.bosons().size() != 1) vetoEvent; const ParticleVector boson_s = zfinder.bosons(); const Particle boson_f = boson_s[0] ; const ParticleVector zleps = zfinder.constituents(); //--------------------------- //--------------------------- //------------- stop processing the event if no true b-partons or hadrons are found const Particles& allBs = apply(e, "BHadrons").bHadrons(5.0*GeV); Particles stableBs; for(Particle p : allBs) { if(p.abseta() < 2.5) stableBs += p; } if( stableBs.empty() ) vetoEvent; //--------------------------- // -- get the b-jets: const Jets& jets = apply(e, "AntiKtJets04").jetsByPt(Cuts::pT >20.0*GeV && Cuts::abseta <2.4); Jets b_jets; for(const Jet& jet : jets) { //veto overlaps with Z leptons: bool veto = false; for(const Particle& zlep : zleps) { if(deltaR(jet, zlep) < 0.5) veto = true; } if(veto) continue; for(const Particle& bhadron : stableBs) { if( deltaR(jet, bhadron) <= 0.3 ) { b_jets.push_back(jet); break; // match } } // end loop on b-hadrons } //and make sure we 
have at least 1: if(b_jets.empty()) vetoEvent; //--------------------------- // fill the plots: const double ZpT = boson_f.pT()/GeV; const double ZY = boson_f.absrap(); _h_bjet_ZPt->fill(ZpT); _h_bjet_ZY ->fill(ZY); for(const Jet& jet : b_jets) { _h_bjet_Pt->fill(jet.pT()/GeV); _h_bjet_Y ->fill(jet.absrap()); const double Yboost = 0.5 * fabs(boson_f.rapidity() + jet.rapidity()); _h_bjet_Yboost->fill(Yboost); if(ZpT > 20.) { const double ZBDY = fabs( boson_f.rapidity() - jet.rapidity() ); const double ZBDPHI = fabs( deltaPhi(jet.phi(), boson_f.phi()) ); const double ZBDR = deltaR(jet, boson_f, RAPIDITY); _h_bjet_DY20->fill( ZBDY); _h_bjet_ZdPhi20->fill(ZBDPHI); _h_bjet_ZdR20->fill( ZBDR); } } //loop over b-jets if (b_jets.size() < 2) return; _h_2bjet_ZPt->fill(ZpT); _h_2bjet_ZY ->fill(ZY); const double BBDR = deltaR(b_jets[0], b_jets[1], RAPIDITY); const double Mbb = (b_jets[0].momentum() + b_jets[1].momentum()).mass(); _h_2bjet_dR ->fill(BBDR); _h_2bjet_Mbb->fill(Mbb); } // end of analysis loop /// Normalise histograms etc., after the run void finalize() { const double normfac = crossSection() / sumOfWeights(); scale( _h_bjet_Pt, normfac); scale( _h_bjet_Y, normfac); scale( _h_bjet_Yboost, normfac); scale( _h_bjet_DY20, normfac); scale( _h_bjet_ZdPhi20, normfac); scale( _h_bjet_ZdR20, normfac); scale( _h_bjet_ZPt, normfac); scale( _h_bjet_ZY, normfac); scale( _h_2bjet_dR, normfac); scale( _h_2bjet_Mbb, normfac); scale( _h_2bjet_ZPt, normfac); scale( _h_2bjet_ZY, normfac); } //@} protected: // Data members like post-cuts event weight counters go here size_t _mode; private: Histo1DPtr _h_bjet_Pt; Histo1DPtr _h_bjet_Y; Histo1DPtr _h_bjet_Yboost; Histo1DPtr _h_bjet_DY20; Histo1DPtr _h_bjet_ZdPhi20; Histo1DPtr _h_bjet_ZdR20; Histo1DPtr _h_bjet_ZPt; Histo1DPtr _h_bjet_ZY; Histo1DPtr _h_2bjet_dR; Histo1DPtr _h_2bjet_Mbb; Histo1DPtr _h_2bjet_ZPt; Histo1DPtr _h_2bjet_ZY; }; class ATLAS_2014_I1306294_EL : public ATLAS_2014_I1306294 { public: ATLAS_2014_I1306294_EL() : ATLAS_2014_I1306294("ATLAS_2014_I1306294_EL") { _mode = 1; } }; class ATLAS_2014_I1306294_MU : public ATLAS_2014_I1306294 { public: ATLAS_2014_I1306294_MU() : ATLAS_2014_I1306294("ATLAS_2014_I1306294_MU") { _mode = 2; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306294); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306294_MU); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306294_EL); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1310835.cc b/analyses/pluginATLAS/ATLAS_2014_I1310835.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1310835.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1310835.cc @@ -1,267 +1,267 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" namespace Rivet { /// @brief H(125)->ZZ->4l at 8 TeV class ATLAS_2014_I1310835 : public Analysis { public: /// Default constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2014_I1310835); void init() { const FinalState fs(Cuts::abseta < 5.0); PromptFinalState photons(Cuts::abspid == PID::PHOTON); PromptFinalState bare_el(Cuts::abspid == PID::ELECTRON); PromptFinalState bare_mu(Cuts::abspid == PID::MUON); // Selection: lepton selection Cut etaranges_el = Cuts::abseta < 2.47 && Cuts::pT > 7*GeV; DressedLeptons electron_sel4l(photons, bare_el, 0.1, etaranges_el, false); addProjection(electron_sel4l, "electrons"); Cut etaranges_mu = Cuts::abseta < 2.7 && Cuts::pT > 6*GeV; DressedLeptons muon_sel4l(photons, bare_mu, 0.1, 
etaranges_mu, false); addProjection(muon_sel4l, "muons"); - FastJets jetpro(fs, FastJets::ANTIKT, 0.4, JetAlg::NO_MUONS, JetAlg::NO_INVISIBLES); + FastJets jetpro(fs, FastJets::ANTIKT, 0.4, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); addProjection(jetpro, "jet"); // Book histos book(_h_pt , 1, 1, 1); book(_h_rapidity , 2, 1, 1); book(_h_m34 , 3, 1, 1); book(_h_costheta , 4, 1, 1); book(_h_njets , 5, 1, 1); book(_h_leadingjetpt, 6, 1, 1); } /// Do the analysis void analyze(const Event& e) { //////////////////////////////////////////////////////////////////// // preselection of leptons for ZZ-> llll final state //////////////////////////////////////////////////////////////////// const vector& mu_sel4l = applyProjection(e, "muons").dressedLeptons(); const vector& el_sel4l = applyProjection(e, "electrons").dressedLeptons(); vector leptonsFS_sel4l; leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), mu_sel4l.begin(), mu_sel4l.end() ); leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), el_sel4l.begin(), el_sel4l.end() ); ///////////////////////////////////////////////////////////////////////////// /// H->ZZ->4l pairing ///////////////////////////////////////////////////////////////////////////// size_t el_p = 0; size_t el_n = 0; size_t mu_p = 0; size_t mu_n = 0; for (const Particle& l : leptonsFS_sel4l) { if (l.abspid() == PID::ELECTRON) { if (l.pid() < 0) ++el_n; if (l.pid() > 0) ++el_p; } else if (l.abspid() == PID::MUON) { if (l.pid() < 0) ++mu_n; if (l.pid() > 0) ++mu_p; } } bool pass_sfos = ( (el_p >=2 && el_n >=2) || (mu_p >=2 && mu_n >=2) || (el_p >=1 && el_n >=1 && mu_p >=1 && mu_n >=1) ); if (!pass_sfos) vetoEvent; Zstate Z1, Z2, Zcand; size_t n_parts = leptonsFS_sel4l.size(); size_t l1_index = 0; size_t l2_index = 0; // determine Z1 first double min_mass_diff = -1; for (size_t i = 0; i < n_parts; ++i) { for (size_t j = 0; j < n_parts; ++j) { if (i >= j) continue; if (leptonsFS_sel4l[i].pid() != -1*leptonsFS_sel4l[j].pid()) continue; //only pair SFOS leptons Zcand = Zstate( ParticlePair(leptonsFS_sel4l[i], leptonsFS_sel4l[j]) ); double mass_diff = fabs( Zcand.mom().mass() - 91.1876 ); if (min_mass_diff == -1 || mass_diff < min_mass_diff) { min_mass_diff = mass_diff; Z1 = Zcand; l1_index = i; l2_index = j; } } } //determine Z2 second min_mass_diff = -1; for (size_t i = 0; i < n_parts; ++i) { if (i == l1_index || i == l2_index) continue; for (size_t j = 0; j < n_parts; ++j) { if (j == l1_index || j == l2_index || i >= j) continue; if (leptonsFS_sel4l[i].pid() != -1*leptonsFS_sel4l[j].pid()) continue; // only pair SFOS leptons Zcand = Zstate( ParticlePair(leptonsFS_sel4l[i], leptonsFS_sel4l[j]) ); double mass_diff = fabs( Zcand.mom().mass() - 91.1876 ); if (min_mass_diff == -1 || mass_diff < min_mass_diff) { min_mass_diff = mass_diff; Z2 = Zcand; } } } Particles leptons_sel4l; leptons_sel4l.push_back(Z1.first); leptons_sel4l.push_back(Z1.second); leptons_sel4l.push_back(Z2.first); leptons_sel4l.push_back(Z2.second); //////////////////////////////////////////////////////////////////////////// // Kinematic Requirements /////////////////////////////////////////////////////////////////////////// //leading lepton pT requirement std::vector lepton_pt; for (const Particle& i : leptons_sel4l) lepton_pt.push_back(i.pT() / GeV); std::sort(lepton_pt.begin(), lepton_pt.end(), [](const double pT1, const double pT2) { return pT1 > pT2; }); if (!(lepton_pt[0] > 20*GeV && lepton_pt[1] > 15*GeV && lepton_pt[2] > 10*GeV)) vetoEvent; //invariant mass requirements if (!(inRange(Z1.mom().mass(), 50*GeV, 106*GeV) && 
inRange(Z2.mom().mass(), 12*GeV, 115*GeV))) vetoEvent; //lepton separation requirements for (unsigned int i = 0; i < 4; ++i) { for (unsigned int j = 0; j < 4; ++j) { if (i >= j) continue; double dR = deltaR(leptons_sel4l[i], leptons_sel4l[j]); bool sameflavor = leptons_sel4l[i].abspid() == leptons_sel4l[j].abspid(); if ( sameflavor && dR < 0.1) vetoEvent; if (!sameflavor && dR < 0.2) vetoEvent; } } // J/Psi veto requirement for (unsigned int i = 0; i < 4; ++i) { for (unsigned int j = 0; j < 4; ++j) { if (i >= j) continue; if ( leptons_sel4l[i].pid() != -1*leptons_sel4l[j].pid() ) continue; if ((leptons_sel4l[i].momentum() + leptons_sel4l[j].momentum()).mass() <= 5*GeV) vetoEvent; } } // 4-lepton invariant mass requirement double m4l = (Z1.mom() + Z2.mom()).mass(); if (!(inRange(m4l, 118*GeV, 129*GeV))) vetoEvent; //////////////////////////////////////////////////////////////////////////// // Higgs observables /////////////////////////////////////////////////////////////////////////// FourMomentum Higgs = Z1.mom() + Z2.mom(); double H4l_pt = Higgs.pt()/GeV; double H4l_rapidity = Higgs.absrap(); LorentzTransform HRF_boost; //HRF_boost.mkFrameTransformFromBeta(Higgs.boostVector()); HRF_boost.setBetaVec(- Higgs.boostVector()); FourMomentum Z1_in_HRF = HRF_boost.transform( Z1.mom() ); double H4l_costheta = fabs(cos( Z1_in_HRF.theta())); double H4l_m34 = Z2.mom().mass()/GeV; //////////////////////////////////////////////////////////////////////////// // Jet observables /////////////////////////////////////////////////////////////////////////// Jets jets; for (const Jet& jet : applyProjection(e, "jet").jetsByPt(Cuts::pT > 30*GeV && Cuts::absrap < 4.4)) { bool overlaps = false; for (const Particle& lep : leptonsFS_sel4l) { if (lep.abspid() != PID::ELECTRON) continue; const double dR = deltaR(lep, jet); if (dR < 0.2) { overlaps = true; break; } } if (!overlaps) jets += jet; } size_t n_jets = jets.size(); if (n_jets > 3) n_jets = 3; std::vector jet_pt; for (const Jet& i : jets) jet_pt.push_back(i.pT()/GeV); double leading_jet_pt = n_jets? 
jet_pt[0] : 0.; //////////////////////////////////////////////////////////////////////////// // End of H->ZZ->llll selection: now fill histograms //////////////////////////////////////////////////////////////////////////// _h_pt->fill(H4l_pt); _h_rapidity->fill(H4l_rapidity); _h_costheta->fill(H4l_costheta); _h_m34->fill(H4l_m34); _h_njets->fill(n_jets + 1); _h_leadingjetpt->fill(leading_jet_pt); } /// Generic Z candidate struct Zstate : public ParticlePair { Zstate() { } Zstate(ParticlePair _particlepair) : ParticlePair(_particlepair) { } FourMomentum mom() const { return first.momentum() + second.momentum(); } operator FourMomentum() const { return mom(); } }; /// Finalize void finalize() { const double norm = crossSection()/sumOfWeights()/femtobarn; std::cout << "xsec: " << crossSection() << '\n'; std::cout << "sumw: " << sumOfWeights() << '\n'; std::cout << "femb: " << femtobarn << '\n'; std::cout << "norm: " << norm << '\n'; scale(_h_pt, norm); scale(_h_rapidity, norm); scale(_h_costheta, norm); scale(_h_m34, norm); scale(_h_njets, norm); scale(_h_leadingjetpt, norm); } private: Histo1DPtr _h_pt, _h_rapidity, _h_costheta; Histo1DPtr _h_m34, _h_njets, _h_leadingjetpt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1310835); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1315949.cc b/analyses/pluginATLAS/ATLAS_2014_I1315949.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1315949.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1315949.cc @@ -1,225 +1,225 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class ATLAS_2014_I1315949 : public Analysis { public: /// Constructor ATLAS_2014_I1315949() : Analysis("ATLAS_2014_I1315949") { } void init() { FinalState fs; - ZFinder zfinder(fs, Cuts::abseta<2.4 && Cuts::pT>20.0*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder(fs, Cuts::abseta<2.4 && Cuts::pT>20.0*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder, "ZFinder"); ChargedFinalState cfs( zfinder.remainingFinalState() ); declare(cfs, "cfs"); book(_h_pTsum_tow , 1, 1, 1); book(_h_pTsum_trv , 1, 1, 2); book(_h_pTsum_away , 1, 1, 3); book(_h_pTsum_tmin , 1, 1, 4); book(_h_pTsum_tmax , 1, 1, 5); book(_h_pTsum_tdif , 1, 1, 6); book(_h_Nchg_tow , 2, 1, 1); book(_h_Nchg_trv , 2, 1, 2); book(_h_Nchg_away , 2, 1, 3); book(_h_Nchg_tmin , 2, 1, 4); book(_h_Nchg_tmax , 2, 1, 5); book(_h_Nchg_tdif , 2, 1, 6); book(_h_pTavg_tow , 3, 1, 1); book(_h_pTavg_trv , 3, 1, 2); book(_h_pTavg_away , 3, 1, 3); book(_h_pTavgvsmult_tow , 4, 1, 1); book(_h_pTavgvsmult_trv , 4, 1, 2); book(_h_pTavgvsmult_away , 4, 1, 3); // Book sumpt and nch histos for (int i_reg = 0; i_reg < 4; i_reg++) { for (int i_bin = 0; i_bin < 6.; i_bin++) { book(_h_ptSum_1D[i_reg][i_bin], 5, i_reg+1 , i_bin+1); book( _h_Nchg_1D[i_reg][i_bin], 6, i_reg+1 , i_bin+1); } } } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; double Zpt = zfinder.bosons()[0].momentum().pT()/GeV; double Zphi = zfinder.bosons()[0].momentum().phi(); double Zmass = zfinder.bosons()[0].momentum().mass()/GeV; if(Zmass < 66. || Zmass > 116.) 
vetoEvent; // Initialise counters for Nch and sumPt for all regions int nTowards(0), nTransverse(0), nLeft(0), nRight(0), nTrmin(0), nTrmax(0), nAway(0); double ptSumTowards(0.0), ptSumTransverse(0.0), ptSumLeft(0.0), ptSumRight(0.0), ptSumTrmin(0.0), ptSumTrmax(0.0), ptSumAway(0.0); // The charged particles ParticleVector particles = apply(event, "cfs").particlesByPt( Cuts::pT > 0.5*GeV && Cuts::abseta <2.5); // Loop over charged particles with pT>500 MeV and |eta|<2.5 for(const Particle& p : particles) { double dphi = p.momentum().phi() - Zphi, pT = p.momentum().pT(); // Get multiples of 2pi right for(; std::fabs(dphi) > M_PI; dphi += (dphi > 0. ? -2.*M_PI : 2.*M_PI) ); // Towards region if( std::fabs(dphi) < M_PI/3. ) { nTowards++; ptSumTowards += pT; } // Transverse region else if( std::fabs(dphi) < 2.*M_PI/3. ) { nTransverse++; ptSumTransverse += pT; if(dphi > 0.) { nRight++; ptSumRight += pT; } else { nLeft++; ptSumLeft += pT; } } // Away region else { nAway++; ptSumAway += pT; } } // TransMAX, TransMIN regions if (ptSumLeft > ptSumRight) { ptSumTrmax = ptSumLeft; ptSumTrmin = ptSumRight; nTrmax = nLeft; nTrmin = nRight; } else { ptSumTrmax = ptSumRight; ptSumTrmin = ptSumLeft; nTrmax = nRight; nTrmin = nLeft; } // min max regions have difference are than all other regions const double area = 5.*2./3.*M_PI; // Fill sumPt vs. Zpt region profiles _h_pTsum_tow->fill( Zpt, ptSumTowards/area); _h_pTsum_trv->fill( Zpt, ptSumTransverse/area); _h_pTsum_away->fill(Zpt, ptSumAway/area); _h_pTsum_tmin->fill(Zpt, ptSumTrmin/(0.5*area)); _h_pTsum_tmax->fill(Zpt, ptSumTrmax/(0.5*area)); _h_pTsum_tdif->fill(Zpt, (ptSumTrmax - ptSumTrmin)/(0.5*area)); // Fill Nch vs. Zpt region profiles _h_Nchg_tow->fill( Zpt, nTowards/area); _h_Nchg_trv->fill( Zpt, nTransverse/area); _h_Nchg_away->fill(Zpt, nAway/area); _h_Nchg_tmin->fill(Zpt, nTrmin/(0.5*area)); _h_Nchg_tmax->fill(Zpt, nTrmax/(0.5*area)); _h_Nchg_tdif->fill(Zpt, (nTrmax - nTrmin)/(0.5*area)); // Fill vs. ZpT profiles _h_pTavg_tow->fill( Zpt, nTowards > 0.? ptSumTowards/nTowards : 0.); _h_pTavg_trv->fill( Zpt, nTransverse > 0.? ptSumTransverse/nTransverse : 0.); _h_pTavg_away->fill(Zpt, nAway > 0.? ptSumAway/nAway : 0.); // Fill vs. ZpT profiles _h_pTavgvsmult_tow->fill( nTowards, nTowards > 0.? ptSumTowards/nTowards : 0.); _h_pTavgvsmult_trv->fill( nTransverse, nTransverse > 0.? ptSumTransverse/nTransverse : 0.); _h_pTavgvsmult_away->fill(nAway, nAway > 0.? 
ptSumAway/nAway : 0.); // Determine Zpt region histo to fill int i_bin(0); if (inRange(Zpt,0,5) ) i_bin=0; if (inRange(Zpt,5,10) ) i_bin=1; if (inRange(Zpt,10,20) ) i_bin=2; if (inRange(Zpt,20,50) ) i_bin=3; if (inRange(Zpt,50,110) ) i_bin=4; if (Zpt>110) i_bin=5; // SumPt histos for Zpt region _h_ptSum_1D[0][i_bin]->fill(ptSumTowards/area); _h_ptSum_1D[1][i_bin]->fill(ptSumTransverse/area); _h_ptSum_1D[2][i_bin]->fill(ptSumTrmin/(0.5*area)); _h_ptSum_1D[3][i_bin]->fill(ptSumTrmax/(0.5*area)); // Nch histos for Zpt region _h_Nchg_1D[0][i_bin]->fill(nTowards/area); _h_Nchg_1D[1][i_bin]->fill(nTransverse/area); _h_Nchg_1D[2][i_bin]->fill(nTrmin/(0.5*area)); _h_Nchg_1D[3][i_bin]->fill(nTrmax/(0.5*area)); } /// Normalise histograms etc., after the run void finalize() { for(int i_reg = 0; i_reg < 4; i_reg++) { for(int i_bin = 0; i_bin < 6; i_bin++) { normalize( _h_ptSum_1D[i_reg][i_bin] ); normalize( _h_Nchg_1D[ i_reg][i_bin] ); } } } private: Profile1DPtr _h_pTsum_tow, _h_pTsum_trv, _h_pTsum_away, _h_pTsum_tmin, _h_pTsum_tmax, _h_pTsum_tdif, _h_Nchg_tow, _h_Nchg_trv, _h_Nchg_away, _h_Nchg_tmin, _h_Nchg_tmax, _h_Nchg_tdif, _h_pTavg_tow, _h_pTavg_trv, _h_pTavg_away, _h_pTavgvsmult_tow, _h_pTavgvsmult_trv, _h_pTavgvsmult_away; Histo1DPtr _h_ptSum_1D[4][6], _h_Nchg_1D[4][6]; }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1315949); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1319490.cc b/analyses/pluginATLAS/ATLAS_2014_I1319490.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1319490.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1319490.cc @@ -1,232 +1,232 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class ATLAS_2014_I1319490 : public Analysis { public: ATLAS_2014_I1319490(string name = "ATLAS_2014_I1319490") : Analysis(name) { _mode = 0; // using electron channel for combined data by default } // Book histograms and initialise projections before the run void init() { FinalState fs; Cut cuts; if (_mode == 2) { // muon channel cuts = (Cuts::pT > 25.0*GeV) & Cuts::etaIn(-2.4, 2.4); } else if (_mode) { // electron channel cuts = (Cuts::pT > 25.0*GeV) & ( Cuts::etaIn(-2.47, -1.52) | Cuts::etaIn(-1.37, 1.37) | Cuts::etaIn(1.52, 2.47) ); } else { // combined data extrapolated to common phase space cuts = (Cuts::pT > 25.0*GeV) & Cuts::etaIn(-2.5, 2.5); } // bosons WFinder wfinder(fs, cuts, _mode > 1? 
PID::MUON : PID::ELECTRON, 40.0*GeV, MAXDOUBLE, 0.0*GeV, 0.1, - WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder, "WF"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("WF")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); declare(jets, "Jets"); // book histograms book(histos["h_N_incl"] ,1,1,_mode+1); book(histos["h_N"] ,4,1,_mode+1); book(histos["h_pt_jet1_1jet"] ,5,1,_mode+1); book(histos["h_pt_jet1_1jet_excl"] ,6,1,_mode+1); book(histos["h_pt_jet1_2jet"] ,7,1,_mode+1); book(histos["h_pt_jet1_3jet"] ,8,1,_mode+1); book(histos["h_pt_jet2_2jet"] ,9,1,_mode+1); book(histos["h_pt_jet3_3jet"] ,10,1,_mode+1); book(histos["h_pt_jet4_4jet"] ,11,1,_mode+1); book(histos["h_pt_jet5_5jet"] ,12,1,_mode+1); book(histos["h_y_jet1_1jet"] ,13,1,_mode+1); book(histos["h_y_jet2_2jet"] ,14,1,_mode+1); book(histos["h_HT_1jet"] ,15,1,_mode+1); book(histos["h_HT_1jet_excl"] ,16,1,_mode+1); book(histos["h_HT_2jet"] ,17,1,_mode+1); book(histos["h_HT_2jet_excl"] ,18,1,_mode+1); book(histos["h_HT_3jet"] ,19,1,_mode+1); book(histos["h_HT_3jet_excl"] ,20,1,_mode+1); book(histos["h_HT_4jet"] ,21,1,_mode+1); book(histos["h_HT_5jet"] ,22,1,_mode+1); book(histos["h_deltaPhi_jet12"] ,23,1,_mode+1); book(histos["h_deltaRap_jet12"] ,24,1,_mode+1); book(histos["h_deltaR_jet12"] ,25,1,_mode+1); book(histos["h_M_Jet12_2jet"] ,26,1,_mode+1); book(histos["h_y_jet3_3jet"] ,27,1,_mode+1); book(histos["h_y_jet4_4jet"] ,28,1,_mode+1); book(histos["h_y_jet5_5jet"] ,29,1,_mode+1); book(histos["h_ST_1jet"] ,30,1,_mode+1); book(histos["h_ST_2jet"] ,31,1,_mode+1); book(histos["h_ST_2jet_excl"] ,32,1,_mode+1); book(histos["h_ST_3jet"] ,33,1,_mode+1); book(histos["h_ST_3jet_excl"] ,34,1,_mode+1); book(histos["h_ST_4jet"] ,35,1,_mode+1); book(histos["h_ST_5jet"] ,36,1,_mode+1); } void fillPlots(const Particle& lepton, const double& missET, Jets& all_jets) { // do jet-lepton overlap removal Jets jets; double ST = 0.0; // scalar pT sum of all selected jets for (const Jet &j : all_jets) { if (deltaR(j, lepton) > 0.5) { jets += j; ST += j.pT() / GeV; } } const size_t njets = jets.size(); const double HT = ST + lepton.pT() / GeV + missET; histos["h_N"]->fill(njets + 0.5); for (size_t i = 0; i <= njets; ++i) { histos["h_N_incl"]->fill(i + 0.5); } if (njets) { const double pT1 = jets[0].pT() / GeV; const double rap1 = jets[0].absrap(); histos["h_pt_jet1_1jet" ]->fill(pT1); histos["h_y_jet1_1jet"]->fill(rap1); histos["h_HT_1jet"]->fill(HT); histos["h_ST_1jet"]->fill(ST); if (njets == 1) { histos["h_pt_jet1_1jet_excl"]->fill(pT1); histos["h_HT_1jet_excl"]->fill(HT); } else { const double pT2 = jets[1].pT() / GeV; const double rap2 = jets[1].absrap(); const double dR = deltaR(jets[0], jets[1]); const double dRap = deltaRap(jets[0], jets[1]); const double dPhi = deltaPhi(jets[0], jets[1]); const double mjj = (jets[0].momentum() + jets[1].momentum()).mass() / GeV; histos["h_pt_jet1_2jet"]->fill(pT1); histos["h_pt_jet2_2jet"]->fill(pT2); histos["h_y_jet2_2jet"]->fill(rap2); histos["h_M_Jet12_2jet"]->fill(mjj); histos["h_HT_2jet"]->fill(HT); histos["h_ST_2jet"]->fill(ST); histos["h_deltaPhi_jet12"]->fill(dPhi); histos["h_deltaRap_jet12"]->fill(dRap); histos["h_deltaR_jet12"]->fill(dR); if (njets == 2) { histos["h_ST_2jet_excl"]->fill(ST); histos["h_HT_2jet_excl"]->fill(HT); } else { const double pT3 = jets[2].pT() / GeV; const double rap3 = jets[2].absrap(); 
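// Three or more jets: fill the leading- and third-jet spectra plus the HT/ST sums for the
// >= 3-jet selection; the exclusive 3-jet and the 4- and 5-jet fills follow below.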
histos["h_pt_jet1_3jet"]->fill(pT1); histos["h_pt_jet3_3jet"]->fill(pT3); histos["h_y_jet3_3jet"]->fill(rap3); histos["h_HT_3jet"]->fill(HT); histos["h_ST_3jet"]->fill(ST); if(njets == 3) { histos["h_ST_3jet_excl"]->fill(ST); histos["h_HT_3jet_excl"]->fill(HT); } else { const double pT4 = jets[3].pT() / GeV; const double rap4 = jets[3].absrap(); histos["h_pt_jet4_4jet"]->fill(pT4); histos["h_y_jet4_4jet"]->fill(rap4); histos["h_HT_4jet"]->fill(HT); histos["h_ST_4jet"]->fill(ST); if (njets > 4) { const double pT5 = jets[4].pT() / GeV; const double rap5 = jets[4].absrap(); histos["h_pt_jet5_5jet"]->fill(pT5); histos["h_y_jet5_5jet"]->fill(rap5); histos["h_HT_5jet"]->fill(HT); histos["h_ST_5jet"]->fill(ST); } } } } } } // Perform the per-event analysis void analyze(const Event& event) { // Retrieve boson candidate const WFinder& wf = apply(event, "WF"); if (wf.empty()) vetoEvent; // Retrieve jets const JetAlg& jetfs = apply(event, "Jets"); Jets all_jets = jetfs.jetsByPt(Cuts::pT > 30.0*GeV && Cuts::absrap < 4.4); const Particles& leptons = wf.constituentLeptons(); const double missET = wf.constituentNeutrino().pT() / GeV; if (leptons.size() == 1 && missET > 25.0 && wf.mT() > 40.0*GeV) { const Particle& lep = leptons[0]; fillPlots(lep, missET, all_jets); } } void finalize() { const double scalefactor(crossSection() / sumOfWeights()); /// @todo Update to use C++11 range-for for (map::iterator hit = histos.begin(); hit != histos.end(); ++hit) { scale(hit->second, scalefactor); } } protected: size_t _mode; private: map histos; }; class ATLAS_2014_I1319490_EL : public ATLAS_2014_I1319490 { public: ATLAS_2014_I1319490_EL() : ATLAS_2014_I1319490("ATLAS_2014_I1319490_EL") { _mode = 1; } }; class ATLAS_2014_I1319490_MU : public ATLAS_2014_I1319490 { public: ATLAS_2014_I1319490_MU() : ATLAS_2014_I1319490("ATLAS_2014_I1319490_MU") { _mode = 2; } }; // The hooks for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1319490); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1319490_EL); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1319490_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1394679.cc b/analyses/pluginATLAS/ATLAS_2015_I1394679.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1394679.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1394679.cc @@ -1,202 +1,202 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Differential multijet cross-section measurement in different variables class ATLAS_2015_I1394679 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1394679); /// @name Analysis methods //@ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections here const FinalState fs; declare(fs, "FinalState"); - FastJets fj04(fs, FastJets::ANTIKT, 0.4, JetAlg::ALL_MUONS, JetAlg::DECAY_INVISIBLES); + FastJets fj04(fs, FastJets::ANTIKT, 0.4, JetAlg::Muons::ALL, JetAlg::Invisibles::DECAY); declare(fj04, "AntiKt4jets"); // Histograms book(_h["pt1"] ,1, 1, 1); book(_h["pt2"] ,2, 1, 1); book(_h["pt3"] ,3, 1, 1); book(_h["pt4"] ,4, 1, 1); book(_h["HT"] ,5, 1, 1); book(_h["M4j"] ,6, 1, 1); // Histograms with different pt/m4j cuts for (size_t i_hist = 0; i_hist < 4; ++i_hist) { book(_h["M2jratio_"+to_str(i_hist)] , 7 + i_hist, 1, 1); book(_h["dPhiMin2j_"+to_str(i_hist)] ,11 + i_hist, 1, 1); book(_h["dPhiMin3j_"+to_str(i_hist)] ,15 + i_hist, 1, 1); book(_h["dYMin2j_"+to_str(i_hist)] ,19 + i_hist, 1, 1); book(_h["dYMin3j_"+to_str(i_hist)] ,23 + i_hist, 1, 1); 
book(_h["dYMax2j_"+to_str(i_hist)] ,27 + i_hist, 1, 1); for (size_t ygap = 0; ygap < 4; ++ygap) { book(_h["sumPtCent_"+to_str(ygap)+to_str(i_hist)] ,31 + i_hist + ygap * 4, 1, 1); } } } /// Perform the per-event analysis void analyze(const Event& event) { const Jets& alljetskt4 = apply(event, "AntiKt4jets").jetsByPt(Cuts::pT > 60*GeV && Cuts::absrap < 2.8); // Require 4 jets with rap < 2.8 and passing pT cuts int nJets = alljetskt4.size(); if (nJets < 4) vetoEvent; Jets leadingJetskt4 = alljetskt4; leadingJetskt4.resize(4); Jet jet1(leadingJetskt4[0]), jet2(leadingJetskt4[1]), jet3(leadingJetskt4[2]), jet4(leadingJetskt4[3]); if (jet1.pT() < 100*GeV) vetoEvent; if (jet4.pT() < 64*GeV) vetoEvent; // dR cut const double dRcut = 0.65; double drmin = 9999; for (int ijet = 0; ijet < 4; ++ijet) { for (int jjet = ijet + 1; jjet < 4; ++jjet) { double myDR = deltaR(alljetskt4[ijet], alljetskt4[jjet], RAPIDITY); if (myDR < drmin) drmin = myDR; } } if (drmin < dRcut) vetoEvent; // Variables for calculation in loops over jets FourMomentum sum_alljets; double HT = 0; // scalar sum of pt of 4 leading jets double Mjj = 99999; // minimum combined mass of 2 jets double minDphi_ij = 999, minDphi_ijk = 999; // minimum azimuthal distance btw 2 & 3 jets double maxDrap_ij = -999; // maximum rapidity distance btw 2 jets double minDrap_ij = 999, minDrap_ijk = 999; // minimum rapidity distance btw 2 & 3 jets size_t maxY_i = -1, maxY_j = -1; // Loop over 4 leading jets for (size_t ij = 0; ij< 4; ++ij) { Jet& jeti = leadingJetskt4.at(ij); sum_alljets += jeti.mom(); HT += jeti.pT(); for (size_t jj = 0; jj< 4; ++jj) { if ( ij == jj ) continue; Jet& jetj = leadingJetskt4.at(jj); const double auxDphi = fabs(deltaPhi(jeti, jetj)); minDphi_ij = std::min(auxDphi, minDphi_ij); const double auxDrap = fabs(deltaRap(jeti, jetj)); minDrap_ij = std::min(auxDrap, minDrap_ij); if (auxDrap > maxDrap_ij) { maxDrap_ij = auxDrap; maxY_i = ij; maxY_j = jj; } FourMomentum sum_twojets = jeti.mom() + jetj.mom(); Mjj = std::min(Mjj, sum_twojets.mass()); for (size_t kj = 0; kj < 4; ++kj) { if (kj == ij || kj == jj) continue; Jet& jetk = leadingJetskt4.at(kj); const double auxDphi2 = auxDphi + fabs(deltaPhi(jeti, jetk)); minDphi_ijk = std::min(auxDphi2, minDphi_ijk); const double auxDrap2 = auxDrap + fabs(deltaRap(jeti, jetk)); minDrap_ijk = std::min(auxDrap2, minDrap_ijk); } } } //end loop over 4 leading jets // Combined mass of 4 leading jets const double Mjjjj = sum_alljets.mass(); // Sum of central jets pT double sumpt_twojets_cent = 0; // Scalar sum pt of central jets, with different rapidity gaps for (size_t ijet = 0; ijet < 4; ++ijet) { if (ijet == maxY_i || ijet == maxY_j) continue; // these are the forward jets sumpt_twojets_cent += leadingJetskt4.at(ijet).pT(); } // Fill histos // Mass and pT cuts in which the analysis tables are split; values are in GeV and cuts are inclusive const double m4jcuts[4] = {500, 1000, 1500, 2000}; const double pt1cutA[4] = {100, 400, 700, 1000}; const double pt1cutB[4] = {100, 250, 400, 550}; const double rapGapCut[4] = {1, 2, 3, 4}; _h["pt1"]->fill(jet1.pt()); _h["pt2"]->fill(jet2.pt()); _h["pt3"]->fill(jet3.pt()); _h["pt4"]->fill(jet4.pt()); _h["HT"] ->fill(HT); _h["M4j"]->fill(Mjjjj); for (size_t i_cut = 0; i_cut < 4; ++i_cut) { const string icutstr = to_str(i_cut); if (Mjjjj > m4jcuts[i_cut]) _h["M2jratio_"+icutstr]->fill( Mjj/Mjjjj ); if (jet1.pT() > pt1cutA[i_cut]) { _h["dPhiMin2j_"+icutstr]->fill(minDphi_ij ); _h["dPhiMin3j_"+icutstr]->fill(minDphi_ijk); _h["dYMin2j_"+icutstr]->fill(minDrap_ij 
); _h["dYMin3j_"+icutstr]->fill(minDrap_ijk); } if (jet1.pt() > pt1cutB[i_cut]) { _h["dYMax2j_"+icutstr]->fill( maxDrap_ij ); for (size_t yy = 0; yy < 4; ++yy) { if (maxDrap_ij > rapGapCut[yy]) _h["sumPtCent_"+to_str(yy)+icutstr]->fill(sumpt_twojets_cent); } } } //end loop over pt/m4j cuts } /// Normalise histograms etc., after the run void finalize() { const double sf = (crossSection()/femtobarn) / sumOfWeights(); /// @todo Migrate to C++11 range-for loop for (map::iterator hit = _h.begin(); hit != _h.end(); ++hit) { scale(hit->second, sf); } } //@ private: /// @name Histograms //@{ map _h; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2015_I1394679); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1397637.cc b/analyses/pluginATLAS/ATLAS_2015_I1397637.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1397637.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1397637.cc @@ -1,218 +1,218 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2015_I1397637 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1397637); /// Book projections and histograms void init() { // Base final state definition const FinalState fs(Cuts::abseta < 4.5); // Neutrinos for MET IdentifiedFinalState nu_id; nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(true); declare(neutrinos, "neutrinos"); // Get photons used to dress leptons IdentifiedFinalState photons(fs); photons.acceptId(PID::PHOTON); // Use all bare muons as input to the DressedMuons projection IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState bare_mu(mu_id); bare_mu.acceptTauDecays(true); // Use all bare electrons as input to the DressedElectrons projection IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState bare_el(el_id); bare_el.acceptTauDecays(true); // Use all bare leptons including taus for single-lepton filter IdentifiedFinalState lep_id(fs); lep_id.acceptIdPair(PID::MUON); lep_id.acceptIdPair(PID::ELECTRON); PromptFinalState bare_lep(lep_id); declare(bare_lep, "bare_lep"); // Tau finding /// @todo Use TauFinder UnstableFinalState ufs; IdentifiedFinalState tau_id(ufs); tau_id.acceptIdPair(PID::TAU); PromptFinalState bare_tau(tau_id); declare(bare_tau, "bare_tau"); // Muons and electrons must have |eta| < 2.5 Cut eta_ranges = Cuts::abseta < 2.5; // Get dressed muons and the good muons (pt>25GeV) DressedLeptons all_dressed_mu(photons, bare_mu, 0.1, eta_ranges, true); DressedLeptons dressed_mu(photons, bare_mu, 0.1, eta_ranges && Cuts::pT > 25*GeV, true); declare(dressed_mu, "muons"); // Get dressed electrons and the good electrons (pt>25GeV) DressedLeptons all_dressed_el(photons, bare_el, 0.1, eta_ranges, true); DressedLeptons dressed_el(photons, bare_el, 0.1, eta_ranges && Cuts::pT > 25*GeV, true); declare(dressed_el, "electrons"); // Jet clustering VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(all_dressed_el); vfs.addVetoOnThisFinalState(all_dressed_mu); vfs.addVetoOnThisFinalState(neutrinos); // Small-R jets /// @todo Use extra constructor args FastJets jets(vfs, FastJets::ANTIKT, 0.4); - jets.useInvisibles(JetAlg::ALL_INVISIBLES); - 
jets.useMuons(JetAlg::DECAY_MUONS); + jets.useInvisibles(JetAlg::Invisibles::ALL); + jets.useMuons(JetAlg::Muons::DECAY); declare(jets, "jets"); // Large-R jets /// @todo Use extra constructor args FastJets large_jets(vfs, FastJets::ANTIKT, 1.0); - large_jets.useInvisibles(JetAlg::ALL_INVISIBLES); - large_jets.useMuons(JetAlg::DECAY_MUONS); + large_jets.useInvisibles(JetAlg::Invisibles::ALL); + large_jets.useMuons(JetAlg::Muons::DECAY); declare(large_jets, "fat_jets"); /// Book histogram book(_h_pttop ,1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // Single lepton filter on bare leptons with no cuts const Particles& bare_lep = apply(event, "bare_lep").particles(); const Particles& bare_tau = apply(event, "bare_tau").particles(); if (bare_lep.size() + bare_tau.size() != 1) vetoEvent; // Electrons and muons const vector& electrons = apply(event, "electrons").dressedLeptons(); const vector& muons = apply(event, "muons").dressedLeptons(); if (electrons.size() + muons.size() != 1) vetoEvent; const DressedLepton& lepton = muons.empty() ? electrons[0] : muons[0]; // Get the neutrinos from the event record (they have pT > 0.0 and |eta| < 4.5 at this stage const Particles& neutrinos = apply(event, "neutrinos").particlesByPt(); FourMomentum met; for (const Particle& nu : neutrinos) met += nu.momentum(); if (met.pT() < 20*GeV) vetoEvent; // Thin jets and trimmed fat jets /// @todo Use Rivet built-in FJ trimming support const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.5); const PseudoJets& fat_pjets = apply(event, "fat_jets").pseudoJetsByPt(); const double Rfilt = 0.3, ptFrac_min = 0.05; ///< @todo Need to be careful about the units for the pT cut passed to FJ? PseudoJets trimmed_fat_pjets; fastjet::Filter trimmer(fastjet::JetDefinition(fastjet::kt_algorithm, Rfilt), fastjet::SelectorPtFractionMin(ptFrac_min)); for (const PseudoJet& pjet : fat_pjets) trimmed_fat_pjets += trimmer(pjet); trimmed_fat_pjets = fastjet::sorted_by_pt(trimmed_fat_pjets); // Jet reclustering // Use a kT cluster sequence to recluster the trimmed jets so that a d12 can then be obtained from the reclustered jet vector splittingScales; for (const PseudoJet& tpjet : trimmed_fat_pjets) { const PseudoJets tpjet_constits = tpjet.constituents(); const fastjet::ClusterSequence kt_cs(tpjet_constits, fastjet::JetDefinition(fastjet::kt_algorithm, 1.5, fastjet::E_scheme, fastjet::Best)); const PseudoJets kt_jets = kt_cs.inclusive_jets(); const double d12 = 1.5 * sqrt(kt_jets[0].exclusive_subdmerge(1)); splittingScales += d12; } Jets trimmed_fat_jets; for (size_t i = 0; i < trimmed_fat_pjets.size(); ++i) { const Jet tj = trimmed_fat_pjets[i]; if (tj.mass() <= 100*GeV) continue; if (tj.pT() <= 300*GeV) continue; if (splittingScales[i] <= 40*GeV) continue; if (tj.abseta() >= 2.0) continue; trimmed_fat_jets += tj; } if (trimmed_fat_jets.empty()) vetoEvent; // Jet b-tagging Jets bjets, non_bjets; for (const Jet& jet : jets) (jet.bTagged() ? 
bjets : non_bjets) += jet; if (bjets.empty()) vetoEvent; // Boosted selection: lepton/jet overlap const double transmass = sqrt( 2 * lepton.pT() * met.pT() * (1 - cos(deltaPhi(lepton, met))) ); if (transmass + met.pt() <= 60*GeV) vetoEvent; int lepJetIndex = -1; for (size_t i = 0; i < jets.size(); ++i) { const Jet& jet = jets[i]; if (deltaR(jet, lepton) < 1.5) { lepJetIndex = i; break; } } if (lepJetIndex < 0) vetoEvent; const Jet& ljet = jets[lepJetIndex]; // Boosted selection: lepton-jet/fat-jet matching int fatJetIndex = -1; for (size_t j = 0; j < trimmed_fat_jets.size(); ++j) { const Jet& fjet = trimmed_fat_jets[j]; const double dR_fatjet = deltaR(ljet, fjet); const double dPhi_fatjet = deltaPhi(lepton, fjet); if (dR_fatjet > 1.5 && dPhi_fatjet > 2.3) { fatJetIndex = j; break; } } if (fatJetIndex < 0) vetoEvent; const Jet& fjet = trimmed_fat_jets[fatJetIndex]; // Boosted selection: b-tag matching const bool lepbtag = ljet.bTagged(); bool hadbtag = false; for (const Jet& bjet : bjets) { hadbtag |= (deltaR(fjet, bjet) < 1.0); } // Fill histo if selection passed if (hadbtag || lepbtag) _h_pttop->fill(fjet.pT()/GeV); } /// Normalise histograms etc., after the run void finalize() { scale(_h_pttop, crossSection()/femtobarn / sumOfWeights()); } private: Histo1DPtr _h_pttop; }; DECLARE_RIVET_PLUGIN(ATLAS_2015_I1397637); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1408516.cc b/analyses/pluginATLAS/ATLAS_2015_I1408516.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1408516.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1408516.cc @@ -1,242 +1,242 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class ATLAS_2015_I1408516 : public Analysis { public: /// Constructor ATLAS_2015_I1408516(string name="ATLAS_2015_I1408516", size_t mode=0) : Analysis(name), _mode(mode) // using electron channel for combined data { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Configure projections FinalState fs; Cut cuts = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; ZFinder zfinder(fs, cuts, (_mode ? PID::MUON : PID::ELECTRON), - 12*GeV, 150*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); + 12*GeV, 150*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder, _mode ? "ZFinder_mu" : "ZFinder_el"); // Book histograms const size_t offset = _mode ? 
4 : 1; book(_h["phistar_lo_00_08"] , 2, 1, offset); book(_h["phistar_lo_08_16"] , 3, 1, offset); book(_h["phistar_lo_16_24"] , 4, 1, offset); book(_h["phistar_me_00_04"] , 5, 1, offset); book(_h["phistar_me_04_08"] , 6, 1, offset); book(_h["phistar_me_08_12"] , 7, 1, offset); book(_h["phistar_me_12_16"] , 8, 1, offset); book(_h["phistar_me_16_20"] , 9, 1, offset); book(_h["phistar_me_20_24"] ,10, 1, offset); book(_h["phistar_hi_00_08"] ,11, 1, offset); book(_h["phistar_hi_08_16"] ,12, 1, offset); book(_h["phistar_hi_16_24"] ,13, 1, offset); book(_h["phistar_mll_46_66" ] ,14, 1, offset); book(_h["phistar_mll_66_116" ] ,15, 1, offset); book(_h["phistar_mll_116_150"] ,16, 1, offset); book(_h["zpt_00_04"] ,17, 1, offset); book(_h["zpt_04_08"] ,18, 1, offset); book(_h["zpt_08_12"] ,19, 1, offset); book(_h["zpt_12_16"] ,20, 1, offset); book(_h["zpt_16_20"] ,21, 1, offset); book(_h["zpt_20_24"] ,22, 1, offset); book(_h["zpt_mll_12_20" ] ,23, 1, offset); book(_h["zpt_mll_20_30" ] ,24, 1, offset); book(_h["zpt_mll_30_46" ] ,25, 1, offset); book(_h["zpt_mll_46_66" ] ,26, 1, offset); book(_h["zpt_mll_66_116" ] ,27, 1, offset); book(_h["zpt_mll_116_150"] ,28, 1, offset); book(_h["zpt_00_04_xsec"] ,29, 1, offset); book(_h["zpt_04_08_xsec"] ,30, 1, offset); book(_h["zpt_08_12_xsec"] ,31, 1, offset); book(_h["zpt_12_16_xsec"] ,32, 1, offset); book(_h["zpt_16_20_xsec"] ,33, 1, offset); book(_h["zpt_20_24_xsec"] ,34, 1, offset); book(_h["zpt_mll_12_20_xsec" ] ,35, 1, offset); book(_h["zpt_mll_20_30_xsec" ] ,36, 1, offset); book(_h["zpt_mll_30_46_xsec" ] ,37, 1, offset); book(_h["zpt_mll_46_66_xsec" ] ,38, 1, offset); book(_h["zpt_mll_66_116_xsec" ] ,39, 1, offset); book(_h["zpt_mll_116_150_xsec"] ,40, 1, offset); book(_h["mll_xsec"] ,41, 1, 1 + _mode); } /// Perform the per-event analysis void analyze(const Event& event) { // Get leptonic Z boson const ZFinder& zfinder = apply(event, _mode ? "ZFinder_mu" : "ZFinder_el"); if (zfinder.bosons().size() != 1 ) vetoEvent; const Particle& Zboson = zfinder.boson(); // Get/cut on heavily used Z boson properties const double zpt = Zboson.pT(); const double zrap = Zboson.absrap(); const double zmass = Zboson.mass(); if (zrap > 2.4) vetoEvent; // Get/cut on Z boson leptons const ParticleVector& leptons = zfinder.constituents(); if (leptons.size() != 2 || leptons[0].threeCharge() * leptons[1].threeCharge() > 0) vetoEvent; const Particle& lminus = leptons[0].charge() < 0 ? leptons[0] : leptons[1]; const Particle& lplus = leptons[0].charge() < 0 ? leptons[1] : leptons[0]; // Compute phi* const double phi_acop = M_PI - deltaPhi(lminus, lplus); const double costhetastar = tanh( 0.5 * (lminus.eta() - lplus.eta()) ); const double sin2thetastar = (costhetastar > 1) ? 
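// Note on the observable computed here: phistar is the standard phi*_eta variable,
//   phi*_eta = tan(phi_acop / 2) * sin(theta*_eta),  with
//   phi_acop = pi - dphi(l-, l+)  and  cos(theta*_eta) = tanh( (eta(l-) - eta(l+)) / 2 ).
// The (costhetastar > 1) guard only protects the sqrt below against rounding, since
// tanh reaches 1 only in the limit of infinite pseudorapidity separation.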
0.0 : (1.0 - sqr(costhetastar)); const double phistar = tan(0.5 * phi_acop) * sqrt(sin2thetastar); // Inclusive mll if (zmass > 46*GeV || zpt > 45*GeV) { // 46 GeV < mll < 150 GeV OR (12 GeV < mll < 46 GeV AND ZpT >45 GeV) _h["mll_xsec"]->fill(zmass); } // 12 GeV < mll < 150 GeV observables if (zmass < 20*GeV) { // 12 GeV < mll < 20 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_12_20_xsec"]->fill(zpt); _h["zpt_mll_12_20" ]->fill(zpt); } } else if (zmass < 30*GeV) { // 20 GeV < mll < 30 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_20_30_xsec"]->fill(zpt); _h["zpt_mll_20_30" ]->fill(zpt); } } else if (zmass < 46*GeV) { // 30 GeV < mll < 46 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_30_46_xsec"]->fill(zpt); _h["zpt_mll_30_46" ]->fill(zpt); } } else if (zmass < 66*GeV) { // 46 GeV < mll < 66 GeV _h["zpt_mll_46_66_xsec"]->fill(zpt); _h["zpt_mll_46_66" ]->fill(zpt); _h["phistar_mll_46_66"]->fill(phistar); if (zrap < 0.8) _h["phistar_lo_00_08"]->fill(phistar); else if (zrap < 1.6) _h["phistar_lo_08_16"]->fill(phistar); else _h["phistar_lo_16_24"]->fill(phistar); } else if (zmass < 116*GeV) { // 66 GeV < mll < 116 GeV _h["zpt_mll_66_116_xsec"]->fill(zpt); _h["zpt_mll_66_116" ]->fill(zpt); if (zrap < 0.4) { _h["zpt_00_04_xsec"]->fill(zpt); _h["zpt_00_04"]->fill(zpt); } else if (zrap < 0.8) { _h["zpt_04_08_xsec"]->fill(zpt); _h["zpt_04_08"]->fill(zpt); } else if (zrap < 1.2) { _h["zpt_08_12_xsec"]->fill(zpt); _h["zpt_08_12"]->fill(zpt); } else if (zrap < 1.6) { _h["zpt_12_16_xsec"]->fill(zpt); _h["zpt_12_16"]->fill(zpt); } else if (zrap < 2.0) { _h["zpt_16_20_xsec"]->fill(zpt); _h["zpt_16_20"]->fill(zpt); } else { _h["zpt_20_24_xsec"]->fill(zpt); _h["zpt_20_24"]->fill(zpt); } _h["phistar_mll_66_116"]->fill(phistar); if (zrap < 0.4) _h["phistar_me_00_04"]->fill(phistar); else if (zrap < 0.8) _h["phistar_me_04_08"]->fill(phistar); else if (zrap < 1.2) _h["phistar_me_08_12"]->fill(phistar); else if (zrap < 1.6) _h["phistar_me_12_16"]->fill(phistar); else if (zrap < 2.0) _h["phistar_me_16_20"]->fill(phistar); else _h["phistar_me_20_24"]->fill(phistar); } else { // 116 GeV < mll < 150 GeV _h["zpt_mll_116_150_xsec"]->fill(zpt); _h["zpt_mll_116_150" ]->fill(zpt); _h["phistar_mll_116_150"]->fill(phistar); if (zrap < 0.8) _h["phistar_hi_00_08"]->fill(phistar); else if (zrap < 1.6) _h["phistar_hi_08_16"]->fill(phistar); else _h["phistar_hi_16_24"]->fill(phistar); } } /// Normalise histograms etc., after the run void finalize() { // Scale non-xsec plots to cross-section const double sf = crossSection() / picobarn / sumOfWeights(); for (const auto& key_hist : _h) { scale(key_hist.second, sf); if (!contains(key_hist.first, "_xsec")) normalize(key_hist.second); } // M(ll) plot isn't a differential cross section so shouldn't be divided by bin width for (size_t i = 0; i < 6; ++i) { double bw = _h["mll_xsec"]->bin(i).xWidth(); _h["mll_xsec"]->bin(i).scaleW(bw); } } //@} protected: size_t _mode; private: /// @name Histograms //@{ map _h; //@} }; class ATLAS_2015_I1408516_EL : public ATLAS_2015_I1408516 { public: ATLAS_2015_I1408516_EL() : ATLAS_2015_I1408516("ATLAS_2015_I1408516_EL", 0) { } }; class ATLAS_2015_I1408516_MU : public ATLAS_2015_I1408516 { public: ATLAS_2015_I1408516_MU() : ATLAS_2015_I1408516("ATLAS_2015_I1408516_MU", 1) { } }; DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516_EL); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1424838.cc 
b/analyses/pluginATLAS/ATLAS_2016_I1424838.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1424838.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1424838.cc @@ -1,215 +1,215 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FParameter.hh" #include "Rivet/Projections/Spherocity.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief Event shapes in leptonic $Z$-events class ATLAS_2016_I1424838 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1424838); /// Book histograms and initialise projections before the run void init() { // Charged particles inside acceptance region const ChargedFinalState cfs(Cuts::abseta < 2.5 && Cuts::pT > 500*MeV); declare(cfs, "CFS"); // ZFinders - ZFinder zfinder(cfs, Cuts::abseta<2.4 && Cuts::pT>20.0*GeV, PID::ELECTRON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder(cfs, Cuts::abseta<2.4 && Cuts::pT>20.0*GeV, PID::ELECTRON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder, "ZFinder"); - ZFinder zfinder_mu(cfs, Cuts::abseta<2.4 && Cuts::pT>20.0*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zfinder_mu(cfs, Cuts::abseta<2.4 && Cuts::pT>20.0*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_mu, "ZFinderMu"); // This CFS only contains charged particles inside the acceptance excluding the leptons VetoedFinalState remfs(cfs); remfs.addVetoOnThisFinalState(zfinder); remfs.addVetoOnThisFinalState(zfinder_mu); declare(remfs, "REMFS"); const FParameter fparam(remfs); declare(fparam, "FParameter_"); const Spherocity sphero(remfs); declare(sphero, "Spherocity_"); // Booking of ES histos for (size_t alg = 0; alg < 5; ++alg) { // Book the inclusive histograms book(_h_Elec_Ntrk[alg] ,_mkHistoName(1, 1, alg)); book(_h_Elec_SumPt[alg] ,_mkHistoName(2, 1, alg)); book(_h_Elec_Beamthrust[alg] ,_mkHistoName(3, 1, alg)); book(_h_Elec_Thrust[alg] ,_mkHistoName(4, 1, alg)); book(_h_Elec_FParam[alg] ,_mkHistoName(5, 1, alg)); book(_h_Elec_Spherocity[alg] ,_mkHistoName(6, 1, alg)); book(_h_Muon_Ntrk[alg] ,_mkHistoName(1, 2, alg)); book(_h_Muon_SumPt[alg] ,_mkHistoName(2, 2, alg)); book(_h_Muon_Beamthrust[alg] ,_mkHistoName(3, 2, alg)); book(_h_Muon_Thrust[alg] ,_mkHistoName(4, 2, alg)); book(_h_Muon_FParam[alg] ,_mkHistoName(5, 2, alg)); book(_h_Muon_Spherocity[alg] ,_mkHistoName(6, 2, alg)); } } /// Perform the per-event analysis void analyze(const Event& event) { // Check for Z boson in event const ZFinder& zfinder = apply(event, "ZFinder"); MSG_DEBUG("Num e+ e- pairs found = " << zfinder.bosons().size()); const bool isElec = zfinder.bosons().size() == 1; const ZFinder& zfinder_mu = apply(event, "ZFinderMu"); MSG_DEBUG("Num mu+ mu- pairs found = " << zfinder_mu.bosons().size()); const bool isMuon = zfinder_mu.bosons().size() == 1; // Only accept events with exactly two electrons or exactly two muons if (isElec && isMuon) vetoEvent; if (!(isElec || isMuon)) vetoEvent; // This determines the Zpt phase-space double zpT = -1000; if (isElec) zpT = zfinder.bosons()[0].pT(); if (isMuon) zpT = zfinder_mu.bosons()[0].pT(); unsigned int alg = 4; //< for > 25 GeV if (zpT < 6*GeV) alg = 1; else if (inRange(zpT/GeV, 6, 12)) alg = 2; else if (inRange(zpT/GeV, 12, 25)) alg = 3; assert(alg < 5); assert(alg > 0); // All charged particles within |eta|<2.5 except the leptons from Z-decay const VetoedFinalState& 
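// Note on the indexing used above: alg = 0 holds the inclusive distributions, while
// alg = 1..4 correspond to the pT(Z) slices < 6 GeV, 6-12 GeV, 12-25 GeV and > 25 GeV;
// each observable is filled both in its slice and in the inclusive slot 0.
// The beam thrust accumulated just below is sum_i pT_i * exp(-|eta_i|), which for
// massless particles equals sum_i (E_i - |pz_i|).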
remfs = apply(event, "REMFS"); // sumPt and Beamthrust (the latter will only be filled if the min Nch criterion is met) // and Thrust preparation double sumPt = 0.0, beamThrust = 0.0; vector momenta; for (const Particle& p : remfs.particles()) { const double pT = p.pT(); sumPt += pT; beamThrust += pT*exp(-p.abseta()); const Vector3 mom = p.mom().pTvec(); momenta.push_back(mom); } // Fill inclusive histos if (isElec) { _h_Elec_Ntrk[alg] ->fill(remfs.size()); _h_Elec_Ntrk[0] ->fill(remfs.size()); _h_Elec_SumPt[alg] ->fill(sumPt); _h_Elec_SumPt[0] ->fill(sumPt); } if (isMuon) { _h_Muon_Ntrk[alg] ->fill(remfs.size()); _h_Muon_Ntrk[0] ->fill(remfs.size()); _h_Muon_SumPt[alg] ->fill(sumPt); _h_Muon_SumPt[0] ->fill(sumPt); } // Skip event shape calculation if we don't match the minimum Nch criterion if (remfs.size() >=2) { // Eventshape calculations // Calculate transverse Thrust using all charged FS particles except the lepton // This is copied/inspired from the CMS_6000011_S8957746 analysis if (momenta.size() == 2) { // We need to use a ghost so that Thrust.calc() doesn't return 1. momenta.push_back(Vector3(1e-10*MeV, 0., 0.)); } Thrust thrustC; thrustC.calc(momenta); double thrust = thrustC.thrust(); // F-Parameter const FParameter& fparam = apply(event, "FParameter_"); // Spherocity const Spherocity& sphero = apply(event, "Spherocity_"); // Histos differential in NMPI // Fill inclusive histos if (isElec) { _h_Elec_Thrust[alg] ->fill(thrust); _h_Elec_Thrust[0] ->fill(thrust); _h_Elec_FParam[alg] ->fill(fparam.F()); _h_Elec_FParam[0] ->fill(fparam.F()); _h_Elec_Spherocity[alg] ->fill(sphero.spherocity()); _h_Elec_Spherocity[0] ->fill(sphero.spherocity()); _h_Elec_Beamthrust[alg] ->fill(beamThrust/GeV); _h_Elec_Beamthrust[0] ->fill(beamThrust/GeV); } if (isMuon) { _h_Muon_Thrust[alg] ->fill(thrust); _h_Muon_Thrust[0] ->fill(thrust); _h_Muon_FParam[alg] ->fill(fparam.F()); _h_Muon_FParam[0] ->fill(fparam.F()); _h_Muon_Spherocity[alg] ->fill(sphero.spherocity()); _h_Muon_Spherocity[0] ->fill(sphero.spherocity()); _h_Muon_Beamthrust[alg] ->fill(beamThrust/GeV); _h_Muon_Beamthrust[0] ->fill(beamThrust/GeV); } } } /// Normalise histograms etc., after the run void finalize() { for (size_t alg = 0; alg < 5; ++alg) { normalize(_h_Elec_Ntrk[alg]); normalize(_h_Elec_SumPt[alg]); normalize(_h_Elec_Beamthrust[alg]); normalize(_h_Elec_Thrust[alg]); normalize(_h_Elec_FParam[alg]); normalize(_h_Elec_Spherocity[alg]); normalize(_h_Muon_Ntrk[alg]); normalize(_h_Muon_SumPt[alg]); normalize(_h_Muon_Beamthrust[alg]); normalize(_h_Muon_Thrust[alg]); normalize(_h_Muon_FParam[alg]); normalize(_h_Muon_Spherocity[alg]); } } private: // Convenience method for histogram booking string _mkHistoName(int idDS, int channel, int i) { return "d0" + toString(idDS) + "-x0" + toString(channel) + "-y0" + toString(i+1); } Histo1DPtr _h_Elec_Ntrk[5]; Histo1DPtr _h_Elec_SumPt[5]; Histo1DPtr _h_Elec_Beamthrust[5]; Histo1DPtr _h_Elec_Thrust[5]; Histo1DPtr _h_Elec_FParam[5]; Histo1DPtr _h_Elec_Spherocity[5]; Histo1DPtr _h_Muon_Ntrk[5]; Histo1DPtr _h_Muon_SumPt[5]; Histo1DPtr _h_Muon_Beamthrust[5]; Histo1DPtr _h_Muon_Thrust[5]; Histo1DPtr _h_Muon_FParam[5]; Histo1DPtr _h_Muon_Spherocity[5]; }; DECLARE_RIVET_PLUGIN(ATLAS_2016_I1424838); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1426515.cc b/analyses/pluginATLAS/ATLAS_2016_I1426515.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1426515.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1426515.cc @@ -1,268 +1,268 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include 
"Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" namespace Rivet { /// WW production at 8 TeV class ATLAS_2016_I1426515 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1426515); /// Book histograms and initialise projections before the run void init() { const FinalState fs(Cuts::abseta < 4.5); // Project photons for dressing IdentifiedFinalState photon_id(fs); photon_id.acceptIdPair(PID::PHOTON); // Project dressed electrons with pT > 15 GeV and |eta| < 2.47 PromptFinalState el_bare(FinalState(Cuts::abspid == PID::ELECTRON)); Cut cuts = (Cuts::abseta < 2.47) && ( (Cuts::abseta <= 1.37) || (Cuts::abseta >= 1.52) ) && (Cuts::pT > 10*GeV); DressedLeptons el_dressed_FS(photon_id, el_bare, 0.1, cuts, true); declare(el_dressed_FS, "EL_DRESSED_FS"); // Project dressed muons with pT > 15 GeV and |eta| < 2.5 PromptFinalState mu_bare(FinalState(Cuts::abspid == PID::MUON)); DressedLeptons mu_dressed_FS(photon_id, mu_bare, 0.1, Cuts::abseta < 2.4 && Cuts::pT > 15*GeV, true); declare(mu_dressed_FS, "MU_DRESSED_FS"); Cut cuts_WW = (Cuts::abseta < 2.5) && (Cuts::pT > 20*GeV); IdentifiedFinalState lep_id(fs); lep_id.acceptIdPair(PID::MUON); lep_id.acceptIdPair(PID::ELECTRON); PromptFinalState lep_bare(lep_id); DressedLeptons leptons(photon_id, lep_bare, 0.1, cuts_WW, true); declare(leptons,"leptons"); declare(FinalState(Cuts::abspid == PID::TAU || Cuts::abspid == PID::NU_TAU), "tau_id"); // Get MET from generic invisibles VetoedFinalState ivfs(fs); ivfs.addVetoOnThisFinalState(VisibleFinalState(fs)); addProjection(ivfs, "InvisibleFS"); // Project jets - FastJets jets(fs, FastJets::ANTIKT, 0.4, JetAlg::NO_MUONS, JetAlg::NO_INVISIBLES); + FastJets jets(fs, FastJets::ANTIKT, 0.4, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); addProjection(jets, "jets"); // Integrated cross sections // d01 ee/mm fiducial integrated cross sections book(_hist_mm_fid_intxsec, 1, 1, 1); book(_hist_ee_fid_intxsec, 1, 1, 2); // d02 emme fiducial integrated cross sections book(_hist_emme_fid_intxsec, 2, 1, 1); // d10 emme fiducial differential cross section (leading lepton ptlead + ptlead normalized) book(_hist_emme_fid_ptlead, 10, 1, 1); book(_hist_emme_fid_ptleadnorm, 10, 1, 2); // d11 emme fiducial differential cross section (dilepton-system ptll + ptll normalized) book(_hist_emme_fid_ptll, 11, 1, 1); book(_hist_emme_fid_ptllnorm, 11, 1, 2); // d12 emme fiducial differential cross section (dilepton-system mll + mll normalized) book(_hist_emme_fid_mll, 12, 1, 1); book(_hist_emme_fid_mllnorm, 12, 1, 2); // d13 emme fiducial differential cross section (dilepton-system delta_phi_ll + dphill normalized) book(_hist_emme_fid_dphill, 13, 1, 1); book(_hist_emme_fid_dphillnorm, 13, 1, 2); // d14 emme fiducial differential cross section (absolute rapidity of dilepton-system y_ll + y_ll normalized) book(_hist_emme_fid_yll, 14, 1, 1); book(_hist_emme_fid_yllnorm, 14, 1, 2); // d15 emme fiducial differential cross section (absolute costheta* of dilepton-system costhetastar_ll + costhetastar_ll normalized) book(_hist_emme_fid_costhetastarll, 15, 1, 1); book(_hist_emme_fid_costhetastarllnorm, 15, 1, 2); } /// Perform the per-event analysis void analyze(const Event& event) { // Find leptons const FinalState& ifs = apply(event, 
"InvisibleFS"); const vector& leptons = apply(event, "leptons").dressedLeptons(); const vector& good_mu = apply(event, "MU_DRESSED_FS").dressedLeptons(); const vector& good_el = apply(event, "EL_DRESSED_FS").dressedLeptons(); const Jets& jets = applyProjection(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 4.5); // Taus are excluded from the fiducial cross section const Particles taus = applyProjection(event, "tau_id").particlesByPt(Cuts::pT>12.*GeV && Cuts::abseta < 3.0); if (!taus.empty()) vetoEvent; // Remove events that do not contain 2 good leptons (either muons or electrons) if (leptons.size() != 2 && (good_el.size() != 1 || good_mu.size() != 1)) vetoEvent; // Split into channels int channel = -1; // 1=mm, 2=ee; 3=emu; 4=mue if (good_mu.size() == 2) channel = 1; //mm else if (good_el.size() == 2) channel = 2; //ee else if (good_mu.size() == 1 && good_el.size() == 1 && good_el[0].pT() > good_mu[0].pT()) channel = 3; //emu else if (good_mu.size() == 1 && good_el.size() == 1 && good_el[0].pT() < good_mu[0].pT()) channel = 4; //mue if (channel == -1) vetoEvent; // Assign leptons const DressedLepton *lep1, *lep2; if (channel == 1) { //mm if (good_mu[0].pT() > good_mu[1].pT()) { lep1 = &good_mu[0]; lep2 = &good_mu[1]; } else { lep1 = &good_mu[1]; lep2 = &good_mu[0]; } } else if (channel == 2) { //ee if (good_el[0].pT() > good_el[1].pT()) { lep1 = &good_el[0]; lep2 = &good_el[1]; } else { lep1 = &good_el[1]; lep2 = &good_el[0]; } } else if (channel == 3) { //emu lep1 = &good_el[0]; lep2 = &good_mu[0]; } else { // if (channel == 4) { //mue lep1 = &good_mu[0]; lep2 = &good_el[0]; } // Cut on leptons if (lep1->pT() < 25*GeV || lep2->pT() < 20*GeV) vetoEvent; // Select jets isolated from electrons const Jets jets_selected = filter_select(jets, [&](const Jet& j){ return all(good_el, deltaRGtr(j, 0.3)); }); // Define variables const FourMomentum met = sum(ifs.particles(), FourMomentum()); const FourMomentum dilep = lep1->momentum() + lep2->momentum(); const double ptll = dilep.pT()/GeV; const double Mll = dilep.mass()/GeV; const double Yll = dilep.absrap(); const double DPhill = fabs(deltaPhi(*lep1, *lep2)); const double costhetastar = fabs(tanh((lep1->eta() - lep2->eta()) / 2)); // Calculate dphi to MET double DPhi_met = fabs(deltaPhi((*lep1), met)); if (fabs(deltaPhi( (*lep2), met)) < DPhi_met) DPhi_met = fabs(deltaPhi((*lep2), met)); if (DPhi_met > M_PI/2) DPhi_met = 1.; else DPhi_met = fabs(sin(DPhi_met)); // Apply selections // mll lower cut (reject quarkonia) if ((channel == 1 || channel == 2) && Mll < 15.) vetoEvent; else if (Mll < 10.) vetoEvent; // Z veto (reject Z -- only dilepton channels) if ((channel == 1 || channel == 2) && abs(Mll - 91.1876) < 15.) 
vetoEvent; // Met rel cut if ((channel == 1 || channel == 2) && met.pT()*DPhi_met < 45*GeV) vetoEvent; else if (met.pT()*DPhi_met < 15*GeV) vetoEvent; // MET (pt-MET) cut if ((channel == 1 || channel == 2) && met.pT() <= 45*GeV) vetoEvent; // begin MET cut else if (met.pT() <= 20*GeV) vetoEvent; // Require 0 jets if (!jets_selected.empty()) vetoEvent; // Fill histograms if (channel == 1) { _hist_mm_fid_intxsec->fill(1.0); } else if (channel == 2) { _hist_ee_fid_intxsec->fill(1.0); } else if (channel == 3 || channel == 4) { _hist_emme_fid_intxsec->fill(1.0); _hist_emme_fid_ptlead->fill(lep1->pT()/GeV); _hist_emme_fid_ptleadnorm->fill(lep1->pT()/GeV); _hist_emme_fid_ptll->fill(ptll); _hist_emme_fid_ptllnorm->fill(ptll); _hist_emme_fid_mll->fill(Mll); _hist_emme_fid_mllnorm->fill(Mll); _hist_emme_fid_dphill->fill(DPhill); _hist_emme_fid_dphillnorm->fill(DPhill); _hist_emme_fid_yll->fill(Yll); _hist_emme_fid_yllnorm->fill(Yll); _hist_emme_fid_costhetastarll->fill(costhetastar); _hist_emme_fid_costhetastarllnorm->fill(costhetastar); } } /// Normalise histograms etc., after the run void finalize() { const double sf(crossSection()/femtobarn/sumOfWeights()); scale({_hist_mm_fid_intxsec, _hist_ee_fid_intxsec, _hist_emme_fid_intxsec}, sf); scale({_hist_emme_fid_ptlead, _hist_emme_fid_ptll, _hist_emme_fid_mll, _hist_emme_fid_dphill, _hist_emme_fid_yll, _hist_emme_fid_costhetastarll}, sf); normalize({_hist_emme_fid_ptleadnorm, _hist_emme_fid_ptllnorm, _hist_emme_fid_mllnorm, _hist_emme_fid_dphillnorm, _hist_emme_fid_yllnorm, _hist_emme_fid_costhetastarllnorm}); } private: /// @name Histograms //@{ // d01 ee/mm fiducial integrated cross sections Histo1DPtr _hist_mm_fid_intxsec, _hist_ee_fid_intxsec; // d02 emme fiducial integrated cross sections Histo1DPtr _hist_emme_fid_intxsec; // d10 emme fiducial differential cross section (leading lepton ptlead + ptlead normalized) Histo1DPtr _hist_emme_fid_ptlead, _hist_emme_fid_ptleadnorm; // d11 emme fiducial differential cross section (dilepton-system ptll + ptll normalized) Histo1DPtr _hist_emme_fid_ptll, _hist_emme_fid_ptllnorm; // d12 emme fiducial differential cross section (dilepton-system mll + mll normalized) Histo1DPtr _hist_emme_fid_mll, _hist_emme_fid_mllnorm; // d13 emme fiducial differential cross section (dilepton-system delta_phi_ll + dphill normalized) Histo1DPtr _hist_emme_fid_dphill, _hist_emme_fid_dphillnorm; // d14 emme fiducial differential cross section (absolute rapidity of dilepton-system y_ll + y_ll normalized) Histo1DPtr _hist_emme_fid_yll, _hist_emme_fid_yllnorm; // d15 emme fiducial differential cross section (absolute costheta* of dilepton-system costhetastar_ll + costhetastar_ll normalized) Histo1DPtr _hist_emme_fid_costhetastarll, _hist_emme_fid_costhetastarllnorm; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1426515); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1444991.cc b/analyses/pluginATLAS/ATLAS_2016_I1444991.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1444991.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1444991.cc @@ -1,193 +1,193 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VisibleFinalState.hh" namespace Rivet { class ATLAS_2016_I1444991 : public Analysis { public: /// Constructor 
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1444991); public: /// Book histograms and initialise projections before the run void init() { // All particles within |eta| < 5.0 const FinalState FS(Cuts::abseta < 5.0); // Project photons for dressing IdentifiedFinalState photon_id(FS); photon_id.acceptIdPair(PID::PHOTON); // Project dressed electrons with pT > 15 GeV and |eta| < 2.47 IdentifiedFinalState el_id(FS); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState el_bare(el_id); Cut cuts = (Cuts::abseta < 2.47) && ( (Cuts::abseta <= 1.37) || (Cuts::abseta >= 1.52) ) && (Cuts::pT > 15*GeV); DressedLeptons el_dressed_FS(photon_id, el_bare, 0.1, cuts, true); declare(el_dressed_FS,"EL_DRESSED_FS"); // Project dressed muons with pT > 15 GeV and |eta| < 2.5 IdentifiedFinalState mu_id(FS); mu_id.acceptIdPair(PID::MUON); PromptFinalState mu_bare(mu_id); DressedLeptons mu_dressed_FS(photon_id, mu_bare, 0.1, Cuts::abseta < 2.5 && Cuts::pT > 15*GeV, true); declare(mu_dressed_FS,"MU_DRESSED_FS"); // get MET from generic invisibles VetoedFinalState inv_fs(FS); inv_fs.addVetoOnThisFinalState(VisibleFinalState(FS)); declare(inv_fs, "InvisibleFS"); // Project jets FastJets jets(FS, FastJets::ANTIKT, 0.4); - jets.useInvisibles(JetAlg::NO_INVISIBLES); - jets.useMuons(JetAlg::NO_MUONS); + jets.useInvisibles(JetAlg::Invisibles::NONE); + jets.useMuons(JetAlg::Muons::NONE); declare(jets, "jets"); // Book histograms book(_h_Njets , 2,1,1); book(_h_PtllMET , 3,1,1); book(_h_Yll , 4,1,1); book(_h_PtLead , 5,1,1); book(_h_Njets_norm , 6,1,1); book(_h_PtllMET_norm , 7,1,1); book(_h_Yll_norm , 8,1,1); book(_h_PtLead_norm , 9,1,1); book(_h_JetVeto , 10, 1, 1, true); //histos for jetveto std::vector ptlead25_bins = { 0., 25., 300. }; std::vector ptlead40_bins = { 0., 40., 300. }; book(_h_pTj1_sel25 , "pTj1_sel25", ptlead25_bins, "", "", "" ); book(_h_pTj1_sel40 , "pTj1_sel40", ptlead40_bins, "", "", "" ); } /// Perform the per-event analysis void analyze(const Event& event) { // Get final state particles const FinalState& ifs = applyProjection(event, "InvisibleFS"); const vector& good_mu = applyProjection(event, "MU_DRESSED_FS").dressedLeptons(); const vector& el_dressed = applyProjection(event, "EL_DRESSED_FS").dressedLeptons(); const Jets& jets = applyProjection(event, "jets").jetsByPt(Cuts::pT>25*GeV && Cuts::abseta < 4.5); //find good electrons vector good_el; for (const DressedLepton& el : el_dressed){ bool keep = true; for (const DressedLepton& mu : good_mu) { keep &= deltaR(el, mu) >= 0.1; } if (keep) good_el += el; } // select only emu events if ((good_el.size() != 1) || good_mu.size() != 1) vetoEvent; //built dilepton FourMomentum dilep = good_el[0].momentum() + good_mu[0].momentum(); double Mll = dilep.mass(); double Yll = dilep.rapidity(); double DPhill = fabs(deltaPhi(good_el[0], good_mu[0])); double pTl1 = (good_el[0].pT() > good_mu[0].pT())? good_el[0].pT() : good_mu[0].pT(); //get MET FourMomentum met; for (const Particle& p : ifs.particles()) met += p.momentum(); // do a few cuts before looking at jets if (pTl1 <= 22. || DPhill >= 1.8 || met.pT() <= 20.) vetoEvent; if (Mll <= 10. || Mll >= 55.) vetoEvent; Jets jets_selected; for (const Jet &j : jets) { if( j.abseta() > 2.4 && j.pT()<=30*GeV ) continue; bool keep = true; for(DressedLepton el : good_el) { keep &= deltaR(j, el) >= 0.3; } if (keep) jets_selected += j; } double PtllMET = (met + good_el[0].momentum() + good_mu[0].momentum()).pT(); double Njets = jets_selected.size() > 2 ? 2 : jets_selected.size(); double pTj1 = jets_selected.size()? 
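// Note on the fallback value assigned here: events with no selected jet get a nominal
// pTj1 of 0.1 GeV, presumably so that they populate the lowest bin of the {0, 25, 300}
// and {0, 40, 300} GeV histograms booked above, and are therefore counted as passing
// both the 25 GeV and 40 GeV vetoes when the jet-veto efficiency points are built
// in finalize().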
jets_selected[0].pT() : 0.1; // Fill histograms _h_Njets->fill(Njets); _h_PtllMET->fill(PtllMET); _h_Yll->fill(fabs(Yll)); _h_PtLead->fill(pTj1); _h_Njets_norm->fill(Njets); _h_PtllMET_norm->fill(PtllMET); _h_Yll_norm->fill(fabs(Yll)); _h_PtLead_norm->fill(pTj1); _h_pTj1_sel25->fill(pTj1); _h_pTj1_sel40->fill(pTj1); } /// Normalise histograms etc., after the run void finalize() { const double xs = crossSectionPerEvent()/femtobarn; /// @todo Normalise, scale and otherwise manipulate histograms here scale(_h_Njets, xs); scale(_h_PtllMET, xs); scale(_h_Yll, xs); scale(_h_PtLead, xs); normalize(_h_Njets_norm); normalize(_h_PtllMET_norm); normalize(_h_Yll_norm); normalize(_h_PtLead_norm); scale(_h_pTj1_sel25, xs); scale(_h_pTj1_sel40, xs); normalize(_h_pTj1_sel25); normalize(_h_pTj1_sel40); // fill jet veto efficiency histogram _h_JetVeto->point(0).setY(_h_pTj1_sel25->bin(0).sumW(), sqrt(_h_pTj1_sel25->bin(0).sumW2())); _h_JetVeto->point(1).setY(_h_PtLead_norm->bin(0).sumW(), sqrt(_h_PtLead_norm->bin(0).sumW2())); _h_JetVeto->point(2).setY(_h_pTj1_sel40->bin(0).sumW(), sqrt(_h_pTj1_sel25->bin(0).sumW2())); } private: /// @name Histograms //@{ Histo1DPtr _h_Njets; Histo1DPtr _h_PtllMET; Histo1DPtr _h_Yll; Histo1DPtr _h_PtLead; Histo1DPtr _h_Njets_norm; Histo1DPtr _h_PtllMET_norm; Histo1DPtr _h_Yll_norm; Histo1DPtr _h_PtLead_norm; Scatter2DPtr _h_JetVeto; Histo1DPtr _h_pTj1_sel25; Histo1DPtr _h_pTj1_sel40; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1444991); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1479760.cc b/analyses/pluginATLAS/ATLAS_2016_I1479760.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1479760.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1479760.cc @@ -1,120 +1,120 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Hard double-parton scattering in four-jet events at 7 TeV class ATLAS_2016_I1479760 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1479760); /// Book histograms and initialise projections before the run void init() { /// Declare AntiKt 0.6 jets without muons and neutrinos FastJets fastJets(FinalState(), FastJets::ANTIKT, 0.6); - fastJets.useInvisibles(JetAlg::NO_INVISIBLES); - fastJets.useMuons(JetAlg::NO_MUONS); + fastJets.useInvisibles(JetAlg::Invisibles::NONE); + fastJets.useMuons(JetAlg::Muons::NONE); addProjection(fastJets, "AntiKt6Jets"); book(_hists["deltaPt34"] , 1, 1, 1); book(_hists["deltaPhi34"] , 2, 1, 1); book(_hists["deltaPt12"] , 3, 1, 1); book(_hists["deltaPt13"] , 4, 1, 1); book(_hists["deltaPt23"] , 5, 1, 1); book(_hists["deltaPt14"] , 6, 1, 1); book(_hists["deltaPt24"] , 7, 1, 1); book(_hists["deltaPhi12"] , 8, 1, 1); book(_hists["deltaPhi13"] , 9, 1, 1); book(_hists["deltaPhi23"] , 10, 1, 1); book(_hists["deltaPhi14"] , 11, 1, 1); book(_hists["deltaPhi24"] , 12, 1, 1); book(_hists["deltaY12"] , 13, 1, 1); book(_hists["deltaY34"] , 14, 1, 1); book(_hists["deltaY13"] , 15, 1, 1); book(_hists["deltaY23"] , 16, 1, 1); book(_hists["deltaY14"] , 17, 1, 1); book(_hists["deltaY24"] , 18, 1, 1); book(_hists["deltaPhiPlanes12"], 19, 1, 1); book(_hists["deltaPhiPlanes13"], 20, 1, 1); book(_hists["deltaPhiPlanes14"], 21, 1, 1); } /// Calculate the DeltaPt variable double calcDeltaPt(const Jet& j1, const Jet& j2) { return (j1.momentum() + j2.momentum()).pT() / (j1.pT() + j2.pT()); } /// Calculate the DeltaPhi variable between event planes double calcDeltaPhiPlanes(const Jet& j1, const Jet& j2, const Jet& 
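// Note on the variables defined here: calcDeltaPt(i, j) returns the pT balance
//   |pT_i + pT_j| / (pT_i + pT_j)   (vector sum over scalar sum),
// which tends to 0 for a balanced back-to-back pair, and calcDeltaPhiPlanes gives the
// azimuthal angle between the summed momenta of two jet pairs; both are standard
// discriminants for hard double parton scattering in four-jet events.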
j3, const Jet& j4) { const FourMomentum sumVec1 = j1.momentum() + j2.momentum(); const FourMomentum sumVec2 = j3.momentum() + j4.momentum(); return deltaPhi(sumVec1, sumVec2); } /// Perform the per-event analysis void analyze(const Event& event) { // Retrieve all anti-kt R=0.6 jets with pT above 20 GeV and eta < 4.4 const Jets jets = applyProjection(event, "AntiKt6Jets").jetsByPt(Cuts::pT >= 20*GeV && Cuts::abseta <= 4.4); // Require at least 4 jets, with the leading jet pT above 42.5 GeV if (jets.size() < 4) vetoEvent; if (jets[0].pT() < 42.5*GeV) vetoEvent; /// Fill histograms _hists["deltaPt12"]->fill( calcDeltaPt( jets[0], jets[1] )); _hists["deltaPt34"]->fill( calcDeltaPt( jets[2], jets[3] )); _hists["deltaPt13"]->fill( calcDeltaPt( jets[0], jets[2] )); _hists["deltaPt23"]->fill( calcDeltaPt( jets[1], jets[2] )); _hists["deltaPt14"]->fill( calcDeltaPt( jets[0], jets[3] )); _hists["deltaPt24"]->fill( calcDeltaPt( jets[1], jets[3] )); // _hists["deltaPhi12"]->fill( deltaPhi( jets[0],jets[1] )); _hists["deltaPhi34"]->fill( deltaPhi( jets[2],jets[3] )); _hists["deltaPhi13"]->fill( deltaPhi( jets[0],jets[2] )); _hists["deltaPhi23"]->fill( deltaPhi( jets[1],jets[2] )); _hists["deltaPhi14"]->fill( deltaPhi( jets[0],jets[3] )); _hists["deltaPhi24"]->fill( deltaPhi( jets[1],jets[3] )); // _hists["deltaY12"]->fill( deltaRap( jets[0], jets[1] )); _hists["deltaY34"]->fill( deltaRap( jets[2], jets[3] )); _hists["deltaY13"]->fill( deltaRap( jets[0], jets[2] )); _hists["deltaY23"]->fill( deltaRap( jets[1], jets[2] )); _hists["deltaY14"]->fill( deltaRap( jets[0], jets[3] )); _hists["deltaY24"]->fill( deltaRap( jets[1], jets[3] )); // _hists["deltaPhiPlanes12"]->fill( calcDeltaPhiPlanes(jets[0], jets[1], jets[2], jets[3] )); _hists["deltaPhiPlanes13"]->fill( calcDeltaPhiPlanes(jets[0], jets[2], jets[1], jets[3] )); _hists["deltaPhiPlanes14"]->fill( calcDeltaPhiPlanes(jets[0], jets[3], jets[1], jets[2] )); } /// Post-run processing void finalize() { for (auto& key_hist : _hists) normalize(key_hist.second); } //@} /// Histograms map _hists; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1479760); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc b/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc @@ -1,137 +1,137 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { /// @brief inclusive W cross sections at 7 TeV class ATLAS_2016_I1502620_W : public Analysis { public: /// Constructor ATLAS_2016_I1502620_W(string name="ATLAS_2016_I1502620_W") : Analysis(name) { // using electron channel by default _mode = 0; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { ///Initialise and register projections here const FinalState fs; Cut cut = Cuts::pT >= 25*GeV; // minimum lepton pT WFinder wfinder_dressed(fs, cut, _mode? 
PID::MUON : PID::ELECTRON, 40*GeV, 13*TeV, 25*GeV, 0.1, - WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder_dressed, "WFinder_dressed"); /// Book histograms here book(_h_Wp_eta, 9, 1, _mode + 1); book(_h_Wm_eta, 10, 1, _mode + 1); book(_h_W_asym, 35, 1, _mode + 1); } /// Perform the per-event analysis void analyze(const Event& event) { const WFinder& wfinder = apply(event, "WFinder_dressed"); if (wfinder.bosons().size() != 1) vetoEvent; const Particle lep = wfinder.constituentLeptons()[0]; if (lep.charge3() > 0) _h_Wp_eta->fill(lep.abseta()); else _h_Wm_eta->fill(lep.abseta()); } /// Normalise histograms etc., after the run void finalize() { // Construct asymmetry: (dsig+/deta - dsig-/deta) / (dsig+/deta + dsig-/deta) //divide(*_h_Wp_eta - *_h_Wm_eta, *_h_Wp_eta + *_h_Wm_eta, _h_W_asym); for (size_t i = 0; i < _h_Wp_eta->numBins(); ++i) { YODA::HistoBin1D& bp = _h_Wp_eta->bin(i); YODA::HistoBin1D& bm = _h_Wm_eta->bin(i); const double sum = bp.height() + bm.height(); //const double xerr = 0.5 * bp.xWidth(); double val = 0., yerr = 0.; if (sum) { const double pos2 = bp.height() * bp.height(); const double min2 = bm.height() * bm.height(); const double errp2 = bp.heightErr() * bp.heightErr(); const double errm2 = bm.heightErr() * bm.heightErr(); val = (bp.height() - bm.height()) / sum; yerr = 2. * sqrt(errm2 * pos2 + errp2 * min2) / (sum * sum); } _h_W_asym->addPoint(bp.midpoint(), val, 0.5*bp.xWidth(), yerr); } // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_DEBUG( "Cross-section/pb : " << xs_pb ); MSG_DEBUG( "Sum of weights : " << sumw ); MSG_DEBUG( "nEvents : " << numEvents() ); /// Normalise, scale and otherwise manipulate histograms here const double sf = 0.5 * xs_pb / sumw; // 0.5 accounts for rapidity bin width scale(_h_Wp_eta, sf); scale(_h_Wm_eta, sf); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _h_Wp_eta, _h_Wm_eta; Scatter2DPtr _h_W_asym; //@} }; class ATLAS_2016_I1502620_W_EL : public ATLAS_2016_I1502620_W { public: ATLAS_2016_I1502620_W_EL() : ATLAS_2016_I1502620_W("ATLAS_2016_I1502620_W_EL") { _mode = 0; } }; class ATLAS_2016_I1502620_W_MU : public ATLAS_2016_I1502620_W { public: ATLAS_2016_I1502620_W_MU() : ATLAS_2016_I1502620_W("ATLAS_2016_I1502620_W_MU") { _mode = 1; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_W); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_W_EL); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_W_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc b/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc @@ -1,134 +1,134 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief inclusive Z cross sections at 7 TeV class ATLAS_2016_I1502620_Z : public Analysis { public: /// Constructor ATLAS_2016_I1502620_Z(string name="ATLAS_2016_I1502620_Z") : Analysis(name) { // using electron channel by default _mode = 0; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { const FinalState fs; Cut cuts = Cuts::pT >= 20.0*GeV; - ZFinder zfinder(fs, cuts, (_mode ? 
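// Note on the error assigned to the asymmetry points above: with
//   A = (s+ - s-) / (s+ + s-),
// standard propagation of the independent bin uncertainties ds+ and ds- gives
//   dA = 2 * sqrt( (s-)^2 * (ds+)^2 + (s+)^2 * (ds-)^2 ) / (s+ + s-)^2,
// which is exactly the yerr expression used when filling _h_W_asym.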
PID::MUON : PID::ELECTRON), 46.0*GeV, 150*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::NOTRACK); + ZFinder zfinder(fs, cuts, (_mode ? PID::MUON : PID::ELECTRON), 46.0*GeV, 150*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder, "ZFinder"); // Book histograms book(_h_Zcenlow_y_dressed , 11, 1, _mode + 1); book(_h_Zcenpeak_y_dressed, 12, 1, _mode + 1); book(_h_Zcenhigh_y_dressed, 13, 1, _mode + 1); book(_h_Zfwdpeak_y_dressed, 14, 1, _mode + 1); book(_h_Zfwdhigh_y_dressed, 15, 1, _mode + 1); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1 ) vetoEvent; const Particle& Zboson = zfinder.boson(); const double zrap = Zboson.absrap(); const double zmass = Zboson.mass(); // Get/cut on Z boson leptons const ParticleVector& leptons = zfinder.constituents(); const double eta1 = leptons[0].abseta(); const double eta2 = leptons[1].abseta(); // separation into central/forward and three mass bins if (eta1 < 2.5 && eta2 < 2.5) { if (zmass < 66.0*GeV) _h_Zcenlow_y_dressed->fill(zrap); else if (zmass < 116.0*GeV) _h_Zcenpeak_y_dressed->fill(zrap); else _h_Zcenhigh_y_dressed->fill(zrap); } else if ((eta1 < 2.5 && 2.5 < eta2 && eta2 < 4.9) || (eta2 < 2.5 && 2.5 < eta1 && eta1 < 4.9)) { if (zmass < 66.0*GeV) vetoEvent; if (zmass < 116.0*GeV) _h_Zfwdpeak_y_dressed->fill(zrap); else _h_Zfwdhigh_y_dressed->fill(zrap); } } /// Normalise histograms etc., after the run void finalize() { // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_DEBUG("Cross-Section/pb: " << xs_pb ); MSG_DEBUG("Sum of weights : " << sumw ); MSG_DEBUG("nEvents : " << numEvents()); // Normalise, scale and otherwise manipulate histograms here const double sf(0.5 * xs_pb / sumw); // 0.5 accounts for rapidity bin width scale(_h_Zcenlow_y_dressed, sf); scale(_h_Zcenpeak_y_dressed, sf); scale(_h_Zcenhigh_y_dressed, sf); scale(_h_Zfwdpeak_y_dressed, sf); scale(_h_Zfwdhigh_y_dressed, sf); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _h_Zcenlow_y_dressed; Histo1DPtr _h_Zcenpeak_y_dressed; Histo1DPtr _h_Zcenhigh_y_dressed; Histo1DPtr _h_Zfwdpeak_y_dressed; Histo1DPtr _h_Zfwdhigh_y_dressed; //@} }; class ATLAS_2016_I1502620_Z_EL : public ATLAS_2016_I1502620_Z { public: ATLAS_2016_I1502620_Z_EL() : ATLAS_2016_I1502620_Z("ATLAS_2016_I1502620_Z_EL") { _mode = 0; } }; class ATLAS_2016_I1502620_Z_MU : public ATLAS_2016_I1502620_Z { public: ATLAS_2016_I1502620_Z_MU() : ATLAS_2016_I1502620_Z("ATLAS_2016_I1502620_Z_MU") { _mode = 1; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_Z); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_Z_EL); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_Z_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1609253.cc b/analyses/pluginATLAS/ATLAS_2017_I1609253.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1609253.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1609253.cc @@ -1,133 +1,133 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Multijet transverse energy-energy correlations (TEEC) at 8 TeV class ATLAS_2017_I1609253 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1609253); /// Initialization, called once before running void init() { // Projections - const FastJets jets(FinalState(), FastJets::ANTIKT, 0.4, JetAlg::ALL_MUONS, 
JetAlg::ALL_INVISIBLES); + const FastJets jets(FinalState(), FastJets::ANTIKT, 0.4, JetAlg::Muons::ALL, JetAlg::Invisibles::ALL); addProjection(jets, "Jets"); // Book histograms book(_hist_EEC1 , 1, 1, 1); book(_hist_AEEC1, 2, 1, 1); book(_hist_EEC2 , 3, 1, 1); book(_hist_AEEC2, 4, 1, 1); book(_hist_EEC3 , 5, 1, 1); book(_hist_AEEC3, 6, 1, 1); book(_hist_EEC4 , 7, 1, 1); book(_hist_AEEC4, 8, 1, 1); book(_hist_EEC5 , 9, 1, 1); book(_hist_AEEC5, 10, 1, 1); book(_hist_EEC6 , 11, 1, 1); book(_hist_AEEC6, 12, 1, 1); } void analyze(const Event& event) { const Jets& jets = applyProjection(event, "Jets").jetsByPt(Cuts::abseta < 2.5 && Cuts::pT > 100*GeV); if (jets.size() < 2) vetoEvent; double sumPt12 = jets[0].pt() + jets[1].pt(); if (sumPt12 < 800*GeV) vetoEvent; double sumEt = 0.; for (const Jet& j : jets) sumEt += j.Et(); for (const Jet& j1 : jets) { double et1 = j1.Et(); for (const Jet& j2 : jets) { double et2 = j2.Et(); double etWeight = et1*et2/(sumEt*sumEt); double dPhi = deltaPhi(j1, j2); double cosPhi = cos(dPhi); if (cos(dPhi) == 1.0) cosPhi = 0.9999; if (sumPt12 > 800*GeV && sumPt12 <= 850*GeV) _hist_EEC1->fill(cosPhi, etWeight); if (sumPt12 > 850*GeV && sumPt12 <= 900*GeV) _hist_EEC2->fill(cosPhi, etWeight); if (sumPt12 > 900*GeV && sumPt12 <= 1000*GeV) _hist_EEC3->fill(cosPhi, etWeight); if (sumPt12 > 1000*GeV && sumPt12 <= 1100*GeV) _hist_EEC4->fill(cosPhi, etWeight); if (sumPt12 > 1100*GeV && sumPt12 <= 1400*GeV) _hist_EEC5->fill(cosPhi, etWeight); if (sumPt12 > 1400*GeV) _hist_EEC6->fill(cosPhi, etWeight); } } } void finalize() { normalize(_hist_EEC1); normalize(_hist_EEC2); normalize(_hist_EEC3); normalize(_hist_EEC4); normalize(_hist_EEC5); normalize(_hist_EEC6); vector points1, points2, points3, points4, points5, points6; size_t nBins = _hist_EEC1->numBins(); for (size_t k = 0; k < nBins/2; ++k) { double x = _hist_EEC1->bin(k).midpoint(); double ex = _hist_EEC1->bin(k).xWidth()/2; double y1 = _hist_EEC1->bin(k).height() - _hist_EEC1->bin(nBins-(k+1)).height(); double ey1 = sqrt( pow(_hist_EEC1->bin(k).heightErr(),2) + pow(_hist_EEC1->bin(nBins-(k+1)).heightErr(),2) ); points1.push_back(Point2D(x,y1,ex,ey1)); double y2 = _hist_EEC2->bin(k).height() - _hist_EEC2->bin(nBins-(k+1)).height(); double ey2 = sqrt( pow(_hist_EEC2->bin(k).heightErr(),2) + pow(_hist_EEC2->bin(nBins-(k+1)).heightErr(),2) ); points2.push_back(Point2D(x,y2,ex,ey2)); double y3 = _hist_EEC3->bin(k).height() - _hist_EEC3->bin(nBins-(k+1)).height(); double ey3 = sqrt( pow(_hist_EEC3->bin(k).heightErr(),2) + pow(_hist_EEC3->bin(nBins-(k+1)).heightErr(),2) ); points3.push_back(Point2D(x,y3,ex,ey3)); double y4 = _hist_EEC4->bin(k).height() - _hist_EEC4->bin(nBins-(k+1)).height(); double ey4 = sqrt( pow(_hist_EEC4->bin(k).heightErr(),2) + pow(_hist_EEC4->bin(nBins-(k+1)).heightErr(),2) ); points4.push_back(Point2D(x,y4,ex,ey4)); double y5 = _hist_EEC5->bin(k).height() - _hist_EEC5->bin(nBins-(k+1)).height(); double ey5 = sqrt( pow(_hist_EEC5->bin(k).heightErr(),2) + pow(_hist_EEC5->bin(nBins-(k+1)).heightErr(),2) ); points5.push_back(Point2D(x,y5,ex,ey5)); double y6 = _hist_EEC6->bin(k).height() - _hist_EEC6->bin(nBins-(k+1)).height(); double ey6 = sqrt( pow(_hist_EEC6->bin(k).heightErr(),2) + pow(_hist_EEC6->bin(nBins-(k+1)).heightErr(),2) ); points6.push_back(Point2D(x,y6,ex,ey6)); } _hist_AEEC1->addPoints(points1); _hist_AEEC2->addPoints(points2); _hist_AEEC3->addPoints(points3); _hist_AEEC4->addPoints(points4); _hist_AEEC5->addPoints(points5); _hist_AEEC6->addPoints(points6); } private: Histo1DPtr _hist_EEC1, 
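// Note on the quantity histogrammed above: each jet pair (i, j) enters the EEC at
// cos(dphi_ij) with weight ET_i * ET_j / (sum_k ET_k)^2, i.e. the transverse
// energy-energy correlation; clamping cos(dphi) = 1 to 0.9999 keeps the self-pairs
// inside the last bin. The asymmetry points built in finalize() are
//   AEEC(cos phi) = EEC(cos phi) - EEC(-cos phi),
// formed by subtracting mirrored bins, assuming a binning symmetric about zero.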
_hist_EEC2, _hist_EEC3, _hist_EEC4, _hist_EEC5, _hist_EEC6; Scatter2DPtr _hist_AEEC1, _hist_AEEC2, _hist_AEEC3, _hist_AEEC4, _hist_AEEC5, _hist_AEEC6; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609253); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1609448.cc b/analyses/pluginATLAS/ATLAS_2017_I1609448.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1609448.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1609448.cc @@ -1,285 +1,285 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// ATLAS pTmiss+jets cross-section ratios at 13 TeV class ATLAS_2017_I1609448 : public Analysis { public: /// Constructor ATLAS_2017_I1609448(string name="ATLAS_2017_I1609448") : Analysis(name) { _mode = 0; // using Z -> nunu channel by default } struct HistoHandler { Histo1DPtr histo; Scatter2DPtr scatter; unsigned int d, x, y; void fill(double value) { histo->fill(value); } }; /// Initialize void init() { // Prompt photons PromptFinalState photon_fs(Cuts::abspid == PID::PHOTON && Cuts::abseta < 4.9); // Prompt electrons PromptFinalState el_fs(Cuts::abseta < 4.9 && Cuts::abspid == PID::ELECTRON); // Prompt muons PromptFinalState mu_fs(Cuts::abseta < 4.9 && Cuts::abspid == PID::MUON); // Dressed leptons Cut lep_cuts = Cuts::pT > 7*GeV && Cuts::abseta < 2.5; DressedLeptons dressed_leps(photon_fs, (_mode == 2 ? el_fs : mu_fs), 0.1, lep_cuts); declare(dressed_leps, "DressedLeptons"); // In-acceptance leptons for lepton veto PromptFinalState veto_lep_fs(Cuts::abseta < 4.9 && (Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON)); veto_lep_fs.acceptTauDecays(); veto_lep_fs.acceptMuonDecays(); DressedLeptons veto_lep(photon_fs, veto_lep_fs, 0.1, lep_cuts); declare(veto_lep, "VetoLeptons"); // MET VetoedFinalState met_fs(!(Cuts::abseta > 2.5 && Cuts::abspid == PID::MUON)); if (_mode) met_fs.addVetoOnThisFinalState(dressed_leps); declare(MissingMomentum(met_fs), "MET"); // Jet collection - FastJets jets(FinalState(Cuts::abseta < 4.9), FastJets::ANTIKT, 0.4, JetAlg::NO_MUONS, JetAlg::NO_INVISIBLES); + FastJets jets(FinalState(Cuts::abseta < 4.9), FastJets::ANTIKT, 0.4, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); declare(jets, "Jets"); _h["met_mono"] = bookHandler(1, 1, 2); _h["met_vbf" ] = bookHandler(2, 1, 2); _h["mjj_vbf" ] = bookHandler(3, 1, 2); _h["dphijj_vbf"] = bookHandler(4, 1, 2); } HistoHandler bookHandler(unsigned int id_d, unsigned int id_x, unsigned int id_y) { HistoHandler dummy; if (_mode < 2) { // numerator mode const string histName = "_" + makeAxisCode(id_d, id_x, id_y); book(dummy.histo, histName, refData(id_d, id_x, id_y)); // hidden auxiliary output book(dummy.scatter, id_d, id_x, id_y - 1, true); // ratio dummy.d = id_d; dummy.x = id_x; dummy.y = id_y; } else { book(dummy.histo, id_d, id_x, 4); // denominator mode } return dummy; } bool isBetweenJets(const Jet& probe, const Jet& boundary1, const Jet& boundary2) { const double y_p = probe.rapidity(); const double y_b1 = boundary1.rapidity(); const double y_b2 = boundary2.rapidity(); const double y_min = std::min(y_b1, y_b2); const double y_max = std::max(y_b1, y_b2); return (y_p > y_min && y_p < y_max); } int centralJetVeto(Jets& jets) { if (jets.size() < 2) return 0; const Jet bj1 = jets.at(0); const Jet bj2 = jets.at(1); // Start loop 
at the 3rd hardest pT jet int n_between = 0; for (size_t i = 2; i < jets.size(); ++i) { const Jet j = jets.at(i); if (isBetweenJets(j, bj1, bj2) && j.pT() > 25*GeV) ++n_between; } return n_between; } /// Perform the per-event analysis void analyze(const Event& event) { // Require 0 (Znunu) or 2 (Zll) dressed leptons bool isZll = bool(_mode); const vector &vetoLeptons = applyProjection(event, "VetoLeptons").dressedLeptons(); const vector &all_leps = applyProjection(event, "DressedLeptons").dressedLeptons(); if (!isZll && vetoLeptons.size()) vetoEvent; if ( isZll && all_leps.size() != 2) vetoEvent; vector leptons; bool pass_Zll = true; if (isZll) { // Sort dressed leptons by pT if (all_leps[0].pt() > all_leps[1].pt()) { leptons.push_back(all_leps[0]); leptons.push_back(all_leps[1]); } else { leptons.push_back(all_leps[1]); leptons.push_back(all_leps[0]); } // Leading lepton pT cut pass_Zll &= leptons[0].pT() > 80*GeV; // Opposite-charge requirement pass_Zll &= threeCharge(leptons[0]) + threeCharge(leptons[1]) == 0; // Z-mass requirement const double Zmass = (leptons[0].mom() + leptons[1].mom()).mass(); pass_Zll &= (Zmass >= 66*GeV && Zmass <= 116*GeV); } if (!pass_Zll) vetoEvent; // Get jets and remove those within dR = 0.5 of a dressed lepton Jets jets = applyProjection(event, "Jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::absrap < 4.4); for (const DressedLepton& lep : leptons) ifilter_discard(jets, deltaRLess(lep, 0.5)); const size_t njets = jets.size(); if (!njets) vetoEvent; const int njets_gap = centralJetVeto(jets); double jpt1 = jets[0].pT(); double jeta1 = jets[0].eta(); double mjj = 0., jpt2 = 0., dphijj = 0.; if (njets >= 2) { mjj = (jets[0].momentum() + jets[1].momentum()).mass(); jpt2 = jets[1].pT(); dphijj = deltaPhi(jets[0], jets[1]); } // MET Vector3 met_vec = apply(event, "MET").vectorMPT(); double met = met_vec.mod(); // Cut on deltaPhi between MET and first 4 jets, but only if jet pT > 30 GeV bool dphi_fail = false; for (size_t i = 0; i < jets.size() && i < 4; ++i) { dphi_fail |= (deltaPhi(jets[i], met_vec) < 0.4 && jets[i].pT() > 30*GeV); } const bool pass_met_dphi = met > 200*GeV && !dphi_fail; const bool pass_vbf = pass_met_dphi && mjj > 200*GeV && jpt1 > 80*GeV && jpt2 > 50*GeV && njets >= 2 && !njets_gap; const bool pass_mono = pass_met_dphi && jpt1 > 120*GeV && fabs(jeta1) < 2.4; if (pass_mono) _h["met_mono"].fill(met); if (pass_vbf) { _h["met_vbf"].fill(met/GeV); _h["mjj_vbf"].fill(mjj/GeV); _h["dphijj_vbf"].fill(dphijj); } } /// Normalise, scale and otherwise manipulate histograms here void finalize() { const double sf(crossSection() / femtobarn / sumOfWeights()); for (map::iterator hit = _h.begin(); hit != _h.end(); ++hit) { scale(hit->second.histo, sf); if (_mode < 2) constructRmiss(hit->second); } } void constructRmiss(HistoHandler& handler) { // Load transfer function from reference data file const YODA::Scatter2D& rmiss = refData(handler.d, handler.x, handler.y); const YODA::Scatter2D& numer = refData(handler.d, handler.x, handler.y + 1); const YODA::Scatter2D& denom = refData(handler.d, handler.x, handler.y + 2); for (size_t i = 0; i < handler.scatter->numPoints(); ++i) { const Point2D& r = rmiss.point(i); // SM Rmiss const Point2D& n = numer.point(i); // SM numerator const Point2D& d = denom.point(i); // SM denominator const HistoBin1D& b = handler.histo->bin(i); // BSM double bsmy; try { bsmy = b.height(); } catch (const Exception&) { // LowStatsError or WeightError bsmy = 0; } double bsmey; try { bsmey = b.heightErr(); } catch (const Exception&) { // 
LowStatsError or WeightError bsmey = 0; } // Combined numerator double sm_plus_bsm = n.y() + bsmy; // Rmiss central value double rmiss_y = safediv(sm_plus_bsm, d.y()); // Ratio error (Rmiss = SM_num/SM_denom + BSM/SM_denom ~ Rmiss_SM + BSM/SM_denom double rmiss_p = sqrt(r.yErrPlus()*r.yErrPlus() + safediv(bsmey*bsmey, d.y()*d.y())); double rmiss_m = sqrt(r.yErrMinus()*r.yErrMinus() + safediv(bsmey*bsmey, d.y()*d.y())); // Set new values Point2D& p = handler.scatter->point(i); // (SM + BSM) Rmiss p.setY(rmiss_y); p.setYErrMinus(rmiss_m); p.setYErrPlus(rmiss_p); } } protected: // Analysis-mode switch size_t _mode; /// Histograms map _h; }; /// ATLAS pTmiss+jets specialisation for Znunu channel class ATLAS_2017_I1609448_Znunu : public ATLAS_2017_I1609448 { public: ATLAS_2017_I1609448_Znunu() : ATLAS_2017_I1609448("ATLAS_2017_I1609448_Znunu") { _mode = 0; } }; /// ATLAS pTmiss+jets specialisation for Zmumu channel class ATLAS_2017_I1609448_Zmumu : public ATLAS_2017_I1609448 { public: ATLAS_2017_I1609448_Zmumu() : ATLAS_2017_I1609448("ATLAS_2017_I1609448_Zmumu") { _mode = 1; } }; /// ATLAS pTmiss+jets specialisation for Zee channel class ATLAS_2017_I1609448_Zee : public ATLAS_2017_I1609448 { public: ATLAS_2017_I1609448_Zee() : ATLAS_2017_I1609448("ATLAS_2017_I1609448_Zee") { _mode = 2; } }; // Hooks for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448); DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448_Znunu); DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448_Zmumu); DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448_Zee); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1614149.cc b/analyses/pluginATLAS/ATLAS_2017_I1614149.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1614149.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1614149.cc @@ -1,351 +1,351 @@ // -*- C++ -* #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "fastjet/tools/Filter.hh" // substructure includes included in fjcontrib-1.021 (http://fastjet.hepforge.org/contrib/) #include "Rivet/Tools/Nsubjettiness/Njettiness.hh" #include "Rivet/Tools/Nsubjettiness/Nsubjettiness.hh" #include "Rivet/Tools/Nsubjettiness/NjettinessPlugin.hh" namespace Rivet { class ATLAS_2017_I1614149 : public Analysis { public: /// Constructor ///@brief: Resolved and boosted ttbar l+jets cross sections at 13 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1614149); void init() { // Eta ranges Cut eta_full = (Cuts::abseta < 5.0); Cut lep_cuts = (Cuts::abseta < 2.5) && (Cuts::pT > 25*GeV); // All final state particles FinalState fs(eta_full); IdentifiedFinalState all_photons(fs); all_photons.acceptIdPair(PID::PHOTON); // Get photons to dress leptons IdentifiedFinalState ph_id(fs); ph_id.acceptIdPair(PID::PHOTON); // Projection to find the electrons IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState photons(ph_id); photons.acceptTauDecays(true); addProjection(photons, "photons"); PromptFinalState electrons(el_id); electrons.acceptTauDecays(true); DressedLeptons dressedelectrons(photons, electrons, 0.1, lep_cuts); addProjection(dressedelectrons, "elecs"); DressedLeptons ewdressedelectrons(all_photons, electrons, 0.1, eta_full); // Projection to find the muons IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); 
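// Note on constructRmiss() above: the published observable is the ratio
//   Rmiss = ( SM numerator + BSM contribution ) / SM denominator,
// where the SM numerator, denominator and SM Rmiss come from the reference data and the
// BSM piece is the histogram filled by this run; its uncertainty adds the BSM statistical
// error (scaled by the SM denominator) in quadrature with the SM Rmiss error.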
muons.acceptTauDecays(true); DressedLeptons dressedmuons(photons, muons, 0.1, lep_cuts); addProjection(dressedmuons, "muons"); DressedLeptons ewdressedmuons(all_photons, muons, 0.1, eta_full); // Projection to find MET declare(MissingMomentum(fs), "MET"); // remove prompt neutrinos from jet clustering IdentifiedFinalState nu_id(fs); nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(true); // Jet clustering. VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(ewdressedelectrons); vfs.addVetoOnThisFinalState(ewdressedmuons); vfs.addVetoOnThisFinalState(neutrinos); FastJets jets(vfs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); addProjection(jets, "jets"); // Addition of the large-R jets VetoedFinalState vfs1(fs); vfs1.addVetoOnThisFinalState(neutrinos); FastJets fjets(vfs1, FastJets::ANTIKT, 1.); - fjets.useInvisibles(JetAlg::NO_INVISIBLES); - fjets.useMuons(JetAlg::NO_MUONS); + fjets.useInvisibles(JetAlg::Invisibles::NONE); + fjets.useMuons(JetAlg::Muons::NONE); addProjection(fjets, "fjets"); bookHists("top_pt_res", 15); bookHists("top_absrap_res", 17); bookHists("ttbar_pt_res", 19); bookHists("ttbar_absrap_res", 21); bookHists("ttbar_m_res", 23); bookHists("top_pt_boost", 25); bookHists("top_absrap_boost", 27); } void analyze(const Event& event) { // Get the selected objects, using the projections. vector electrons = apply(event, "elecs").dressedLeptons(); vector muons = apply(event, "muons").dressedLeptons(); const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.5); const PseudoJets& all_fjets = apply(event, "fjets").pseudoJetsByPt(); // get MET const Vector3 met = apply(event, "MET").vectorMPT(); Jets bjets, lightjets; for (const Jet& jet : jets) { bool b_tagged = jet.bTags(Cuts::pT > 5*GeV).size(); if ( b_tagged && bjets.size() < 2) bjets +=jet; else lightjets += jet; } // Implementing large-R jets definition // trim the jets PseudoJets trimmed_fatJets; float Rfilt = 0.2; float pt_fraction_min = 0.05; fastjet::Filter trimmer(fastjet::JetDefinition(fastjet::kt_algorithm, Rfilt), fastjet::SelectorPtFractionMin(pt_fraction_min)); for (PseudoJet pjet : all_fjets) trimmed_fatJets += trimmer(pjet); trimmed_fatJets = fastjet::sorted_by_pt(trimmed_fatJets); PseudoJets trimmed_jets; for (unsigned int i = 0; i < trimmed_fatJets.size(); ++i) { FourMomentum tj_mom = momentum(trimmed_fatJets[i]); if (tj_mom.pt() <= 300*GeV) continue; if (tj_mom.abseta() >= 2.0) continue; trimmed_jets.push_back(trimmed_fatJets[i]); } bool single_electron = (electrons.size() == 1) && (muons.empty()); bool single_muon = (muons.size() == 1) && (electrons.empty()); DressedLepton *lepton = NULL; if (single_electron) lepton = &electrons[0]; else if (single_muon) lepton = &muons[0]; if (!single_electron && !single_muon) vetoEvent; bool pass_resolved = true; bool num_b_tagged_jets = (bjets.size() == 2); if (!num_b_tagged_jets) pass_resolved = false; if (jets.size() < 4) pass_resolved = false; bool pass_boosted = true; int fatJetIndex = -1; bool passTopTag = false; bool passDphi = false; bool passAddJet = false; bool goodLepJet = false; bool lepbtag = false; bool hadbtag=false; vector lepJetIndex; vector jet_farFromHadTopJetCandidate; if (met.mod() < 20*GeV) pass_boosted = false; if (pass_boosted) { double transmass = _mT(lepton->momentum(), met); if (transmass + met.mod() < 60*GeV) pass_boosted = false; } if (pass_boosted) { if (trimmed_jets.size() >= 1) { for (unsigned int j = 0; j 100*GeV && momentum(trimmed_jets.at(j)).pt() > 300*GeV && 
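// Note on the loop entered here: it scans the trimmed large-R jets and tags as the
// hadronic-top candidate the first one passing a cut at 100 GeV (presumably on the
// trimmed-jet mass) together with 300 GeV < pT < 1500 GeV and |eta| < 2.
// The resolved pseudo-top reconstruction further below relies on a helper
// computeneutrinoz(lepton, met) that is not visible in this hunk; a common
// implementation constrains (p_lep + p_nu)^2 = mW^2, solves the resulting quadratic
// for the neutrino pz, keeps the root with the smaller |pz| and, if the discriminant
// is negative, uses only its real part.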
momentum(trimmed_jets.at(j)).pt() < 1500*GeV && fabs(momentum(trimmed_jets.at(j)).eta()) < 2.) { passTopTag = true; fatJetIndex = j; break; } } } } if(!passTopTag && fatJetIndex == -1) pass_boosted = false; if (pass_boosted) { double dPhi_fatjet = deltaPhi(lepton->phi(), momentum(trimmed_jets.at(fatJetIndex)).phi()); double dPhi_fatjet_lep_cut = 1.0; //2.3 if (dPhi_fatjet > dPhi_fatjet_lep_cut ) { passDphi = true; } } if (!passDphi) pass_boosted = false; if (bjets.empty()) pass_boosted = false; if (pass_boosted) { for (unsigned int sj = 0; sj < jets.size(); ++sj) { double dR = deltaR(jets.at(sj).momentum(), momentum(trimmed_jets.at(fatJetIndex))); if(dR > 1.5) { passAddJet = true; jet_farFromHadTopJetCandidate.push_back(sj); } } } if (!passAddJet) pass_boosted = false; if (pass_boosted) { for (int ltj : jet_farFromHadTopJetCandidate) { double dR_jet_lep = deltaR(jets.at(ltj).momentum(), lepton->momentum()); double dR_jet_lep_cut = 2.0;//1.5 if (dR_jet_lep < dR_jet_lep_cut) { lepJetIndex.push_back(ltj); goodLepJet = true; } } } if(!goodLepJet) pass_boosted = false; if (pass_boosted) { for (int lepj : lepJetIndex) { lepbtag = jets.at(lepj).bTags(Cuts::pT > 5*GeV).size(); if (lepbtag) break; } } double dR_fatBjet_cut = 1.0; if (pass_boosted) { for (const Jet& bjet : bjets) { hadbtag |= deltaR(momentum(trimmed_jets.at(fatJetIndex)), bjet) < dR_fatBjet_cut; } } if (!(lepbtag || hadbtag)) pass_boosted = false; FourMomentum pbjet1; //Momentum of bjet1 FourMomentum pbjet2; //Momentum of bjet int Wj1index = -1, Wj2index = -1; if (pass_resolved) { if ( deltaR(bjets[0], *lepton) <= deltaR(bjets[1], *lepton) ) { pbjet1 = bjets[0].momentum(); pbjet2 = bjets[1].momentum(); } else { pbjet1 = bjets[1].momentum(); pbjet2 = bjets[0].momentum(); } double bestWmass = 1000.0*TeV; double mWPDG = 80.399*GeV; for (unsigned int i = 0; i < (lightjets.size() - 1); ++i) { for (unsigned int j = i + 1; j < lightjets.size(); ++j) { double wmass = (lightjets[i].momentum() + lightjets[j].momentum()).mass(); if (fabs(wmass - mWPDG) < fabs(bestWmass - mWPDG)) { bestWmass = wmass; Wj1index = i; Wj2index = j; } } } FourMomentum pjet1 = lightjets[Wj1index].momentum(); FourMomentum pjet2 = lightjets[Wj2index].momentum(); // compute hadronic W boson FourMomentum pWhadron = pjet1 + pjet2; double pz = computeneutrinoz(lepton->momentum(), met); FourMomentum ppseudoneutrino( sqrt(sqr(met.x()) + sqr(met.y()) + sqr(pz)), met.x(), met.y(), pz); //compute leptonic, hadronic, combined pseudo-top FourMomentum ppseudotoplepton = lepton->momentum() + ppseudoneutrino + pbjet1; FourMomentum ppseudotophadron = pbjet2 + pWhadron; FourMomentum pttbar = ppseudotoplepton + ppseudotophadron; fillHists("top_pt_res", ppseudotophadron.pt()/GeV); fillHists("top_absrap_res", ppseudotophadron.absrap()); fillHists("ttbar_pt_res", pttbar.pt()/GeV); fillHists("ttbar_absrap_res", pttbar.absrap()); fillHists("ttbar_m_res", pttbar.mass()/GeV); } if (pass_boosted) {// Boosted selection double hadtop_pt= momentum(trimmed_jets.at(fatJetIndex)).pt() / GeV; double hadtop_absrap= momentum(trimmed_jets.at(fatJetIndex)).absrap(); fillHists("top_pt_boost", hadtop_pt); fillHists("top_absrap_boost", hadtop_absrap); } } void finalize() { // Normalize to cross-section const double sf = (crossSection() / sumOfWeights()); for (HistoMap::value_type& hist : _h) { scale(hist.second, sf); if (hist.first.find("_norm") != string::npos) normalize(hist.second); } } void bookHists(std::string name, unsigned int index) { book(_h[name], index, 1 ,1); book(_h[name + "_norm"], index + 1, 
1 ,1); } void fillHists(std::string name, double value) { _h[name]->fill(value); _h[name + "_norm"]->fill(value); } double _mT(const FourMomentum &l, const Vector3 &met) const { return sqrt(2.0 * l.pT() * met.mod() * (1 - cos(deltaPhi(l, met))) ); } double tau32(const fastjet::PseudoJet &jet, double jet_rad) const { double alpha = 1.0; Nsubjettiness::NormalizedCutoffMeasure normalized_measure(alpha, jet_rad, 1000000); // WTA definition // Nsubjettiness::OnePass_WTA_KT_Axes wta_kt_axes; // as in JetSubStructure recommendations Nsubjettiness::KT_Axes kt_axes; /// NsubjettinessRatio uses the results from Nsubjettiness to calculate the ratio /// tau_N/tau_M, where N and M are specified by the user. The ratio of different tau values /// is often used in analyses, so this class is helpful to streamline code. Nsubjettiness::NsubjettinessRatio tau32_kt(3, 2, kt_axes, normalized_measure); double tau32 = tau32_kt.result(jet); return tau32; } double computeneutrinoz(const FourMomentum& lepton, const Vector3 &met) const { //computing z component of neutrino momentum given lepton and met double pzneutrino; double m_W = 80.399; // in GeV, given in the paper double k = (( sqr( m_W ) - sqr( lepton.mass() ) ) / 2 ) + (lepton.px() * met.x() + lepton.py() * met.y()); double a = sqr ( lepton.E() )- sqr ( lepton.pz() ); double b = -2*k*lepton.pz(); double c = sqr( lepton.E() ) * sqr( met.mod() ) - sqr( k ); double discriminant = sqr(b) - 4 * a * c; double quad[2] = { (- b - sqrt(discriminant)) / (2 * a), (- b + sqrt(discriminant)) / (2 * a) }; //two possible quadratic solns if (discriminant < 0) pzneutrino = - b / (2 * a); //if the discriminant is negative else { //if the discriminant is greater than or equal to zero, take the soln with smallest absolute value double absquad[2]; for (int n=0; n<2; ++n) absquad[n] = fabs(quad[n]); if (absquad[0] < absquad[1]) pzneutrino = quad[0]; else pzneutrino = quad[1]; } return pzneutrino; } private: /// @name Objects that are used by the event selection decisions typedef map HistoMap; HistoMap _h; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1614149); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1632756.cc b/analyses/pluginATLAS/ATLAS_2017_I1632756.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1632756.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1632756.cc @@ -1,182 +1,182 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" namespace Rivet { /// @brief Measurement of prompt isolated photon + b/c-jet + X differential cross-sections class ATLAS_2017_I1632756 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1632756); /// Book histograms and initialise projections before the run void init() { // particles for photon isolation: no muons, no neutrinos declare(VisibleFinalState(Cuts::abspid != PID::MUON), "caloParticles"); // Voronoi eta-phi tessellation with KT jets, for ambient energy density calculation - FastJets fj(FinalState(), FastJets::KT, 0.5, JetAlg::NO_MUONS, JetAlg::NO_INVISIBLES); + FastJets fj(FinalState(), FastJets::KT, 0.5, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); fj.useJetArea(new fastjet::AreaDefinition(fastjet::VoronoiAreaSpec())); declare(fj, "KtJetsD05"); // Leading 
photon LeadingParticlesFinalState photonfs(PromptFinalState(Cuts::abseta < 2.37 && Cuts::pT > 25*GeV)); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // Jets - FastJets jetpro(FinalState(), FastJets::ANTIKT, 0.4, JetAlg::DECAY_MUONS, JetAlg::DECAY_INVISIBLES); + FastJets jetpro(FinalState(), FastJets::ANTIKT, 0.4, JetAlg::Muons::DECAY, JetAlg::Invisibles::DECAY); declare(jetpro, "Jets"); // Heavy hadrons declare(HeavyHadrons(), "HeavyHadrons"); // Book the dsigma/dEt (in eta bins) histograms // d02 and d03 are for photon+b; d04 and d05 are for photon+c for (size_t i = 0; i < _eta_bins.size() - 1; ++i) { if (fuzzyEquals(_eta_bins[i], 1.37)) continue; // This region is not used int offset = i > 1? 1 : 2; book(_h_Et_photonb[i], i + offset, 1, 1); book(_h_Et_photonc[i], i + offset + 2, 1, 1); } } /// Return eta bin for either dsigma/dET histogram (area_eta=false) or energy density correction (area_eta=true) size_t _getEtaBin(double eta_w, bool area_eta) const { const double eta = fabs(eta_w); if (!area_eta) { return binIndex(eta, _eta_bins); } else { return binIndex(eta, _eta_bins_areaoffset); } } /// Perform the per-event analysis void analyze(const Event& event) { // Get the leading photon const Particles& photons = apply(event, "LeadingPhoton").particlesByPt(); if (photons.empty()) vetoEvent; const Particle& leadingPhoton = photons[0]; // Veto events with leading photon in ECAL crack if (inRange(leadingPhoton.abseta(), 1.37, 1.56)) vetoEvent; // Compute isolation energy in cone of radius .4 around photon (all particles except muons, neutrinos and leading photon) FourMomentum mom_in_EtCone; const Particles& fs = apply(event, "caloParticles").particles(); for (const Particle& p : fs) { // increment if inside cone of 0.4 if (deltaR(leadingPhoton, p) < 0.4) mom_in_EtCone += p.momentum(); } // Remove the photon energy from the isolation mom_in_EtCone -= leadingPhoton.momentum(); // Get the area-filtered jet inputs for computing median energy density, etc. vector ptDensity; vector< vector > ptDensities(_eta_bins_areaoffset.size()-1); const FastJets& fast_jets = apply(event, "KtJetsD05"); const auto clust_seq_area = fast_jets.clusterSeqArea(); for (const Jet& jet : fast_jets.jets()) { const double area = clust_seq_area->area(jet); if (area > 10e-4 && jet.abseta() < _eta_bins_areaoffset.back()) ptDensities.at( _getEtaBin(jet.abseta(), true) ).push_back(jet.pT()/area); } // Compute the median energy density, etc. for (size_t b = 0; b < _eta_bins_areaoffset.size() - 1; ++b) { const double ptmedian = (ptDensities[b].size() > 0) ? median(ptDensities[b]) : 0; ptDensity.push_back(ptmedian); } // Compute the isolation energy correction (cone area*energy density) const double etCone_area = PI * sqr(0.4); const double correction = ptDensity[_getEtaBin(leadingPhoton.abseta(), true)] * etCone_area; // Apply isolation cut on area-corrected value // cut is Etiso < 4.8GeV + 4.2E-03 * Et_gamma. 
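// (Worked example of the veto applied just below: a photon with Et_gamma = 100 GeV survives only if its
//  area-corrected isolation is below 4.8 GeV + 0.0042 * 100 GeV = 5.22 GeV.)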
if (mom_in_EtCone.Et() - correction > 4.8*GeV + 0.0042*leadingPhoton.Et()) vetoEvent; // Get the leading jet Jets jets = apply(event, "Jets").jetsByPt(Cuts::pT > 20*GeV); ifilter_discard(jets, deltaRLess(leadingPhoton, 0.4)); if (jets.empty()) vetoEvent; const Jet& leadingJet = jets[0]; // Veto events with leading jet outside |y|<2.5 if (leadingJet.absrap() > 2.5) vetoEvent; // Veto events with leading photon and leading jet with deltaR < 1.0 if (deltaR(leadingPhoton, leadingJet) < 1.0) vetoEvent; // Veto events with leading jet not b-tagged (deltaR match with a B-hadron) nor c-tagged (deltaR match with a C-hadron) const Particles& allBs = apply(event, "HeavyHadrons").bHadrons(5*GeV); bool bTagged = false; for (const Particle& thisB : allBs) { if(deltaR(thisB, leadingJet) < 0.3) { bTagged = true; break; } } bool cTagged = false; if (!bTagged) { const Particles& allCs = apply(event, "HeavyHadrons").cHadrons(5*GeV); for (const Particle& thisC : allCs) { if (deltaR(thisC, leadingJet) < 0.3) { cTagged = true; break; } } if (!cTagged) vetoEvent; } // Fill histograms const size_t eta_bin = _getEtaBin(leadingPhoton.abseta(), false); if (bTagged) _h_Et_photonb[eta_bin]->fill(leadingPhoton.Et()/GeV); if (cTagged) _h_Et_photonc[eta_bin]->fill(leadingPhoton.Et()/GeV); } /// Normalise histograms etc., after the run void finalize() { const double sf = crossSection() / (picobarn * sumOfWeights()); for (size_t i = 0; i < _eta_bins.size() - 1; ++i) { if (fuzzyEquals(_eta_bins[i], 1.37)) continue; // This region is not used scale(_h_Et_photonb[i], sf); scale(_h_Et_photonc[i], sf); } } private: Histo1DPtr _h_Et_photonb[3]; Histo1DPtr _h_Et_photonc[3]; const vector _eta_bins = { 0.00, 1.37, 1.56, 2.37 }; const vector _eta_bins_areaoffset = { 0.0, 1.5, 3.0 }; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1632756); } diff --git a/analyses/pluginCDF/CDF_2000_S4155203.cc b/analyses/pluginCDF/CDF_2000_S4155203.cc --- a/analyses/pluginCDF/CDF_2000_S4155203.cc +++ b/analyses/pluginCDF/CDF_2000_S4155203.cc @@ -1,71 +1,71 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief CDF Run I Z \f$ p_\perp \f$ in Drell-Yan events /// @author Hendrik Hoeth class CDF_2000_S4155203 : public Analysis { public: /// Constructor CDF_2000_S4155203() : Analysis("CDF_2000_S4155203") { } /// @name Analysis methods //@{ void init() { // Set up projections ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, - 66*GeV, 116*GeV, 0.0, ZFinder::NOCLUSTER); + 66*GeV, 116*GeV, 0.0, ZFinder::ClusterPhotons::NONE); declare(zfinder, "ZFinder"); // Book histogram book(_hist_zpt ,1, 1, 1); } /// Do the analysis void analyze(const Event& e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() != 1) { MSG_DEBUG("Num e+ e- pairs found = " << zfinder.bosons().size()); vetoEvent; } FourMomentum pZ = zfinder.bosons()[0].momentum(); if (pZ.mass2() < 0) { MSG_DEBUG("Negative Z mass**2 = " << pZ.mass2()/GeV2 << "!"); vetoEvent; } MSG_DEBUG("Dilepton mass = " << pZ.mass()/GeV << " GeV"); MSG_DEBUG("Dilepton pT = " << pZ.pT()/GeV << " GeV"); _hist_zpt->fill(pZ.pT()/GeV); } void finalize() { scale(_hist_zpt, crossSection()/picobarn/sumOfWeights()); } //@} private: Histo1DPtr _hist_zpt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2000_S4155203); } diff --git a/analyses/pluginCDF/CDF_2009_I856131.cc b/analyses/pluginCDF/CDF_2009_I856131.cc --- a/analyses/pluginCDF/CDF_2009_I856131.cc +++ b/analyses/pluginCDF/CDF_2009_I856131.cc @@ 
-1,86 +1,86 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief CDF Z boson rapidity measurement class CDF_2009_I856131 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor CDF_2009_I856131() : Analysis("CDF_2009_I856131") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections here // this seems to have been corrected completely for all selection cuts, // i.e. eta cuts and pT cuts on leptons. ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, - 66*GeV, 116*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 66*GeV, 116*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); /// Book histograms here book(_h_xs ,1, 1, 1); book(_h_yZ ,2, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() == 1) { _h_yZ->fill(fabs(zfinder.bosons()[0].rapidity())); _h_xs->fill(1960); } else { MSG_DEBUG("no unique lepton pair found."); } } /// Normalise histograms etc., after the run void finalize() { scale(_h_xs, crossSection()/sumOfWeights()); // Data seems to have been normalized for the avg of the two sides // (+ve & -ve rapidity) rather than the sum, hence the 0.5: scale(_h_yZ, 0.5*crossSection()/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_yZ; Histo1DPtr _h_xs; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2009_I856131); } diff --git a/analyses/pluginCDF/CDF_2012_I1124333.cc b/analyses/pluginCDF/CDF_2012_I1124333.cc --- a/analyses/pluginCDF/CDF_2012_I1124333.cc +++ b/analyses/pluginCDF/CDF_2012_I1124333.cc @@ -1,78 +1,78 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @ CDF Run II Z \f$ p_\perp \f$ in Drell-Yan events /// @author Simone Amoroso class CDF_2012_I1124333 : public Analysis { public: /// Constructor CDF_2012_I1124333() : Analysis("CDF_2012_I1124333") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections here - ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, 66*GeV, 116*GeV, 0.0, ZFinder::NOCLUSTER); + ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, 66*GeV, 116*GeV, 0.0, ZFinder::ClusterPhotons::NONE); declare(zfinder, "ZFinder"); /// Book histograms here, e.g.: //book( _hist_z_xs ,1, 1, 1); book(_hist_zpt ,2, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { /// @todo Do the event by event analysis here const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1) { MSG_DEBUG("Num e+ e- pairs found = " << zfinder.bosons().size()); vetoEvent; } const FourMomentum& pZ = zfinder.bosons()[0].momentum(); if (pZ.mass2() < 0) { MSG_DEBUG("Negative Z mass**2 = " << pZ.mass2()/GeV2 << "!"); vetoEvent; } MSG_DEBUG("Dilepton mass = " << pZ.mass()/GeV << " GeV"); _hist_zpt->fill(pZ.pT()); // _hist_z_xs->fill(1); } /// Normalise histograms etc., after the run void finalize() { scale(_hist_zpt, crossSection()/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_zpt; // Histo1DPtr _hist_z_xs; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2012_I1124333); } diff --git 
a/analyses/pluginCMS/CMS_2012_I1107658.cc b/analyses/pluginCMS/CMS_2012_I1107658.cc --- a/analyses/pluginCMS/CMS_2012_I1107658.cc +++ b/analyses/pluginCMS/CMS_2012_I1107658.cc @@ -1,172 +1,172 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// Underlying event activity in the Drell-Yan process at 7 TeV class CMS_2012_I1107658 : public Analysis { public: /// Constructor CMS_2012_I1107658() : Analysis("CMS_2012_I1107658") { } /// Initialization void init() { /// @note Using a bare muon Z (but with a clustering radius!?) Cut cut = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; - ZFinder zfinder(FinalState(), cut, PID::MUON, 4*GeV, 140*GeV, 0.2, ZFinder::NOCLUSTER); + ZFinder zfinder(FinalState(), cut, PID::MUON, 4*GeV, 140*GeV, 0.2, ZFinder::ClusterPhotons::NONE); declare(zfinder, "ZFinder"); ChargedFinalState cfs(-2, 2, 500*MeV); VetoedFinalState nonmuons(cfs); nonmuons.addVetoPairId(PID::MUON); declare(nonmuons, "nonmuons"); book(_h_Nchg_towards_pTmumu ,1, 1, 1); book(_h_Nchg_transverse_pTmumu ,2, 1, 1); book(_h_Nchg_away_pTmumu ,3, 1, 1); book(_h_pTsum_towards_pTmumu ,4, 1, 1); book(_h_pTsum_transverse_pTmumu ,5, 1, 1); book(_h_pTsum_away_pTmumu ,6, 1, 1); book(_h_avgpT_towards_pTmumu ,7, 1, 1); book(_h_avgpT_transverse_pTmumu ,8, 1, 1); book(_h_avgpT_away_pTmumu ,9, 1, 1); book(_h_Nchg_towards_plus_transverse_Mmumu ,10, 1, 1); book(_h_pTsum_towards_plus_transverse_Mmumu ,11, 1, 1); book(_h_avgpT_towards_plus_transverse_Mmumu ,12, 1, 1); book(_h_Nchg_towards_zmass_81_101 ,13, 1, 1); book(_h_Nchg_transverse_zmass_81_101 ,14, 1, 1); book(_h_Nchg_away_zmass_81_101 ,15, 1, 1); book(_h_pT_towards_zmass_81_101 ,16, 1, 1); book(_h_pT_transverse_zmass_81_101 ,17, 1, 1); book(_h_pT_away_zmass_81_101 ,18, 1, 1); book(_h_Nchg_transverse_zpt_5 ,19, 1, 1); book(_h_pT_transverse_zpt_5 ,20, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; double Zpt = zfinder.bosons()[0].pT()/GeV; double Zphi = zfinder.bosons()[0].phi(); double Zmass = zfinder.bosons()[0].mass()/GeV; Particles particles = apply(event, "nonmuons").particles(); int nTowards = 0; int nTransverse = 0; int nAway = 0; double ptSumTowards = 0; double ptSumTransverse = 0; double ptSumAway = 0; for (const Particle& p : particles) { double dphi = fabs(deltaPhi(Zphi, p.phi())); double pT = p.pT(); if ( dphi < M_PI/3 ) { nTowards++; ptSumTowards += pT; if (Zmass > 81. && Zmass < 101.) _h_pT_towards_zmass_81_101->fill(pT, weight); } else if ( dphi < 2.*M_PI/3 ) { nTransverse++; ptSumTransverse += pT; if (Zmass > 81. && Zmass < 101.) _h_pT_transverse_zmass_81_101->fill(pT, weight); if (Zpt < 5.) _h_pT_transverse_zpt_5->fill(pT, weight); } else { nAway++; ptSumAway += pT; if (Zmass > 81. && Zmass < 101.) _h_pT_away_zmass_81_101->fill(pT, weight); } } // Loop over particles const double area = 8./3.*M_PI; if (Zmass > 81. && Zmass < 101.) 
{ _h_Nchg_towards_pTmumu-> fill(Zpt, 1./area * nTowards, weight); _h_Nchg_transverse_pTmumu-> fill(Zpt, 1./area * nTransverse, weight); _h_Nchg_away_pTmumu-> fill(Zpt, 1./area * nAway, weight); _h_pTsum_towards_pTmumu-> fill(Zpt, 1./area * ptSumTowards, weight); _h_pTsum_transverse_pTmumu-> fill(Zpt, 1./area * ptSumTransverse, weight); _h_pTsum_away_pTmumu-> fill(Zpt, 1./area * ptSumAway, weight); if (nTowards > 0) _h_avgpT_towards_pTmumu-> fill(Zpt, ptSumTowards/nTowards, weight); if (nTransverse > 0) _h_avgpT_transverse_pTmumu-> fill(Zpt, ptSumTransverse/nTransverse, weight); if (nAway > 0) _h_avgpT_away_pTmumu-> fill(Zpt, ptSumAway/nAway, weight); _h_Nchg_towards_zmass_81_101-> fill(nTowards, weight); _h_Nchg_transverse_zmass_81_101->fill(nTransverse, weight); _h_Nchg_away_zmass_81_101-> fill(nAway, weight); } if (Zpt < 5.) { _h_Nchg_towards_plus_transverse_Mmumu->fill(Zmass, (nTowards + nTransverse)/(2.*area), weight); _h_pTsum_towards_plus_transverse_Mmumu->fill(Zmass, (ptSumTowards + ptSumTransverse)/(2.*area), weight); if ((nTowards + nTransverse) > 0) _h_avgpT_towards_plus_transverse_Mmumu->fill(Zmass, (ptSumTowards + ptSumTransverse)/(nTowards + nTransverse), weight); _h_Nchg_transverse_zpt_5->fill(nTransverse, weight); } } /// Normalise histograms etc., after the run void finalize() { scale(_h_pT_towards_zmass_81_101, safediv(1, _h_Nchg_towards_zmass_81_101->integral(), 0)); scale(_h_pT_transverse_zmass_81_101, safediv(1, _h_Nchg_transverse_zmass_81_101->integral(), 0)); scale(_h_pT_away_zmass_81_101, safediv(1, _h_Nchg_away_zmass_81_101->integral(), 0)); scale(_h_pT_transverse_zpt_5, safediv(1, _h_Nchg_transverse_zpt_5->integral(), 0)); normalize(_h_Nchg_towards_zmass_81_101); normalize(_h_Nchg_transverse_zmass_81_101); normalize(_h_Nchg_away_zmass_81_101); normalize(_h_Nchg_transverse_zpt_5); } private: /// @name Histogram objects //@{ Profile1DPtr _h_Nchg_towards_pTmumu; Profile1DPtr _h_Nchg_transverse_pTmumu; Profile1DPtr _h_Nchg_away_pTmumu; Profile1DPtr _h_pTsum_towards_pTmumu; Profile1DPtr _h_pTsum_transverse_pTmumu; Profile1DPtr _h_pTsum_away_pTmumu; Profile1DPtr _h_avgpT_towards_pTmumu; Profile1DPtr _h_avgpT_transverse_pTmumu; Profile1DPtr _h_avgpT_away_pTmumu; Profile1DPtr _h_Nchg_towards_plus_transverse_Mmumu; Profile1DPtr _h_pTsum_towards_plus_transverse_Mmumu; Profile1DPtr _h_avgpT_towards_plus_transverse_Mmumu; Histo1DPtr _h_Nchg_towards_zmass_81_101; Histo1DPtr _h_Nchg_transverse_zmass_81_101; Histo1DPtr _h_Nchg_away_zmass_81_101; Histo1DPtr _h_pT_towards_zmass_81_101; Histo1DPtr _h_pT_transverse_zmass_81_101; Histo1DPtr _h_pT_away_zmass_81_101; Histo1DPtr _h_Nchg_transverse_zpt_5; Histo1DPtr _h_pT_transverse_zpt_5; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_I1107658); } diff --git a/analyses/pluginCMS/CMS_2012_I1298807.cc b/analyses/pluginCMS/CMS_2012_I1298807.cc --- a/analyses/pluginCMS/CMS_2012_I1298807.cc +++ b/analyses/pluginCMS/CMS_2012_I1298807.cc @@ -1,196 +1,196 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/MergedFinalState.hh" namespace Rivet { /// Inclusive ZZ production cross section and constraints on anomalous triple gauge couplings class CMS_2012_I1298807 : public Analysis { public: // Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2012_I1298807); /// Initialise projections and histograms void init() { // FinalState 
electrons(Cuts::abseta < 2.5 && Cuts::abspid == PID::ELECTRON); // FinalState muons(Cuts::abseta < 2.4 && Cuts::abspid == PID::MUON); // MergedFinalState leptons(electrons, muons); FinalState leptons((Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.5) || (Cuts::abspid == PID::MUON && Cuts::abseta < 2.4)); declare(leptons, "Leptons"); Cut cut_el = Cuts::abseta < 2.5 && Cuts::pT > 7.0*GeV; Cut cut_mu = Cuts::abseta < 2.4 && Cuts::pT > 5.0*GeV; - ZFinder zeefinder(FinalState(), cut_el, PID::ELECTRON, 60*GeV, 120*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zeefinder(FinalState(), cut_el, PID::ELECTRON, 60*GeV, 120*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zeefinder, "ZeeFinder"); - ZFinder zmmfinder(FinalState(), cut_mu, PID::MUON, 60*GeV, 120*GeV, 0.1, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zmmfinder(FinalState(), cut_mu, PID::MUON, 60*GeV, 120*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zmmfinder, "ZmmFinder"); VetoedFinalState fs_woZmm; fs_woZmm.addVetoOnThisFinalState(zmmfinder); VetoedFinalState fs_woZee; fs_woZee.addVetoOnThisFinalState(zeefinder); - ZFinder zeefinder_woZee(fs_woZee, cut_el, PID::ELECTRON, 60*GeV, 120*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zeefinder_woZee(fs_woZee, cut_el, PID::ELECTRON, 60*GeV, 120*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zeefinder_woZee, "Zeefinder_WoZee"); - ZFinder zmmfinder_woZmm(fs_woZmm, cut_mu, PID::MUON, 60*GeV, 120*GeV, 0.1, ZFinder::CLUSTERNODECAY); + ZFinder zmmfinder_woZmm(fs_woZmm, cut_mu, PID::MUON, 60*GeV, 120*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zmmfinder_woZmm, "Zmmfinder_WoZmm"); // Book histograms book(_hist_pt_l1 , 1, 1, 1); book(_hist_pt_z1 , 1, 1, 2); book(_hist_pt_zz , 1, 1, 3); book(_hist_m_zz , 1, 1, 4); book(_hist_dphi_zz, 1, 1, 5); book(_hist_dR_zz , 1, 1, 6); } // Perform the per-event analysis void analyze(const Event& evt) { // Find leading leptons and apply cuts const Particles& leptons = apply(evt, "Leptons").particlesByPt(); if (leptons.size() < 2) vetoEvent; const double leading_l_pt = leptons[0].pT(); const double second_l_pt = leptons[1].pT(); if (leading_l_pt < 20*GeV || second_l_pt < 10*GeV) vetoEvent; // Find acceptable ZZ combinations and build four-momenta, otherwise veto const ZFinder& zeefinder = applyProjection(evt, "ZeeFinder"); const ZFinder& zeefinder_woZee = applyProjection(evt, "Zeefinder_WoZee"); const ZFinder& zmmfinder = applyProjection(evt, "ZmmFinder"); const ZFinder& zmmfinder_woZmm = applyProjection(evt, "Zmmfinder_WoZmm"); FourMomentum pZ_a, pZ_b, pZ_1, pZ_2; FourMomentum pZZ, Z_a_l1, Z_a_l2, Z_b_l1, Z_b_l2; if (zeefinder.bosons().size() > 0 && zmmfinder.bosons().size() > 0) { pZ_a = zeefinder.bosons()[0]; pZ_b = zmmfinder.bosons()[0]; pZZ = pZ_a + pZ_b; pZ_1 = pZ_a; pZ_2 = pZ_b; Z_a_l1 = zeefinder.constituents()[0]; Z_a_l2 = zeefinder.constituents()[1]; Z_b_l1 = zmmfinder.constituents()[0]; Z_b_l2 = zmmfinder.constituents()[1]; } else if (zeefinder.bosons().size() > 0 && zeefinder_woZee.bosons().size() > 0) { pZ_a = zeefinder.bosons()[0]; pZ_b = zeefinder_woZee.bosons()[0]; pZZ = pZ_a + pZ_b; pZ_1 = pZ_a; pZ_2 = pZ_b; Z_a_l1 = zeefinder.constituents()[0]; Z_a_l2 = zeefinder.constituents()[1]; Z_b_l1 = zeefinder_woZee.constituents()[0]; Z_b_l2 = zeefinder_woZee.constituents()[1]; } else if (zmmfinder.bosons().size() > 0 && zmmfinder_woZmm.bosons().size() > 0) { pZ_a = zmmfinder.bosons()[0]; pZ_b = zmmfinder_woZmm.bosons()[0]; pZZ = pZ_a + pZ_b; pZ_1 = pZ_a; 
pZ_2 = pZ_b; Z_a_l1 = zmmfinder.constituents()[0]; Z_a_l2 = zmmfinder.constituents()[1]; Z_b_l1 = zmmfinder_woZmm.constituents()[0]; Z_b_l2 = zmmfinder_woZmm.constituents()[1]; } else { vetoEvent; } // Set ordered pT variables /// @todo Looks like there should be a nicer way than this double pt_l1 = Z_a_l1.pT(); if (Z_a_l2.pT() > pt_l1) pt_l1 = Z_a_l2.pT(); if (Z_b_l1.pT() > pt_l1) pt_l1 = Z_b_l1.pT(); if (Z_b_l2.pT() > pt_l1) pt_l1 = Z_b_l2.pT(); // Leading Z pT double pt_z1 = pZ_a.pT(); if (pZ_b.pT() > pZ_a.pT()) { pt_z1 = pZ_b.pT(); pZ_1 = pZ_b; pZ_2 = pZ_a; } // Fill histograms const double weight = 1.0; _hist_pt_zz->fill(pZZ.pT()/GeV, weight); _hist_m_zz->fill(pZZ.mass()/GeV, weight); _hist_dphi_zz->fill(deltaPhi(pZ_a, pZ_b), weight); _hist_dR_zz->fill(deltaR(pZ_a, pZ_b, PSEUDORAPIDITY), weight); _hist_pt_z1->fill(pt_z1/GeV, weight); _hist_pt_l1->fill(pt_l1/GeV, weight); } /// Scale histograms /// @note This is all needed to undo bin width factor -- WHY DO PEOPLE USE UNPHYSICAL HISTOGRAMS?!? /// @todo If we introduce a "bar plot" or similar, it'd work better here void finalize() { double sum_height_pt_zz = 0; for (size_t i = 0; i < _hist_pt_zz->numBins(); i++) { _hist_pt_zz->bin(i).scaleW(1. / _hist_pt_zz->bin(i).width()); sum_height_pt_zz += _hist_pt_zz->bin(i).height(); } scale(_hist_pt_zz, 1. / sum_height_pt_zz); double sum_height_m_zz = 0; for (size_t i = 0; i < _hist_m_zz->numBins(); i++) { _hist_m_zz->bin(i).scaleW(1. / _hist_m_zz->bin(i).width()); sum_height_m_zz += _hist_m_zz->bin(i).height(); } scale(_hist_m_zz, 1. / sum_height_m_zz); double sum_height_dphi_zz = 0; for (size_t i = 0; i < _hist_dphi_zz->numBins(); i++) { _hist_dphi_zz->bin(i).scaleW(1. / _hist_dphi_zz->bin(i).width()); sum_height_dphi_zz += _hist_dphi_zz->bin(i).height(); } scale(_hist_dphi_zz, 1. / sum_height_dphi_zz); double sum_height_dR_zz = 0; for (size_t i = 0; i < _hist_dR_zz->numBins(); i++) { _hist_dR_zz->bin(i).scaleW(1. / _hist_dR_zz->bin(i).width()); sum_height_dR_zz += _hist_dR_zz->bin(i).height(); } scale(_hist_dR_zz, 1. / sum_height_dR_zz); double sum_height_pt_z1 = 0; for (size_t i = 0; i < _hist_pt_z1->numBins(); i++) { _hist_pt_z1->bin(i).scaleW(1. / _hist_pt_z1->bin(i).width()); sum_height_pt_z1 += _hist_pt_z1->bin(i).height(); } scale(_hist_pt_z1, 1. / sum_height_pt_z1); double sum_height_pt_l1 = 0; for (size_t i = 0; i < _hist_pt_l1->numBins(); i++) { _hist_pt_l1->bin(i).scaleW(1. / _hist_pt_l1->bin(i).width()); sum_height_pt_l1 += _hist_pt_l1->bin(i).height(); } scale(_hist_pt_l1, 1. / sum_height_pt_l1); } /// Histograms Histo1DPtr _hist_pt_zz, _hist_m_zz, _hist_dphi_zz, _hist_dR_zz, _hist_pt_z1, _hist_pt_l1; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_I1298807); } diff --git a/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc b/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc --- a/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc +++ b/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc @@ -1,193 +1,193 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ZFinder.hh" #include "fastjet/tools/Filter.hh" #include "fastjet/tools/Pruner.hh" namespace Rivet { class CMS_2013_I1224539_WJET : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CMS_2013_I1224539_WJET() : Analysis("CMS_2013_I1224539_WJET"), _filter(fastjet::Filter(fastjet::JetDefinition(fastjet::cambridge_algorithm, 0.3), fastjet::SelectorNHardest(3))), _trimmer(fastjet::Filter(fastjet::JetDefinition(fastjet::kt_algorithm, 0.2), fastjet::SelectorPtFractionMin(0.03))), _pruner(fastjet::Pruner(fastjet::cambridge_algorithm, 0.1, 0.5)) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs(-2.4, 2.4, 0*GeV); declare(fs, "FS"); // Find W's with pT > 120, MET > 50 WFinder wfinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 80*GeV, PID::ELECTRON, 50*GeV, 1000*GeV, 50.0*GeV, - 0.2, WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); + 0.2, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder, "WFinder"); // W+jet jet collections declare(FastJets(wfinder.remainingFinalState(), FastJets::ANTIKT, 0.7), "JetsAK7_wj"); declare(FastJets(wfinder.remainingFinalState(), FastJets::CAM, 0.8), "JetsCA8_wj"); declare(FastJets(wfinder.remainingFinalState(), FastJets::CAM, 1.2), "JetsCA12_wj"); // Histograms /// @note These are 2D histos rendered into slices const int wjetsOffset = 51; for (size_t i = 0; i < N_PT_BINS_vj; ++i) { book(_h_ungroomedJetMass_AK7_wj[i] ,wjetsOffset+i+1+0*N_PT_BINS_vj, 1, 1); book(_h_filteredJetMass_AK7_wj[i] ,wjetsOffset+i+1+1*N_PT_BINS_vj, 1, 1); book(_h_trimmedJetMass_AK7_wj[i] ,wjetsOffset+i+1+2*N_PT_BINS_vj, 1, 1); book(_h_prunedJetMass_AK7_wj[i] ,wjetsOffset+i+1+3*N_PT_BINS_vj, 1, 1); book(_h_prunedJetMass_CA8_wj[i] ,wjetsOffset+i+1+4*N_PT_BINS_vj, 1, 1); if (i > 0) book(_h_filteredJetMass_CA12_wj[i] ,wjetsOffset+i+5*N_PT_BINS_vj, 1, 1); } } bool isBackToBack_wj(const WFinder& wf, const fastjet::PseudoJet& psjet) { const FourMomentum w = wf.bosons()[0]; const FourMomentum l1 = wf.constituentLeptons()[0]; const FourMomentum l2 = wf.constituentNeutrinos()[0]; /// @todo We should make FourMomentum know how to construct itself from a PseudoJet const FourMomentum jmom(psjet.e(), psjet.px(), psjet.py(), psjet.pz()); return (deltaPhi(w, jmom) > 2.0 && deltaR(l1, jmom) > 1.0 && deltaPhi(l2, jmom) > 0.4); } // Find the pT histogram bin index for value pt (in GeV), to hack a 2D histogram equivalent /// @todo Use a YODA axis/finder alg when available size_t findPtBin(double ptJ) { const double ptBins_vj[N_PT_BINS_vj+1] = { 125.0, 150.0, 220.0, 300.0, 450.0 }; for (size_t ibin = 0; ibin < N_PT_BINS_vj; ++ibin) { if (inRange(ptJ, ptBins_vj[ibin], ptBins_vj[ibin+1])) return ibin; } return N_PT_BINS_vj; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Get the W const WFinder& wfinder = apply(event, "WFinder"); if (wfinder.bosons().size() != 1) vetoEvent; const Particle w = wfinder.bosons()[0]; const Particle l = wfinder.constituentLeptons()[0]; // Require a fairly high-pT W and charged lepton if (l.pT() < 80*GeV || w.pT() < 120*GeV) vetoEvent; // Get the pseudojets. 
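// (Sketch of the flow below: each collection keeps only pseudojets with pT > 50 GeV, ordered in pT;
//  the leading survivor must be back-to-back with the W and is then sliced in pT via findPtBin,
//  e.g. a 200 GeV jet lands in bin 1, the 150-220 GeV slice.)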
const PseudoJets psjetsCA8_wj = apply(event, "JetsCA8_wj").pseudoJetsByPt( 50.0*GeV ); const PseudoJets psjetsCA12_wj = apply(event, "JetsCA12_wj").pseudoJetsByPt( 50.0*GeV ); // AK7 jets const PseudoJets psjetsAK7_wj = apply(event, "JetsAK7_wj").pseudoJetsByPt( 50.0*GeV ); if (!psjetsAK7_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsAK7_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj) { fastjet::PseudoJet filtered0 = _filter(j0); fastjet::PseudoJet trimmed0 = _trimmer(j0); fastjet::PseudoJet pruned0 = _pruner(j0); _h_ungroomedJetMass_AK7_wj[njetBin]->fill(j0.m()/GeV, weight); _h_filteredJetMass_AK7_wj[njetBin]->fill(filtered0.m()/GeV, weight); _h_trimmedJetMass_AK7_wj[njetBin]->fill(trimmed0.m()/GeV, weight); _h_prunedJetMass_AK7_wj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA8 jets if (!psjetsCA8_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsCA8_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj) { fastjet::PseudoJet pruned0 = _pruner(j0); _h_prunedJetMass_CA8_wj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA12 jets if (!psjetsCA12_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsCA12_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj&&njetBin>0) { fastjet::PseudoJet filtered0 = _filter(j0); _h_filteredJetMass_CA12_wj[njetBin]->fill( filtered0.m() / GeV, weight); } } } } /// Normalise histograms etc., after the run void finalize() { const double normalizationVal = 1000; for (size_t i = 0; i < N_PT_BINS_vj; ++i) { normalize(_h_ungroomedJetMass_AK7_wj[i], normalizationVal); normalize(_h_filteredJetMass_AK7_wj[i], normalizationVal); normalize(_h_trimmedJetMass_AK7_wj[i], normalizationVal); normalize(_h_prunedJetMass_AK7_wj[i], normalizationVal); normalize(_h_prunedJetMass_CA8_wj[i], normalizationVal); if (i > 0) normalize( _h_filteredJetMass_CA12_wj[i], normalizationVal); } } //@} private: /// @name FastJet grooming tools (configured in constructor init list) //@{ const fastjet::Filter _filter; const fastjet::Filter _trimmer; const fastjet::Pruner _pruner; //@} /// @name Histograms //@{ enum BINS_vj { PT_125_150_vj=0, PT_150_220_vj, PT_220_300_vj, PT_300_450_vj, N_PT_BINS_vj }; Histo1DPtr _h_ungroomedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_trimmedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_CA8_wj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_CA12_wj[N_PT_BINS_vj]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1224539_WJET); } diff --git a/analyses/pluginCMS/CMS_2013_I1224539_ZJET.cc b/analyses/pluginCMS/CMS_2013_I1224539_ZJET.cc --- a/analyses/pluginCMS/CMS_2013_I1224539_ZJET.cc +++ b/analyses/pluginCMS/CMS_2013_I1224539_ZJET.cc @@ -1,202 +1,202 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ZFinder.hh" #include "fastjet/tools/Filter.hh" #include "fastjet/tools/Pruner.hh" namespace Rivet { class CMS_2013_I1224539_ZJET : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CMS_2013_I1224539_ZJET() : Analysis("CMS_2013_I1224539_ZJET"), _filter(fastjet::Filter(fastjet::JetDefinition(fastjet::cambridge_algorithm, 0.3), fastjet::SelectorNHardest(3))), _trimmer(fastjet::Filter(fastjet::JetDefinition(fastjet::kt_algorithm, 0.2), fastjet::SelectorPtFractionMin(0.03))), _pruner(fastjet::Pruner(fastjet::cambridge_algorithm, 0.1, 0.5)) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs(Cuts::abseta < 2.4); declare(fs, "FS"); // Find Zs with pT > 120 GeV ZFinder zfinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 30*GeV, PID::ELECTRON, 80*GeV, 100*GeV, - 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); // Z+jet jet collections declare(FastJets(zfinder.remainingFinalState(), FastJets::ANTIKT, 0.7), "JetsAK7_zj"); declare(FastJets(zfinder.remainingFinalState(), FastJets::CAM, 0.8), "JetsCA8_zj"); declare(FastJets(zfinder.remainingFinalState(), FastJets::CAM, 1.2), "JetsCA12_zj"); // Histograms /// @note These are 2D histos rendered into slices const int zjetsOffset = 28; for (size_t i = 0; i < N_PT_BINS_vj; ++i ) { book(_h_ungroomedJetMass_AK7_zj[i] ,zjetsOffset+i+1+0*N_PT_BINS_vj, 1, 1); book(_h_filteredJetMass_AK7_zj[i] ,zjetsOffset+i+1+1*N_PT_BINS_vj,1,1); book(_h_trimmedJetMass_AK7_zj[i] ,zjetsOffset+i+1+2*N_PT_BINS_vj,1,1); book(_h_prunedJetMass_AK7_zj[i] ,zjetsOffset+i+1+3*N_PT_BINS_vj,1,1); book(_h_prunedJetMass_CA8_zj[i] ,zjetsOffset+i+1+4*N_PT_BINS_vj,1,1); if (i > 0) book(_h_filteredJetMass_CA12_zj[i] ,zjetsOffset+i+5*N_PT_BINS_vj,1,1); } } bool isBackToBack_zj(const ZFinder& zf, const fastjet::PseudoJet& psjet) { const FourMomentum& z = zf.bosons()[0].momentum(); const FourMomentum& l1 = zf.constituents()[0].momentum(); const FourMomentum& l2 = zf.constituents()[1].momentum(); /// @todo We should make FourMomentum know how to construct itself from a PseudoJet const FourMomentum jmom(psjet.e(), psjet.px(), psjet.py(), psjet.pz()); return (deltaPhi(z, jmom) > 2.0 && deltaR(l1, jmom) > 1.0 && deltaR(l2, jmom) > 1.0); } // Find the pT histogram bin index for value pt (in GeV), to hack a 2D histogram equivalent /// @todo Use a YODA axis/finder alg when available size_t findPtBin(double ptJ) { const double ptBins_vj[N_PT_BINS_vj+1] = { 125.0, 150.0, 220.0, 300.0, 450.0 }; for (size_t ibin = 0; ibin < N_PT_BINS_vj; ++ibin) { if (inRange(ptJ, ptBins_vj[ibin], ptBins_vj[ibin+1])) return ibin; } return N_PT_BINS_vj; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Get the Z const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; const Particle& z = zfinder.bosons()[0]; if (z.constituents().size() < 2) { MSG_WARNING("Found a Z with less than 2 constituents."); vetoEvent; } const Particle& l1 = z.constituents()[0]; const Particle& l2 = z.constituents()[1]; MSG_DEBUG(l1.pT() << " " << l2.pT()); assert(&l1 != &l2); // Require a high-pT Z (and constituents) if (l1.pT() < 30*GeV ) vetoEvent; if (l2.pT() < 30*GeV ) vetoEvent; if (z.pT() < 120*GeV) vetoEvent; // AK7 jets const PseudoJets& psjetsAK7_zj = apply(event, "JetsAK7_zj").pseudoJetsByPt(50.0*GeV); if (!psjetsAK7_zj.empty()) { // Get the leading jet and make sure it's back-to-back with the Z const fastjet::PseudoJet& j0 = psjetsAK7_zj[0]; if (isBackToBack_zj(zfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < 
N_PT_BINS_vj) { fastjet::PseudoJet filtered0 = _filter(j0); fastjet::PseudoJet trimmed0 = _trimmer(j0); fastjet::PseudoJet pruned0 = _pruner(j0); _h_ungroomedJetMass_AK7_zj[njetBin]->fill(j0.m()/GeV, weight); _h_filteredJetMass_AK7_zj[njetBin]->fill(filtered0.m()/GeV, weight); _h_trimmedJetMass_AK7_zj[njetBin]->fill(trimmed0.m()/GeV, weight); _h_prunedJetMass_AK7_zj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA8 jets const PseudoJets& psjetsCA8_zj = apply(event, "JetsCA8_zj").pseudoJetsByPt(50.0*GeV); if (!psjetsCA8_zj.empty()) { // Get the leading jet and make sure it's back-to-back with the Z const fastjet::PseudoJet& j0 = psjetsCA8_zj[0]; if (isBackToBack_zj(zfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj) { fastjet::PseudoJet pruned0 = _pruner(j0); _h_prunedJetMass_CA8_zj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA12 jets const PseudoJets& psjetsCA12_zj = apply(event, "JetsCA12_zj").pseudoJetsByPt(50.0*GeV); if (!psjetsCA12_zj.empty()) { // Get the leading jet and make sure it's back-to-back with the Z const fastjet::PseudoJet& j0 = psjetsCA12_zj[0]; if (isBackToBack_zj(zfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin>0 && njetBin < N_PT_BINS_vj) { fastjet::PseudoJet filtered0 = _filter(j0); _h_filteredJetMass_CA12_zj[njetBin]->fill( filtered0.m() / GeV, weight); } } } } /// Normalise histograms etc., after the run void finalize() { const double normalizationVal = 1000; for (size_t i = 0; i < N_PT_BINS_vj; ++i ) { normalize( _h_ungroomedJetMass_AK7_zj[i], normalizationVal); normalize( _h_filteredJetMass_AK7_zj[i], normalizationVal); normalize( _h_trimmedJetMass_AK7_zj[i], normalizationVal); normalize( _h_prunedJetMass_AK7_zj[i], normalizationVal); normalize( _h_prunedJetMass_CA8_zj[i], normalizationVal); if (i > 0) normalize( _h_filteredJetMass_CA12_zj[i], normalizationVal); } } //@} private: /// @name FastJet grooming tools (configured in constructor init list) //@{ const fastjet::Filter _filter; const fastjet::Filter _trimmer; const fastjet::Pruner _pruner; //@} /// @name Histograms //@{ enum BINS_vj { PT_125_150_vj=0, PT_150_220_vj, PT_220_300_vj, PT_300_450_vj, N_PT_BINS_vj }; Histo1DPtr _h_ungroomedJetMass_AK7_zj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_AK7_zj[N_PT_BINS_vj]; Histo1DPtr _h_trimmedJetMass_AK7_zj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_AK7_zj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_CA8_zj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_CA12_zj[N_PT_BINS_vj]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1224539_ZJET); } diff --git a/analyses/pluginCMS/CMS_2013_I1256943.cc b/analyses/pluginCMS/CMS_2013_I1256943.cc --- a/analyses/pluginCMS/CMS_2013_I1256943.cc +++ b/analyses/pluginCMS/CMS_2013_I1256943.cc @@ -1,183 +1,183 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" namespace Rivet { /// CMS cross-section and angular correlations in Z boson + b-hadrons events at 7 TeV class CMS_2013_I1256943 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1256943); /// Add projections and book histograms void init() { book(_sumW, "sumW"); book(_sumW50, "sumW50"); book(_sumWpT, "sumWpT"); FinalState fs(Cuts::abseta < 2.4 && Cuts::pT > 20*GeV); declare(fs, "FS"); UnstableFinalState ufs(Cuts::abseta < 2 && Cuts::pT > 15*GeV); declare(ufs, "UFS"); Cut zetacut = Cuts::abseta < 2.4; - ZFinder zfindermu(fs, 
zetacut, PID::MUON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::NOCLUSTER, ZFinder::TRACK, 91.2*GeV); + ZFinder zfindermu(fs, zetacut, PID::MUON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::YES, 91.2*GeV); declare(zfindermu, "ZFinderMu"); - ZFinder zfinderel(fs, zetacut, PID::ELECTRON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::NOCLUSTER, ZFinder::TRACK, 91.2*GeV); + ZFinder zfinderel(fs, zetacut, PID::ELECTRON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::YES, 91.2*GeV); declare(zfinderel, "ZFinderEl"); // Histograms in non-boosted region of Z pT book(_h_dR_BB ,1, 1, 1); book(_h_dphi_BB ,2, 1, 1); book(_h_min_dR_ZB ,3, 1, 1); book(_h_A_ZBB ,4, 1, 1); // Histograms in boosted region of Z pT (pT > 50 GeV) book(_h_dR_BB_boost ,5, 1, 1); book(_h_dphi_BB_boost ,6, 1, 1); book(_h_min_dR_ZB_boost ,7, 1, 1); book(_h_A_ZBB_boost ,8, 1, 1); book(_h_min_ZpT ,9,1,1); } /// Do the analysis void analyze(const Event& e) { vector Bmom; const UnstableFinalState& ufs = apply(e, "UFS"); const ZFinder& zfindermu = apply(e, "ZFinderMu"); const ZFinder& zfinderel = apply(e, "ZFinderEl"); // Look for a Z --> mu+ mu- event in the final state if (zfindermu.empty() && zfinderel.empty()) vetoEvent; const Particles& z = !zfindermu.empty() ? zfindermu.bosons() : zfinderel.bosons(); const bool is_boosted = ( z[0].pT() > 50*GeV ); // Loop over the unstable particles for (const Particle& p : ufs.particles()) { const PdgId pid = p.pid(); // Look for particles with a bottom quark if (PID::hasBottom(pid)) { bool good_B = false; const GenParticle* pgen = p.genParticle(); const GenVertex* vgen = pgen -> end_vertex(); // Loop over the decay products of each unstable particle, looking for a b-hadron pair /// @todo Avoid HepMC API for (GenVertex::particles_out_const_iterator it = vgen->particles_out_const_begin(); it != vgen->particles_out_const_end(); ++it) { // If the particle produced has a bottom quark do not count it and go to the next loop cycle. if (!( PID::hasBottom( (*it)->pdg_id() ) ) ) { good_B = true; continue; } else { good_B = false; break; } } if (good_B ) Bmom.push_back( p.momentum() ); } else continue; } // If there are more than two B's in the final state veto the event if (Bmom.size() != 2 ) vetoEvent; // Calculate the observables double dphiBB = deltaPhi(Bmom[0], Bmom[1]); double dRBB = deltaR(Bmom[0], Bmom[1]); const FourMomentum& pZ = z[0].momentum(); const bool closest_B = ( deltaR(pZ, Bmom[0]) < deltaR(pZ, Bmom[1]) ); const double mindR_ZB = closest_B ? deltaR(pZ, Bmom[0]) : deltaR(pZ, Bmom[1]); const double maxdR_ZB = closest_B ? 
deltaR(pZ, Bmom[1]) : deltaR(pZ, Bmom[0]); const double AZBB = ( maxdR_ZB - mindR_ZB ) / ( maxdR_ZB + mindR_ZB ); // Fill the histograms in the non-boosted region _h_dphi_BB->fill(dphiBB); _h_dR_BB->fill(dRBB); _h_min_dR_ZB->fill(mindR_ZB); _h_A_ZBB->fill(AZBB); _sumW->fill(); _sumWpT->fill(); // Fill the histograms in the boosted region if (is_boosted) { _sumW50->fill(); _h_dphi_BB_boost->fill(dphiBB); _h_dR_BB_boost->fill(dRBB); _h_min_dR_ZB_boost->fill(mindR_ZB); _h_A_ZBB_boost->fill(AZBB); } // Fill Z pT (cumulative) histogram _h_min_ZpT->fill(0); if (pZ.pT() > 40*GeV ) { _sumWpT->fill(); _h_min_ZpT->fill(40); } if (pZ.pT() > 80*GeV ) { _sumWpT->fill(); _h_min_ZpT->fill(80); } if (pZ.pT() > 120*GeV ) { _sumWpT->fill(); _h_min_ZpT->fill(120); } Bmom.clear(); } /// Finalize void finalize() { // Normalize excluding overflow bins (d'oh) normalize(_h_dR_BB, 0.7*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d01-x01-y01 normalize(_h_dphi_BB, 0.53*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d02-x01-y01 normalize(_h_min_dR_ZB, 0.84*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d03-x01-y01 normalize(_h_A_ZBB, 0.2*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d04-x01-y01 normalize(_h_dR_BB_boost, 0.84*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d05-x01-y01 normalize(_h_dphi_BB_boost, 0.63*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d06-x01-y01 normalize(_h_min_dR_ZB_boost, 1*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d07-x01-y01 normalize(_h_A_ZBB_boost, 0.25*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d08-x01-y01 normalize(_h_min_ZpT, 40*crossSection()*dbl(*_sumWpT)/sumOfWeights(), false); // d09-x01-y01 } private: /// @name Weight counters //@{ CounterPtr _sumW, _sumW50, _sumWpT; //@} /// @name Histograms //@{ Histo1DPtr _h_dphi_BB, _h_dR_BB, _h_min_dR_ZB, _h_A_ZBB; Histo1DPtr _h_dphi_BB_boost, _h_dR_BB_boost, _h_min_dR_ZB_boost, _h_A_ZBB_boost, _h_min_ZpT; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1256943); } diff --git a/analyses/pluginCMS/CMS_2015_I1370682_PARTON.cc b/analyses/pluginCMS/CMS_2015_I1370682_PARTON.cc --- a/analyses/pluginCMS/CMS_2015_I1370682_PARTON.cc +++ b/analyses/pluginCMS/CMS_2015_I1370682_PARTON.cc @@ -1,111 +1,111 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { class CMS_2015_I1370682_PARTON : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2015_I1370682_PARTON); /// Book projections and histograms void init() { - declare(PartonicTops(PartonicTops::E_MU, false), "LeptonicPartonTops"); - declare(PartonicTops(PartonicTops::HADRONIC), "HadronicPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicPartonTops"); book(_hSL_topPt , 15, 1, 1); book(_hSL_topPtTtbarSys, 16, 1, 1); book(_hSL_topY , 17, 1, 1); book(_hSL_ttbarDelPhi , 18, 1, 1); book(_hSL_topPtLead , 19, 1, 1); book(_hSL_topPtSubLead , 20, 1, 1); book(_hSL_ttbarPt , 21, 1, 1); book(_hSL_ttbarY , 22, 1, 1); book(_hSL_ttbarMass , 23, 1, 1); book(_hDL_topPt , 24, 1, 1); book(_hDL_topPtTtbarSys, 25, 1, 1); book(_hDL_topY , 26, 1, 1); book(_hDL_ttbarDelPhi , 27, 1, 1); book(_hDL_topPtLead , 28, 1, 1); book(_hDL_topPtSubLead , 29, 1, 1); book(_hDL_ttbarPt , 30, 1, 1); book(_hDL_ttbarY , 31, 1, 1); book(_hDL_ttbarMass , 32, 1, 1); } void analyze(const Event& event) { // Do the analysis only for the ttbar full 
leptonic or semileptonic channel, without tau decay const Particles leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); const Particles hadronicpartontops = apply(event, "HadronicPartonTops").particlesByPt(); const bool isSemilepton = (leptonicpartontops.size() == 1 && hadronicpartontops.size() == 1); const bool isDilepton = (leptonicpartontops.size() == 2 && hadronicpartontops.size() == 0); if (!isSemilepton && !isDilepton) vetoEvent; // Parton level at full phase space // Fill top quarks defined in the parton level, full phase space const FourMomentum t1P4 = leptonicpartontops[0]; const FourMomentum t2P4 = isSemilepton ? hadronicpartontops[0] : leptonicpartontops[1]; const double t1Pt = t1P4.pT(), t2Pt = t2P4.pT(); const FourMomentum ttbarP4 = t1P4 + t2P4; const FourMomentum t1P4AtCM = LorentzTransform::mkFrameTransformFromBeta(ttbarP4.betaVec()).transform(t1P4); const double dPhi = deltaPhi(t1P4.phi(), t2P4.phi()); const double weight = 1.0; if (isSemilepton) { _hSL_topPt->fill(t1Pt, weight); _hSL_topPt->fill(t2Pt, weight); _hSL_topPtTtbarSys->fill(t1P4AtCM.pT(), weight); _hSL_topY->fill(t1P4.rapidity(), weight); _hSL_topY->fill(t2P4.rapidity(), weight); _hSL_ttbarDelPhi->fill(dPhi, weight); _hSL_topPtLead->fill(std::max(t1Pt, t2Pt), weight); _hSL_topPtSubLead->fill(std::min(t1Pt, t2Pt), weight); _hSL_ttbarPt->fill(ttbarP4.pT(), weight); _hSL_ttbarY->fill(ttbarP4.rapidity(), weight); _hSL_ttbarMass->fill(ttbarP4.mass(), weight); } else { // if (isDilepton) { _hDL_topPt->fill(t1Pt, weight); _hDL_topPt->fill(t2Pt, weight); _hDL_topPtTtbarSys->fill(t1P4AtCM.pT(), weight); _hDL_topY->fill(t1P4.rapidity(), weight); _hDL_topY->fill(t2P4.rapidity(), weight); _hDL_ttbarDelPhi->fill(dPhi, weight); _hDL_topPtLead->fill(std::max(t1Pt, t2Pt), weight); _hDL_topPtSubLead->fill(std::min(t1Pt, t2Pt), weight); _hDL_ttbarPt->fill(ttbarP4.pT(), weight); _hDL_ttbarY->fill(ttbarP4.rapidity(), weight); _hDL_ttbarMass->fill(ttbarP4.mass(), weight); } } void finalize() { normalize({_hSL_topPt, _hSL_topPtTtbarSys, _hSL_topY, _hSL_ttbarDelPhi, _hSL_topPtLead, _hSL_topPtSubLead, _hSL_ttbarPt, _hSL_ttbarY, _hSL_ttbarMass}); normalize({_hDL_topPt, _hDL_topPtTtbarSys, _hDL_topY, _hDL_ttbarDelPhi, _hDL_topPtLead, _hDL_topPtSubLead, _hDL_ttbarPt, _hDL_ttbarY, _hDL_ttbarMass}); } private: /// @name Histograms //@{ Histo1DPtr _hSL_topPt, _hSL_topPtTtbarSys, _hSL_topY, _hSL_ttbarDelPhi, _hSL_topPtLead, _hSL_topPtSubLead, _hSL_ttbarPt, _hSL_ttbarY, _hSL_ttbarMass; Histo1DPtr _hDL_topPt, _hDL_topPtTtbarSys, _hDL_topY, _hDL_ttbarDelPhi, _hDL_topPtLead, _hDL_topPtSubLead, _hDL_ttbarPt, _hDL_ttbarY, _hDL_ttbarMass; //@} }; DECLARE_RIVET_PLUGIN(CMS_2015_I1370682_PARTON); } diff --git a/analyses/pluginCMS/CMS_2015_I1397174.cc b/analyses/pluginCMS/CMS_2015_I1397174.cc --- a/analyses/pluginCMS/CMS_2015_I1397174.cc +++ b/analyses/pluginCMS/CMS_2015_I1397174.cc @@ -1,386 +1,386 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Fully leptonic partonic ttbar analysis class CMS_2015_I1397174 : public Analysis { public: /// Minimal constructor CMS_2015_I1397174() : Analysis("CMS_2015_I1397174") { } /// @name Analysis methods //@{ /// Set up projections and book histograms void init() { // Parton level top quarks - addProjection(PartonicTops(PartonicTops::E_MU, false), "PartonTops"); + addProjection(PartonicTops(PartonicTops::DecayMode::E_MU, false), "PartonTops"); // Find jets 
not related to the top/W decays VetoedFinalState vfs; vfs.addDecayProductsVeto(PID::WPLUSBOSON); vfs.addDecayProductsVeto(PID::WMINUSBOSON); - FastJets fj(vfs, FastJets::ANTIKT, 0.5, JetAlg::ALL_MUONS, JetAlg::ALL_INVISIBLES); + FastJets fj(vfs, FastJets::ANTIKT, 0.5, JetAlg::Muons::ALL, JetAlg::Invisibles::ALL); addProjection(fj, "Jets"); // Book histograms book(_hVis_nJet30_abs , 1, 1, 1); book(_hVis_nJet30 , 2, 1, 1); book(_hVis_nJet60_abs , 3, 1, 1); book(_hVis_nJet60 , 4, 1, 1); book(_hVis_nJet100_abs , 5, 1, 1); book(_hVis_nJet100 , 6, 1, 1); book(_hVis_addJet1Pt_abs , 7, 1, 1); book(_hVis_addJet1Pt , 8, 1, 1); book(_hVis_addJet1Eta_abs , 9, 1, 1); book(_hVis_addJet1Eta ,10, 1, 1); book(_hVis_addJet2Pt_abs ,11, 1, 1); book(_hVis_addJet2Pt ,12, 1, 1); book(_hVis_addJet2Eta_abs ,13, 1, 1); book(_hVis_addJet2Eta ,14, 1, 1); book(_hVis_addJJMass_abs ,15, 1, 1); book(_hVis_addJJMass ,16, 1, 1); book(_hVis_addJJDR_abs ,17, 1, 1); book(_hVis_addJJDR ,18, 1, 1); book(_hVis_addJJHT_abs ,19, 1, 1); book(_hVis_addJJHT ,20, 1, 1); book(_hFull_addJet1Pt_abs ,21, 1, 1); book(_hFull_addJet1Pt ,22, 1, 1); book(_hFull_addJet1Eta_abs ,23, 1, 1); book(_hFull_addJet1Eta ,24, 1, 1); book(_hFull_addJet2Pt_abs ,25, 1, 1); book(_hFull_addJet2Pt ,26, 1, 1); book(_hFull_addJet2Eta_abs ,27, 1, 1); book(_hFull_addJet2Eta ,28, 1, 1); book(_hFull_addJJMass_abs ,29, 1, 1); book(_hFull_addJJMass ,30, 1, 1); book(_hFull_addJJDR_abs ,31, 1, 1); book(_hFull_addJJDR ,32, 1, 1); book(_hFull_addJJHT_abs ,33, 1, 1); book(_hFull_addJJHT ,34, 1, 1); book(_hVis_addBJet1Pt_abs ,35, 1, 1); book(_hVis_addBJet1Pt ,36, 1, 1); book(_hVis_addBJet1Eta_abs ,37, 1, 1); book(_hVis_addBJet1Eta ,38, 1, 1); book(_hVis_addBJet2Pt_abs ,39, 1, 1); book(_hVis_addBJet2Pt ,40, 1, 1); book(_hVis_addBJet2Eta_abs ,41, 1, 1); book(_hVis_addBJet2Eta ,42, 1, 1); book(_hVis_addBBMass_abs ,43, 1, 1); book(_hVis_addBBMass ,44, 1, 1); book(_hVis_addBBDR_abs ,45, 1, 1); book(_hVis_addBBDR ,46, 1, 1); book(_hFull_addBJet1Pt_abs ,47, 1, 1); book(_hFull_addBJet1Pt ,48, 1, 1); book(_hFull_addBJet1Eta_abs ,49, 1, 1); book(_hFull_addBJet1Eta ,50, 1, 1); book(_hFull_addBJet2Pt_abs ,51, 1, 1); book(_hFull_addBJet2Pt ,52, 1, 1); book(_hFull_addBJet2Eta_abs ,53, 1, 1); book(_hFull_addBJet2Eta ,54, 1, 1); book(_hFull_addBBMass_abs ,55, 1, 1); book(_hFull_addBBMass ,56, 1, 1); book(_hFull_addBBDR_abs ,57, 1, 1); book(_hFull_addBBDR ,58, 1, 1); book(_h_gap_addJet1Pt ,59, 1, 1); book(_h_gap_addJet1Pt_eta0 ,60, 1, 1); book(_h_gap_addJet1Pt_eta1 ,61, 1, 1); book(_h_gap_addJet1Pt_eta2 ,62, 1, 1); book(_h_gap_addJet2Pt ,63, 1, 1); book(_h_gap_addJet2Pt_eta0 ,64, 1, 1); book(_h_gap_addJet2Pt_eta1 ,65, 1, 1); book(_h_gap_addJet2Pt_eta2 ,66, 1, 1); book(_h_gap_addJetHT ,67, 1, 1); book(_h_gap_addJetHT_eta0 ,68, 1, 1); book(_h_gap_addJetHT_eta1 ,69, 1, 1); book(_h_gap_addJetHT_eta2 ,70, 1, 1); } void analyze(const Event& event) { // The objects used in the PAPER 12-041 are defined as follows (see p.16 for details): // // * Leptons : from the W boson decays after FSR // * Jets : anti-kT R=0.5 to all stable particles // exclude W->enu, munu, taunu // * B jet : B-Ghost matched // * B from top : B hadron from top->b decay // // Visible phase space definition: // // * Leptons : pT > 20, |eta| < 2.4 // * B jets from top : pT > 30, |eta| < 2.4 // Additional jets : pT > 20, |eta| < 2.4 // * // Full phase space definition: // // * Correction to dilepton BR from W boson BR // * No cut on top decay products // * Additional jets : pT > 20, |eta| < 2.4 // Do the analysis only for the ttbar 
full leptonic channel, removing tau decays const Particles partontops = apply(event, "PartonTops").particlesByPt(); if (partontops.size() != 2) vetoEvent; const Particle& t1 = partontops[0]; const Particle& t2 = partontops[1]; // Apply acceptance cuts on top-decay leptons (existence should be guaranteed) const auto isPromptChLepton = [](const Particle& p){return isChargedLepton(p) && !fromDecay(p);}; const Particle lep1 = t1.allDescendants(lastParticleWith(isPromptChLepton)).front(); const Particle lep2 = t2.allDescendants(lastParticleWith(isPromptChLepton)).front(); if (lep1.pT() < 1e-9*GeV || lep2.pT() < 1e-9*GeV) vetoEvent; // sanity check? const Jets jets = apply(event, "Jets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.4); int nJet30 = 0, nJet60 = 0, nJet100 = 0; Jets topBJets, addJets, addBJets, addJets_eta0, addJets_eta1, addJets_eta2; for (const Jet& jet : jets) { if (jet.pT() > 30*GeV) nJet30 += 1; if (jet.pT() > 60*GeV) nJet60 += 1; if (jet.pT() > 100*GeV) nJet100 += 1; const bool isBtagged = jet.bTagged(); const bool isBFromTop = any(jet.bTags(), hasParticleAncestorWith(Cuts::abspid == PID::TQUARK)); if (isBFromTop) { if (jet.pT() > 30*GeV) topBJets.push_back(jet); } else { addJets.push_back(jet); if (isBtagged) addBJets.push_back(jet); if (jet.abseta() < 0.8 ) addJets_eta0.push_back(jet); else if (jet.abseta() < 1.5 ) addJets_eta1.push_back(jet); else if (jet.abseta() < 2.4 ) addJets_eta2.push_back(jet); } } const bool isVisiblePS = topBJets.size() >= 2 && lep1.pT() > 20*GeV && lep1.abseta() < 2.4 && lep2.pT() > 20*GeV && lep2.abseta() < 2.4; MSG_DEBUG(isVisiblePS << ": #b(top) = " << topBJets.size() << "; l1 = " << lep1.pT() << ", " << lep1.abseta() << "; l2 = " << lep2.pT() << ", " << lep2.abseta()); const double weight = 1.0; if (isVisiblePS) { fillWithOF(_hVis_nJet30_abs, nJet30, weight); fillWithOF(_hVis_nJet30, nJet30, weight); fillWithOF(_hVis_nJet60_abs, nJet60, weight); fillWithOF(_hVis_nJet60, nJet60, weight); fillWithOF(_hVis_nJet100_abs, nJet100, weight); fillWithOF(_hVis_nJet100, nJet100, weight); fillGapFractions(addJets, _h_gap_addJet1Pt, _h_gap_addJet2Pt, _h_gap_addJetHT, weight); fillGapFractions(addJets_eta0, _h_gap_addJet1Pt_eta0, _h_gap_addJet2Pt_eta0, _h_gap_addJetHT_eta0, weight); fillGapFractions(addJets_eta1, _h_gap_addJet1Pt_eta1, _h_gap_addJet2Pt_eta1, _h_gap_addJetHT_eta1, weight); fillGapFractions(addJets_eta2, _h_gap_addJet1Pt_eta2, _h_gap_addJet2Pt_eta2, _h_gap_addJetHT_eta2, weight); } // Plots with two additional jets if (addJets.size() >= 1) { const double ht = sum(addJets, pT, 0.0); _hFull_addJJHT_abs->fill(ht/GeV, weight); _hFull_addJJHT ->fill(ht/GeV, weight); if (isVisiblePS) { _hVis_addJJHT_abs->fill(ht/GeV, weight); _hVis_addJJHT ->fill(ht/GeV, weight); } const Jet& j1 = addJets[0]; _hFull_addJet1Pt_abs ->fill(j1.pT()/GeV, weight); _hFull_addJet1Pt ->fill(j1.pT()/GeV, weight); _hFull_addJet1Eta_abs->fill(j1.abseta(), weight); _hFull_addJet1Eta ->fill(j1.abseta(), weight); if (isVisiblePS) { _hVis_addJet1Pt_abs ->fill(j1.pT()/GeV, weight); _hVis_addJet1Pt ->fill(j1.pT()/GeV, weight); _hVis_addJet1Eta_abs->fill(j1.abseta(), weight); _hVis_addJet1Eta ->fill(j1.abseta(), weight); } if (addJets.size() >= 2) { const Jet& j2 = addJets[1]; _hFull_addJet2Pt_abs ->fill(j2.pT()/GeV, weight); _hFull_addJet2Pt ->fill(j2.pT()/GeV, weight); _hFull_addJet2Eta_abs->fill(j2.abseta(), weight); _hFull_addJet2Eta ->fill(j2.abseta(), weight); if (isVisiblePS) { _hVis_addJet2Pt_abs ->fill(j2.pT()/GeV, weight); _hVis_addJet2Pt ->fill(j2.pT()/GeV, weight); 
_hVis_addJet2Eta_abs->fill(j2.abseta(), weight); _hVis_addJet2Eta ->fill(j2.abseta(), weight); } const double jjmass = (j1.mom() + j2.mom()).mass(); const double jjdR = deltaR(j1, j2); _hFull_addJJMass_abs->fill(jjmass/GeV, weight); _hFull_addJJMass ->fill(jjmass/GeV, weight); _hFull_addJJDR_abs ->fill(jjdR, weight); _hFull_addJJDR ->fill(jjdR, weight); if (isVisiblePS) { _hVis_addJJMass_abs->fill(jjmass/GeV, weight); _hVis_addJJMass ->fill(jjmass/GeV, weight); _hVis_addJJDR_abs ->fill(jjdR, weight); _hVis_addJJDR ->fill(jjdR, weight); } } } // Same set of plots if there are additional b-jets if (addBJets.size() >= 1) { const Jet& b1 = addBJets[0]; _hFull_addBJet1Pt_abs ->fill(b1.pT()/GeV, weight); _hFull_addBJet1Pt ->fill(b1.pT()/GeV, weight); _hFull_addBJet1Eta_abs->fill(b1.abseta(), weight); _hFull_addBJet1Eta ->fill(b1.abseta(), weight); if (isVisiblePS) { _hVis_addBJet1Pt_abs ->fill(b1.pT()/GeV, weight); _hVis_addBJet1Pt ->fill(b1.pT()/GeV, weight); _hVis_addBJet1Eta_abs->fill(b1.abseta(), weight); _hVis_addBJet1Eta ->fill(b1.abseta(), weight); } if (addBJets.size() >= 2) { const Jet& b2 = addBJets[1]; _hFull_addBJet2Pt_abs ->fill(b2.pT()/GeV, weight); _hFull_addBJet2Pt ->fill(b2.pT()/GeV, weight); _hFull_addBJet2Eta_abs->fill(b2.abseta(), weight); _hFull_addBJet2Eta ->fill(b2.abseta(), weight); if (isVisiblePS) { _hVis_addBJet2Pt_abs ->fill(b2.pT()/GeV, weight); _hVis_addBJet2Pt ->fill(b2.pT()/GeV, weight); _hVis_addBJet2Eta_abs->fill(b2.abseta(), weight); _hVis_addBJet2Eta ->fill(b2.abseta(), weight); } const double bbmass = (b1.mom() + b2.mom()).mass(); const double bbdR = deltaR(b1, b2); _hFull_addBBMass_abs->fill(bbmass/GeV, weight); _hFull_addBBMass ->fill(bbmass/GeV, weight); _hFull_addBBDR_abs ->fill(bbdR, weight); _hFull_addBBDR ->fill(bbdR, weight); if (isVisiblePS) { _hVis_addBBMass_abs->fill(bbmass/GeV, weight); _hVis_addBBMass ->fill(bbmass/GeV, weight); _hVis_addBBDR_abs ->fill(bbdR, weight); _hVis_addBBDR ->fill(bbdR, weight); } } } } void finalize() { const double ttbarXS = !std::isnan(crossSectionPerEvent()) ? 
crossSection() : 252.89*picobarn; if (std::isnan(crossSectionPerEvent())) MSG_INFO("No valid cross-section given, using NNLO (arXiv:1303.6254; sqrt(s)=8 TeV, m_t=172.5 GeV): " << ttbarXS/picobarn << " pb"); normalize({_hVis_nJet30,_hVis_nJet60, _hVis_nJet100, _hVis_addJet1Pt, _hVis_addJet1Eta, _hVis_addJet2Pt, _hVis_addJet2Eta, _hVis_addJJMass, _hVis_addJJDR, _hVis_addJJHT, _hFull_addJet1Pt, _hFull_addJet1Eta, _hFull_addJet2Pt, _hFull_addJet2Eta, _hFull_addJJMass, _hFull_addJJDR, _hFull_addJJHT, _hVis_addBJet1Pt, _hVis_addBJet1Eta, _hVis_addBJet2Pt, _hVis_addBJet2Eta, _hVis_addBBMass, _hVis_addBBDR, _hFull_addBJet1Pt, _hFull_addBJet1Eta, _hFull_addBJet2Pt, _hFull_addBJet2Eta, _hFull_addBBMass, _hFull_addBBDR}); const double xsPerWeight = ttbarXS/picobarn / sumOfWeights(); scale({_hVis_nJet30_abs, _hVis_nJet60_abs, _hVis_nJet100_abs, _hVis_addJet1Pt_abs, _hVis_addJet1Eta_abs, _hVis_addJet2Pt_abs, _hVis_addJet2Eta_abs, _hVis_addJJMass_abs, _hVis_addJJDR_abs, _hVis_addJJHT_abs, _hVis_addBJet1Pt_abs, _hVis_addBJet1Eta_abs, _hVis_addBJet2Pt_abs, _hVis_addBJet2Eta_abs, _hVis_addBBMass_abs, _hVis_addBBDR_abs}, xsPerWeight); const double sfull = xsPerWeight / 0.0454; //< correct for dilepton branching fraction scale({_hFull_addJet1Pt_abs, _hFull_addJet1Eta_abs, _hFull_addJet2Pt_abs, _hFull_addJet2Eta_abs, _hFull_addJJMass_abs, _hFull_addJJDR_abs, _hFull_addJJHT_abs, _hFull_addBJet1Pt_abs, _hFull_addBJet1Eta_abs, _hFull_addBJet2Pt_abs, _hFull_addBJet2Eta_abs, _hFull_addBBMass_abs, _hFull_addBBDR_abs}, sfull); } //@} void fillWithOF(Histo1DPtr h, double x, double w) { h->fill(std::min(x, h->xMax()-1e-9), w); } void fillGapFractions(const Jets& addJets, Profile1DPtr h_gap_addJet1Pt, Profile1DPtr h_gap_addJet2Pt, Profile1DPtr h_gap_addJetHT, double weight) { const double j1pt = (addJets.size() > 0) ? addJets[0].pT() : 0; for (size_t i = 0; i < h_gap_addJet1Pt->numBins(); ++i) { const double binCenter = h_gap_addJet1Pt->bin(i).xMid(); h_gap_addJet1Pt->fillBin(i, int(j1pt/GeV < binCenter), weight); } const double j2pt = (addJets.size() > 1) ? 
addJets[1].pT() : 0; for (size_t i = 0; i < h_gap_addJet2Pt->numBins(); ++i) { const double binCenter = h_gap_addJet2Pt->bin(i).xMid(); h_gap_addJet2Pt->fillBin(i, int(j2pt/GeV < binCenter), weight); } const double ht = sum(addJets, pT, 0.); for (size_t i = 0; i < h_gap_addJetHT->numBins(); ++i) { const double binCenter = h_gap_addJetHT->bin(i).xMid(); h_gap_addJetHT->fillBin(i, int(ht/GeV < binCenter) , weight); } } // @name Histogram data members //@{ Histo1DPtr _hVis_nJet30_abs, _hVis_nJet60_abs, _hVis_nJet100_abs; Histo1DPtr _hVis_addJet1Pt_abs, _hVis_addJet1Eta_abs, _hVis_addJet2Pt_abs, _hVis_addJet2Eta_abs; Histo1DPtr _hVis_addJJMass_abs, _hVis_addJJDR_abs, _hVis_addJJHT_abs; Histo1DPtr _hFull_addJet1Pt_abs, _hFull_addJet1Eta_abs, _hFull_addJet2Pt_abs, _hFull_addJet2Eta_abs; Histo1DPtr _hFull_addJJMass_abs, _hFull_addJJDR_abs, _hFull_addJJHT_abs; Histo1DPtr _hVis_addBJet1Pt_abs, _hVis_addBJet1Eta_abs, _hVis_addBJet2Pt_abs, _hVis_addBJet2Eta_abs; Histo1DPtr _hVis_addBBMass_abs, _hVis_addBBDR_abs; Histo1DPtr _hFull_addBJet1Pt_abs, _hFull_addBJet1Eta_abs, _hFull_addBJet2Pt_abs, _hFull_addBJet2Eta_abs; Histo1DPtr _hFull_addBBMass_abs, _hFull_addBBDR_abs; Histo1DPtr _hVis_nJet30, _hVis_nJet60, _hVis_nJet100; Histo1DPtr _hVis_addJet1Pt, _hVis_addJet1Eta, _hVis_addJet2Pt, _hVis_addJet2Eta; Histo1DPtr _hVis_addJJMass, _hVis_addJJDR, _hVis_addJJHT; Histo1DPtr _hFull_addJet1Pt, _hFull_addJet1Eta, _hFull_addJet2Pt, _hFull_addJet2Eta; Histo1DPtr _hFull_addJJMass, _hFull_addJJDR, _hFull_addJJHT; Histo1DPtr _hVis_addBJet1Pt, _hVis_addBJet1Eta, _hVis_addBJet2Pt, _hVis_addBJet2Eta; Histo1DPtr _hVis_addBBMass, _hVis_addBBDR; Histo1DPtr _hFull_addBJet1Pt, _hFull_addBJet1Eta, _hFull_addBJet2Pt, _hFull_addBJet2Eta; Histo1DPtr _hFull_addBBMass, _hFull_addBBDR; Profile1DPtr _h_gap_addJet1Pt, _h_gap_addJet1Pt_eta0, _h_gap_addJet1Pt_eta1, _h_gap_addJet1Pt_eta2; Profile1DPtr _h_gap_addJet2Pt, _h_gap_addJet2Pt_eta0, _h_gap_addJet2Pt_eta1, _h_gap_addJet2Pt_eta2; Profile1DPtr _h_gap_addJetHT, _h_gap_addJetHT_eta0, _h_gap_addJetHT_eta1, _h_gap_addJetHT_eta2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2015_I1397174); } diff --git a/analyses/pluginCMS/CMS_2016_I1413748.cc b/analyses/pluginCMS/CMS_2016_I1413748.cc --- a/analyses/pluginCMS/CMS_2016_I1413748.cc +++ b/analyses/pluginCMS/CMS_2016_I1413748.cc @@ -1,328 +1,328 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// CMS 8 TeV dilepton channel ttbar spin correlations and polarisation analysis class CMS_2016_I1413748 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1413748); /// Book histograms and initialise projections void init() { // Complete final state FinalState fs(-MAXDOUBLE, MAXDOUBLE, 0*GeV); // Projection for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); addProjection(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); addProjection(dressed_electrons, "DressedElectrons"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); addProjection(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); 
addProjection(dressed_muons, "DressedMuons"); // Parton-level top quarks - declare(PartonicTops(PartonicTops::E_MU, false), "LeptonicPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); // Booking of histograms // This histogram is independent of the parton-level information, and is an addition to the original analysis. // It is compared to the same data as the parton-level delta_phi histogram d02-x01-y01. book(_h_dphidressedleptons, "d00-x01-y01", _bins_dphi); // The remaining histos use parton-level information book(_h_dphi, "d02-x01-y01", _bins_dphi); book(_h_cos_opening_angle, "d05-x01-y01", _bins_cos_opening_angle); book(_h_c1c2, "d08-x01-y01", _bins_c1c2); book(_h_lep_costheta, "d11-x01-y01", _bins_lep_costheta); book(_h_lep_costheta_CPV, "d14-x01-y01", _bins_lep_costheta_CPV); // 2D histos book(_h_dphi_var[0], "d20-x01-y01", _bins_dphi, _bins_tt_mass); book(_h_cos_opening_angle_var[0], "d26-x01-y01", _bins_cos_opening_angle, _bins_tt_mass); book(_h_c1c2_var[0], "d32-x01-y01", _bins_c1c2, _bins_tt_mass); book(_h_lep_costheta_var[0], "d38-x01-y01", _bins_lep_costheta, _bins_tt_mass); book(_h_lep_costheta_CPV_var[0], "d44-x01-y01", _bins_lep_costheta_CPV, _bins_tt_mass); book(_h_dphi_var[1], "d50-x01-y01", _bins_dphi, _bins_tt_pT); book(_h_cos_opening_angle_var[1], "d56-x01-y01", _bins_cos_opening_angle, _bins_tt_pT); book(_h_c1c2_var[1], "d62-x01-y01", _bins_c1c2, _bins_tt_pT); book(_h_lep_costheta_var[1], "d68-x01-y01", _bins_lep_costheta, _bins_tt_pT); book(_h_lep_costheta_CPV_var[1], "d74-x01-y01", _bins_lep_costheta_CPV, _bins_tt_pT); book(_h_dphi_var[2], "d80-x01-y01", _bins_dphi, _bins_tt_absrapidity); book(_h_cos_opening_angle_var[2], "d86-x01-y01", _bins_cos_opening_angle, _bins_tt_absrapidity); book(_h_c1c2_var[2], "d92-x01-y01", _bins_c1c2, _bins_tt_absrapidity); book(_h_lep_costheta_var[2], "d98-x01-y01", _bins_lep_costheta, _bins_tt_absrapidity); book(_h_lep_costheta_CPV_var[2], "d104-x01-y01", _bins_lep_costheta_CPV, _bins_tt_absrapidity); // Profile histos for asymmetries book(_h_dphi_profile[0], "d17-x01-y01", _bins_tt_mass); book(_h_cos_opening_angle_profile[0], "d23-x01-y01", _bins_tt_mass); book(_h_c1c2_profile[0], "d29-x01-y01", _bins_tt_mass); book(_h_lep_costheta_profile[0], "d35-x01-y01", _bins_tt_mass); book(_h_lep_costheta_CPV_profile[0], "d41-x01-y01", _bins_tt_mass); book(_h_dphi_profile[1], "d47-x01-y01", _bins_tt_pT); book(_h_cos_opening_angle_profile[1], "d53-x01-y01", _bins_tt_pT); book(_h_c1c2_profile[1], "d59-x01-y01", _bins_tt_pT); book(_h_lep_costheta_profile[1], "d65-x01-y01", _bins_tt_pT); book(_h_lep_costheta_CPV_profile[1], "d71-x01-y01", _bins_tt_pT); book(_h_dphi_profile[2], "d77-x01-y01", _bins_tt_absrapidity); book(_h_cos_opening_angle_profile[2], "d83-x01-y01", _bins_tt_absrapidity); book(_h_c1c2_profile[2], "d89-x01-y01", _bins_tt_absrapidity); book(_h_lep_costheta_profile[2], "d95-x01-y01", _bins_tt_absrapidity); book(_h_lep_costheta_CPV_profile[2], "d101-x01-y01", _bins_tt_absrapidity); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Use particle-level leptons for the first histogram const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); const vector dressedels = dressed_electrons.dressedLeptons(); const vector dressedmus = dressed_muons.dressedLeptons(); const size_t ndressedel = dressedels.size(); const size_t ndressedmu = 
dressedmus.size(); // For the particle-level histogram, require exactly one electron and exactly one muon, to select // the ttbar->emu channel. Note this means ttbar->emu events with additional PromptFinalState // dilepton pairs from the shower are vetoed - for PYTHIA8, this affects ~0.5% of events, so the // effect is well below the level of sensitivity of the measured distribution. if ( ndressedel == 1 && ndressedmu == 1 ) { const int electrontouse = 0, muontouse = 0; // Opposite-charge leptons only if ( sameSign(dressedels[electrontouse],dressedmus[muontouse]) ) { MSG_INFO("Error, e and mu have same charge, skipping event"); } else { //Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = dressedels[electrontouse].charge() > 0 ? dressedels[electrontouse] : dressedmus[muontouse]; FourMomentum lepMinus = dressedels[electrontouse].charge() > 0 ? dressedmus[muontouse] : dressedels[electrontouse]; // Now calculate the variable double dphi_temp = deltaPhi(lepPlus,lepMinus); fillWithUFOF( _h_dphidressedleptons, dphi_temp, weight ); } } // The remaining variables use parton-level information. // Get the leptonically decaying tops const Particles& leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); Particles chargedleptons; unsigned int ntrueleptonictops = 0; bool oppositesign = false; if ( leptonicpartontops.size() == 2 ) { for (size_t k = 0; k < leptonicpartontops.size(); ++k) { // Get the lepton const Particle lepTop = leptonicpartontops[k]; const auto isPromptChargedLepton = [](const Particle& p){return (isChargedLepton(p) && isPrompt(p, false, false));}; Particles lepton_candidates = lepTop.allDescendants(firstParticleWith(isPromptChargedLepton), false); - if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::E_MU top quark had no daughter lepton candidate, skipping event."); + if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::DecayMode::E_MU top quark had no daughter lepton candidate, skipping event."); // In some cases there is no lepton from the W decay but only leptons from the decay of a radiated gamma. - // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::E_MU (as of April 2017), and need to be rejected. - // PartonicTops::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. + // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::DecayMode::E_MU (as of April 2017), and need to be rejected. + // PartonicTops::DecayMode::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. 
/// @todo Should no longer be necessary -- remove bool istrueleptonictop = false; for (size_t i = 0; i < lepton_candidates.size(); ++i) { const Particle& lepton_candidate = lepton_candidates[i]; if ( lepton_candidate.hasParent(PID::PHOTON) ) { MSG_DEBUG("Found gamma parent, top: " << k+1 << " of " << leptonicpartontops.size() << " , lepton: " << i+1 << " of " << lepton_candidates.size()); continue; } if ( !istrueleptonictop && sameSign(lepTop,lepton_candidate) ) { chargedleptons.push_back(lepton_candidate); istrueleptonictop = true; } else MSG_WARNING("Found extra prompt charged lepton from top decay (and without gamma parent), ignoring it."); } if ( istrueleptonictop ) ++ntrueleptonictops; } } if ( ntrueleptonictops == 2 ) { oppositesign = !( sameSign(chargedleptons[0],chargedleptons[1]) ); if ( !oppositesign ) MSG_WARNING("error, same charge tops, skipping event."); } if ( ntrueleptonictops == 2 && oppositesign ) { // Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = chargedleptons[0].charge() > 0 ? chargedleptons[0] : chargedleptons[1]; FourMomentum lepMinus = chargedleptons[0].charge() > 0 ? chargedleptons[1] : chargedleptons[0]; const double dphi_temp = deltaPhi(lepPlus,lepMinus); // Get the four-momenta of the positively- and negatively-charged tops FourMomentum topPlus_p4 = leptonicpartontops[0].pdgId() > 0 ? leptonicpartontops[0] : leptonicpartontops[1]; FourMomentum topMinus_p4 = leptonicpartontops[0].pdgId() > 0 ? leptonicpartontops[1] : leptonicpartontops[0]; const FourMomentum ttbar_p4 = topPlus_p4 + topMinus_p4; const double tt_mass_temp = ttbar_p4.mass(); const double tt_absrapidity_temp = ttbar_p4.absrapidity(); const double tt_pT_temp = ttbar_p4.pT(); // Lorentz transformations to calculate the spin observables in the helicity basis // Transform everything to the ttbar CM frame LorentzTransform ttCM; ttCM.setBetaVec(-ttbar_p4.boostVector()); topPlus_p4 = ttCM.transform(topPlus_p4); topMinus_p4 = ttCM.transform(topMinus_p4); lepPlus = ttCM.transform(lepPlus); lepMinus = ttCM.transform(lepMinus); // Now boost the leptons to their parent top CM frames LorentzTransform topPlus, topMinus; topPlus.setBetaVec(-topPlus_p4.boostVector()); topMinus.setBetaVec(-topMinus_p4.boostVector()); lepPlus = topPlus.transform(lepPlus); lepMinus = topMinus.transform(lepMinus); const double lepPlus_costheta_temp = lepPlus.vector3().dot(topPlus_p4.vector3()) / (lepPlus.vector3().mod() * topPlus_p4.vector3().mod()); const double lepMinus_costheta_temp = lepMinus.vector3().dot(topMinus_p4.vector3()) / (lepMinus.vector3().mod() * topMinus_p4.vector3().mod()); const double c1c2_temp = lepPlus_costheta_temp * lepMinus_costheta_temp; const double cos_opening_angle_temp = lepPlus.vector3().dot(lepMinus.vector3()) / (lepPlus.vector3().mod() * lepMinus.vector3().mod()); // Fill parton-level histos fillWithUFOF( _h_dphi, dphi_temp, weight ); fillWithUFOF( _h_cos_opening_angle, cos_opening_angle_temp, weight ); fillWithUFOF( _h_c1c2, c1c2_temp, weight ); fillWithUFOF( _h_lep_costheta, lepPlus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta, lepMinus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta_CPV, lepPlus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta_CPV, -lepMinus_costheta_temp, weight ); // Now fill the same variables in the 2D and profile histos vs ttbar invariant mass, pT, and absolute rapidity for (int i_var = 0; i_var < 3; ++i_var) { double var; if ( i_var == 0 ) { var = tt_mass_temp; } else if ( i_var == 1 ) { var = tt_pT_temp; } 
else { var = tt_absrapidity_temp; } fillWithUFOF( _h_dphi_var[i_var], dphi_temp, var, weight ); fillWithUFOF( _h_cos_opening_angle_var[i_var], cos_opening_angle_temp, var, weight ); fillWithUFOF( _h_c1c2_var[i_var], c1c2_temp, var, weight ); fillWithUFOF( _h_lep_costheta_var[i_var], lepPlus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_var[i_var], lepMinus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_CPV_var[i_var], lepPlus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_CPV_var[i_var], -lepMinus_costheta_temp, var, weight ); fillWithUFOF( _h_dphi_profile[i_var], dphi_temp, var, weight, (_h_dphi->xMax() + _h_dphi->xMin())/2. ); fillWithUFOF( _h_cos_opening_angle_profile[i_var], cos_opening_angle_temp, var, weight, (_h_cos_opening_angle->xMax() + _h_cos_opening_angle->xMin())/2. ); fillWithUFOF( _h_c1c2_profile[i_var], c1c2_temp, var, weight, (_h_c1c2->xMax() + _h_c1c2->xMin())/2. ); fillWithUFOF( _h_lep_costheta_profile[i_var], lepPlus_costheta_temp, var, weight, (_h_lep_costheta->xMax() + _h_lep_costheta->xMin())/2. ); fillWithUFOF( _h_lep_costheta_profile[i_var], lepMinus_costheta_temp, var, weight, (_h_lep_costheta->xMax() + _h_lep_costheta->xMin())/2. ); fillWithUFOF( _h_lep_costheta_CPV_profile[i_var], lepPlus_costheta_temp, var, weight, (_h_lep_costheta_CPV->xMax() + _h_lep_costheta_CPV->xMin())/2. ); fillWithUFOF( _h_lep_costheta_CPV_profile[i_var], -lepMinus_costheta_temp, var, weight, (_h_lep_costheta_CPV->xMax() + _h_lep_costheta_CPV->xMin())/2. ); } } } /// Normalise histograms to unit area void finalize() { normalize(_h_dphidressedleptons); normalize(_h_dphi); normalize(_h_cos_opening_angle); normalize(_h_c1c2); normalize(_h_lep_costheta); normalize(_h_lep_costheta_CPV); for (int i_var = 0; i_var < 3; ++i_var) { normalize(_h_dphi_var[i_var]); normalize(_h_cos_opening_angle_var[i_var]); normalize(_h_c1c2_var[i_var]); normalize(_h_lep_costheta_var[i_var]); normalize(_h_lep_costheta_CPV_var[i_var]); } } private: Histo1DPtr _h_dphidressedleptons, _h_dphi, _h_lep_costheta, _h_lep_costheta_CPV, _h_c1c2, _h_cos_opening_angle; Histo2DPtr _h_dphi_var[3], _h_lep_costheta_var[3], _h_lep_costheta_CPV_var[3], _h_c1c2_var[3], _h_cos_opening_angle_var[3]; Profile1DPtr _h_dphi_profile[3], _h_lep_costheta_profile[3], _h_lep_costheta_CPV_profile[3], _h_c1c2_profile[3], _h_cos_opening_angle_profile[3]; const vector _bins_tt_mass = {300., 430., 530., 1200.}; const vector _bins_tt_pT = {0., 41., 92., 300.}; const vector _bins_tt_absrapidity = {0., 0.34, 0.75, 1.5}; const vector _bins_dphi = {0., 5.*M_PI/60., 10.*M_PI/60., 15.*M_PI/60., 20.*M_PI/60., 25.*M_PI/60., 30.*M_PI/60., 35.*M_PI/60., 40.*M_PI/60., 45.*M_PI/60., 50.*M_PI/60., 55.*M_PI/60., M_PI}; const vector _bins_lep_costheta = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; const vector _bins_lep_costheta_CPV = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; const vector _bins_c1c2 = {-1., -0.4, -10./60., 0., 10./60., 0.4, 1.}; const vector _bins_cos_opening_angle = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; void fillWithUFOF(Histo1DPtr h, double x, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), w); } void fillWithUFOF(Histo2DPtr h, double x, double y, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), std::max(std::min(y, h->yMax()-1e-9),h->yMin()+1e-9), w); } void fillWithUFOF(Profile1DPtr h, double x, double y, double w, double c) { h->fill(std::max(std::min(y, h->xMax()-1e-9),h->xMin()+1e-9), float(x > c) - float(x < c), w); } }; // The hook for the 
plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1413748); } diff --git a/analyses/pluginCMS/CMS_2016_I1430892.cc b/analyses/pluginCMS/CMS_2016_I1430892.cc --- a/analyses/pluginCMS/CMS_2016_I1430892.cc +++ b/analyses/pluginCMS/CMS_2016_I1430892.cc @@ -1,259 +1,259 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// CMS 8 TeV dilepton channel ttbar charge asymmetry analysis class CMS_2016_I1430892 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1430892); /// Book histograms and initialise projections void init() { // Complete final state FinalState fs(-MAXDOUBLE, MAXDOUBLE, 0*GeV); // Projection for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); addProjection(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); addProjection(dressed_electrons, "DressedElectrons"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); addProjection(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); addProjection(dressed_muons, "DressedMuons"); // Parton-level top quarks - declare(PartonicTops(PartonicTops::E_MU, false), "LeptonicPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); // Booking of histograms // This histogram is independent of the parton-level information, and is an // addition to the original analysis. It is compared to the same data as // the parton-level delta_abseta histogram d05-x01-y01. 
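// All distributions below are booked with the explicit bin-edge vectors (_bins_dabseta, _bins_dabsrapidity, _bins_tt_*) defined at the end of this class, so the particle-level and parton-level histograms share identical binning.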
book(_h_dabsetadressedleptons, "d00-x01-y01", _bins_dabseta); // The remaining histos use parton-level information book(_h_dabseta, "d05-x01-y01", _bins_dabseta); book(_h_dabsrapidity, "d02-x01-y01", _bins_dabsrapidity); // 2D histos book(_h_dabsrapidity_var[0], "d11-x01-y01", _bins_dabsrapidity, _bins_tt_mass); book(_h_dabseta_var[0], "d17-x01-y01", _bins_dabseta, _bins_tt_mass); book(_h_dabsrapidity_var[1], "d23-x01-y01", _bins_dabsrapidity, _bins_tt_pT); book(_h_dabseta_var[1], "d29-x01-y01", _bins_dabseta, _bins_tt_pT); book(_h_dabsrapidity_var[2], "d35-x01-y01", _bins_dabsrapidity, _bins_tt_absrapidity); book(_h_dabseta_var[2], "d41-x01-y01", _bins_dabseta, _bins_tt_absrapidity); // Profile histos for asymmetries book(_h_dabsrapidity_profile[0], "d08-x01-y01", _bins_tt_mass); book(_h_dabseta_profile[0], "d14-x01-y01", _bins_tt_mass); book(_h_dabsrapidity_profile[1], "d20-x01-y01", _bins_tt_pT); book(_h_dabseta_profile[1], "d26-x01-y01", _bins_tt_pT); book(_h_dabsrapidity_profile[2], "d32-x01-y01", _bins_tt_absrapidity); book(_h_dabseta_profile[2], "d38-x01-y01", _bins_tt_absrapidity); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Use particle-level leptons for the first histogram const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); const vector dressedels = dressed_electrons.dressedLeptons(); const vector dressedmus = dressed_muons.dressedLeptons(); const size_t ndressedel = dressedels.size(); const size_t ndressedmu = dressedmus.size(); // For the particle-level histogram, require exactly one electron and exactly one muon, to select the ttbar->emu channel. // Note this means ttbar->emu events with additional PromptFinalState dilepton pairs from the shower are vetoed - for PYTHIA8, // this affects ~0.5% of events, so the effect is well below the level of sensitivity of the measured distribution. if ( ndressedel == 1 && ndressedmu == 1 ) { const int electrontouse = 0, muontouse = 0; // Opposite-charge leptons only if ( sameSign(dressedels[electrontouse], dressedmus[muontouse]) ) { MSG_INFO("Error, e and mu have same charge, skipping event"); } else { // Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = dressedels[electrontouse].charge() > 0 ? dressedels[electrontouse] : dressedmus[muontouse]; FourMomentum lepMinus = dressedels[electrontouse].charge() > 0 ? dressedmus[muontouse] : dressedels[electrontouse]; // Now calculate the variable double dabseta_temp = lepPlus.abseta() - lepMinus.abseta(); fillWithUFOF( _h_dabsetadressedleptons, dabseta_temp, weight ); } } // The remaining variables use parton-level information. 
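// The parton-level selection mirrors the particle-level one: take the two leptonically decaying tops, pick the prompt charged-lepton descendant of each, and require an opposite-sign pair before filling.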
// Get the leptonically decaying tops const Particles leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); Particles chargedleptons; unsigned int ntrueleptonictops = 0; bool oppositesign = false; if ( leptonicpartontops.size() == 2 ) { for (size_t k = 0; k < leptonicpartontops.size(); ++k) { // Get the lepton const Particle lepTop = leptonicpartontops[k]; const auto isPromptChargedLepton = [](const Particle& p){return (isChargedLepton(p) && isPrompt(p, false, false));}; Particles lepton_candidates = lepTop.allDescendants(firstParticleWith(isPromptChargedLepton), false); - if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::E_MU top quark had no daughter lepton candidate, skipping event."); + if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::DecayMode::E_MU top quark had no daughter lepton candidate, skipping event."); // In some cases there is no lepton from the W decay but only leptons from the decay of a radiated gamma. - // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::E_MU (as of April 2017), and need to be rejected. - // PartonicTops::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. + // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::DecayMode::E_MU (as of April 2017), and need to be rejected. + // PartonicTops::DecayMode::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. /// @todo Should no longer be necessary -- remove bool istrueleptonictop = false; for (size_t i = 0; i < lepton_candidates.size(); ++i) { const Particle& lepton_candidate = lepton_candidates[i]; if ( lepton_candidate.hasParent(PID::PHOTON) ) { MSG_DEBUG("Found gamma parent, top: " << k+1 << " of " << leptonicpartontops.size() << " , lepton: " << i+1 << " of " << lepton_candidates.size()); continue; } if ( !istrueleptonictop && sameSign(lepTop,lepton_candidate) ) { chargedleptons.push_back(lepton_candidate); istrueleptonictop = true; } else MSG_WARNING("Error, found extra prompt charged lepton from top decay (and without gamma parent), ignoring it."); } if ( istrueleptonictop ) ++ntrueleptonictops; } } if ( ntrueleptonictops == 2 ) { oppositesign = !( sameSign(chargedleptons[0],chargedleptons[1]) ); if ( !oppositesign ) MSG_WARNING("error, same charge tops, skipping event."); } if ( ntrueleptonictops == 2 && oppositesign ) { // Get the four-momenta of the positively- and negatively-charged leptons const FourMomentum lepPlus = chargedleptons[0].charge() > 0 ? chargedleptons[0] : chargedleptons[1]; const FourMomentum lepMinus = chargedleptons[0].charge() > 0 ? chargedleptons[1] : chargedleptons[0]; const double dabseta_temp = lepPlus.abseta() - lepMinus.abseta(); // Get the four-momenta of the positively- and negatively-charged tops const FourMomentum topPlus_p4 = leptonicpartontops[0].pdgId() > 0 ? leptonicpartontops[0] : leptonicpartontops[1]; const FourMomentum topMinus_p4 = leptonicpartontops[0].pdgId() > 0 ? 
leptonicpartontops[1] : leptonicpartontops[0]; const FourMomentum ttbar_p4 = topPlus_p4 + topMinus_p4; const double tt_mass_temp = ttbar_p4.mass(); const double tt_absrapidity_temp = ttbar_p4.absrapidity(); const double tt_pT_temp = ttbar_p4.pT(); const double dabsrapidity_temp = topPlus_p4.absrapidity() - topMinus_p4.absrapidity(); // Fill parton-level histos fillWithUFOF( _h_dabseta, dabseta_temp, weight ); fillWithUFOF( _h_dabsrapidity, dabsrapidity_temp, weight ); // Now fill the same variables in the 2D and profile histos vs ttbar invariant mass, pT, and absolute rapidity for (int i_var = 0; i_var < 3; ++i_var) { double var; if ( i_var == 0 ) { var = tt_mass_temp; } else if ( i_var == 1 ) { var = tt_pT_temp; } else { var = tt_absrapidity_temp; } fillWithUFOF( _h_dabsrapidity_var[i_var], dabsrapidity_temp, var, weight ); fillWithUFOF( _h_dabseta_var[i_var], dabseta_temp, var, weight ); fillWithUFOF( _h_dabsrapidity_profile[i_var], dabsrapidity_temp, var, weight, (_h_dabsrapidity->xMax() + _h_dabsrapidity->xMin())/2. ); fillWithUFOF( _h_dabseta_profile[i_var], dabseta_temp, var, weight, (_h_dabseta->xMax() + _h_dabseta->xMin())/2. ); } } } /// Normalise histograms to unit area void finalize() { normalize(_h_dabsetadressedleptons); normalize(_h_dabseta); normalize(_h_dabsrapidity); for (int i_var = 0; i_var < 3; ++i_var) { normalize(_h_dabsrapidity_var[i_var]); normalize(_h_dabseta_var[i_var]); } } private: Histo1DPtr _h_dabsetadressedleptons, _h_dabseta, _h_dabsrapidity; Histo2DPtr _h_dabseta_var[3], _h_dabsrapidity_var[3]; Profile1DPtr _h_dabseta_profile[3], _h_dabsrapidity_profile[3]; const vector _bins_tt_mass = {300., 430., 530., 1200.}; const vector _bins_tt_pT = {0., 41., 92., 300.}; const vector _bins_tt_absrapidity = {0., 0.34, 0.75, 1.5}; const vector _bins_dabseta = { -2., -68./60., -48./60., -32./60., -20./60., -8./60., 0., 8./60., 20./60., 32./60., 48./60., 68./60., 2.}; const vector _bins_dabsrapidity = {-2., -44./60., -20./60., 0., 20./60., 44./60., 2.}; void fillWithUFOF(Histo1DPtr h, double x, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), w); } void fillWithUFOF(Histo2DPtr h, double x, double y, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), std::max(std::min(y, h->yMax()-1e-9),h->yMin()+1e-9), w); } void fillWithUFOF(Profile1DPtr h, double x, double y, double w, double c) { h->fill(std::max(std::min(y, h->xMax()-1e-9),h->xMin()+1e-9), float(x > c) - float(x < c), w); } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1430892); } diff --git a/analyses/pluginCMS/CMS_2016_I1454211.cc b/analyses/pluginCMS/CMS_2016_I1454211.cc --- a/analyses/pluginCMS/CMS_2016_I1454211.cc +++ b/analyses/pluginCMS/CMS_2016_I1454211.cc @@ -1,275 +1,275 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// Boosted ttbar in pp collisions at sqrtS = 8 TeV /// @todo Use persistent weight counters class CMS_2016_I1454211 : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1454211); // Set up projections and book histograms void init() { // Complete final state FinalState fs; // Partonic tops - 
declare(PartonicTops(PartonicTops::ELECTRON, false), "ElectronPartonTops"); - declare(PartonicTops(PartonicTops::MUON, false), "MuonPartonTops"); - declare(PartonicTops(PartonicTops::HADRONIC), "HadronicPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::ELECTRON, false), "ElectronPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::MUON, false), "MuonPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicPartonTops"); // Projection for electrons and muons IdentifiedFinalState photons(fs, PID::PHOTON); const Cut leptonCuts = Cuts::pt > 45*GeV && Cuts::abseta < 2.1; IdentifiedFinalState el_id(fs, {{PID::ELECTRON, -PID::ELECTRON}}); PromptFinalState electrons(el_id); DressedLeptons dressed_electrons(photons, electrons, 0.1, leptonCuts); declare(dressed_electrons, "DressedElectrons"); IdentifiedFinalState mu_id(fs, {{PID::MUON, -PID::MUON}}); PromptFinalState muons(mu_id); DressedLeptons dressed_muons(photons, muons, 0.1, leptonCuts); declare(dressed_muons, "DressedMuons"); // Projection for jets VetoedFinalState fs_jets(fs); fs_jets.addVetoOnThisFinalState(dressed_muons); fs_jets.addVetoOnThisFinalState(dressed_electrons); fs_jets.vetoNeutrinos(); declare(FastJets(fs_jets, FastJets::ANTIKT, 0.5), "ak5jets"); declare(FastJets(fs_jets, FastJets::CAM, 0.8), "ca8jets"); book(_hEl_topPt_parton , "d01-x01-y01"); // dsigma/dpt(top quark), el ch book(_hEl_topPt_particle , "d02-x01-y01"); // dsigma/dpt(top jet), el ch book(_hEl_topY_parton , "d03-x01-y01"); // dsigma/dy(top quark), el ch book(_hEl_topY_particle , "d04-x01-y01"); // dsigma/dy(top jet), el ch book(_hMu_topPt_parton , "d05-x01-y01"); // dsigma/dpt(top quark), mu ch book(_hMu_topPt_particle , "d06-x01-y01"); // dsigma/dpt(top jet), mu ch book(_hMu_topY_parton , "d07-x01-y01"); // dsigma/dy(top quark), mu ch book(_hMu_topY_particle , "d08-x01-y01"); // dsigma/dy(top jet), mu ch book(_hComb_topPt_parton , "d09-x01-y01"); // dsigma/dpt(top quark), comb ch book(_hComb_topPt_particle , "d10-x01-y01"); // dsigma/dpt(top jet), comb ch book(_hComb_topY_parton , "d11-x01-y01"); // dsigma/dy(top quark), comb ch book(_hComb_topY_particle , "d12-x01-y01"); // dsigma/dy(top jet), comb ch book(_hEl_topPt_parton_norm , "d13-x01-y01"); // 1/sigma dsigma/dpt(top quark), el ch book(_hEl_topPt_particle_norm , "d14-x01-y01"); // 1/sigma dsigma/dpt(top jet), el ch book(_hEl_topY_parton_norm , "d15-x01-y01"); // 1/sigma dsigma/dy(top quark), el ch book(_hEl_topY_particle_norm , "d16-x01-y01"); // 1/sigma dsigma/dy(top jet), el ch book(_hMu_topPt_parton_norm , "d17-x01-y01"); // 1/sigma dsigma/dpt(top quark), mu ch book(_hMu_topPt_particle_norm , "d18-x01-y01"); // 1/sigma dsigma/dpt(top jet), mu ch book(_hMu_topY_parton_norm , "d19-x01-y01"); // 1/sigma dsigma/dy(top quark), mu ch book(_hMu_topY_particle_norm , "d20-x01-y01"); // 1/sigma dsigma/dy(top jet), mu ch book(_hComb_topPt_parton_norm , "d21-x01-y01"); // 1/sigma dsigma/dpt(top quark), comb ch book(_hComb_topPt_particle_norm , "d22-x01-y01"); // 1/sigma dsigma/dpt(top jet), comb ch book(_hComb_topY_parton_norm , "d23-x01-y01"); // 1/sigma dsigma/dy(top quark), comb ch book(_hComb_topY_particle_norm , "d24-x01-y01"); // 1/sigma dsigma/dy(top jet), comb ch book(_hMu_cutflow , "mu_cutflow", 7, -0.5, 6.5); book(_hEl_cutflow , "el_cutflow", 7, -0.5, 6.5); } // per event analysis void analyze(const Event& event) { // Total-events cutflow entries _hMu_cutflow->fill(0.); _hEl_cutflow->fill(0.); // Do parton-level selection and channel determination int partonCh = 
0; //0 non-semi-lep, 1 muon, 2 electron const Particles muonpartontops = apply(event, "MuonPartonTops").particlesByPt(); const Particles electronpartontops = apply(event, "ElectronPartonTops").particlesByPt(); if (electronpartontops.size() == 0 && muonpartontops.size() == 1) partonCh = 1; else if (electronpartontops.size() == 1 && muonpartontops.size() == 0) partonCh = 2; else vetoEvent; const Particles hadronicpartontops = apply(event, "HadronicPartonTops").particlesByPt(); if (hadronicpartontops.size() != 1) vetoEvent; if (partonCh == 1) _hMu_cutflow->fill(1.); // muon at parton level if (partonCh == 2) _hEl_cutflow->fill(1.); // electron at parton level // Get hadronic parton-level top const FourMomentum& partonTopP4 = hadronicpartontops.front(); // Do particle-level selection and channel determination const DressedLeptons& dressed_electrons = apply(event, "DressedElectrons"); const DressedLeptons& dressed_muons = apply(event, "DressedMuons"); bool passParticleLep = false, passParticleTop = false; FourMomentum lepton, particleTopP4; if (partonCh == 1 && dressed_muons.dressedLeptons().size() == 1 && dressed_electrons.dressedLeptons().size() == 0) { passParticleLep = true; _hMu_cutflow->fill(3.); //muon at particle level lepton = dressed_muons.dressedLeptons()[0].momentum(); } if (partonCh == 2 && dressed_muons.dressedLeptons().size() == 0 && dressed_electrons.dressedLeptons().size() == 1) { passParticleLep = true; _hEl_cutflow->fill(3.); //electron at particle level lepton = dressed_electrons.dressedLeptons()[0].momentum(); } if (passParticleLep) { // Jet cuts Cut jetCuts = Cuts::pt > 30*GeV && Cuts::abseta < 2.4; Jets genBjets, genTjets; int nGenBjets = 0, nGenTjets = 0; const FastJets& AK5jets = apply(event, "ak5jets"); for (const Jet& jet : AK5jets.jetsByPt(jetCuts)) { if (deltaR(jet, lepton) > M_PI / 2.0) continue; if (deltaR(jet, lepton) < 0.1) continue; genBjets.push_back(jet); nGenBjets += 1; } const FastJets& CA8jets = apply(event, "ca8jets"); for (const Jet& jet : CA8jets.jetsByPt(jetCuts)) { if (deltaR(jet, lepton) < M_PI / 2.0) continue; if (jet.mass() < 140*GeV) continue; if (jet.mass() > 250*GeV) continue; genTjets.push_back(jet); nGenTjets += 1; } if (nGenBjets >=1) { if (partonCh == 1) _hMu_cutflow->fill(4.); // muon at parton level if (partonCh == 2) _hEl_cutflow->fill(4.); // electron at parton level if (nGenTjets >= 1) { passParticleTop = true; if (partonCh == 1) _hMu_cutflow->fill(5.); // muon at parton level if (partonCh == 2) _hEl_cutflow->fill(5.); // electron at parton level particleTopP4 = genTjets[0]; } } } const double weight = 1.0; if (partonCh == 1) { _nMu += weight; _hMu_topPt_parton->fill(partonTopP4.pT()/GeV, weight); _hMu_topPt_parton_norm->fill(partonTopP4.pT()/GeV, weight); _hComb_topPt_parton->fill(partonTopP4.pT()/GeV, weight); _hComb_topPt_parton_norm->fill(partonTopP4.pT()/GeV, weight); if (partonTopP4.pT() >= 400*GeV) { _nPassParton_mu += weight; _hMu_cutflow->fill(2.); _hMu_topY_parton->fill(partonTopP4.rapidity(), weight); _hMu_topY_parton_norm->fill(partonTopP4.rapidity(), weight); _hComb_topY_parton->fill(partonTopP4.rapidity(), weight); _hComb_topY_parton_norm->fill(partonTopP4.rapidity(), weight); } if (passParticleTop) { _hMu_topPt_particle->fill(particleTopP4.pT()/GeV, weight); _hMu_topPt_particle_norm->fill(particleTopP4.pT()/GeV, weight); _hComb_topPt_particle->fill(particleTopP4.pT()/GeV, weight); _hComb_topPt_particle_norm->fill(particleTopP4.pT()/GeV, weight); if (particleTopP4.pT() >= 400*GeV) { _nPassParticle_mu += weight; 
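// Cutflow step 6: the particle-level top jet also passes the 400 GeV boosted-selection threshold (muon channel)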
_hMu_cutflow->fill(6.); _hMu_topY_particle->fill(particleTopP4.rapidity(), weight); _hMu_topY_particle_norm->fill(particleTopP4.rapidity(), weight); _hComb_topY_particle->fill(particleTopP4.rapidity(), weight); _hComb_topY_particle_norm->fill(particleTopP4.rapidity(), weight); } } } if (partonCh == 2){ _nEl += weight; _hEl_topPt_parton->fill(partonTopP4.pT()/GeV, weight); _hEl_topPt_parton_norm->fill(partonTopP4.pT()/GeV, weight); _hComb_topPt_parton->fill(partonTopP4.pT()/GeV, weight); _hComb_topPt_parton_norm->fill(partonTopP4.pT()/GeV, weight); if (partonTopP4.pT() >= 400*GeV) { _nPassParton_el += weight; _hEl_cutflow->fill(2.); _hEl_topY_parton->fill(partonTopP4.rapidity(), weight); _hEl_topY_parton_norm->fill(partonTopP4.rapidity(), weight); _hComb_topY_parton->fill(partonTopP4.rapidity(), weight); _hComb_topY_parton_norm->fill(partonTopP4.rapidity(), weight); } if (passParticleTop) { _hEl_topPt_particle->fill(particleTopP4.pT()/GeV, weight); _hEl_topPt_particle_norm->fill(particleTopP4.pT()/GeV, weight); _hComb_topPt_particle->fill(particleTopP4.pT()/GeV, weight); _hComb_topPt_particle_norm->fill(particleTopP4.pT()/GeV, weight); if (particleTopP4.pT() >= 400*GeV) { _nPassParticle_el += weight; _hEl_cutflow->fill(6.); _hEl_topY_particle->fill(particleTopP4.rapidity(), weight); _hEl_topY_particle_norm->fill(particleTopP4.rapidity(), weight); _hComb_topY_particle->fill(particleTopP4.rapidity(), weight); _hComb_topY_particle_norm->fill(particleTopP4.rapidity(), weight); } } } } void finalize() { normalize({_hMu_topPt_parton_norm, _hMu_topY_parton_norm, _hEl_topPt_parton_norm, _hEl_topY_parton_norm, _hComb_topPt_parton_norm, _hComb_topY_parton_norm}, 1.0, false); normalize({_hMu_topPt_particle_norm, _hMu_topY_particle_norm, _hEl_topPt_particle_norm, _hEl_topY_particle_norm, _hComb_topPt_particle_norm, _hComb_topY_particle_norm}, 1.0, false); scale(_hMu_topPt_particle, crossSection()/femtobarn / sumOfWeights()); scale(_hEl_topPt_particle, crossSection()/femtobarn / sumOfWeights()); scale(_hMu_topY_particle, crossSection()/femtobarn / sumOfWeights()); scale(_hEl_topY_particle, crossSection()/femtobarn / sumOfWeights()); scale(_hComb_topPt_particle, crossSection()/femtobarn / sumOfWeights()); scale(_hComb_topY_particle, crossSection()/femtobarn / sumOfWeights()); scale(_hMu_topPt_parton, crossSection()/femtobarn / sumOfWeights()); scale(_hEl_topPt_parton, crossSection()/femtobarn / sumOfWeights()); scale(_hMu_topY_parton, crossSection()/femtobarn / sumOfWeights()); scale(_hEl_topY_parton, crossSection()/femtobarn / sumOfWeights()); scale(_hComb_topPt_parton, crossSection()/femtobarn / sumOfWeights()); scale(_hComb_topY_parton, crossSection()/femtobarn / sumOfWeights()); } private: Histo1DPtr _hMu_topPt_parton, _hMu_topY_parton, _hEl_topPt_parton, _hEl_topY_parton, _hComb_topPt_parton, _hComb_topY_parton; Histo1DPtr _hMu_topPt_particle, _hMu_topY_particle, _hEl_topPt_particle, _hEl_topY_particle, _hComb_topPt_particle, _hComb_topY_particle; Histo1DPtr _hMu_topPt_parton_norm, _hMu_topY_parton_norm, _hEl_topPt_parton_norm, _hEl_topY_parton_norm, _hComb_topPt_parton_norm, _hComb_topY_parton_norm; Histo1DPtr _hMu_topPt_particle_norm, _hMu_topY_particle_norm, _hEl_topPt_particle_norm, _hEl_topY_particle_norm, _hComb_topPt_particle_norm, _hComb_topY_particle_norm; Histo1DPtr _hMu_cutflow, _hEl_cutflow; double _nMu = 0., _nEl = 0.; double _nPassParton_mu = 0.,_nPassParton_el = 0.; double _nPassParticle_mu = 0., _nPassParticle_el = 0.; }; // The hook for the plugin system 
DECLARE_RIVET_PLUGIN(CMS_2016_I1454211); } diff --git a/analyses/pluginCMS/CMS_2016_I1473674.cc b/analyses/pluginCMS/CMS_2016_I1473674.cc --- a/analyses/pluginCMS/CMS_2016_I1473674.cc +++ b/analyses/pluginCMS/CMS_2016_I1473674.cc @@ -1,124 +1,124 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { class CMS_2016_I1473674 : public Analysis { public: // Minimal constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1473674); // Set up projections and book histograms void init() { // Complete final state FinalState fs; // Parton level top quarks - declare(PartonicTops(PartonicTops::E_MU, false), "LeptonicPartonTops"); - declare(PartonicTops(PartonicTops::HADRONIC), "HadronicPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicPartonTops"); // Projections for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); // IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); addProjection(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); addProjection(dressed_electrons, "DressedElectrons"); // IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); addProjection(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); addProjection(dressed_muons, "DressedMuons"); // Projection for jets VetoedFinalState fs_jets(FinalState(-MAXDOUBLE, MAXDOUBLE, 0*GeV)); fs_jets.addVetoOnThisFinalState(dressed_muons); addProjection(FastJets(fs_jets, FastJets::ANTIKT, 0.5), "Jets"); // Projections for MET addProjection(MissingMomentum(), "MET"); // Booking of histograms book(_hist_met ,5, 1, 1); book(_hist_ht ,6, 1, 1); book(_hist_st ,7, 1, 1); book(_hist_wpt ,8, 1, 1); } /// Per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Select ttbar -> lepton+jets at parton level, removing tau decays const Particles leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); if (leptonicpartontops.size() != 1) vetoEvent; const Particles hadronicpartontops = apply(event, "HadronicPartonTops").particlesByPt(); if (hadronicpartontops.size() != 1) vetoEvent; // Select ttbar -> lepton+jets at particle level const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); if (dressed_electrons.dressedLeptons().size() + dressed_muons.dressedLeptons().size() != 1) vetoEvent; const FourMomentum lepton = (dressed_electrons.dressedLeptons().empty() ? 
dressed_muons : dressed_electrons).dressedLeptons()[0]; // MET const MissingMomentum& met = applyProjection(event, "MET"); _hist_met->fill(met.visibleMomentum().pT()/GeV, weight); // HT and ST const FastJets& jetpro = applyProjection(event, "Jets"); const Jets jets = jetpro.jetsByPt(20*GeV); double ht = 0.0; for (const Jet& j : jets) { if (deltaR(j.momentum(), lepton) > 0.3) { ht += j.pT(); } } double st = ht + lepton.pT() + met.visibleMomentum().pT(); _hist_ht->fill(ht/GeV, weight); _hist_st->fill(st/GeV, weight); // WPT const FourMomentum w = lepton - met.visibleMomentum(); _hist_wpt->fill(w.pT()/GeV, weight); } /// Normalize histograms void finalize() { normalize(_hist_met); normalize(_hist_ht); normalize(_hist_st); normalize(_hist_wpt); } private: Histo1DPtr _hist_met, _hist_ht, _hist_st, _hist_wpt; }; DECLARE_RIVET_PLUGIN(CMS_2016_I1473674); } diff --git a/analyses/pluginCMS/CMS_2016_I1491950.cc b/analyses/pluginCMS/CMS_2016_I1491950.cc --- a/analyses/pluginCMS/CMS_2016_I1491950.cc +++ b/analyses/pluginCMS/CMS_2016_I1491950.cc @@ -1,500 +1,500 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Tools/ParticleName.hh" #include "Rivet/Tools/ParticleIdUtils.hh" namespace Rivet { namespace { //< only visible in this compilation unit /// @brief Special dressed lepton finder /// /// Find dressed leptons by clustering all leptons and photons class SpecialDressedLeptons : public FinalState { public: /// The default constructor. May specify cuts SpecialDressedLeptons(const FinalState& fs, const Cut& cut) : FinalState(cut) { setName("SpecialDressedLeptons"); IdentifiedFinalState ifs(fs); ifs.acceptIdPair(PID::PHOTON); ifs.acceptIdPair(PID::ELECTRON); ifs.acceptIdPair(PID::MUON); addProjection(ifs, "IFS"); addProjection(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); } /// Clone on the heap. virtual unique_ptr clone() const { return unique_ptr(new SpecialDressedLeptons(*this)); } /// Retrieve the dressed leptons const vector& dressedLeptons() const { return _clusteredLeptons; } private: /// Container which stores the clustered lepton objects vector _clusteredLeptons; public: void project(const Event& e) { _theParticles.clear(); _clusteredLeptons.clear(); vector allClusteredLeptons; const Jets jets = applyProjection(e, "LeptonJets").jetsByPt(5.*GeV); for (const Jet& jet : jets) { Particle lepCand; for (const Particle& cand : jet.particles()) { const int absPdgId = abs(cand.pdgId()); if (absPdgId == PID::ELECTRON || absPdgId == PID::MUON) { if (cand.pt() > lepCand.pt()) lepCand = cand; } } //Central lepton must be the major component if ((lepCand.pt() < jet.pt()/2.) 
|| (lepCand.pdgId() == 0)) continue; DressedLepton lepton = DressedLepton(lepCand); for (const Particle& cand : jet.particles()) { if (cand == lepCand) continue; if (cand.pid() != PID::PHOTON) continue; lepton.addPhoton(cand, true); } allClusteredLeptons.push_back(lepton); } for (const DressedLepton& lepton : allClusteredLeptons) { if (accept(lepton)) { _clusteredLeptons.push_back(lepton); _theParticles.push_back(lepton.constituentLepton()); _theParticles += lepton.constituentPhotons(); } } } }; } class CMS_2016_I1491950 : public Analysis { public: /// Constructor CMS_2016_I1491950() : Analysis("CMS_2016_I1491950") { } /// Book histograms and initialise projections before the run void init() { FinalState fs(Cuts::pT > 0. && Cuts::abseta < 6.); PromptFinalState prompt_fs(fs); prompt_fs.acceptMuonDecays(true); prompt_fs.acceptTauDecays(true); // Projection for dressed electrons and muons Cut leptonCuts = Cuts::abseta < 2.5 and Cuts::pt > 30.*GeV; SpecialDressedLeptons dressedleptons(prompt_fs, leptonCuts); addProjection(dressedleptons, "DressedLeptons"); // Neutrinos IdentifiedFinalState neutrinos(prompt_fs); neutrinos.acceptNeutrinos(); addProjection(neutrinos, "Neutrinos"); // Projection for jets VetoedFinalState fsForJets(fs); fsForJets.addVetoOnThisFinalState(dressedleptons); fsForJets.addVetoOnThisFinalState(neutrinos); - addProjection(FastJets(fsForJets, FastJets::ANTIKT, 0.4, JetAlg::DECAY_MUONS, JetAlg::DECAY_INVISIBLES), "Jets"); + addProjection(FastJets(fsForJets, FastJets::ANTIKT, 0.4, JetAlg::Muons::DECAY, JetAlg::Invisibles::DECAY), "Jets"); //book hists book(_hist_thadpt, "d01-x02-y01"); book(_hist_thady, "d03-x02-y01"); book(_hist_tleppt, "d05-x02-y01"); book(_hist_tlepy, "d07-x02-y01"); book(_hist_ttpt, "d09-x02-y01"); book(_hist_tty, "d13-x02-y01"); book(_hist_ttm, "d11-x02-y01"); book(_hist_njet, "d15-x02-y01"); book(_hist_njets_thadpt_1, "d17-x02-y01"); book(_hist_njets_thadpt_2, "d18-x02-y01"); book(_hist_njets_thadpt_3, "d19-x02-y01"); book(_hist_njets_thadpt_4, "d20-x02-y01"); book(_hist_njets_ttpt_1, "d22-x02-y01"); book(_hist_njets_ttpt_2, "d23-x02-y01"); book(_hist_njets_ttpt_3, "d24-x02-y01"); book(_hist_njets_ttpt_4, "d25-x02-y01"); book(_hist_thady_thadpt_1, "d27-x02-y01"); book(_hist_thady_thadpt_2, "d28-x02-y01"); book(_hist_thady_thadpt_3, "d29-x02-y01"); book(_hist_thady_thadpt_4, "d30-x02-y01"); book(_hist_ttm_tty_1, "d32-x02-y01"); book(_hist_ttm_tty_2, "d33-x02-y01"); book(_hist_ttm_tty_3, "d34-x02-y01"); book(_hist_ttm_tty_4, "d35-x02-y01"); book(_hist_ttpt_ttm_1, "d37-x02-y01"); book(_hist_ttpt_ttm_2, "d38-x02-y01"); book(_hist_ttpt_ttm_3, "d39-x02-y01"); book(_hist_ttpt_ttm_4, "d40-x02-y01"); book(_histnorm_thadpt, "d42-x02-y01"); book(_histnorm_thady, "d44-x02-y01"); book(_histnorm_tleppt, "d46-x02-y01"); book(_histnorm_tlepy, "d48-x02-y01"); book(_histnorm_ttpt, "d50-x02-y01"); book(_histnorm_tty, "d54-x02-y01"); book(_histnorm_ttm, "d52-x02-y01"); book(_histnorm_njet, "d56-x02-y01"); book(_histnorm_njets_thadpt_1, "d58-x02-y01"); book(_histnorm_njets_thadpt_2, "d59-x02-y01"); book(_histnorm_njets_thadpt_3, "d60-x02-y01"); book(_histnorm_njets_thadpt_4, "d61-x02-y01"); book(_histnorm_njets_ttpt_1, "d63-x02-y01"); book(_histnorm_njets_ttpt_2, "d64-x02-y01"); book(_histnorm_njets_ttpt_3, "d65-x02-y01"); book(_histnorm_njets_ttpt_4, "d66-x02-y01"); book(_histnorm_thady_thadpt_1, "d68-x02-y01"); book(_histnorm_thady_thadpt_2, "d69-x02-y01"); book(_histnorm_thady_thadpt_3, "d70-x02-y01"); book(_histnorm_thady_thadpt_4, "d71-x02-y01"); 
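// Normalised double-differential slices: |y(tt)| in bins of m(tt), then m(tt) in bins of pT(tt)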
book(_histnorm_ttm_tty_1, "d73-x02-y01"); book(_histnorm_ttm_tty_2, "d74-x02-y01"); book(_histnorm_ttm_tty_3, "d75-x02-y01"); book(_histnorm_ttm_tty_4, "d76-x02-y01"); book(_histnorm_ttpt_ttm_1, "d78-x02-y01"); book(_histnorm_ttpt_ttm_2, "d79-x02-y01"); book(_histnorm_ttpt_ttm_3, "d80-x02-y01"); book(_histnorm_ttpt_ttm_4, "d81-x02-y01"); } /// Perform the per-event analysis void analyze(const Event& event) { // leptons const SpecialDressedLeptons& dressedleptons_proj = applyProjection(event, "DressedLeptons"); std::vector dressedLeptons = dressedleptons_proj.dressedLeptons(); if(dressedLeptons.size() != 1) return; // neutrinos const Particles neutrinos = applyProjection(event, "Neutrinos").particlesByPt(); _nusum = FourMomentum(0., 0., 0., 0.); for(const Particle& neutrino : neutrinos) { _nusum += neutrino.momentum(); } _wl = _nusum + dressedLeptons[0].momentum(); // jets Cut jet_cut = (Cuts::abseta < 2.5) and (Cuts::pT > 25.*GeV); const Jets jets = applyProjection(event, "Jets").jetsByPt(jet_cut); Jets allJets; for (const Jet& jet : jets) { allJets.push_back(jet); } Jets bJets; for (const Jet& jet : allJets) { if (jet.bTagged()) bJets.push_back(jet); } if(bJets.size() < 2 || allJets.size() < 4) return; //construct top quark proxies double Kmin = numeric_limits::max(); for(const Jet& itaj : allJets) { for(const Jet& itbj : allJets) { if (itaj.momentum() == itbj.momentum()) continue; FourMomentum wh(itaj.momentum() + itbj.momentum()); for(const Jet& ithbj : bJets) { if(itaj.momentum() == ithbj.momentum() || itbj.momentum() == ithbj.momentum()) continue; FourMomentum th(wh + ithbj.momentum()); for(const Jet& itlbj : bJets) { if(itaj.momentum() == itlbj.momentum() || itbj.momentum() == itlbj.momentum() || ithbj.momentum() == itlbj.momentum()) continue; FourMomentum tl(_wl + itlbj.momentum()); double K = pow(wh.mass() - 80.4, 2) + pow(th.mass() - 172.5, 2) + pow(tl.mass() - 172.5, 2); if(K < Kmin) { Kmin = K; _tl = tl; _th = th; _wh = wh; } } } } } _hist_thadpt->fill(_th.pt()); _hist_thady->fill(abs(_th.rapidity()) ); _hist_tleppt->fill(_tl.pt() ); _hist_tlepy->fill(abs(_tl.rapidity()) ); _histnorm_thadpt->fill(_th.pt()); _histnorm_thady->fill(abs(_th.rapidity()) ); _histnorm_tleppt->fill(_tl.pt() ); _histnorm_tlepy->fill(abs(_tl.rapidity()) ); FourMomentum tt(_tl+_th); _hist_ttpt->fill(tt.pt() ); _hist_tty->fill(abs(tt.rapidity()) ); _hist_ttm->fill(tt.mass() ); _hist_njet->fill(min(allJets.size()-4., 4.)); _histnorm_ttpt->fill(tt.pt() ); _histnorm_tty->fill(abs(tt.rapidity()) ); _histnorm_ttm->fill(tt.mass() ); _histnorm_njet->fill(min(allJets.size()-4., 4.)); if(allJets.size() == 4) { _hist_njets_thadpt_1->fill(_th.pt()); _hist_njets_ttpt_1->fill(tt.pt()); _histnorm_njets_thadpt_1->fill(_th.pt()); _histnorm_njets_ttpt_1->fill(tt.pt()); } else if(allJets.size() == 5) { _hist_njets_thadpt_2->fill(_th.pt()); _hist_njets_ttpt_2->fill(tt.pt()); _histnorm_njets_thadpt_2->fill(_th.pt()); _histnorm_njets_ttpt_2->fill(tt.pt()); } else if(allJets.size() == 6) { _hist_njets_thadpt_3->fill(_th.pt()); _hist_njets_ttpt_3->fill(tt.pt()); _histnorm_njets_thadpt_3->fill(_th.pt()); _histnorm_njets_ttpt_3->fill(tt.pt()); } else //>= 4 jets { _hist_njets_thadpt_4->fill(_th.pt()); _hist_njets_ttpt_4->fill(tt.pt()); _histnorm_njets_thadpt_4->fill(_th.pt()); _histnorm_njets_ttpt_4->fill(tt.pt()); } if(abs(_th.rapidity()) < 0.5) { _hist_thady_thadpt_1->fill(_th.pt()); _histnorm_thady_thadpt_1->fill(_th.pt()); } else if(abs(_th.rapidity()) < 1.0) { _hist_thady_thadpt_2->fill(_th.pt()); 
_histnorm_thady_thadpt_2->fill(_th.pt()); } else if(abs(_th.rapidity()) < 1.5) { _hist_thady_thadpt_3->fill(_th.pt()); _histnorm_thady_thadpt_3->fill(_th.pt()); } else if(abs(_th.rapidity()) < 2.5) { _hist_thady_thadpt_4->fill(_th.pt()); _histnorm_thady_thadpt_4->fill(_th.pt()); } if(tt.mass() >= 300. && tt.mass() < 450.) { _hist_ttm_tty_1->fill(abs(tt.rapidity())); _histnorm_ttm_tty_1->fill(abs(tt.rapidity())); } else if(tt.mass() >= 450. && tt.mass() < 625.) { _hist_ttm_tty_2->fill(abs(tt.rapidity())); _histnorm_ttm_tty_2->fill(abs(tt.rapidity())); } else if(tt.mass() >= 625. && tt.mass() < 850.) { _hist_ttm_tty_3->fill(abs(tt.rapidity())); _histnorm_ttm_tty_3->fill(abs(tt.rapidity())); } else if(tt.mass() >= 850. && tt.mass() < 2000.) { _hist_ttm_tty_4->fill(abs(tt.rapidity())); _histnorm_ttm_tty_4->fill(abs(tt.rapidity())); } if(tt.pt() < 35.) { _hist_ttpt_ttm_1->fill(tt.mass()); _histnorm_ttpt_ttm_1->fill(tt.mass()); } else if(tt.pt() < 80.) { _hist_ttpt_ttm_2->fill(tt.mass()); _histnorm_ttpt_ttm_2->fill(tt.mass()); } else if(tt.pt() < 140.) { _hist_ttpt_ttm_3->fill(tt.mass()); _histnorm_ttpt_ttm_3->fill(tt.mass()); } else if(tt.pt() < 500.) { _hist_ttpt_ttm_4->fill(tt.mass()); _histnorm_ttpt_ttm_4->fill(tt.mass()); } } /// Normalise histograms etc., after the run void finalize() { scale(_hist_thadpt, crossSection()/sumOfWeights()); scale(_hist_thady, crossSection()/sumOfWeights()); scale(_hist_tleppt, crossSection()/sumOfWeights()); scale(_hist_tlepy, crossSection()/sumOfWeights()); scale(_hist_ttpt, crossSection()/sumOfWeights()); scale(_hist_tty, crossSection()/sumOfWeights()); scale(_hist_ttm, crossSection()/sumOfWeights()); scale(_hist_njet, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_1, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_2, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_3, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_4, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_1, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_2, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_3, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_4, crossSection()/sumOfWeights()); scale(_hist_thady_thadpt_1, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_2, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_3, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_4, crossSection()/sumOfWeights()/1.0); scale(_hist_ttm_tty_1, crossSection()/sumOfWeights()/150.); scale(_hist_ttm_tty_2, crossSection()/sumOfWeights()/175.); scale(_hist_ttm_tty_3, crossSection()/sumOfWeights()/225.); scale(_hist_ttm_tty_4, crossSection()/sumOfWeights()/1150.); scale(_hist_ttpt_ttm_1, crossSection()/sumOfWeights()/35.); scale(_hist_ttpt_ttm_2, crossSection()/sumOfWeights()/45.); scale(_hist_ttpt_ttm_3, crossSection()/sumOfWeights()/60.); scale(_hist_ttpt_ttm_4, crossSection()/sumOfWeights()/360.); scale(_histnorm_thadpt, 1./_histnorm_thadpt->sumW(false)); scale(_histnorm_thady, 1./_histnorm_thady->sumW(false)); scale(_histnorm_tleppt, 1./_histnorm_tleppt->sumW(false)); scale(_histnorm_tlepy, 1./_histnorm_tlepy->sumW(false)); scale(_histnorm_ttpt, 1./_histnorm_ttpt->sumW(false)); scale(_histnorm_tty, 1./_histnorm_tty->sumW(false)); scale(_histnorm_ttm, 1./_histnorm_ttm->sumW(false)); scale(_histnorm_njet, 1./_histnorm_njet->sumW(false)); double sum_njets_thadpt = _histnorm_njets_thadpt_1->sumW(false) + _histnorm_njets_thadpt_2->sumW(false) + _histnorm_njets_thadpt_3->sumW(false) + _histnorm_njets_thadpt_4->sumW(false); 
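// Each set of unrolled slices below is normalised to the combined sum of weights of all its
// slices; where the second variable has unequal bins, the slice is also divided by that bin width.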
scale(_histnorm_njets_thadpt_1, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_2, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_3, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_4, 1./sum_njets_thadpt); double sum_njets_ttpt = _histnorm_njets_ttpt_1->sumW(false) + _histnorm_njets_ttpt_2->sumW(false) + _histnorm_njets_ttpt_3->sumW(false) + _histnorm_njets_ttpt_4->sumW(false); scale(_histnorm_njets_ttpt_1, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_2, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_3, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_4, 1./sum_njets_ttpt); double sum_thady_thadpt = _histnorm_thady_thadpt_1->sumW(false) + _histnorm_thady_thadpt_2->sumW(false) + _histnorm_thady_thadpt_3->sumW(false) + _histnorm_thady_thadpt_4->sumW(false); scale(_histnorm_thady_thadpt_1, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_2, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_3, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_4, 1./sum_thady_thadpt/1.0); double sum_ttm_tty = _histnorm_ttm_tty_1->sumW(false) + _histnorm_ttm_tty_2->sumW(false) + _histnorm_ttm_tty_3->sumW(false) + _histnorm_ttm_tty_4->sumW(false); scale(_histnorm_ttm_tty_1, 1./sum_ttm_tty/150.); scale(_histnorm_ttm_tty_2, 1./sum_ttm_tty/175.); scale(_histnorm_ttm_tty_3, 1./sum_ttm_tty/225.); scale(_histnorm_ttm_tty_4, 1./sum_ttm_tty/1150.); double sum_ttpt_ttm = _histnorm_ttpt_ttm_1->sumW(false) + _histnorm_ttpt_ttm_2->sumW(false) + _histnorm_ttpt_ttm_3->sumW(false) + _histnorm_ttpt_ttm_4->sumW(false); scale(_histnorm_ttpt_ttm_1, 1./sum_ttpt_ttm/35.); scale(_histnorm_ttpt_ttm_2, 1./sum_ttpt_ttm/45.); scale(_histnorm_ttpt_ttm_3, 1./sum_ttpt_ttm/60.); scale(_histnorm_ttpt_ttm_4, 1./sum_ttpt_ttm/360.); } private: FourMomentum _tl; FourMomentum _th; FourMomentum _wl; FourMomentum _wh; FourMomentum _nusum; Histo1DPtr _hist_thadpt; Histo1DPtr _hist_thady; Histo1DPtr _hist_tleppt; Histo1DPtr _hist_tlepy; Histo1DPtr _hist_ttpt; Histo1DPtr _hist_tty; Histo1DPtr _hist_ttm; Histo1DPtr _hist_njet; Histo1DPtr _hist_njets_thadpt_1; Histo1DPtr _hist_njets_thadpt_2; Histo1DPtr _hist_njets_thadpt_3; Histo1DPtr _hist_njets_thadpt_4; Histo1DPtr _hist_njets_ttpt_1; Histo1DPtr _hist_njets_ttpt_2; Histo1DPtr _hist_njets_ttpt_3; Histo1DPtr _hist_njets_ttpt_4; Histo1DPtr _hist_thady_thadpt_1; Histo1DPtr _hist_thady_thadpt_2; Histo1DPtr _hist_thady_thadpt_3; Histo1DPtr _hist_thady_thadpt_4; Histo1DPtr _hist_ttm_tty_1; Histo1DPtr _hist_ttm_tty_2; Histo1DPtr _hist_ttm_tty_3; Histo1DPtr _hist_ttm_tty_4; Histo1DPtr _hist_ttpt_ttm_1; Histo1DPtr _hist_ttpt_ttm_2; Histo1DPtr _hist_ttpt_ttm_3; Histo1DPtr _hist_ttpt_ttm_4; Histo1DPtr _histnorm_thadpt; Histo1DPtr _histnorm_thady; Histo1DPtr _histnorm_tleppt; Histo1DPtr _histnorm_tlepy; Histo1DPtr _histnorm_ttpt; Histo1DPtr _histnorm_tty; Histo1DPtr _histnorm_ttm; Histo1DPtr _histnorm_njet; Histo1DPtr _histnorm_njets_thadpt_1; Histo1DPtr _histnorm_njets_thadpt_2; Histo1DPtr _histnorm_njets_thadpt_3; Histo1DPtr _histnorm_njets_thadpt_4; Histo1DPtr _histnorm_njets_ttpt_1; Histo1DPtr _histnorm_njets_ttpt_2; Histo1DPtr _histnorm_njets_ttpt_3; Histo1DPtr _histnorm_njets_ttpt_4; Histo1DPtr _histnorm_thady_thadpt_1; Histo1DPtr _histnorm_thady_thadpt_2; Histo1DPtr _histnorm_thady_thadpt_3; Histo1DPtr _histnorm_thady_thadpt_4; Histo1DPtr _histnorm_ttm_tty_1; Histo1DPtr _histnorm_ttm_tty_2; Histo1DPtr _histnorm_ttm_tty_3; Histo1DPtr _histnorm_ttm_tty_4; Histo1DPtr _histnorm_ttpt_ttm_1; Histo1DPtr _histnorm_ttpt_ttm_2; Histo1DPtr _histnorm_ttpt_ttm_3; Histo1DPtr _histnorm_ttpt_ttm_4; 
}; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1491950); } diff --git a/analyses/pluginCMS/CMS_2016_I1491953.cc b/analyses/pluginCMS/CMS_2016_I1491953.cc --- a/analyses/pluginCMS/CMS_2016_I1491953.cc +++ b/analyses/pluginCMS/CMS_2016_I1491953.cc @@ -1,331 +1,331 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { /// @brief Differential cross sections for associated production of a W boson and jets at 8 TeV class CMS_2016_I1491953 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1491953); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections FinalState fs; WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, - 0*GeV, 0.1, WFinder::CLUSTERNODECAY, WFinder::TRACK, WFinder::TRANSMASS); + 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); addProjection(wfinder_mu, "WFinder_mu"); // Define veto FS VetoedFinalState vfs; vfs.addVetoOnThisFinalState(wfinder_mu); vfs.addVetoPairId(PID::MUON); vfs.vetoNeutrinos(); FastJets fastjets(vfs, FastJets::ANTIKT, 0.5); addProjection(fastjets, "Jets"); book(_hist_Mult_exc ,1,1,1); book(_hist_inc_WJetMult ,2,1,1); book(_hist_addJetPt1j ,3,1,1); book(_hist_addJetPt2j ,4,1,1); book(_hist_addJetPt3j ,5,1,1); book(_hist_addJetPt4j ,6,1,1); book(_hist_addHt_1j ,7,1,1); book(_hist_addHt_2j ,8,1,1); book(_hist_addHt_3j ,9,1,1); book(_hist_addHt_4j ,10,1,1); book(_hist_diJetPt_2j ,11,1,1); book(_hist_diJetPt_3j ,12,1,1); book(_hist_diJetPt_4j ,13,1,1); book(_hist_dijetM_2j ,14,1,1); book(_hist_dijetM_3j ,15,1,1); book(_hist_dijetM_4j ,16,1,1); book(_hist_Jeteta1j ,17,1,1); book(_hist_Jeteta2j ,18,1,1); book(_hist_Jeteta3j ,19,1,1); book(_hist_Jeteta4j ,20,1,1); book(_hist_dyj1j2_2j ,21,1,1); book(_hist_dyj1j2_3j ,22,1,1); book(_hist_dyj1j2_4j ,23,1,1); book(_hist_dyj1j3_3j ,24,1,1); book(_hist_dyj2j3_3j ,25,1,1); book(_hist_dyjFjB_2j ,26,1,1); book(_hist_dyjFjB_3j ,27,1,1); book(_hist_dyjFjB_4j ,28,1,1); book(_hist_dphij1j2_2j ,29,1,1); book(_hist_dphijFjB_2j ,30,1,1); book(_hist_dRj1j2_2j ,31,1,1); book(_hist_dphij1mu_1j ,32,1,1); book(_hist_dphij2mu_2j ,33,1,1); book(_hist_dphij3mu_3j ,34,1,1); book(_hist_dphij4mu_4j ,35,1,1); book(_hist_MeanNJht_1j ,36,1,1); book(_hist_MeanNJht_2j ,37,1,1); book(_hist_MeanNJdyj1j2_2j ,38,1,1); book(_hist_MeanNJdyjFjB_2j ,39,1,1); } // Define function used for filiing inc Njets histo void _fill(Histo1DPtr& _histJetMult, vector& finaljet_list) { _histJetMult->fill(0); for (size_t i = 0 ; i < finaljet_list.size() ; ++i) { if (i == 7) break; _histJetMult->fill(i+1); // inclusive multiplicity } } /// Perform the per-event analysis void analyze(const Event& event) { const WFinder& wfinder_mu = apply(event, "WFinder_mu"); if (wfinder_mu.bosons().size() != 1) vetoEvent; //const FourMomentum& lepton0 = wfinder_mu.constituentLeptons()[0].momentum(); //const FourMomentum& neutrino = wfinder_mu.constituentNeutrinos()[0].momentum(); //double WmT = sqrt( 2 * lepton0.pT() * neutrino.pT() * (1 - cos(deltaPhi(lepton0, neutrino))) ); const FourMomentum& lepton0 = wfinder_mu.constituentLepton().momentum(); double WmT = wfinder_mu.mT(); if (WmT < 50.0*GeV) vetoEvent; if (lepton0.abseta() > 2.1 || lepton0.pT() < 25.0*GeV) vetoEvent; // Select final jets, 
ordered by decreasing pT vector finaljet_list; double HT = 0.0; const Jets jListAll = apply(event, "Jets").jetsByPt(30.0*GeV); for (const Jet& j : jListAll) { if (j.abseta() < 2.4 && j.pT() > 30.0*GeV && deltaR(lepton0, j) > 0.5) { finaljet_list.push_back(j.momentum()); HT += j.pT(); } } // Another jet list, sorted by increasing rapidity vector jListRap = finaljet_list; std::sort(jListRap.begin(), jListRap.end(), cmpMomByRap); // Multiplicity exc plot. if (finaljet_list.size()<=7) { _hist_Mult_exc->fill(finaljet_list.size()); } else if (finaljet_list.size()>7){ _hist_Mult_exc->fill(7.); } // Multiplicity inc plot. _fill(_hist_inc_WJetMult, finaljet_list); if (finaljet_list.size()>=1) { _hist_addJetPt1j->fill(finaljet_list[0].pT()); _hist_Jeteta1j->fill(fabs(finaljet_list[0].eta())); _hist_addHt_1j->fill(HT); _hist_dphij1mu_1j->fill( deltaPhi(finaljet_list[0].phi(), lepton0.phi()) ); _hist_MeanNJht_1j->fill( HT, finaljet_list.size()); } if (finaljet_list.size()>=2) { _hist_addJetPt2j->fill(finaljet_list[1].pT()); _hist_Jeteta2j->fill(fabs(finaljet_list[1].eta())); _hist_addHt_2j->fill(HT); _hist_dyj1j2_2j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyjFjB_2j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dphij1j2_2j ->fill( deltaPhi(finaljet_list[0].phi(), finaljet_list[1].phi())); _hist_dphijFjB_2j ->fill( deltaPhi(jListRap[0].phi(), jListRap[jListRap.size()-1].phi()) ); _hist_dijetM_2j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_2j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dRj1j2_2j ->fill( deltaR(finaljet_list[0].rapidity(), finaljet_list[0].phi(), finaljet_list[1].rapidity(), finaljet_list[1].phi())); _hist_dphij2mu_2j ->fill( deltaPhi(finaljet_list[1].phi(), lepton0.phi()) ); _hist_MeanNJht_2j->fill( HT, finaljet_list.size()); _hist_MeanNJdyj1j2_2j->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity()), finaljet_list.size()); _hist_MeanNJdyjFjB_2j->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity()), finaljet_list.size()); } if (finaljet_list.size()>=3) { _hist_addJetPt3j->fill(finaljet_list[2].pT()); _hist_Jeteta3j->fill(fabs(finaljet_list[2].eta())); _hist_addHt_3j->fill(HT); _hist_dyj1j2_3j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyj1j3_3j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[2].rapidity())); _hist_dyj2j3_3j ->fill( fabs(finaljet_list[1].rapidity() - finaljet_list[2].rapidity())); _hist_dyjFjB_3j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dijetM_3j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_3j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dphij3mu_3j->fill( deltaPhi(finaljet_list[2].phi(), lepton0.phi()) ); } if (finaljet_list.size()>=4) { _hist_addJetPt4j->fill(finaljet_list[3].pT()); _hist_Jeteta4j->fill(fabs(finaljet_list[3].eta())); _hist_addHt_4j->fill(HT); _hist_dyj1j2_4j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyjFjB_4j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dijetM_4j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_4j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dphij4mu_4j->fill( deltaPhi(finaljet_list[3].phi(), lepton0.phi()) ); } } //void loop /// Normalise histograms etc., after the run void finalize() { const double crossec = 
!std::isnan(crossSectionPerEvent()) ? crossSection() : 36703*picobarn; if (std::isnan(crossSectionPerEvent())){ MSG_INFO("No valid cross-section given, using NNLO xsec calculated by FEWZ " << crossec/picobarn << " pb"); } scale(_hist_Mult_exc, crossec/picobarn/sumOfWeights()); scale(_hist_inc_WJetMult, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt1j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt2j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt3j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt4j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta1j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta2j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta3j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta4j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_1j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_2j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_3j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_4j, crossec/picobarn/sumOfWeights()); //------------------------------------- scale(_hist_dyj1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j2_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j2_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j3_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj2j3_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphijFjB_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dRj1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_4j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_2j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_3j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1mu_1j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij2mu_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij3mu_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij4mu_4j, crossec/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_inc_WJetMult; Histo1DPtr _hist_Mult_exc; Histo1DPtr _hist_addJetPt1j; Histo1DPtr _hist_addJetPt2j; Histo1DPtr _hist_addJetPt3j; Histo1DPtr _hist_addJetPt4j; Histo1DPtr _hist_Jeteta1j; Histo1DPtr _hist_Jeteta2j; Histo1DPtr _hist_Jeteta3j; Histo1DPtr _hist_Jeteta4j; Histo1DPtr _hist_addHt_1j; Histo1DPtr _hist_addHt_2j; Histo1DPtr _hist_addHt_3j; Histo1DPtr _hist_addHt_4j; //------------------------------------- Histo1DPtr _hist_dyj1j2_2j; Histo1DPtr _hist_dyj1j2_3j; Histo1DPtr _hist_dyj1j2_4j; Histo1DPtr _hist_dyjFjB_2j; Histo1DPtr _hist_dyjFjB_3j; Histo1DPtr _hist_dyjFjB_4j; Histo1DPtr _hist_dyj1j3_3j; Histo1DPtr _hist_dyj2j3_3j; Histo1DPtr _hist_dphij1j2_2j; Histo1DPtr _hist_dphijFjB_2j; Histo1DPtr _hist_dRj1j2_2j; Histo1DPtr _hist_dijetM_2j; Histo1DPtr _hist_dijetM_3j; Histo1DPtr _hist_dijetM_4j; Histo1DPtr _hist_diJetPt_2j; Histo1DPtr _hist_diJetPt_3j; Histo1DPtr _hist_diJetPt_4j; Histo1DPtr _hist_dphij1mu_1j; Histo1DPtr _hist_dphij2mu_2j; Histo1DPtr _hist_dphij3mu_3j; Histo1DPtr _hist_dphij4mu_4j; //------------------------------------- Profile1DPtr _hist_MeanNJht_1j; Profile1DPtr _hist_MeanNJht_2j; Profile1DPtr _hist_MeanNJdyj1j2_2j; Profile1DPtr _hist_MeanNJdyjFjB_2j; //@} }; // The hook for the 
plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1491953); } diff --git a/analyses/pluginCMS/CMS_2017_I1518399.cc b/analyses/pluginCMS/CMS_2017_I1518399.cc --- a/analyses/pluginCMS/CMS_2017_I1518399.cc +++ b/analyses/pluginCMS/CMS_2017_I1518399.cc @@ -1,127 +1,127 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/ChargedLeptons.hh" namespace Rivet { /// Leading jet mass for boosted top quarks at 8 TeV class CMS_2017_I1518399 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1518399); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Dressed leptons IdentifiedFinalState photons(PID::PHOTON); ChargedLeptons charged_leptons; PromptFinalState prompt_leptons(charged_leptons); Cut leptonCuts = Cuts::pT > 45*GeV && Cuts::abseta < 2.1; DressedLeptons dressed_leptons(photons, prompt_leptons, 0.1, leptonCuts); declare(dressed_leptons, "DressedLeptons"); // Jets VetoedFinalState fs_jets; fs_jets.vetoNeutrinos(); declare(FastJets(fs_jets, FastJets::CAM, 1.2), "JetsCA12"); // Partonic top for decay channel definition - declare(PartonicTops(PartonicTops::E_MU, false), "LeptonicTops"); - declare(PartonicTops(PartonicTops::HADRONIC), "HadronicTops"); + declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicTops"); + declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicTops"); // Main histograms book(_hist_mass , "d01-x01-y01"); book(_hist_mass_norm, "d02-x01-y01"); } /// Perform the per-event analysis void analyze(const Event& event) { // Decay mode check const Particles& leptonicTops = apply(event, "LeptonicTops").particlesByPt(); const Particles& hadronicTops = apply(event, "HadronicTops").particlesByPt(); if (leptonicTops.size() != 1 || hadronicTops.size() != 1) vetoEvent; // Get the leptons const DressedLeptons& dressed_leptons = apply(event, "DressedLeptons"); // Leading dressed lepton const vector leptons = dressed_leptons.dressedLeptons(); if (leptons.empty()) vetoEvent; Particle lepton; for (const Particle& l : leptons) { if (l.pT() > lepton.pT()) lepton = l; } // Get the jets const Jets& psjetsCA12 = applyProjection(event, "JetsCA12").jetsByPt(Cuts::pT > 50*GeV); // Subtract the lepton four vector from a jet in case of overlap and clean jets Jets cleanedJets; for (Jet jet : psjetsCA12) { if (deltaR(jet, lepton) < 1.2 ) jet = Jet(jet.momentum()-lepton.momentum(), jet.particles(), jet.tags()); if (jet.abseta() < 2.5) cleanedJets.push_back(jet); } std::sort(cleanedJets.begin(), cleanedJets.end(), cmpMomByPt); // Jet pT cuts if (cleanedJets.size() < 2) vetoEvent; if (cleanedJets.at(0).pT() < 400*GeV) vetoEvent; if (cleanedJets.at(1).pT() < 150*GeV) vetoEvent; // Jet veto if (cleanedJets.size() > 2 && cleanedJets.at(2).pT() > 150*GeV) vetoEvent; // Small distance between 2nd jet and lepton if (deltaR(cleanedJets.at(1), lepton) > 1.2) vetoEvent; // m(jet1) > m(jet2 +lepton) FourMomentum secondJetLepton = cleanedJets.at(1).momentum() + lepton.momentum(); if (cleanedJets.at(0).mass() < secondJetLepton.mass()) vetoEvent; // Fill histograms const double weight = 1.0; _hist_mass->fill(cleanedJets.at(0).mass(), weight); _hist_mass_norm->fill(cleanedJets.at(0).mass(), weight); } /// Normalise 
histograms etc., after the run void finalize() { const double sf = crossSection() * 1000 / sumOfWeights(); scale(_hist_mass, sf); normalize(_hist_mass_norm, 1.0, false); } //@} private: // Histograms Histo1DPtr _hist_mass, _hist_mass_norm; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1518399); } diff --git a/analyses/pluginCMS/CMS_2017_I1610623.cc b/analyses/pluginCMS/CMS_2017_I1610623.cc --- a/analyses/pluginCMS/CMS_2017_I1610623.cc +++ b/analyses/pluginCMS/CMS_2017_I1610623.cc @@ -1,261 +1,261 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/AnalysisLoader.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Tools/RivetYODA.hh" #include namespace Rivet { /// @brief Add a short analysis description here class CMS_2017_I1610623 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1610623); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections FinalState fs; - WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::CLUSTERNODECAY, WFinder::TRACK, WFinder::TRANSMASS); - //WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::CLUSTERNODECAY, WFinder::NOTRACK, WFinder::TRANSMASS); + WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); + //WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); addProjection(wfinder_mu, "WFinder_mu"); // Define veto FS VetoedFinalState vfs; vfs.addVetoOnThisFinalState(wfinder_mu); vfs.addVetoPairId(PID::MUON); vfs.vetoNeutrinos(); FastJets fastjets(vfs, FastJets::ANTIKT, 0.4); addProjection(fastjets, "Jets"); //------------- book(_hist_Mult_exc ,"d01-x01-y01"); book(_hist_inc_WJetMult ,"d02-x01-y01"); //------------- book(_hist_JetPt1j ,"d03-x01-y01"); book(_hist_JetPt2j ,"d04-x01-y01"); book(_hist_JetPt3j ,"d05-x01-y01"); book(_hist_JetPt4j ,"d06-x01-y01"); //------------- book(_hist_JetRap1j ,"d07-x01-y01"); book(_hist_JetRap2j ,"d08-x01-y01"); book(_hist_JetRap3j ,"d09-x01-y01"); book(_hist_JetRap4j ,"d10-x01-y01"); //------------- book(_hist_Ht_1j ,"d11-x01-y01"); book(_hist_Ht_2j ,"d12-x01-y01"); book(_hist_Ht_3j ,"d13-x01-y01"); book(_hist_Ht_4j ,"d14-x01-y01"); //------------- book(_hist_dphij1mu_1j , "d15-x01-y01"); book(_hist_dphij2mu_2j , "d16-x01-y01"); book(_hist_dphij3mu_3j , "d17-x01-y01"); book(_hist_dphij4mu_4j , "d18-x01-y01"); //------------- book(_hist_dRmuj_1j , "d19-x01-y01"); } // define function used for filiing inc Njets histo void Fill(Histo1DPtr& _histJetMult, std::vector& finaljet_list){ _histJetMult->fill(0); for (size_t i=0 ; ifill(i+1); // inclusive multiplicity } } /// Perform the per-event analysis void analyze(const Event& event) { /// @todo Do the event by event analysis here const WFinder& wfinder_mu = applyProjection(event, "WFinder_mu"); if (wfinder_mu.bosons().size() != 1) { vetoEvent; } if (wfinder_mu.bosons().size() == 1) { const FourMomentum lepton0 = 
wfinder_mu.constituentLepton().momentum(); const FourMomentum neutrino = wfinder_mu.constituentNeutrino().momentum(); double WmT = wfinder_mu.mT(); if (WmT < 50.0*GeV) vetoEvent; double pt0 = lepton0.pT(); double eta0 = lepton0.eta(); if ( (fabs(eta0) > 2.4) || (pt0 < 25.0*GeV) ) vetoEvent; // Obtain the jets. vector finaljet_list; vector jet100_list; double HT = 0.0; // loop over jets in an event, pushback in finaljet_list collection for (const Jet& j : applyProjection(event, "Jets").jetsByPt(30.0*GeV)) { const double jrap = j.momentum().rap(); const double jpt = j.momentum().pT(); if ( (fabs(jrap) < 2.4) && (deltaR(lepton0, j.momentum()) > 0.4) ) { if(jpt > 30.0*GeV) { finaljet_list.push_back(j.momentum()); HT += j.momentum().pT(); } if(jpt > 100.0*GeV) { jet100_list.push_back(j.momentum()); } } } // end looping over jets //---------------------- FILL HISTOGRAMS ------------------ // Multiplicity exc plot. _hist_Mult_exc->fill(finaljet_list.size()); // Multiplicity inc plot. Fill(_hist_inc_WJetMult, finaljet_list); // dRmuj plot. double mindR(99999); if(jet100_list.size()>=1) { for (unsigned ji = 0; ji < jet100_list.size(); ji++){ double dr_(9999); dr_ = fabs(deltaR(lepton0, jet100_list[ji])); if (dr_ < mindR){ mindR = dr_; } } if(jet100_list[0].pT() > 300.0*GeV){ _hist_dRmuj_1j->fill(mindR); } } if(finaljet_list.size()>=1) { _hist_JetPt1j->fill(finaljet_list[0].pT()); _hist_JetRap1j->fill(fabs(finaljet_list[0].rap())); _hist_Ht_1j->fill(HT); _hist_dphij1mu_1j->fill(deltaPhi(finaljet_list[0].phi(), lepton0.phi())); } if(finaljet_list.size()>=2) { _hist_JetPt2j->fill(finaljet_list[1].pT()); _hist_JetRap2j->fill(fabs(finaljet_list[1].rap())); _hist_Ht_2j->fill(HT); _hist_dphij2mu_2j->fill(deltaPhi(finaljet_list[1].phi(), lepton0.phi())); } if(finaljet_list.size()>=3) { _hist_JetPt3j->fill(finaljet_list[2].pT()); _hist_JetRap3j->fill(fabs(finaljet_list[2].rap())); _hist_Ht_3j->fill(HT); _hist_dphij3mu_3j->fill(deltaPhi(finaljet_list[2].phi(), lepton0.phi())); } if(finaljet_list.size()>=4) { _hist_JetPt4j->fill(finaljet_list[3].pT()); _hist_JetRap4j->fill(fabs(finaljet_list[3].rap())); _hist_Ht_4j->fill(HT); _hist_dphij4mu_4j->fill(deltaPhi(finaljet_list[3].phi(), lepton0.phi())); } } // close the Wboson loop } //void loop /// Normalise histograms etc., after the run void finalize() { const double crossec = !std::isnan(crossSectionPerEvent()) ? 
crossSection() : 61526.7*picobarn; if (std::isnan(crossSectionPerEvent())){ MSG_INFO("No valid cross-section given, using NNLO xsec calculated by FEWZ " << crossec/picobarn << " pb"); } scale(_hist_Mult_exc, crossec/picobarn/sumOfWeights()); scale(_hist_inc_WJetMult, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt1j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt2j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt3j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt4j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap1j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap2j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap3j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap4j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_1j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_2j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_3j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1mu_1j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij2mu_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij3mu_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij4mu_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dRmuj_1j, crossec/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_Mult_exc; Histo1DPtr _hist_inc_WJetMult; Histo1DPtr _hist_JetPt1j; Histo1DPtr _hist_JetPt2j; Histo1DPtr _hist_JetPt3j; Histo1DPtr _hist_JetPt4j; Histo1DPtr _hist_JetRap1j; Histo1DPtr _hist_JetRap2j; Histo1DPtr _hist_JetRap3j; Histo1DPtr _hist_JetRap4j; Histo1DPtr _hist_Ht_1j; Histo1DPtr _hist_Ht_2j; Histo1DPtr _hist_Ht_3j; Histo1DPtr _hist_Ht_4j; Histo1DPtr _hist_dphij1mu_1j; Histo1DPtr _hist_dphij2mu_2j; Histo1DPtr _hist_dphij3mu_3j; Histo1DPtr _hist_dphij4mu_4j; Histo1DPtr _hist_dRmuj_1j; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1610623); } diff --git a/analyses/pluginCMS/CMS_2017_I1635889.cc b/analyses/pluginCMS/CMS_2017_I1635889.cc --- a/analyses/pluginCMS/CMS_2017_I1635889.cc +++ b/analyses/pluginCMS/CMS_2017_I1635889.cc @@ -1,119 +1,119 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" //#include "Rivet/ParticleName.hh" namespace Rivet { /// Underlying event activity in the Drell-Yan process at 13 TeV class CMS_2017_I1635889 : public Analysis { public: /// Constructor CMS_2017_I1635889() : Analysis("CMS_2017_I1635889") { } /// Initialization void init() { /// @note Using a bare muon Z (but with a clustering radius!?) 
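// With ClusterPhotons::NONE the muons are taken bare, so the 0.2 clustering radius below
// presumably has no effect (see the @note above).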
Cut cut = Cuts::abseta < 2.4 && Cuts::pT > 10*GeV; - ZFinder zfinder(FinalState(), cut, PID::MUON, 81*GeV, 101*GeV, 0.2, ZFinder::NOCLUSTER); + ZFinder zfinder(FinalState(), cut, PID::MUON, 81*GeV, 101*GeV, 0.2, ZFinder::ClusterPhotons::NONE); addProjection(zfinder, "ZFinder"); ChargedFinalState cfs(zfinder.remainingFinalState()); addProjection(cfs, "cfs"); book(_h_Nchg_towards_pTmumu , 1, 1, 1); book(_h_Nchg_transverse_pTmumu , 2, 1, 1); book(_h_Nchg_away_pTmumu , 3, 1, 1); book(_h_pTsum_towards_pTmumu , 4, 1, 1); book(_h_pTsum_transverse_pTmumu , 5, 1, 1); book(_h_pTsum_away_pTmumu , 6, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = applyProjection(event, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; if (zfinder.constituents()[0].pT()<20 && zfinder.constituents()[1].pT()<20)vetoEvent; //std::cout<<"pt[0] = "<(event, "cfs").particlesByPt(Cuts::pT > 0.5*GeV && Cuts::abseta <2.0); int nTowards = 0; int nTransverse = 0; int nAway = 0; double ptSumTowards = 0; double ptSumTransverse = 0; double ptSumAway = 0; for (const Particle& p : particles) { double dphi = fabs(deltaPhi(Zphi, p.phi())); double pT = p.pT(); if ( dphi < M_PI/3 ) { nTowards++; ptSumTowards += pT; } else if ( dphi < 2.*M_PI/3 ) { nTransverse++; ptSumTransverse += pT; } else { nAway++; ptSumAway += pT; } } // Loop over particles const double area = 8./3.*M_PI; _h_Nchg_towards_pTmumu-> fill(Zpt, 1./area * nTowards); _h_Nchg_transverse_pTmumu-> fill(Zpt, 1./area * nTransverse); _h_Nchg_away_pTmumu-> fill(Zpt, 1./area * nAway); _h_pTsum_towards_pTmumu-> fill(Zpt, 1./area * ptSumTowards); _h_pTsum_transverse_pTmumu-> fill(Zpt, 1./area * ptSumTransverse); _h_pTsum_away_pTmumu-> fill(Zpt, 1./area * ptSumAway); } /// Normalise histograms etc., after the run void finalize() { } private: /// @name Histogram objects //@{ Profile1DPtr _h_Nchg_towards_pTmumu; Profile1DPtr _h_Nchg_transverse_pTmumu; Profile1DPtr _h_Nchg_away_pTmumu; Profile1DPtr _h_pTsum_towards_pTmumu; Profile1DPtr _h_pTsum_transverse_pTmumu; Profile1DPtr _h_pTsum_away_pTmumu; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1635889); } diff --git a/analyses/pluginD0/D0_2000_I503361.cc b/analyses/pluginD0/D0_2000_I503361.cc --- a/analyses/pluginD0/D0_2000_I503361.cc +++ b/analyses/pluginD0/D0_2000_I503361.cc @@ -1,76 +1,76 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @ D0 Run I Z \f$ p_\perp \f$ in Drell-Yan events /// @author Simone Amoroso class D0_2000_I503361 : public Analysis { public: /// Constructor D0_2000_I503361() : Analysis("D0_2000_I503361") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections here - ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, 75*GeV, 105*GeV, 0.0*GeV, ZFinder::NOCLUSTER); + ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, 75*GeV, 105*GeV, 0.0*GeV, ZFinder::ClusterPhotons::NONE); declare(zfinder, "ZFinder"); book(_hist_zpt ,1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { /// @todo Do the event by event analysis here const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1) { MSG_DEBUG("Num e+ e- pairs found = " << zfinder.bosons().size()); vetoEvent; } const FourMomentum& pZ = zfinder.bosons()[0].momentum(); if (pZ.mass2() < 0) { MSG_DEBUG("Negative Z mass**2 = " << pZ.mass2()/GeV2 << "!"); vetoEvent; } 
MSG_DEBUG("Dilepton mass = " << pZ.mass()/GeV << " GeV"); _hist_zpt->fill(pZ.pT()); } /// Normalise histograms etc., after the run void finalize() { scale(_hist_zpt, crossSection()/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_zpt; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2000_I503361); } diff --git a/analyses/pluginD0/D0_2007_S7075677.cc b/analyses/pluginD0/D0_2007_S7075677.cc --- a/analyses/pluginD0/D0_2007_S7075677.cc +++ b/analyses/pluginD0/D0_2007_S7075677.cc @@ -1,72 +1,72 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief Measurement of D0 Run II Z \f$ p_\perp \f$ diff cross-section shape /// @author Andy Buckley /// @author Gavin Hesketh /// @author Frank Siegert class D0_2007_S7075677 : public Analysis { public: /// Default constructor. D0_2007_S7075677() : Analysis("D0_2007_S7075677") { } /// @name Analysis methods //@{ /// Book histograms void init() { ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, - 71*GeV, 111*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 71*GeV, 111*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); book(_h_yZ ,1, 1, 1); } /// Do the analysis void analyze(const Event & e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() == 1) { const Particles& el(zfinder.constituents()); if (el[0].pT() > 25*GeV || el[1].pT() > 25*GeV) { _h_yZ->fill(fabs(zfinder.bosons()[0].rapidity())); } } else { MSG_DEBUG("No unique lepton pair found."); } } // Finalize void finalize() { // Data seems to have been normalized for the avg of the two sides // (+ve & -ve rapidity) rather than the sum, hence the 0.5: normalize(_h_yZ, 0.5); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_yZ; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2007_S7075677); } diff --git a/analyses/pluginD0/D0_2008_S6879055.cc b/analyses/pluginD0/D0_2008_S6879055.cc --- a/analyses/pluginD0/D0_2008_S6879055.cc +++ b/analyses/pluginD0/D0_2008_S6879055.cc @@ -1,122 +1,122 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief D0 measurement of the ratio \f$ \sigma(Z/\gamma^* + n \text{ jets})/\sigma(Z/\gamma^*) \f$ class D0_2008_S6879055 : public Analysis { public: /// Default constructor. 
D0_2008_S6879055() : Analysis("D0_2008_S6879055") { } // DEFAULT_RIVET_ANA_CONSTRUCTOR(D0_2008_S6879055); /// @name Analysis methods //@{ // Book histograms void init() { FinalState fs; ZFinder zfinder(fs, Cuts::open(), PID::ELECTRON, - 40*GeV, 200*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 40*GeV, 200*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5); declare(conefinder, "ConeFinder"); book(_crossSectionRatio ,1, 1, 1); book(_pTjet1 ,2, 1, 1); book(_pTjet2 ,3, 1, 1); book(_pTjet3 ,4, 1, 1); } /// Do the analysis void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size()!=1) { vetoEvent; } FourMomentum e0 = zfinder.constituents()[0].momentum(); FourMomentum e1 = zfinder.constituents()[1].momentum(); const double e0eta = e0.eta(); const double e0phi = e0.phi(); const double e1eta = e1.eta(); const double e1phi = e1.phi(); vector finaljet_list; for (const Jet& j : apply(event, "ConeFinder").jetsByPt(20*GeV)) { const double jeta = j.eta(); const double jphi = j.phi(); if (fabs(jeta) < 2.5) { if (deltaR(e0eta, e0phi, jeta, jphi) > 0.4 && deltaR(e1eta, e1phi, jeta, jphi) > 0.4) { finaljet_list.push_back(j.momentum()); } } } // For normalisation of crossSection data (includes events with no jets passing cuts) _crossSectionRatio->fill(0); // Fill jet pT and multiplicities if (finaljet_list.size() >= 1) { _crossSectionRatio->fill(1); _pTjet1->fill(finaljet_list[0].pT()); } if (finaljet_list.size() >= 2) { _crossSectionRatio->fill(2); _pTjet2->fill(finaljet_list[1].pT()); } if (finaljet_list.size() >= 3) { _crossSectionRatio->fill(3); _pTjet3->fill(finaljet_list[2].pT()); } if (finaljet_list.size() >= 4) { _crossSectionRatio->fill(4); } } /// Finalize void finalize() { // Now divide by the inclusive result scale(_crossSectionRatio,1/_crossSectionRatio->bin(0).area()); // Normalise jet pTs to integrals of data // @note There is no other way to do this, because these quantities are not detector-corrected /// @todo Use integrals of refData()? normalize(_pTjet1, 10439); // fixed norm OK normalize(_pTjet2, 1461.5); // fixed norm OK normalize(_pTjet3, 217); // fixed norm OK } //@} private: /// @name Histograms //@{ Histo1DPtr _crossSectionRatio; Histo1DPtr _pTjet1; Histo1DPtr _pTjet2; Histo1DPtr _pTjet3; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2008_S6879055); } diff --git a/analyses/pluginD0/D0_2008_S7554427.cc b/analyses/pluginD0/D0_2008_S7554427.cc --- a/analyses/pluginD0/D0_2008_S7554427.cc +++ b/analyses/pluginD0/D0_2008_S7554427.cc @@ -1,73 +1,73 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief D0 Run II Z \f$ p_\perp \f$ differential cross-section shape /// @author Andy Buckley /// @author Gavin Hesketh /// @author Frank Siegert class D0_2008_S7554427 : public Analysis { public: /// Default constructor. 
D0_2008_S7554427() : Analysis("D0_2008_S7554427") { } /// @name Analysis methods //@{ /// Book histograms void init() { FinalState fs; ZFinder zfinder(fs, Cuts::open(), PID::ELECTRON, - 40*GeV, 200*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 40*GeV, 200*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); book(_h_ZpT ,1, 1, 1); book(_h_forward_ZpT ,3, 1, 1); } /// Do the analysis void analyze(const Event& e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() != 1) { MSG_DEBUG("No unique lepton pair found."); vetoEvent; } const double yZ = fabs(zfinder.bosons()[0].rapidity()); const double pTZ = zfinder.bosons()[0].pT(); _h_ZpT->fill(pTZ); if (yZ > 2) _h_forward_ZpT->fill(pTZ); } // Finalize void finalize() { normalize(_h_ZpT); normalize(_h_forward_ZpT); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_ZpT, _h_forward_ZpT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2008_S7554427); } diff --git a/analyses/pluginD0/D0_2008_S7863608.cc b/analyses/pluginD0/D0_2008_S7863608.cc --- a/analyses/pluginD0/D0_2008_S7863608.cc +++ b/analyses/pluginD0/D0_2008_S7863608.cc @@ -1,132 +1,132 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief D0 differential Z/\f$ \gamma^* \f$ + jet + \f$ X \f$ cross sections /// @author Gavin Hesketh, Andy Buckley, Frank Siegert class D0_2008_S7863608 : public Analysis { public: /// Constructor D0_2008_S7863608() : Analysis("D0_2008_S7863608") { } /// @name Analysis methods //@{ /// Book histograms void init() { /// @todo These clustering arguments look odd: are they ok? Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV; - ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK); + ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5); declare(conefinder, "ConeFinder"); book(_sum_of_weights_inclusive, "sum_of_weights_inclusive"); book(_h_jet_pT_cross_section ,1, 1, 1); book(_h_jet_pT_normalised ,1, 1, 2); book(_h_jet_y_cross_section ,2, 1, 1); book(_h_jet_y_normalised ,2, 1, 2); book(_h_Z_pT_cross_section ,3, 1, 1); book(_h_Z_pT_normalised ,3, 1, 2); book(_h_Z_y_cross_section ,4, 1, 1); book(_h_Z_y_normalised ,4, 1, 2); book(_h_total_cross_section ,5, 1, 1); } // Do the analysis void analyze(const Event& e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size()==1) { _sum_of_weights_inclusive->fill(); const JetAlg& jetpro = apply(e, "ConeFinder"); const Jets& jets = jetpro.jetsByPt(20*GeV); Jets jets_cut; for (const Jet& j : jets) { if (j.abseta() < 2.8) { jets_cut.push_back(j); } } // Return if there are no jets: if(jets_cut.size()<1) { MSG_DEBUG("Skipping event " << numEvents() << " because no jets pass cuts "); vetoEvent; } const FourMomentum Zmom = zfinder.bosons()[0].momentum(); // In jet pT _h_jet_pT_cross_section->fill( jets_cut[0].pT()); _h_jet_pT_normalised->fill( jets_cut[0].pT()); _h_jet_y_cross_section->fill( fabs(jets_cut[0].rapidity())); _h_jet_y_normalised->fill( fabs(jets_cut[0].rapidity())); // In Z pT _h_Z_pT_cross_section->fill(Zmom.pT()); _h_Z_pT_normalised->fill(Zmom.pT()); _h_Z_y_cross_section->fill(Zmom.absrap()); _h_Z_y_normalised->fill(Zmom.absrap()); _h_total_cross_section->fill(1960); } } /// 
Finalize void finalize() { const double invlumi = crossSection()/sumOfWeights(); scale(_h_total_cross_section, invlumi); scale(_h_jet_pT_cross_section, invlumi); scale(_h_jet_y_cross_section, invlumi); scale(_h_Z_pT_cross_section, invlumi); scale(_h_Z_y_cross_section, invlumi); double factor=1/dbl(*_sum_of_weights_inclusive); if (_sum_of_weights_inclusive->val() == 0) factor = 0; scale(_h_jet_pT_normalised, factor); scale(_h_jet_y_normalised, factor); scale(_h_Z_pT_normalised, factor); scale(_h_Z_y_normalised, factor); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_jet_pT_cross_section; Histo1DPtr _h_jet_y_cross_section; Histo1DPtr _h_Z_pT_cross_section; Histo1DPtr _h_Z_y_cross_section; Histo1DPtr _h_total_cross_section; Histo1DPtr _h_jet_pT_normalised; Histo1DPtr _h_jet_y_normalised; Histo1DPtr _h_Z_pT_normalised; Histo1DPtr _h_Z_y_normalised; //@} CounterPtr _sum_of_weights_inclusive; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2008_S7863608); } diff --git a/analyses/pluginD0/D0_2009_S8202443.cc b/analyses/pluginD0/D0_2009_S8202443.cc --- a/analyses/pluginD0/D0_2009_S8202443.cc +++ b/analyses/pluginD0/D0_2009_S8202443.cc @@ -1,126 +1,126 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief D0 Z + jet + \f$ X \f$ cross-section / \f$ p_\perp \f$ distributions class D0_2009_S8202443 : public Analysis { public: /// Constructor D0_2009_S8202443() : Analysis("D0_2009_S8202443") { } /// @name Analysis methods //@{ /// Book histograms void init() { FinalState fs; // Leptons in constrained tracking acceptance Cut cuts = (Cuts::abseta < 1.1 || Cuts::absetaIn(1.5, 2.5)) && Cuts::pT > 25*GeV; - ZFinder zfinder_constrained(fs, cuts, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zfinder_constrained(fs, cuts, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder_constrained, "ZFinderConstrained"); FastJets conefinder_constrained(zfinder_constrained.remainingFinalState(), FastJets::D0ILCONE, 0.5); declare(conefinder_constrained, "ConeFinderConstrained"); // Unconstrained leptons - ZFinder zfinder(fs, Cuts::open(), PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zfinder(fs, Cuts::open(), PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5); declare(conefinder, "ConeFinder"); book(_h_jet1_pT_constrained ,1, 1, 1); book(_h_jet2_pT_constrained ,3, 1, 1); book(_h_jet3_pT_constrained ,5, 1, 1); book(_h_jet1_pT ,2, 1, 1); book(_h_jet2_pT ,4, 1, 1); book(_h_jet3_pT ,6, 1, 1); book(_sum_of_weights,"sum_of_weights"); book(_sum_of_weights_constrained, "sum_of_weights_constrained"); } // Do the analysis void analyze(const Event& e) { // Unconstrained electrons const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() == 0) { MSG_DEBUG("No unique lepton pair found."); vetoEvent; } _sum_of_weights->fill(); const Jets jets_cut = apply(e, "ConeFinder").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.5); if (jets_cut.size() > 0) _h_jet1_pT->fill(jets_cut[0].pT()/GeV); if (jets_cut.size() > 1) _h_jet2_pT->fill(jets_cut[1].pT()/GeV); if (jets_cut.size() > 2) _h_jet3_pT->fill(jets_cut[2].pT()/GeV); // Constrained electrons const ZFinder& 
zfinder_constrained = apply(e, "ZFinderConstrained"); if (zfinder_constrained.bosons().size() == 0) { MSG_DEBUG("No unique constrained lepton pair found."); return; // Not really a "veto", since if we got this far there is an unconstrained Z } _sum_of_weights_constrained->fill(); const Jets& jets_constrained = apply(e, "ConeFinderConstrained").jetsByPt(20*GeV); /// @todo Replace this explicit selection with a Cut Jets jets_cut_constrained; for (const Jet& j : jets_constrained) { if (j.abseta() < 2.5) jets_cut_constrained.push_back(j); } if (jets_cut_constrained.size() > 0) _h_jet1_pT_constrained->fill(jets_cut_constrained[0].pT()/GeV); if (jets_cut_constrained.size() > 1) _h_jet2_pT_constrained->fill(jets_cut_constrained[1].pT()/GeV); if (jets_cut_constrained.size() > 2) _h_jet3_pT_constrained->fill(jets_cut_constrained[2].pT()/GeV); } // Finalize void finalize() { scale(_h_jet1_pT, 1/ *_sum_of_weights); scale(_h_jet2_pT, 1/ *_sum_of_weights); scale(_h_jet3_pT, 1/ *_sum_of_weights); scale(_h_jet1_pT_constrained, 1/ *_sum_of_weights_constrained); scale(_h_jet2_pT_constrained, 1/ *_sum_of_weights_constrained); scale(_h_jet3_pT_constrained, 1/ *_sum_of_weights_constrained); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_jet1_pT; Histo1DPtr _h_jet2_pT; Histo1DPtr _h_jet3_pT; Histo1DPtr _h_jet1_pT_constrained; Histo1DPtr _h_jet2_pT_constrained; Histo1DPtr _h_jet3_pT_constrained; //@} CounterPtr _sum_of_weights, _sum_of_weights_constrained; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2009_S8202443); } diff --git a/analyses/pluginD0/D0_2009_S8349509.cc b/analyses/pluginD0/D0_2009_S8349509.cc --- a/analyses/pluginD0/D0_2009_S8349509.cc +++ b/analyses/pluginD0/D0_2009_S8349509.cc @@ -1,168 +1,168 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief D0 Z+jets angular distributions class D0_2009_S8349509 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor D0_2009_S8349509() : Analysis("D0_2009_S8349509") { } //@} /// @name Analysis methods //@{ /// Book histograms void init() { Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV; - ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK); + ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5); declare(conefinder, "ConeFinder"); book(_h_dphi_jet_Z25 ,1, 1, 1); book(_h_dphi_jet_Z45 ,2, 1, 1); book(_h_dy_jet_Z25 ,3, 1, 1); book(_h_dy_jet_Z45 ,4, 1, 1); book(_h_yboost_jet_Z25 ,5, 1, 1); book(_h_yboost_jet_Z45 ,6, 1, 1); book(_h_dphi_jet_Z25_xs ,1, 1, 2); book(_h_dphi_jet_Z45_xs ,2, 1, 2); book(_h_dy_jet_Z25_xs ,3, 1, 2); book(_h_dy_jet_Z45_xs ,4, 1, 2); book(_h_yboost_jet_Z25_xs ,5, 1, 2); book(_h_yboost_jet_Z45_xs ,6, 1, 2); book(_inclusive_Z_sumofweights, "_inclusive_Z_sumofweights"); } void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() == 1) { // count inclusive sum of weights for histogram normalisation _inclusive_Z_sumofweights->fill(); const FourMomentum& zmom = zfinder.bosons()[0].momentum(); if (zmom.pT() < 25*GeV) vetoEvent; Jets jets; for (const Jet& j : apply(event, "ConeFinder").jetsByPt(20*GeV)) { if (j.abseta() < 2.8) { jets.push_back(j); break; } } // Return if there are no jets: if (jets.size() < 1) { MSG_DEBUG("Skipping event " << numEvents() << " because no jets pass cuts "); vetoEvent; } const FourMomentum& jetmom = jets[0].momentum(); const double yZ = zmom.rapidity(); const double yjet = jetmom.rapidity(); const double dphi = deltaPhi(zmom, jetmom); const double dy = deltaRap(zmom, jetmom); const double yboost = fabs(yZ+yjet)/2; if (zmom.pT() > 25*GeV) { _h_dphi_jet_Z25->fill(dphi); _h_dy_jet_Z25->fill(dy); _h_yboost_jet_Z25->fill(yboost); _h_dphi_jet_Z25_xs->fill(dphi); _h_dy_jet_Z25_xs->fill(dy); _h_yboost_jet_Z25_xs->fill(yboost); } if (zmom.pT() > 45*GeV) { _h_dphi_jet_Z45->fill(dphi); _h_dy_jet_Z45->fill(dy); _h_yboost_jet_Z45->fill(yboost); _h_dphi_jet_Z45_xs->fill(dphi); _h_dy_jet_Z45_xs->fill(dy); _h_yboost_jet_Z45_xs->fill(yboost); } } } void finalize() { if (_inclusive_Z_sumofweights->val() == 0) return; scale(_h_dphi_jet_Z25, 1/ *_inclusive_Z_sumofweights); scale(_h_dphi_jet_Z45, 1/ *_inclusive_Z_sumofweights); scale(_h_dy_jet_Z25, 1/ *_inclusive_Z_sumofweights); scale(_h_dy_jet_Z45, 1/ *_inclusive_Z_sumofweights); scale(_h_yboost_jet_Z25, 1/ *_inclusive_Z_sumofweights); scale(_h_yboost_jet_Z45, 1/ *_inclusive_Z_sumofweights); scale(_h_dphi_jet_Z25_xs, crossSectionPerEvent()); scale(_h_dphi_jet_Z45_xs, crossSectionPerEvent()); scale(_h_dy_jet_Z25_xs, crossSectionPerEvent()); scale(_h_dy_jet_Z45_xs, crossSectionPerEvent()); scale(_h_yboost_jet_Z25_xs, crossSectionPerEvent()); scale(_h_yboost_jet_Z45_xs, crossSectionPerEvent()); } //@} private: // Data members like post-cuts event weight counters go here private: /// @name Histograms (normalised) //@{ Histo1DPtr _h_dphi_jet_Z25; Histo1DPtr _h_dphi_jet_Z45; Histo1DPtr _h_dy_jet_Z25; Histo1DPtr _h_dy_jet_Z45; Histo1DPtr _h_yboost_jet_Z25; Histo1DPtr _h_yboost_jet_Z45; //@} /// @name Histograms (absolute cross sections) //@{ Histo1DPtr _h_dphi_jet_Z25_xs; Histo1DPtr _h_dphi_jet_Z45_xs; Histo1DPtr _h_dy_jet_Z25_xs; Histo1DPtr _h_dy_jet_Z45_xs; Histo1DPtr _h_yboost_jet_Z25_xs; Histo1DPtr _h_yboost_jet_Z45_xs; //@} 
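/// Inclusive-Z sum of weights, used in finalize() to normalise the shape histograms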
CounterPtr _inclusive_Z_sumofweights; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2009_S8349509); } diff --git a/analyses/pluginD0/D0_2010_S8671338.cc b/analyses/pluginD0/D0_2010_S8671338.cc --- a/analyses/pluginD0/D0_2010_S8671338.cc +++ b/analyses/pluginD0/D0_2010_S8671338.cc @@ -1,69 +1,69 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief Measurement of Z(->muon muon) pT differential cross-section /// @author Flavia Dias class D0_2010_S8671338 : public Analysis { public: /// Constructor D0_2010_S8671338() : Analysis("D0_2010_S8671338") { } ///@name Analysis methods //@{ /// Add projections and book histograms void init() { Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV; - ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK); + ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); book(_h_Z_pT_normalised ,1, 1, 1); book(_h_Z_pT_xs ,2, 1, 1); } // Do the analysis void analyze(const Event& e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size()==1) { double ZpT = zfinder.bosons()[0].pT()/GeV; _h_Z_pT_normalised->fill(ZpT); _h_Z_pT_xs->fill(ZpT); } } /// Finalize void finalize() { normalize(_h_Z_pT_normalised); scale(_h_Z_pT_xs, crossSection()/sumOfWeights()); } //@} private: /// @name Histogram Histo1DPtr _h_Z_pT_normalised; Histo1DPtr _h_Z_pT_xs; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2010_S8671338); } diff --git a/analyses/pluginD0/D0_2010_S8821313.cc b/analyses/pluginD0/D0_2010_S8821313.cc --- a/analyses/pluginD0/D0_2010_S8821313.cc +++ b/analyses/pluginD0/D0_2010_S8821313.cc @@ -1,105 +1,105 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class D0_2010_S8821313 : public Analysis { public: /// Constructor D0_2010_S8821313() : Analysis("D0_2010_S8821313") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections FinalState fs; Cut cuts = (Cuts::abseta < 1.1 || Cuts::absetaIn( 1.5, 3.0)) && Cuts::pT > 20*GeV; - ZFinder zfinder_ee(fs, cuts, PID::ELECTRON, 70*GeV, 110*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zfinder_ee(fs, cuts, PID::ELECTRON, 70*GeV, 110*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder_ee, "zfinder_ee"); - ZFinder zfinder_mm(fs, Cuts::abseta < 2 && Cuts::pT > 15*GeV, PID::MUON, 70*GeV, 110*GeV, 0.0, ZFinder::NOCLUSTER, ZFinder::NOTRACK); + ZFinder zfinder_mm(fs, Cuts::abseta < 2 && Cuts::pT > 15*GeV, PID::MUON, 70*GeV, 110*GeV, 0.0, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::NO); declare(zfinder_mm, "zfinder_mm"); /// Book histograms here {Histo1DPtr tmp; _h_phistar_ee.add(0.0, 1.0, book(tmp, 1, 1, 1));} {Histo1DPtr tmp; _h_phistar_ee.add(1.0, 2.0, book(tmp, 1, 1, 2));} {Histo1DPtr tmp; _h_phistar_ee.add(2.0, 10.0,book(tmp, 1, 1, 3));} {Histo1DPtr tmp; _h_phistar_mm.add(0.0, 1.0, book(tmp, 2, 1, 1));} {Histo1DPtr tmp; _h_phistar_mm.add(1.0, 2.0, book(tmp, 2, 1, 2));} } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; const ZFinder& zfinder_ee = apply(event, "zfinder_ee"); if (zfinder_ee.bosons().size() == 1) 
{ Particles ee = zfinder_ee.constituents(); std::sort(ee.begin(), ee.end(), cmpMomByPt); const FourMomentum& eminus = PID::threeCharge(ee[0].pid()) < 0 ? ee[0].momentum() : ee[1].momentum(); const FourMomentum& eplus = PID::threeCharge(ee[0].pid()) < 0 ? ee[1].momentum() : ee[0].momentum(); double phi_acop = M_PI - mapAngle0ToPi(eminus.phi() - eplus.phi()); double costhetastar = tanh((eminus.eta() - eplus.eta())/2); double sin2thetastar = 1 - sqr(costhetastar); if (sin2thetastar < 0) sin2thetastar = 0; const double phistar = tan(phi_acop/2) * sqrt(sin2thetastar); const FourMomentum& zmom = zfinder_ee.bosons()[0].momentum(); _h_phistar_ee.fill(zmom.rapidity(), phistar, weight); } const ZFinder& zfinder_mm = apply(event, "zfinder_mm"); if (zfinder_mm.bosons().size() == 1) { Particles mm = zfinder_mm.constituents(); std::sort(mm.begin(), mm.end(), cmpMomByPt); const FourMomentum& mminus = PID::threeCharge(mm[0].pid()) < 0 ? mm[0].momentum() : mm[1].momentum(); const FourMomentum& mplus = PID::threeCharge(mm[0].pid()) < 0 ? mm[1].momentum() : mm[0].momentum(); double phi_acop = M_PI - mapAngle0ToPi(mminus.phi() - mplus.phi()); double costhetastar = tanh((mminus.eta() - mplus.eta())/2); double sin2thetastar = 1 - sqr(costhetastar); if (sin2thetastar < 0) sin2thetastar = 0; const double phistar = tan(phi_acop/2) * sqrt(sin2thetastar); const FourMomentum& zmom = zfinder_mm.bosons()[0].momentum(); _h_phistar_mm.fill(zmom.rapidity(), phistar, weight); } } /// Normalise histograms etc., after the run void finalize() { for (Histo1DPtr hist : _h_phistar_ee.histos()) normalize(hist); for (Histo1DPtr hist : _h_phistar_mm.histos()) normalize(hist); } //@} private: /// @name Histograms //@{ BinnedHistogram _h_phistar_ee; BinnedHistogram _h_phistar_mm; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2010_S8821313); } diff --git a/analyses/pluginD0/D0_2015_I1324946.cc b/analyses/pluginD0/D0_2015_I1324946.cc --- a/analyses/pluginD0/D0_2015_I1324946.cc +++ b/analyses/pluginD0/D0_2015_I1324946.cc @@ -1,101 +1,101 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class D0_2015_I1324946 : public Analysis { public: /// Constructor D0_2015_I1324946() : Analysis("D0_2015_I1324946") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; - ZFinder zfinder_mm(fs, Cuts::abseta < 2 && Cuts::pT > 15*GeV, PID::MUON, 30*GeV, 500*GeV, 0.0, ZFinder::NOCLUSTER, ZFinder::NOTRACK); + ZFinder zfinder_mm(fs, Cuts::abseta < 2 && Cuts::pT > 15*GeV, PID::MUON, 30*GeV, 500*GeV, 0.0, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::NO); declare(zfinder_mm, "zfinder_mm"); book(_h_phistar_mm_peak_central ,1, 1, 1); book(_h_phistar_mm_peak_forward ,1, 1, 2); book(_h_phistar_mm_low_central ,2, 1, 1); book(_h_phistar_mm_low_forward ,2, 1, 2); book(_h_phistar_mm_high1 ,3, 1, 1); book(_h_phistar_mm_high2 ,4, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; //70(event, "zfinder_mm"); if (zfinder_mm.bosons().size() == 1) { Particles mm = zfinder_mm.constituents(); std::sort(mm.begin(), mm.end(), cmpMomByPt); const FourMomentum& mminus = PID::threeCharge(mm[0].pid()) < 0 ? mm[0].momentum() : mm[1].momentum(); const FourMomentum& mplus = PID::threeCharge(mm[0].pid()) < 0 ? 
mm[1].momentum() : mm[0].momentum(); double phi_acop = M_PI - mapAngle0ToPi(mminus.phi() - mplus.phi()); double costhetastar = tanh((mminus.eta() - mplus.eta())/2); double sin2thetastar = 1 - sqr(costhetastar); if (sin2thetastar < 0) sin2thetastar = 0; const double phistar = tan(phi_acop/2) * sqrt(sin2thetastar); const FourMomentum& zmom = zfinder_mm.bosons()[0].momentum(); if (zmom.mass()<30*GeV || zmom.mass() >500*GeV) vetoEvent; if( zmom.mass()>70 && zmom.mass()<100 && zmom.absrap()<1.0) _h_phistar_mm_peak_central->fill(phistar, weight); if( zmom.mass()>70 && zmom.mass()<100 && zmom.absrap()>1.0 && zmom.absrap()<2.0) _h_phistar_mm_peak_forward->fill(phistar, weight); if( zmom.mass()>30 && zmom.mass()<60 && zmom.absrap()<1.0) _h_phistar_mm_low_central->fill(phistar, weight); if( zmom.mass()>30 && zmom.mass()<60 && zmom.absrap()>1.0 && zmom.absrap()<2.0) _h_phistar_mm_low_forward->fill(phistar, weight); if( zmom.mass()>160 && zmom.mass()<300) _h_phistar_mm_high1->fill(phistar, weight); if( zmom.mass()>300 && zmom.mass()<500) _h_phistar_mm_high2->fill(phistar, weight); } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_phistar_mm_low_central); normalize(_h_phistar_mm_low_forward); normalize(_h_phistar_mm_peak_central); normalize(_h_phistar_mm_peak_forward); normalize(_h_phistar_mm_high1); normalize(_h_phistar_mm_high2); } //} //@} private: /// @name Histograms //@{ Histo1DPtr _h_phistar_mm_low_central; Histo1DPtr _h_phistar_mm_low_forward; Histo1DPtr _h_phistar_mm_peak_central; Histo1DPtr _h_phistar_mm_peak_forward; Histo1DPtr _h_phistar_mm_high1; Histo1DPtr _h_phistar_mm_high2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2015_I1324946); } diff --git a/analyses/pluginHERA/H1_1995_S3167097.cc b/analyses/pluginHERA/H1_1995_S3167097.cc --- a/analyses/pluginHERA/H1_1995_S3167097.cc +++ b/analyses/pluginHERA/H1_1995_S3167097.cc @@ -1,128 +1,128 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/DISFinalState.hh" #include "Rivet/Projections/CentralEtHCM.hh" namespace Rivet { /// H1 energy flow in DIS /// /// @todo Make histograms match those in HepData and use autobooking /// /// @author Leif Lonnblad /// @author Andy Buckley class H1_1995_S3167097 : public Analysis { public: /// Constructor H1_1995_S3167097() : Analysis("H1_1995_S3167097") { } /// @name Analysis methods //@{ void init() { // Projections const DISKinematics& diskin = declare(DISKinematics(), "Kinematics"); - const DISFinalState& fshcm = declare(DISFinalState(diskin, DISFinalState::HCM), "FS"); + const DISFinalState& fshcm = declare(DISFinalState(diskin, DISFinalState::BoostFrame::HCM), "FS"); declare(CentralEtHCM(fshcm), "Y1HCM"); // Histograms /// @todo Convert to use autobooking and correspond to HepData data tables _hEtFlow.resize(9); for (size_t i = 0; i < 9; ++i) { book(_sumw[i], "sumW_" + to_str(i)); book(_hEtFlow[i] ,to_str(i), 24, -6, 6); } book(_tmphAvEt, "TMP/hAvEt", 9, 1.0, 10.0); book(_tmphAvX , "TMP/hAvX", 9, 1.0, 10.0); book(_tmphAvQ2, "TMP/hAvQ2", 9, 1.0, 10.0); book(_tmphN , "TMP/hN", 9, 1.0, 10.0); } /// Calculate the bin number from the DISKinematics projection /// @todo Convert to use a HEPUtils Binning1D size_t _getbin(const DISKinematics& dk) { if (inRange(dk.Q2()/GeV2, 5.0, 10.0)) { if (inRange(dk.x(), 1e-4, 2e-4)) return 0; if (inRange(dk.x(), 2e-4, 5e-4) && dk.Q2() > 6.0*GeV2) return 1; } else if (inRange(dk.Q2()/GeV2, 10.0, 20.0)) { if (inRange(dk.x(), 2e-4, 5e-4)) return 2; if (inRange(dk.x(), 5e-4, 8e-4)) return 3; if 
(inRange(dk.x(), 8e-4, 1.5e-3)) return 4; if (inRange(dk.x(), 1.5e-3, 4e-3)) return 5; } else if (inRange(dk.Q2()/GeV2, 20.0, 50.0)) { if (inRange(dk.x(), 5e-4, 1.4e-3)) return 6; if (inRange(dk.x(), 1.4e-3, 3e-3)) return 7; if (inRange(dk.x(), 3e-3, 1e-2)) return 8; } return -1; } void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); const DISKinematics& dk = apply(event, "Kinematics"); const CentralEtHCM& y1 = apply(event, "Y1HCM"); const int ibin = _getbin(dk); if (ibin < 0) vetoEvent; _sumw[ibin]->fill(); for (size_t i = 0, N = fs.particles().size(); i < N; ++i) { const double rap = fs.particles()[i].rapidity(); const double et = fs.particles()[i].Et(); _hEtFlow[ibin]->fill(rap, et/GeV); } /// @todo Use fillBin? _tmphAvEt->fill(ibin + 1.5, y1.sumEt()/GeV); _tmphAvX->fill(ibin + 1.5, dk.x()); _tmphAvQ2->fill(ibin + 1.5, dk.Q2()/GeV2); _tmphN->fill(ibin + 1.5); } void finalize() { for (size_t ibin = 0; ibin < 9; ++ibin) scale(_hEtFlow[ibin], 0.5/ *_sumw[ibin]); /// @todo Improve this! Scatter2DPtr s21,s22,s23; divide(_tmphAvEt,_tmphN,s21); book(s21, "21"); divide(_tmphAvX,_tmphN,s22); book(s22, "22"); divide(_tmphAvQ2,_tmphN,s23); book(s23, "23"); // addAnalysisObject(make_shared(_tmphAvEt/_tmphN, histoPath("21")) ); // addAnalysisObject(make_shared(_tmphAvX/_tmphN, histoPath("22")) ); // addAnalysisObject(make_shared(_tmphAvQ2/_tmphN, histoPath("23")) ); } //@} private: /// Histograms for the \f$ E_T \f$ flow vector _hEtFlow; /// Temporary histograms for averages in different kinematical bins. Histo1DPtr _tmphAvEt, _tmphAvX, _tmphAvQ2, _tmphN; /// Weights counters for each kinematic bin array _sumw; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(H1_1995_S3167097); } diff --git a/analyses/pluginLEP/ALEPH_2016_I1492968.cc b/analyses/pluginLEP/ALEPH_2016_I1492968.cc --- a/analyses/pluginLEP/ALEPH_2016_I1492968.cc +++ b/analyses/pluginLEP/ALEPH_2016_I1492968.cc @@ -1,136 +1,136 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { // TODO this calculation needs checked! 
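// Note on the calculation flagged above: since a3 x (a3 - b3) = -(a3 x b3), this is
// |a3 x b3| / |b3| evaluated with transverse (polarRadius) magnitudes rather than full
// 3-vector moduli, i.e. approximately the distance of closest approach of the muon
// origin a to a line through the origin along the jet direction b.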
double impact(const FourMomentum& a, const FourMomentum& b) { const Vector3 a3 = a.vector3(); const Vector3 b3 = b.vector3(); double impact = 0; if (b3.polarRadius() !=0) { impact = (a3).cross((a3-b3)).polarRadius() / (b3).polarRadius(); } return impact; } /// @brief Add a short analysis description here class ALEPH_2016_I1492968 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALEPH_2016_I1492968); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections const FinalState fs; addProjection(fs, "FS"); - FastJets jets(fs, FastJets::GENKTEE, 0.5, JetAlg::NO_MUONS, JetAlg::ALL_INVISIBLES); - //FastJets jets(fs, FastJets::ANTIKT, 0.5, JetAlg::NO_MUONS, JetAlg::ALL_INVISIBLES); + FastJets jets(fs, FastJets::GENKTEE, 0.5, JetAlg::Muons::NONE, JetAlg::Invisibles::ALL); + //FastJets jets(fs, FastJets::ANTIKT, 0.5, JetAlg::Muons::NONE, JetAlg::Invisibles::ALL); addProjection(jets, "Jets"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); addProjection(mu_id, "MUONS"); addProjection(MissingMomentum(fs), "MissingMomenta"); // Book histograms //_h_costheta = bookHisto1D(2, 1, 1); book(_h_m_OS, 3, 1, 1); book(_h_m_SS, 5, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // B-jets const Jets jets = apply(event, "Jets").jetsByPt(Cuts::pT > 5*GeV); // tODO jet eta? const Jets bjets = filter_select(jets, [](const Jet& j) { return j.bTagged(); }); if (bjets.size()<2) vetoEvent; // Muons const Particles all_muons = applyProjection(event, "MUONS").particles(Cuts::pT>2.5/GeV, cmpMomByE); const Particles b_muons = filter_select(all_muons, [](const Particle& m) {return cos(m.theta()) < 0.7; }); if (b_muons.size()<2) vetoEvent; // Missing energy cut const MissingMomentum& met = applyProjection(event, "MissingMomenta"); double Pmiss = met.missingMomentum().p(); if (Pmiss/GeV>18) vetoEvent; // Impact paarameter considerations double b_muon_0_impactdistance = min(impact(b_muons[0].origin(), bjets[0].momentum()),impact(b_muons[0].origin(), bjets[1].momentum())); double b_muon_1_impactdistance = min(impact(b_muons[1].origin(), bjets[0].momentum()),impact(b_muons[1].origin(), bjets[1].momentum())); // Impact parameter cut if ((b_muon_0_impactdistance > 0.1) || (b_muon_1_impactdistance > 0.1)) vetoEvent; FourMomentum dimuon = b_muons[0].momentum() + b_muons[1].momentum(); // Same sign if (b_muons[0].charge()*b_muons[1].charge()>0) { _h_m_SS->fill( dimuon.mass()/GeV); } // Opposite sign else { _h_m_OS->fill( dimuon.mass()/GeV); // //FourMomentum muonminus; //if (b_muons[0].charge() < 0) muonminus = b_muons[0].momentum(); //else muonminus = b_muons[1].momentum(); //const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(-dimuon.betaVec()); //FourMomentum boostedmuon = cms_boost.transform(muonminus); //double cosmuonboosted = boostedmuon.vector3().dot(cms_boost.betaVec()) /// (boostedmuon.vector3().mod()*cms_boost.betaVec().mod()); //_h_costheta->fill( cosmuonboosted); } } /// Normalise histograms etc., after the run void finalize() { //normalize(_h_costheta); // Normalize to data according to Arno. 
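// The constants below (1387 opposite-sign, 1047 same-sign) appear to be the target
// event yields from the measurement to which these distributions are normalised.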
normalize(_h_m_OS, 1387); normalize(_h_m_SS, 1047); } //@} /// @name Histograms //@{ //Histo1DPtr _h_costheta; Histo1DPtr _h_m_OS; Histo1DPtr _h_m_SS; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALEPH_2016_I1492968); } diff --git a/analyses/pluginLHCb/LHCB_2014_I1262703.cc b/analyses/pluginLHCb/LHCB_2014_I1262703.cc --- a/analyses/pluginLHCb/LHCB_2014_I1262703.cc +++ b/analyses/pluginLHCb/LHCB_2014_I1262703.cc @@ -1,105 +1,105 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Study of forward Z + jet production at 7 TeV at LHCb /// @author W. Barter, A. Bursche, M. Sirendi (Rivet implementation) class LHCB_2014_I1262703 : public Analysis { public: /// Default constructor DEFAULT_RIVET_ANALYSIS_CTOR(LHCB_2014_I1262703); /// Initialise histograms and projections void init() { // Projections const Cut mycut = Cuts::eta >= 2.0 && Cuts::eta <= 4.5 && Cuts::pT > 20*GeV; - ZFinder zfinder(FinalState(), mycut, PID::MUON, 60*GeV, 120*GeV, 0., ZFinder::NOCLUSTER); + ZFinder zfinder(FinalState(), mycut, PID::MUON, 60*GeV, 120*GeV, 0., ZFinder::ClusterPhotons::NONE); declare(zfinder, "ZFinder"); FastJets jetpro(zfinder.remainingFinalState(), FastJets::ANTIKT, 0.5); declare(jetpro, "Jets"); // Histograms book(_h_jet_pT , 3, 1, 1); book(_h_jet_eta20, 4, 1, 1); book(_h_jet_eta10, 4, 1, 2); book(_h_Z_y20 , 5, 1, 1); book(_h_Z_y10 , 5, 1, 2); book(_h_Z_pT20 , 6, 1, 1); book(_h_Z_pT10 , 6, 1, 2); book(_h_dphi20 , 7, 1, 1); book(_h_dphi10 , 7, 1, 2); book(_h_dy20 , 8, 1, 1); book(_h_dy10 , 8, 1, 2); } /// Do the analysis void analyze(const Event & e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; const ParticleVector leptons = zfinder.constituents(); const Cut jetSelector = Cuts::eta >= 2.0 && Cuts::eta <= 4.5 && Cuts::pT > 10*GeV; const Jets jets = apply(e, "Jets").jetsByPt(jetSelector); // Clean the jets against the lepton candidates with a deltaR cut of 0.4 const Jets cleanedJets = filter_discard(jets, [&](const Jet& j) { return any(leptons, deltaRLess(j, 0.4)); }); // vector cleanedJets; // for (size_t i = 0; i < jets.size(); i++) { // bool isolated = true; // for (size_t j = 0; j < 2; j++) { // if (deltaR(leptons[j], jets[i]) < 0.4) { // isolated = false; // break; // } // } // if (isolated) cleanedJets.push_back(&jets[i]); // } // Require at least 1 survivor and note if it is above a 20 GeV jet pT threshold if (cleanedJets.empty()) vetoEvent; const bool above20 = cleanedJets[0].pT() > 20*GeV; const double dphi = deltaPhi(zfinder.boson(), cleanedJets[0]); const double drap = zfinder.boson().rap() - cleanedJets[0].rap(); // Fill histograms _h_jet_pT->fill(cleanedJets[0].pT()/GeV); _h_jet_eta10->fill(cleanedJets[0].eta()); _h_Z_y10->fill(zfinder.boson().rap()); _h_Z_pT10->fill(zfinder.boson().pT()/GeV); _h_dphi10->fill(dphi); _h_dy10->fill(drap); if (above20) { _h_jet_eta20->fill(cleanedJets[0].eta()); _h_Z_y20->fill(zfinder.boson().rap()); _h_Z_pT20->fill(zfinder.boson().pT()/GeV); _h_dphi20->fill(dphi); _h_dy20->fill(drap); } } /// Finalize void finalize() { normalize({_h_jet_pT, _h_jet_eta20, _h_jet_eta10, _h_Z_y20, _h_Z_y10, _h_Z_pT20, _h_Z_pT10, _h_dphi20, _h_dphi10, _h_dy20, _h_dy10}); } /// Histograms Histo1DPtr _h_jet_pT, _h_jet_eta20, _h_jet_eta10, _h_Z_y20, _h_Z_y10, _h_Z_pT20, _h_Z_pT10, _h_dphi20, _h_dphi10, _h_dy20, _h_dy10; }; DECLARE_RIVET_PLUGIN(LHCB_2014_I1262703); } diff --git a/analyses/pluginMC/EXAMPLE_SMEAR.cc 
b/analyses/pluginMC/EXAMPLE_SMEAR.cc --- a/analyses/pluginMC/EXAMPLE_SMEAR.cc +++ b/analyses/pluginMC/EXAMPLE_SMEAR.cc @@ -1,241 +1,241 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/TauFinder.hh" #include "Rivet/Projections/SmearedJets.hh" #include "Rivet/Projections/SmearedParticles.hh" #include "Rivet/Projections/SmearedMET.hh" namespace Rivet { class EXAMPLE_SMEAR : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(EXAMPLE_SMEAR); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { MissingMomentum mm(Cuts::abseta < 5); declare(mm, "MET0"); SmearedMET smm1(mm, MET_SMEAR_ATLAS_RUN2); declare(smm1, "MET1"); SmearedMET smm2(mm, [](const Vector3& met, double){ return P3_SMEAR_LEN_GAUSS(met, 0.1*met.mod()); }); declare(smm2, "MET2"); FastJets fj(FinalState(Cuts::abseta < 5), FastJets::ANTIKT, 0.4); declare(fj, "Jets0"); SmearedJets sj1(fj, JET_SMEAR_IDENTITY); declare(sj1, "Jets1"); SmearedJets sj2(fj, JET_SMEAR_ATLAS_RUN2, [](const Jet& j){ return j.bTagged() ? 0.7*(1 - exp(-j.pT()/(10*GeV))) : 0.01; } ); declare(sj2, "Jets2"); SmearedJets sj3(fj, JET_SMEAR_CMS_RUN2, JET_BTAG_EFFS(0.7, 0.1, 0.01), JET_CTAG_PERFECT, JET_EFF_CONST(0.8)); declare(sj3, "Jets3"); IdentifiedFinalState photons(Cuts::abseta < 5, PID::PHOTON); IdentifiedFinalState truthelectrons(Cuts::abseta < 5 && Cuts::pT > 10*GeV, {{PID::ELECTRON, PID::POSITRON}}); declare(truthelectrons, "Electrons0"); DressedLeptons dressedelectrons(photons, truthelectrons, 0.2); declare(dressedelectrons, "Electrons1"); SmearedParticles recoelectrons(dressedelectrons, ELECTRON_EFF_ATLAS_RUN2, ELECTRON_SMEAR_ATLAS_RUN2); declare(recoelectrons, "Electrons2"); IdentifiedFinalState truthmuons(Cuts::abseta < 5 && Cuts::pT > 10*GeV, {{PID::MUON, PID::ANTIMUON}}); declare(truthmuons, "Muons0"); DressedLeptons dressedmuons(photons, truthmuons, 0.2); declare(dressedmuons, "Muons1"); SmearedParticles recomuons(dressedmuons, MUON_EFF_ATLAS_RUN2, MUON_SMEAR_ATLAS_RUN2); declare(recomuons, "Muons2"); - TauFinder truthtaus(TauFinder::ANY, Cuts::abseta < 5 && Cuts::pT > 10*GeV); + TauFinder truthtaus(TauFinder::DecayMode::ANY, Cuts::abseta < 5 && Cuts::pT > 10*GeV); declare(truthtaus, "Taus0"); DressedLeptons dressedtaus(photons, truthtaus, 0.2); declare(dressedtaus, "Taus1"); SmearedParticles recotaus(dressedtaus, TAU_EFF_ATLAS_RUN2, TAU_SMEAR_ATLAS_RUN2); declare(recotaus, "Taus2"); book(_h_met_true ,"met_true", 30, 0.0, 120); book(_h_met_reco ,"met_reco", 30, 0.0, 120); book(_h_nj_true ,"jet_N_true", 10, -0.5, 9.5); book(_h_nj_reco ,"jet_N_reco", 10, -0.5, 9.5); book(_h_j1pt_true ,"jet_pt1_true", 30, 0.0, 120); book(_h_j1pt_reco ,"jet_pt1_reco", 30, 0.0, 120); book(_h_j1eta_true ,"jet_eta1_true", 20, -5.0, 5.0); book(_h_j1eta_reco ,"jet_eta1_reco", 20, -5.0, 5.0); book(_h_ne_true ,"elec_N_true", 5, -0.5, 4.5); book(_h_ne_reco ,"elec_N_reco", 5, -0.5, 4.5); book(_h_e1pt_true ,"elec_pt1_true", 30, 0, 120); book(_h_e1pt_reco ,"elec_pt1_reco", 30, 0, 120); book(_h_e1eta_true ,"elec_eta1_true", 20, -5.0, 5.0); book(_h_e1eta_reco ,"elec_eta1_reco", 20, -5.0, 5.0); book(_h_nm_true ,"muon_N_true", 5, -0.5, 4.5); book(_h_nm_reco ,"muon_N_reco", 5, -0.5, 4.5); book(_h_m1pt_true ,"muon_pt1_true", 30, 0, 120); book(_h_m1pt_reco ,"muon_pt1_reco", 30, 0, 120); book(_h_m1eta_true ,"muon_eta1_true", 20, -5.0, 5.0); 
book(_h_m1eta_reco ,"muon_eta1_reco", 20, -5.0, 5.0); book(_h_nt_true ,"tau_N_true", 5, -0.5, 4.5); book(_h_nt_reco ,"tau_N_reco", 5, -0.5, 4.5); book(_h_t1pt_true ,"tau_pt1_true", 30, 0, 120); book(_h_t1pt_reco ,"tau_pt1_reco", 30, 0, 120); book(_h_t1eta_true ,"tau_eta1_true", 20, -5.0, 5.0); book(_h_t1eta_reco ,"tau_eta1_reco", 20, -5.0, 5.0); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; const Vector3 met0 = apply(event, "MET0").vectorEt(); const Vector3 met1 = apply(event, "MET1").vectorEt(); const Vector3 met2 = apply(event, "MET2").vectorEt(); MSG_DEBUG("MET = " << met0.mod()/GeV << ", " << met1.mod()/GeV << ", " << met2.mod()/GeV << " GeV"); _h_met_true->fill(met0.mod()/GeV, weight); _h_met_reco->fill(met2.mod()/GeV, weight); const Jets jets0 = apply(event, "Jets0").jetsByPt(Cuts::pT > 10*GeV); const Jets jets1 = apply(event, "Jets1").jetsByPt(Cuts::pT > 10*GeV); const Jets jets2 = apply(event, "Jets2").jetsByPt(Cuts::pT > 10*GeV); const Jets jets3 = apply(event, "Jets3").jetsByPt(Cuts::pT > 10*GeV); MSG_DEBUG("Numbers of jets = " << jets0.size() << " true; " << jets1.size() << ", " << jets2.size() << ", " << jets3.size()); _h_nj_true->fill(jets0.size(), weight); _h_nj_reco->fill(jets2.size(), weight); if (!jets0.empty()) { _h_j1pt_true->fill(jets0.front().pT()/GeV, weight); _h_j1eta_true->fill(jets0.front().eta(), weight); } if (!jets2.empty()) { _h_j1pt_reco->fill(jets2.front().pT()/GeV, weight); _h_j1eta_reco->fill(jets2.front().eta(), weight); } const Particles& elecs1 = apply(event, "Electrons1").particlesByPt(); const Particles& elecs2 = apply(event, "Electrons2").particlesByPt(); MSG_DEBUG("Numbers of electrons = " << elecs1.size() << " true; " << elecs2.size() << " reco"); _h_ne_true->fill(elecs1.size(), weight); _h_ne_reco->fill(elecs2.size(), weight); if (!elecs1.empty()) { _h_e1pt_true->fill(elecs1.front().pT()/GeV, weight); _h_e1eta_true->fill(elecs1.front().eta(), weight); } if (!elecs2.empty()) { _h_e1pt_reco->fill(elecs2.front().pT()/GeV, weight); _h_e1eta_reco->fill(elecs2.front().eta(), weight); } const Particles& muons1 = apply(event, "Muons1").particlesByPt(); const Particles& muons2 = apply(event, "Muons2").particlesByPt(); MSG_DEBUG("Numbers of muons = " << muons1.size() << " true; " << muons2.size() << " reco"); _h_nm_true->fill(muons1.size(), weight); _h_nm_reco->fill(muons2.size(), weight); if (!muons1.empty()) { _h_m1pt_true->fill(muons1.front().pT()/GeV, weight); _h_m1eta_true->fill(muons1.front().eta(), weight); } if (!muons2.empty()) { _h_m1pt_reco->fill(muons2.front().pT()/GeV, weight); _h_m1eta_reco->fill(muons2.front().eta(), weight); } const Particles& taus1 = apply(event, "Taus1").particlesByPt(); const Particles& taus2 = apply(event, "Taus2").particlesByPt(); MSG_DEBUG("Numbers of taus = " << taus1.size() << " true; " << taus2.size() << " reco"); _h_nt_true->fill(taus1.size(), weight); _h_nt_reco->fill(taus2.size(), weight); if (!taus1.empty()) { _h_t1pt_true->fill(taus1.front().pT()/GeV, weight); _h_t1eta_true->fill(taus1.front().eta(), weight); } if (!taus2.empty()) { _h_t1pt_reco->fill(taus2.front().pT()/GeV, weight); _h_t1eta_reco->fill(taus2.front().eta(), weight); } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_met_true); normalize(_h_met_reco); normalize(_h_nj_true); normalize(_h_nj_reco); normalize(_h_j1pt_true, 1-_h_nj_true->bin(0).area()); normalize(_h_j1pt_reco, 1-_h_nj_reco->bin(0).area()); normalize(_h_j1eta_true, 1-_h_nj_true->bin(0).area()); 
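// The 1 - bin(0).area() factors in these normalize() calls rescale each leading-object
// spectrum to the fraction of events containing at least one such object: the
// corresponding multiplicity histogram has already been normalised to unity, and its
// first bin (centred on zero) holds the empty-event fraction.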
normalize(_h_j1eta_reco, 1-_h_nj_reco->bin(0).area()); normalize(_h_ne_true); normalize(_h_ne_reco); normalize(_h_e1pt_true, 1-_h_ne_true->bin(0).area()); normalize(_h_e1pt_reco, 1-_h_ne_reco->bin(0).area()); normalize(_h_e1eta_true, 1-_h_ne_true->bin(0).area()); normalize(_h_e1eta_reco, 1-_h_ne_reco->bin(0).area()); normalize(_h_nm_true); normalize(_h_nm_reco); normalize(_h_m1pt_true, 1-_h_nm_true->bin(0).area()); normalize(_h_m1pt_reco, 1-_h_nm_reco->bin(0).area()); normalize(_h_m1eta_true, 1-_h_nm_true->bin(0).area()); normalize(_h_m1eta_reco, 1-_h_nm_reco->bin(0).area()); normalize(_h_nt_true); normalize(_h_nt_reco); normalize(_h_t1pt_true, 1-_h_nt_true->bin(0).area()); normalize(_h_t1pt_reco, 1-_h_nt_reco->bin(0).area()); normalize(_h_t1eta_true, 1-_h_nt_true->bin(0).area()); normalize(_h_t1eta_reco, 1-_h_nt_reco->bin(0).area()); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_met_true, _h_met_reco; Histo1DPtr _h_nj_true, _h_nj_reco, _h_ne_true, _h_ne_reco, _h_nm_true, _h_nm_reco, _h_nt_true, _h_nt_reco; Histo1DPtr _h_j1pt_true, _h_j1pt_reco, _h_e1pt_true, _h_e1pt_reco, _h_m1pt_true, _h_m1pt_reco, _h_t1pt_true, _h_t1pt_reco; Histo1DPtr _h_j1eta_true, _h_j1eta_reco, _h_e1eta_true, _h_e1eta_reco, _h_m1eta_true, _h_m1eta_reco, _h_t1eta_true, _h_t1eta_reco; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(EXAMPLE_SMEAR); } diff --git a/analyses/pluginMC/MC_HINC.cc b/analyses/pluginMC/MC_HINC.cc --- a/analyses/pluginMC/MC_HINC.cc +++ b/analyses/pluginMC/MC_HINC.cc @@ -1,91 +1,91 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief MC validation analysis for higgs [-> tau tau] events class MC_HINC : public Analysis { public: /// Default constructor MC_HINC() : Analysis("MC_HINC") { } /// @name Analysis methods //@{ /// Book histograms void init() { Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; /// @todo Urk, abuse! Need explicit HiggsFinder and TauFinder? 
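// In practice the "abuse" is that ZFinder doubles as a Higgs finder here: it pairs
// taus (PID::TAU) in a 115-135 GeV mass window with a 125 GeV target mass, in lieu of
// a dedicated HiggsFinder projection.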
- ZFinder hfinder(FinalState(), cut, PID::TAU, 115*GeV, 135*GeV, 0.0, ZFinder::NOCLUSTER, ZFinder::NOTRACK, 125*GeV); + ZFinder hfinder(FinalState(), cut, PID::TAU, 115*GeV, 135*GeV, 0.0, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::NO, 125*GeV); declare(hfinder, "Hfinder"); book(_h_H_mass ,"H_mass", 50, 119.7, 120.3); book(_h_H_pT ,"H_pT", logspace(100, 1.0, 0.5*(sqrtS()>0.?sqrtS():14000.)/GeV)); book(_h_H_pT_peak ,"H_pT_peak", 25, 0.0, 25.0); book(_h_H_y ,"H_y", 40, -4, 4); book(_h_H_phi ,"H_phi", 25, 0.0, TWOPI); book(_h_lepton_pT ,"lepton_pT", logspace(100, 10.0, 0.25*(sqrtS()>0.?sqrtS():14000.)/GeV)); book(_h_lepton_eta ,"lepton_eta", 40, -4, 4); } /// Do the analysis void analyze(const Event & e) { const ZFinder& hfinder = apply(e, "Hfinder"); if (hfinder.bosons().size() != 1) vetoEvent; const double weight = 1.0; FourMomentum hmom(hfinder.bosons()[0].momentum()); _h_H_mass->fill(hmom.mass()/GeV, weight); _h_H_pT->fill(hmom.pT()/GeV, weight); _h_H_pT_peak->fill(hmom.pT()/GeV, weight); _h_H_y->fill(hmom.rapidity(), weight); _h_H_phi->fill(hmom.phi(), weight); for (const Particle& l : hfinder.constituents()) { _h_lepton_pT->fill(l.pT()/GeV, weight); _h_lepton_eta->fill(l.eta(), weight); } } /// Finalize void finalize() { const double xsec = crossSection()/picobarn; normalize(_h_H_mass, xsec); normalize(_h_H_pT, xsec); normalize(_h_H_pT_peak, xsec); normalize(_h_H_y, xsec); normalize(_h_H_phi, xsec); normalize(_h_lepton_pT, xsec); normalize(_h_lepton_eta, xsec); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_H_mass; Histo1DPtr _h_H_pT; Histo1DPtr _h_H_pT_peak; Histo1DPtr _h_H_y; Histo1DPtr _h_H_phi; Histo1DPtr _h_lepton_pT; Histo1DPtr _h_lepton_eta; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_HINC); } diff --git a/analyses/pluginMC/MC_HJETS.cc b/analyses/pluginMC/MC_HJETS.cc --- a/analyses/pluginMC/MC_HJETS.cc +++ b/analyses/pluginMC/MC_HJETS.cc @@ -1,81 +1,81 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetAnalysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief MC validation analysis for Higgs [-> tau tau] + jets events class MC_HJETS : public MC_JetAnalysis { public: /// Default constructor MC_HJETS() : MC_JetAnalysis("MC_HJETS", 4, "Jets") { } /// @name Analysis methods //@{ /// Book histograms void init() { Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; /// @todo Urk, abuse! Need explicit HiggsFinder (and TauFinder?) 
- ZFinder hfinder(FinalState(), cut, PID::TAU, 115*GeV, 135*GeV, 0.0, ZFinder::NOCLUSTER, ZFinder::NOTRACK, 125*GeV); + ZFinder hfinder(FinalState(), cut, PID::TAU, 115*GeV, 135*GeV, 0.0, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::NO, 125*GeV); declare(hfinder, "Hfinder"); FastJets jetpro(hfinder.remainingFinalState(), FastJets::ANTIKT, 0.4); declare(jetpro, "Jets"); book(_h_H_jet1_deta ,"H_jet1_deta", 50, -5.0, 5.0); book(_h_H_jet1_dR ,"H_jet1_dR", 25, 0.5, 7.0); MC_JetAnalysis::init(); } /// Do the analysis void analyze(const Event & e) { const ZFinder& hfinder = apply(e, "Hfinder"); if (hfinder.bosons().size() != 1) vetoEvent; const double weight = 1.0; FourMomentum hmom(hfinder.bosons()[0].momentum()); const Jets& jets = apply(e, "Jets").jetsByPt(_jetptcut); if (jets.size() > 0) { _h_H_jet1_deta->fill(hmom.eta()-jets[0].eta(), weight); _h_H_jet1_dR->fill(deltaR(hmom, jets[0].momentum()), weight); } MC_JetAnalysis::analyze(e); } /// Finalize void finalize() { normalize(_h_H_jet1_deta, crossSection()/picobarn); normalize(_h_H_jet1_dR, crossSection()/picobarn); MC_JetAnalysis::finalize(); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_H_jet1_deta; Histo1DPtr _h_H_jet1_dR; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_HJETS); } diff --git a/analyses/pluginMC/MC_HKTSPLITTINGS.cc b/analyses/pluginMC/MC_HKTSPLITTINGS.cc --- a/analyses/pluginMC/MC_HKTSPLITTINGS.cc +++ b/analyses/pluginMC/MC_HKTSPLITTINGS.cc @@ -1,60 +1,60 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetSplittings.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief MC validation analysis for higgs [-> tau tau] + jets events class MC_HKTSPLITTINGS : public MC_JetSplittings { public: /// Default constructor MC_HKTSPLITTINGS() : MC_JetSplittings("MC_HKTSPLITTINGS", 4, "Jets") { } /// @name Analysis methods //@{ /// Book histograms void init() { Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; /// @todo Urk, abuse! 
Need explicit HiggsFinder and TauFinder - ZFinder hfinder(FinalState(), cut, PID::TAU, 115*GeV, 135*GeV, 0.0, ZFinder::NOCLUSTER, ZFinder::NOTRACK, 125*GeV); + ZFinder hfinder(FinalState(), cut, PID::TAU, 115*GeV, 135*GeV, 0.0, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::NO, 125*GeV); declare(hfinder, "Hfinder"); FastJets jetpro(hfinder.remainingFinalState(), FastJets::KT, 0.6); declare(jetpro, "Jets"); MC_JetSplittings::init(); } /// Do the analysis void analyze(const Event & e) { const ZFinder& hfinder = apply(e, "Hfinder"); if (hfinder.bosons().size() != 1) vetoEvent; MC_JetSplittings::analyze(e); } /// Finalize void finalize() { MC_JetSplittings::finalize(); } //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_HKTSPLITTINGS); } diff --git a/analyses/pluginMC/MC_PARTONICTOPS.cc b/analyses/pluginMC/MC_PARTONICTOPS.cc --- a/analyses/pluginMC/MC_PARTONICTOPS.cc +++ b/analyses/pluginMC/MC_PARTONICTOPS.cc @@ -1,94 +1,94 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// Find and plot partonic top properties (requires tops in event record) class MC_PARTONICTOPS : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_PARTONICTOPS); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - declare(PartonicTops(PartonicTops::ALL), "AllTops"); - declare(PartonicTops(PartonicTops::E_MU), "LeptonicTops"); - declare(PartonicTops(PartonicTops::HADRONIC), "HadronicTops"); + declare(PartonicTops(PartonicTops::DecayMode::ALL), "AllTops"); + declare(PartonicTops(PartonicTops::DecayMode::E_MU), "LeptonicTops"); + declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicTops"); // Book histograms book(_h_tall_n , "t_all_n", linspace(5, -0.5, 4.5)); book(_h_tall_pt, "t_all_pT", logspace(50, 1, 500)); book(_h_tall_y , "t_all_y", linspace(50, -5, 5)); book(_h_tlep_n , "t_lep_n", linspace(5, -0.5, 4.5)); book(_h_tlep_pt, "t_lep_pT", logspace(50, 1, 500)); book(_h_tlep_y , "t_lep_y", linspace(50, -5, 5)); book(_h_thad_n , "t_had_n", linspace(5, -0.5, 4.5)); book(_h_thad_pt, "t_had_pT", logspace(50, 1, 500)); book(_h_thad_y , "t_had_y", linspace(50, -5, 5)); } /// Perform the per-event analysis void analyze(const Event& event) { const Particles& alltops = apply(event, "AllTops").particlesByPt(); _h_tall_n->fill(alltops.size()); for (const Particle& t : alltops) { _h_tall_pt->fill(t.pT()/GeV); _h_tall_y->fill(t.rap()); } const Particles& leptops = apply(event, "LeptonicTops").particlesByPt(); _h_tlep_n->fill(leptops.size()); for (const Particle& t : leptops) { _h_tlep_pt->fill(t.pT()/GeV); _h_tlep_y->fill(t.rap()); } const Particles& hadtops = apply(event, "HadronicTops").particlesByPt(); _h_thad_n->fill(hadtops.size()); for (const Particle& t : hadtops) { _h_thad_pt->fill(t.pT()/GeV); _h_thad_y->fill(t.rap()); } } /// Normalise histograms etc., after the run void finalize() { normalize({_h_tall_n, _h_tlep_n, _h_thad_n}); normalize({_h_tall_pt, _h_tlep_pt, _h_thad_pt}); normalize({_h_tall_y, _h_tlep_y, _h_thad_y}); } //@} /// @name Histograms //@{ Histo1DPtr _h_tall_n, _h_tlep_n, _h_thad_n; Histo1DPtr _h_tall_pt, _h_tlep_pt, _h_thad_pt; Histo1DPtr _h_tall_y, _h_tlep_y, _h_thad_y; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_PARTONICTOPS); } diff --git a/analyses/pluginMC/MC_TAUS.cc b/analyses/pluginMC/MC_TAUS.cc --- a/analyses/pluginMC/MC_TAUS.cc +++ b/analyses/pluginMC/MC_TAUS.cc @@ -1,45 
+1,45 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_ParticleAnalysis.hh" #include "Rivet/Projections/TauFinder.hh" namespace Rivet { /// @brief MC validation analysis for taus class MC_TAUS : public MC_ParticleAnalysis { public: /// Constructor MC_TAUS() : MC_ParticleAnalysis("MC_TAUS", 2, "tau") { } /// Book projections and histograms void init() { - TauFinder taus(TauFinder::ANY); + TauFinder taus(TauFinder::DecayMode::ANY); declare(taus, "Taus"); MC_ParticleAnalysis::init(); } /// Per-event analysis void analyze(const Event& event) { const Particles taus = apply(event, "Taus").particlesByPt(0.5*GeV); MC_ParticleAnalysis::_analyze(event, taus); } /// Normalisations etc. void finalize() { MC_ParticleAnalysis::finalize(); } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_TAUS); } diff --git a/analyses/pluginMC/MC_ZINC.cc b/analyses/pluginMC/MC_ZINC.cc --- a/analyses/pluginMC/MC_ZINC.cc +++ b/analyses/pluginMC/MC_ZINC.cc @@ -1,141 +1,141 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief MC validation analysis for Z events class MC_ZINC : public Analysis { public: /// Default constructor MC_ZINC(string name="MC_ZINC") : Analysis(name) { _dR=0.2; _lepton=PID::ELECTRON; } /// @name Analysis methods //@{ /// Book histograms void init() { FinalState fs; Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; - ZFinder zfinder(fs, cut, _lepton, 65.0*GeV, 115.0*GeV, _dR, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zfinder(fs, cut, _lepton, 65.0*GeV, 115.0*GeV, _dR, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); book(_h_Z_mass ,"Z_mass", 50, 66.0, 116.0); book(_h_Z_pT ,"Z_pT", logspace(100, 1.0, 0.5*(sqrtS()>0.?sqrtS():14000.)/GeV)); book(_h_Z_pT_peak ,"Z_pT_peak", 25, 0.0, 25.0); book(_h_Z_y ,"Z_y", 40, -4.0, 4.0); book(_h_Z_phi ,"Z_phi", 25, 0.0, TWOPI); book(_h_lepton_pT ,"lepton_pT", logspace(100, 10.0, 0.25*(sqrtS()>0.?sqrtS():14000.)/GeV)); book(_h_lepton_eta ,"lepton_eta", 40, -4.0, 4.0); } /// Do the analysis void analyze(const Event & e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; const double weight = 1.0; FourMomentum zmom(zfinder.bosons()[0].momentum()); _h_Z_mass->fill(zmom.mass()/GeV, weight); _h_Z_pT->fill(zmom.pT()/GeV, weight); _h_Z_pT_peak->fill(zmom.pT()/GeV, weight); _h_Z_y->fill(zmom.rapidity(), weight); _h_Z_phi->fill(zmom.phi(), weight); for (const Particle& l : zfinder.constituents()) { _h_lepton_pT->fill(l.pT()/GeV, weight); _h_lepton_eta->fill(l.eta(), weight); } } /// Finalize void finalize() { const double s = crossSection()/picobarn/sumOfWeights(); scale(_h_Z_mass, s); scale(_h_Z_pT, s); scale(_h_Z_pT_peak, s); scale(_h_Z_y, s); scale(_h_Z_phi, s); scale(_h_lepton_pT, s); scale(_h_lepton_eta, s); } //@} protected: /// @name Parameters for specialised e/mu and dressed/bare subclassing //@{ double _dR; PdgId _lepton; //@} private: /// @name Histograms //@{ Histo1DPtr _h_Z_mass; Histo1DPtr _h_Z_pT; Histo1DPtr _h_Z_pT_peak; Histo1DPtr _h_Z_y; Histo1DPtr _h_Z_phi; Histo1DPtr _h_lepton_pT; Histo1DPtr _h_lepton_eta; //@} }; struct MC_ZINC_EL : public MC_ZINC { MC_ZINC_EL() : MC_ZINC("MC_ZINC_EL") { _dR = 0.2; _lepton = PID::ELECTRON; } }; struct MC_ZINC_EL_BARE : public MC_ZINC { MC_ZINC_EL_BARE() : MC_ZINC("MC_ZINC_EL_BARE") { _dR = 0; _lepton = PID::ELECTRON; } }; struct MC_ZINC_MU : public MC_ZINC { MC_ZINC_MU() : MC_ZINC("MC_ZINC_MU") { _dR = 0.2; _lepton = PID::MUON; } }; struct MC_ZINC_MU_BARE : 
public MC_ZINC { MC_ZINC_MU_BARE() : MC_ZINC("MC_ZINC_MU_BARE") { _dR = 0; _lepton = PID::MUON; } }; // The hooks for the plugin system DECLARE_RIVET_PLUGIN(MC_ZINC); DECLARE_RIVET_PLUGIN(MC_ZINC_EL); DECLARE_RIVET_PLUGIN(MC_ZINC_EL_BARE); DECLARE_RIVET_PLUGIN(MC_ZINC_MU); DECLARE_RIVET_PLUGIN(MC_ZINC_MU_BARE); } diff --git a/analyses/pluginMC/MC_ZJETS.cc b/analyses/pluginMC/MC_ZJETS.cc --- a/analyses/pluginMC/MC_ZJETS.cc +++ b/analyses/pluginMC/MC_ZJETS.cc @@ -1,130 +1,130 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetAnalysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief MC validation analysis for Z + jets events class MC_ZJETS : public MC_JetAnalysis { public: /// Default constructor MC_ZJETS(string name = "MC_ZJETS") : MC_JetAnalysis(name, 4, "Jets") { _dR=0.2; _lepton=PID::ELECTRON; } /// @name Analysis methods //@{ /// Initialize void init() { FinalState fs; Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; - ZFinder zfinder(fs, cut, _lepton, 65*GeV, 115*GeV, _dR, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zfinder(fs, cut, _lepton, 65*GeV, 115*GeV, _dR, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); FastJets jetpro(zfinder.remainingFinalState(), FastJets::ANTIKT, 0.4); declare(jetpro, "Jets"); book(_h_Z_jet1_deta ,"Z_jet1_deta", 50, -5, 5); book(_h_Z_jet1_dR ,"Z_jet1_dR", 25, 0.5, 7.0); MC_JetAnalysis::init(); } /// Do the analysis void analyze(const Event & e) { MSG_TRACE("MC_ZJETS: running ZFinder"); const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; const FourMomentum& zmom = zfinder.bosons()[0].momentum(); MSG_TRACE("MC_ZJETS: have exactly one Z boson candidate"); const Jets& jets = apply(e, "Jets").jetsByPt(_jetptcut); if (jets.size() > 0) { MSG_TRACE("MC_ZJETS: have at least one valid jet"); const double weight = 1.0; _h_Z_jet1_deta->fill(zmom.eta()-jets[0].eta(), weight); _h_Z_jet1_dR->fill(deltaR(zmom, jets[0].momentum()), weight); } MC_JetAnalysis::analyze(e); } /// Finalize void finalize() { scale(_h_Z_jet1_deta, crossSection()/picobarn/sumOfWeights()); scale(_h_Z_jet1_dR, crossSection()/picobarn/sumOfWeights()); MC_JetAnalysis::finalize(); } //@} protected: /// @name Parameters for specialised e/mu and dressed/bare subclassing //@{ double _dR; PdgId _lepton; //@} private: /// @name Histograms //@{ Histo1DPtr _h_Z_jet1_deta; Histo1DPtr _h_Z_jet1_dR; //@} }; struct MC_ZJETS_EL : public MC_ZJETS { MC_ZJETS_EL() : MC_ZJETS("MC_ZJETS_EL") { _dR = 0.2; _lepton = PID::ELECTRON; } }; struct MC_ZJETS_EL_BARE : public MC_ZJETS { MC_ZJETS_EL_BARE() : MC_ZJETS("MC_ZJETS_EL_BARE") { _dR = 0; _lepton = PID::ELECTRON; } }; struct MC_ZJETS_MU : public MC_ZJETS { MC_ZJETS_MU() : MC_ZJETS("MC_ZJETS_MU") { _dR = 0.2; _lepton = PID::MUON; } }; struct MC_ZJETS_MU_BARE : public MC_ZJETS { MC_ZJETS_MU_BARE() : MC_ZJETS("MC_ZJETS_MU_BARE") { _dR = 0; _lepton = PID::MUON; } }; // The hooks for the plugin system DECLARE_RIVET_PLUGIN(MC_ZJETS); DECLARE_RIVET_PLUGIN(MC_ZJETS_EL); DECLARE_RIVET_PLUGIN(MC_ZJETS_EL_BARE); DECLARE_RIVET_PLUGIN(MC_ZJETS_MU); DECLARE_RIVET_PLUGIN(MC_ZJETS_MU_BARE); } diff --git a/analyses/pluginMC/MC_ZKTSPLITTINGS.cc b/analyses/pluginMC/MC_ZKTSPLITTINGS.cc --- a/analyses/pluginMC/MC_ZKTSPLITTINGS.cc +++ b/analyses/pluginMC/MC_ZKTSPLITTINGS.cc @@ -1,61 +1,61 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetSplittings.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace 
Rivet { /// @brief MC validation analysis for Z + jets events class MC_ZKTSPLITTINGS : public MC_JetSplittings { public: /// Default constructor MC_ZKTSPLITTINGS() : MC_JetSplittings("MC_ZKTSPLITTINGS", 4, "Jets") { } /// @name Analysis methods //@{ /// Book histograms void init() { FinalState fs; Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; - ZFinder zfinder(fs, cut, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zfinder(fs, cut, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder, "ZFinder"); FastJets jetpro(zfinder.remainingFinalState(), FastJets::KT, 0.6); declare(jetpro, "Jets"); MC_JetSplittings::init(); } /// Do the analysis void analyze(const Event & e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; MC_JetSplittings::analyze(e); } /// Finalize void finalize() { MC_JetSplittings::finalize(); } //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_ZKTSPLITTINGS); } diff --git a/analyses/pluginMC/MC_ZZINC.cc b/analyses/pluginMC/MC_ZZINC.cc --- a/analyses/pluginMC/MC_ZZINC.cc +++ b/analyses/pluginMC/MC_ZZINC.cc @@ -1,183 +1,183 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief MC validation analysis for Z[ee]Z[mumu] events class MC_ZZINC : public Analysis { public: /// Default constructor MC_ZZINC() : Analysis("MC_ZZINC") { } /// @name Analysis methods //@{ /// Book histograms void init() { Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; ZFinder zeefinder(FinalState(), cut, PID::ELECTRON, 65*GeV, 115*GeV, - 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zeefinder, "ZeeFinder"); VetoedFinalState zmminput; zmminput.addVetoOnThisFinalState(zeefinder); ZFinder zmmfinder(zmminput, cut, PID::MUON, 65*GeV, 115*GeV, - 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zmmfinder, "ZmmFinder"); // Properties of the pair momentum double sqrts = sqrtS()>0. ? sqrtS() : 14000.; book(_h_ZZ_pT ,"ZZ_pT", logspace(100, 1.0, 0.5*sqrts/GeV)); book(_h_ZZ_pT_peak ,"ZZ_pT_peak", 25, 0.0, 25.0); book(_h_ZZ_eta ,"ZZ_eta", 40, -7.0, 7.0); book(_h_ZZ_phi ,"ZZ_phi", 25, 0.0, TWOPI); book(_h_ZZ_m ,"ZZ_m", logspace(100, 150.0, 180.0 + 0.25*sqrts/GeV)); // Correlations between the ZZ book(_h_ZZ_dphi ,"ZZ_dphi", 25, 0.0, PI); /// @todo non-linear? 
book(_h_ZZ_deta ,"ZZ_deta", 25, -7.0, 7.0); book(_h_ZZ_dR ,"ZZ_dR", 25, 0.5, 7.0); book(_h_ZZ_dpT ,"ZZ_dpT", logspace(100, 1.0, 0.5*sqrts/GeV)); book(_h_ZZ_costheta_planes ,"ZZ_costheta_planes", 25, -1.0, 1.0); // Properties of the Z bosons book(_h_Z_pT ,"Z_pT", logspace(100, 10.0, 0.25*sqrts/GeV)); book(_h_Z_eta ,"Z_eta", 70, -7.0, 7.0); // Properties of the leptons book(_h_Zl_pT ,"Zl_pT", logspace(100, 30.0, 0.1*sqrts/GeV)); book(_h_Zl_eta ,"Zl_eta", 40, -3.5, 3.5); // Correlations between the opposite charge leptons book(_h_ZeZm_dphi ,"ZeZm_dphi", 25, 0.0, PI); book(_h_ZeZm_deta ,"ZeZm_deta", 25, -5.0, 5.0); book(_h_ZeZm_dR ,"ZeZm_dR", 25, 0.5, 5.0); book(_h_ZeZm_m ,"ZeZm_m", 100, 0.0, 300.0); } /// Do the analysis void analyze(const Event& e) { const ZFinder& zeefinder = apply(e, "ZeeFinder"); if (zeefinder.bosons().size() != 1) vetoEvent; const ZFinder& zmmfinder = apply(e, "ZmmFinder"); if (zmmfinder.bosons().size() != 1) vetoEvent; // Z momenta const FourMomentum& zee = zeefinder.bosons()[0].momentum(); const FourMomentum& zmm = zmmfinder.bosons()[0].momentum(); const FourMomentum zz = zee + zmm; // Lepton momenta const FourMomentum& ep = zeefinder.constituents()[0].momentum(); const FourMomentum& em = zeefinder.constituents()[1].momentum(); const FourMomentum& mp = zmmfinder.constituents()[0].momentum(); const FourMomentum& mm = zmmfinder.constituents()[1].momentum(); const double weight = 1.0; _h_ZZ_pT->fill(zz.pT()/GeV, weight); _h_ZZ_pT_peak->fill(zz.pT()/GeV, weight); _h_ZZ_eta->fill(zz.eta(), weight); _h_ZZ_phi->fill(zz.phi(), weight); if (zz.mass2() > 0.0) ///< @todo Protection still needed? _h_ZZ_m->fill(zz.mass()/GeV, weight); _h_ZZ_dphi->fill(deltaPhi(zee, zmm), weight); _h_ZZ_deta->fill(zee.eta()-zmm.eta(), weight); _h_ZZ_dR->fill(deltaR(zee,zmm), weight); _h_ZZ_dpT->fill(fabs(zee.pT()-zmm.pT()), weight); const Vector3 crossZee = ep.p3().cross(em.p3()); const Vector3 crossZmm = mp.p3().cross(mm.p3()); const double costheta = crossZee.dot(crossZmm)/crossZee.mod()/crossZmm.mod(); _h_ZZ_costheta_planes->fill(costheta, weight); _h_Z_pT->fill(zee.pT()/GeV, weight); _h_Z_pT->fill(zmm.pT()/GeV, weight); _h_Z_eta->fill(zee.eta(), weight); _h_Z_eta->fill(zmm.eta(), weight); _h_Zl_pT->fill(ep.pT()/GeV, weight); _h_Zl_pT->fill(em.pT()/GeV, weight); _h_Zl_pT->fill(mp.pT()/GeV, weight); _h_Zl_pT->fill(mm.pT()/GeV, weight); _h_Zl_eta->fill(ep.eta(), weight); _h_Zl_eta->fill(em.eta(), weight); _h_Zl_eta->fill(mp.eta(), weight); _h_Zl_eta->fill(mm.eta(), weight); _h_ZeZm_dphi->fill(deltaPhi(ep, mm), weight); _h_ZeZm_deta->fill(ep.eta()-mm.eta(), weight); _h_ZeZm_dR->fill(deltaR(ep, mm), weight); const FourMomentum epmm = ep + mm; const double m_epmm = (epmm.mass2() > 0) ? epmm.mass() : 0; ///< @todo Protection still needed? 
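// The mass2() > 0 guards here and for ZZ_m above presumably protect against tiny
// negative mass-squared values from floating-point rounding, which would otherwise
// make mass() ill-defined for these composite four-momenta.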
_h_ZeZm_m->fill(m_epmm/GeV, weight); } /// Finalize void finalize() { const double s = crossSection()/picobarn/sumOfWeights(); scale(_h_ZZ_pT, s); scale(_h_ZZ_pT_peak, s); scale(_h_ZZ_eta, s); scale(_h_ZZ_phi, s); scale(_h_ZZ_m, s); scale(_h_ZZ_dphi, s); scale(_h_ZZ_deta, s); scale(_h_ZZ_dR, s); scale(_h_ZZ_dpT, s); scale(_h_ZZ_costheta_planes, s); scale(_h_Z_pT, s); scale(_h_Z_eta, s); scale(_h_Zl_pT, s); scale(_h_Zl_eta, s); scale(_h_ZeZm_dphi, s); scale(_h_ZeZm_deta, s); scale(_h_ZeZm_dR, s); scale(_h_ZeZm_m, s); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_ZZ_pT; Histo1DPtr _h_ZZ_pT_peak; Histo1DPtr _h_ZZ_eta; Histo1DPtr _h_ZZ_phi; Histo1DPtr _h_ZZ_m; Histo1DPtr _h_ZZ_dphi; Histo1DPtr _h_ZZ_deta; Histo1DPtr _h_ZZ_dR; Histo1DPtr _h_ZZ_dpT; Histo1DPtr _h_ZZ_costheta_planes; Histo1DPtr _h_Z_pT; Histo1DPtr _h_Z_eta; Histo1DPtr _h_Zl_pT; Histo1DPtr _h_Zl_eta; Histo1DPtr _h_ZeZm_dphi; Histo1DPtr _h_ZeZm_deta; Histo1DPtr _h_ZeZm_dR; Histo1DPtr _h_ZeZm_m; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_ZZINC); } diff --git a/analyses/pluginMC/MC_ZZJETS.cc b/analyses/pluginMC/MC_ZZJETS.cc --- a/analyses/pluginMC/MC_ZZJETS.cc +++ b/analyses/pluginMC/MC_ZZJETS.cc @@ -1,120 +1,120 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetAnalysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief MC validation analysis for Z[ee]Z[mumu] + jets events class MC_ZZJETS : public MC_JetAnalysis { public: /// Default constructor MC_ZZJETS() : MC_JetAnalysis("MC_ZZJETS", 4, "Jets") { } /// @name Analysis methods //@{ /// Book histograms void init() { Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; - ZFinder zeefinder(FinalState(), cut, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zeefinder(FinalState(), cut, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zeefinder, "ZeeFinder"); VetoedFinalState zmminput; zmminput.addVetoOnThisFinalState(zeefinder); - ZFinder zmmfinder(zmminput, cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + ZFinder zmmfinder(zmminput, cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zmmfinder, "ZmmFinder"); VetoedFinalState jetinput; jetinput .addVetoOnThisFinalState(zeefinder) .addVetoOnThisFinalState(zmmfinder); FastJets jetpro(jetinput, FastJets::ANTIKT, 0.4); declare(jetpro, "Jets"); // Correlations with jets book(_h_ZZ_jet1_deta ,"ZZ_jet1_deta", 70, -7.0, 7.0); book(_h_ZZ_jet1_dR ,"ZZ_jet1_dR", 25, 1.5, 7.0); book(_h_Ze_jet1_dR ,"Ze_jet1_dR", 25, 0.0, 7.0); // Global stuff book(_h_HT ,"HT", logspace(100, 100.0, 0.5*(sqrtS()>0.?sqrtS():14000.)/GeV)); MC_JetAnalysis::init(); } /// Do the analysis void analyze(const Event& e) { const double weight = 1.0; const ZFinder& zeefinder = apply(e, "ZeeFinder"); if (zeefinder.bosons().size() != 1) vetoEvent; const ZFinder& zmmfinder = apply(e, "ZmmFinder"); if (zmmfinder.bosons().size() != 1) vetoEvent; // Z momenta const FourMomentum& zee = zeefinder.bosons()[0].momentum(); const FourMomentum& zmm = zmmfinder.bosons()[0].momentum(); const FourMomentum zz = zee + zmm; // Lepton momenta const FourMomentum& ep = zeefinder.constituents()[0].momentum(); const FourMomentum& em = zeefinder.constituents()[1].momentum(); const FourMomentum& mp = zmmfinder.constituents()[0].momentum(); const FourMomentum& mm = 
zmmfinder.constituents()[1].momentum(); const Jets& jets = apply(e, "Jets").jetsByPt(_jetptcut); if (jets.size() > 0) { const FourMomentum j0 = jets[0].momentum(); _h_ZZ_jet1_deta->fill(zz.eta()-j0.eta(), weight); _h_ZZ_jet1_dR->fill(deltaR(zz, j0), weight); _h_Ze_jet1_dR->fill(deltaR(ep, j0), weight); } double HT = ep.pT() + em.pT() + mp.pT() + mm.pT(); for (const Jet& jet : jets) HT += jet.pT(); if (HT > 0.0) _h_HT->fill(HT/GeV, weight); MC_JetAnalysis::analyze(e); } /// Finalize void finalize() { const double s = crossSection()/picobarn/sumOfWeights(); scale(_h_ZZ_jet1_deta, s); scale(_h_ZZ_jet1_dR, s); scale(_h_Ze_jet1_dR, s); scale(_h_HT, s); MC_JetAnalysis::finalize(); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_ZZ_jet1_deta; Histo1DPtr _h_ZZ_jet1_dR; Histo1DPtr _h_Ze_jet1_dR; Histo1DPtr _h_HT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_ZZJETS); } diff --git a/analyses/pluginMC/MC_ZZKTSPLITTINGS.cc b/analyses/pluginMC/MC_ZZKTSPLITTINGS.cc --- a/analyses/pluginMC/MC_ZZKTSPLITTINGS.cc +++ b/analyses/pluginMC/MC_ZZKTSPLITTINGS.cc @@ -1,71 +1,71 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetSplittings.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief MC validation analysis for Z[ee]Z[mumu] + jets events class MC_ZZKTSPLITTINGS : public MC_JetSplittings { public: /// Default constructor MC_ZZKTSPLITTINGS() : MC_JetSplittings("MC_ZZKTSPLITTINGS", 4, "Jets") { } /// @name Analysis methods //@{ /// Book histograms void init() { Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; ZFinder zeefinder(FinalState(), cut, PID::ELECTRON, 65*GeV, 115*GeV, - 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zeefinder, "ZeeFinder"); VetoedFinalState zmminput; zmminput.addVetoOnThisFinalState(zeefinder); ZFinder zmmfinder(zmminput, cut, PID::MUON, 65*GeV, 115*GeV, - 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK); + 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zmmfinder, "ZmmFinder"); VetoedFinalState jetinput; jetinput .addVetoOnThisFinalState(zeefinder) .addVetoOnThisFinalState(zmmfinder); FastJets jetpro(jetinput, FastJets::KT, 0.6); declare(jetpro, "Jets"); MC_JetSplittings::init(); } /// Do the analysis void analyze(const Event & e) { const ZFinder& zeefinder = apply(e, "ZeeFinder"); if (zeefinder.bosons().size() != 1) vetoEvent; const ZFinder& zmmfinder = apply(e, "ZmmFinder"); if (zmmfinder.bosons().size() != 1) vetoEvent; MC_JetSplittings::analyze(e); } /// Finalize void finalize() { MC_JetSplittings::finalize(); } //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_ZZKTSPLITTINGS); } diff --git a/analyses/pluginMisc/PDG_TAUS.cc b/analyses/pluginMisc/PDG_TAUS.cc --- a/analyses/pluginMisc/PDG_TAUS.cc +++ b/analyses/pluginMisc/PDG_TAUS.cc @@ -1,212 +1,212 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/TauFinder.hh" namespace Rivet { class PDG_TAUS : public Analysis { public: /// Constructor PDG_TAUS() : Analysis("PDG_TAUS") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - TauFinder tauleptonic(TauFinder::LEPTONIC); // open cuts, leptonic decays + TauFinder tauleptonic(TauFinder::DecayMode::LEPTONIC); // open cuts, leptonic decays declare(tauleptonic, "TauLeptonic"); - TauFinder tauhadronic(TauFinder::HADRONIC); // open cuts, hadronic decays + 
TauFinder tauhadronic(TauFinder::DecayMode::HADRONIC); // open cuts, hadronic decays declare(tauhadronic, "TauHadronic"); populateDecayMap(); book(_h_ratio_mu ,1, 1, 1); book(_h_ratio_el ,1, 1, 2); book(_h_1prong_pinu ,2, 1, 1); book(_h_1prong_Kpnu ,2, 1, 2); book(_h_1prong_pipinu ,2, 1, 3); book(_h_1prong_Kppinu ,2, 1, 4); book(_h_1prong_pipipinu ,2, 1, 5); book(_h_1prong_Knpinu ,2, 1, 6); book(_h_3prong_pipipinu ,2, 2, 1); book(_h_5prong ,2, 3, 1); book(_weights_had, "TMP/weights_had"); book(_weights_mu, "TMP/weights_mu"); book(_weights_el, "TMP/weights_el"); } /// Perform the per-event analysis void analyze(const Event& e) { const TauFinder& taulep = apply(e, "TauLeptonic"); const TauFinder& tauhad = apply(e, "TauHadronic"); // Hadronic tau decays --- prong decays for(const Particle& tau : tauhad.taus()) { _weights_had->fill(); int prongs = countProngs(tau); // number of charged particles among decay products // Only do 1 prong decays here if (prongs == 1) { ////// Exclusive decay modes "1-prong" if (analyzeDecay(tau, decay_pids["pinu"], true)) _h_1prong_pinu->fill(1); if (analyzeDecay(tau, decay_pids["Kpnu"], true)) _h_1prong_Kpnu->fill(1); if (analyzeDecay(tau, decay_pids["pipinu"], true)) _h_1prong_pipinu->fill(1); if (analyzeDecay(tau, decay_pids["Kppinu"] , true)) _h_1prong_Kppinu->fill(1); if (analyzeDecay(tau, decay_pids["pipipinu"], true)) _h_1prong_pipipinu->fill(1); // Kshort, Klong --- (twice) filling the K0 labelled PDG histo if (analyzeDecay(tau, decay_pids["KSpinu"] , true)) _h_1prong_Knpinu->fill(1); if (analyzeDecay(tau, decay_pids["KLpinu"] , true)) _h_1prong_Knpinu->fill(1); } else if (prongs == 3) { if (analyzeDecay(tau, decay_pids["3pipipinu"], true)) _h_3prong_pipipinu->fill(1); } else if (prongs == 5 && !any(tau.children(), HasAbsPID(310))) _h_5prong->fill(1); } // Leptonic tau decays --- look for radiative and non-radiative 1 prong decays for(const Particle& tau : taulep.taus()) { int prongs = countProngs(tau); // number of charged particles among decay products // Only do 1 prong decays here if (prongs == 1) { analyzeRadiativeDecay(tau, decay_pids["muids"], _weights_mu, true, _h_ratio_mu); analyzeRadiativeDecay(tau, decay_pids["elids"], _weights_el, true, _h_ratio_el); } } } /// Normalise histograms etc., after the run void finalize() { scale(_h_ratio_mu, 1. / *_weights_mu); scale(_h_ratio_el, 1. 
/ *_weights_el); const YODA::Counter norm = *_weights_had + *_weights_mu + *_weights_el; scale(_h_1prong_pinu, 1./norm); scale(_h_1prong_Kpnu, 1./norm); scale(_h_1prong_pipinu, 1./norm); scale(_h_1prong_Kppinu, 1./norm); scale(_h_1prong_pipipinu, 1./norm); scale(_h_1prong_Knpinu, 1./norm); scale(_h_3prong_pipipinu, 1./norm); scale(_h_5prong, 1./norm); } // Short hand bool contains(Particle& mother, int id, bool abs=false) { if (abs) return any(mother.children(), HasAbsPID(id)); return any(mother.children(), HasPID(id)); } // Count charged decay products int countProngs(Particle mother) { int n_prongs = 0; for(Particle p : mother.children()) if (p.threeCharge()!=0) ++n_prongs; return n_prongs; } // Set up a lookup table for decays void populateDecayMap() { decay_pids["muids"] = {{ 13,14,16 }}; decay_pids["elids"] = {{ 11,12,16 }}; decay_pids["pinu"] = {{ 211,16 }}; decay_pids["Kpnu"] = {{ 321,16 }}; decay_pids["pipinu"] = {{ 111,211,16 }}; decay_pids["Kppinu"] = {{ 111,321,16 }}; decay_pids["pipipinu"] = {{ 111,111,211,16 }}; decay_pids["KSpinu"] = {{ 211,310,16 }}; decay_pids["KLpinu"] = {{ 211,130,16 }}; decay_pids["3pipipinu"] = {{ 211,211,211,16 }}; } bool analyzeDecay(Particle mother, vector ids, bool absolute) { // There is no point in looking for decays with less particles than to be analysed if (mother.children().size() == ids.size()) { bool decayfound = true; for (int id : ids) { if (!contains(mother, id, absolute)) decayfound = false; } return decayfound; } // end of first if return false; } // Look for radiative (and non-radiative) tau decays to fill a ratio histo void analyzeRadiativeDecay(Particle mother, vector ids, CounterPtr &w_incl, bool absolute, Histo1DPtr h_ratio) { // w_incl ... reference to a global weight counter for all leptonic tau decays // h_ratio ... pointer to ratio histo // There is no point in looking for decays with less particles than to be analysed if (mother.children().size() >= ids.size()) { bool decayfound = true; for (int id : ids) { if (!contains(mother, id, absolute)) decayfound = false; } // Do not increment counters if the specified decay products were not found if (decayfound) { w_incl->fill(); // the (global) weight counter for leptonic decays bool radiative = any(mother.children(), HasPID(PID::PHOTON)); // Only fill the histo if there is a radiative decay if (radiative) { // Iterate over decay products to find photon with 5 MeV energy for (const Particle& son : mother.children()) { if (son.pid() == PID::PHOTON) { // Require photons to have at least 5 MeV energy in the rest frame of the tau // boosted taus if (!mother.momentum().betaVec().isZero()) { LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mother.momentum().betaVec()); if (cms_boost.transform(son.momentum())[0]/MeV > 5.) { h_ratio->fill(1); break; } } // not boosted taus else { if (son.momentum()[0]/MeV > 5.) 
{ h_ratio->fill(1); break; } } } } // end loop over decay products } // end of radiative } // end of decayfound } // end of first if } private: /// @name Histograms //@{ Histo1DPtr _h_ratio_mu, _h_ratio_el; Histo1DPtr _h_1prong_pinu, _h_1prong_Kpnu, _h_1prong_Kppinu, _h_1prong_pipinu, _h_1prong_pipipinu, _h_1prong_Knpinu; Histo1DPtr _h_3prong_pipipinu; Histo1DPtr _h_5prong; //@} CounterPtr _weights_had, _weights_mu, _weights_el; map > decay_pids; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(PDG_TAUS); } diff --git a/analyses/pluginRHIC/STAR_2006_S6870392.cc b/analyses/pluginRHIC/STAR_2006_S6870392.cc --- a/analyses/pluginRHIC/STAR_2006_S6870392.cc +++ b/analyses/pluginRHIC/STAR_2006_S6870392.cc @@ -1,86 +1,86 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief STAR inclusive jet cross-section in pp at 200 GeV class STAR_2006_S6870392 : public Analysis { public: /// Constructor STAR_2006_S6870392() : Analysis("STAR_2006_S6870392") { } /// @name Analysis methods //@{ /// Book projections and histograms void init() { FinalState fs(-2.0, 2.0); declare(fs, "FS"); declare(FastJets(fs, FastJets::CDFMIDPOINT, 0.4, - JetAlg::ALL_MUONS, JetAlg::NO_INVISIBLES, + JetAlg::Muons::ALL, JetAlg::Invisibles::NONE, nullptr, 0.5), "MidpointJets"); book(_h_jet_pT_MB ,1, 1, 1); book(_h_jet_pT_HT ,2, 1, 1); } /// Do the analysis void analyze(const Event& event) { // Skip if the event is empty const FinalState& fs = apply(event, "FS"); if (fs.empty()) { MSG_DEBUG("Skipping event " << numEvents() << " because no final state found "); vetoEvent; } // Find jets const FastJets& jetpro = apply(event, "MidpointJets"); const Jets& jets = jetpro.jetsByPt(); if (!jets.empty()) { const Jet& j1 = jets.front(); if (inRange(fabs(j1.eta()), 0.2, 0.8)) { for (const Jet& j : jets) { const FourMomentum pj = j.momentum(); _h_jet_pT_MB->fill(pj.pT()); _h_jet_pT_HT->fill(pj.pT()); } } } } /// Finalize void finalize() { double normalisation = crossSection()/picobarn/sumOfWeights()/(2*0.6*2*M_PI); scale(_h_jet_pT_MB, normalisation); scale(_h_jet_pT_HT, normalisation); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_jet_pT_MB; Histo1DPtr _h_jet_pT_HT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(STAR_2006_S6870392); } diff --git a/include/Rivet/Projections/DISFinalState.hh b/include/Rivet/Projections/DISFinalState.hh --- a/include/Rivet/Projections/DISFinalState.hh +++ b/include/Rivet/Projections/DISFinalState.hh @@ -1,92 +1,92 @@ // -*- C++ -*- #ifndef RIVET_DISFinalState_HH #define RIVET_DISFinalState_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DISKinematics.hh" namespace Rivet { /// @brief Final state particles boosted to the hadronic center of mass system. /// /// NB. The DIS scattered lepton is not included in the final state particles. class DISFinalState: public FinalState { public: /// Type of DIS boost to apply - enum BoostType { HCM, BREIT, LAB }; + enum class BoostFrame { HCM, BREIT, LAB }; /// @name Constructors //@{ /// Constructor with explicit FinalState /// @note The DISKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary. 
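/// @note BoostFrame is a scoped enum (enum class), so call sites must qualify the
/// frame fully, e.g. DISFinalState(diskin, DISFinalState::BoostFrame::HCM) as in the
/// updated H1 analysis above, rather than the old DISFinalState::HCM spelling.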
- DISFinalState(const FinalState& fs, BoostType boosttype, const DISKinematics& kinematicsp=DISKinematics()) + DISFinalState(const FinalState& fs, BoostFrame boosttype, const DISKinematics& kinematicsp=DISKinematics()) : _boosttype(boosttype) { setName("DISFinalState"); declare(fs, "FS"); declare(kinematicsp, "Kinematics"); } /// Constructor with optional FinalState /// @note The DISKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary. - DISFinalState(BoostType boosttype, const FinalState& fs=FinalState(), const DISKinematics& kinematicsp=DISKinematics()) + DISFinalState(BoostFrame boosttype, const FinalState& fs=FinalState(), const DISKinematics& kinematicsp=DISKinematics()) : DISFinalState(fs, boosttype, kinematicsp) { } /// Constructor with explicit cuts to define final-state particles /// @note The DISKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary. - DISFinalState(const Cut& c, BoostType boosttype, const DISKinematics& kinematicsp=DISKinematics()) + DISFinalState(const Cut& c, BoostFrame boosttype, const DISKinematics& kinematicsp=DISKinematics()) : DISFinalState(FinalState(c), boosttype, kinematicsp) { } /// Constructor with explicit cuts to define final-state particles /// @note The DISKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary. - DISFinalState(BoostType boosttype, const Cut& c, const DISKinematics& kinematicsp=DISKinematics()) + DISFinalState(BoostFrame boosttype, const Cut& c, const DISKinematics& kinematicsp=DISKinematics()) : DISFinalState(FinalState(c), boosttype, kinematicsp) { } // /// @brief Constructor with default FinalState // /// @note The DISKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary. - // DISFinalState(BoostType boosttype, const DISKinematics& kinematicsp=DISKinematics()) + // DISFinalState(BoostFrame boosttype, const DISKinematics& kinematicsp=DISKinematics()) // : DISFinalState(FinalState(), boosttype, kinematicsp) // { } /// Backward compatible constructor with default FinalState /// @deprecated Prefer a version that doesn't need a DISKinematics argument - DISFinalState(const DISKinematics& kinematicsp, BoostType boosttype) + DISFinalState(const DISKinematics& kinematicsp, BoostFrame boosttype) : DISFinalState(FinalState(), boosttype, kinematicsp) { } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(DISFinalState); //@} protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. 
int compare(const Projection& p) const { const DISFinalState& other = dynamic_cast(p); return mkNamedPCmp(p, "Kinematics") || mkNamedPCmp(p, "FS") || cmp(_boosttype, other._boosttype); } private: - BoostType _boosttype; + BoostFrame _boosttype; }; } #endif diff --git a/include/Rivet/Projections/FastJets.hh b/include/Rivet/Projections/FastJets.hh --- a/include/Rivet/Projections/FastJets.hh +++ b/include/Rivet/Projections/FastJets.hh @@ -1,292 +1,291 @@ // -*- C++ -*- #ifndef RIVET_FastJets_HH #define RIVET_FastJets_HH #include "Rivet/Jet.hh" #include "Rivet/Particle.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/JetAlg.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Tools/RivetFastJet.hh" #include "fastjet/SISConePlugin.hh" #include "fastjet/ATLASConePlugin.hh" #include "fastjet/CMSIterativeConePlugin.hh" #include "fastjet/CDFJetCluPlugin.hh" #include "fastjet/CDFMidPointPlugin.hh" #include "fastjet/D0RunIIConePlugin.hh" #include "fastjet/TrackJetPlugin.hh" #include "fastjet/JadePlugin.hh" //#include "fastjet/PxConePlugin.hh" namespace Rivet { /// Project out jets found using the FastJet package jet algorithms. class FastJets : public JetAlg { public: /// Wrapper enum for selected FastJet jet algorithms. /// @todo Move to JetAlg and alias here? - enum JetAlgName { KT, CAM, SISCONE, ANTIKT, + enum Algo { KT, CAM, SISCONE, ANTIKT, // PXCONE, ATLASCONE, CMSCONE, CDFJETCLU, CDFMIDPOINT, D0ILCONE, JADE, DURHAM, TRACKJET, GENKTEE }; - /// @name Constructors etc. //@{ /// Constructor from a FastJet JetDefinition /// /// @warning The AreaDefinition pointer must be heap-allocated: it will be stored/deleted via a shared_ptr. FastJets(const FinalState& fsp, const fastjet::JetDefinition& jdef, - JetAlg::MuonsStrategy usemuons=JetAlg::ALL_MUONS, - JetAlg::InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES, + JetAlg::Muons usemuons=JetAlg::Muons::ALL, + JetAlg::Invisibles useinvis=JetAlg::Invisibles::NONE, fastjet::AreaDefinition* adef=nullptr) : JetAlg(fsp, usemuons, useinvis), _jdef(jdef), _adef(adef) { _initBase(); } /// JetDefinition-based constructor with reordered args for easier specification of jet area definition /// /// @warning The AreaDefinition pointer must be heap-allocated: it will be stored/deleted via a shared_ptr. FastJets(const FinalState& fsp, const fastjet::JetDefinition& jdef, fastjet::AreaDefinition* adef, - JetAlg::MuonsStrategy usemuons=JetAlg::ALL_MUONS, - JetAlg::InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES) + JetAlg::Muons usemuons=JetAlg::Muons::ALL, + JetAlg::Invisibles useinvis=JetAlg::Invisibles::NONE) : FastJets(fsp, jdef, usemuons, useinvis, adef) { } /// Native argument constructor, using FastJet alg/scheme enums. /// /// @warning The AreaDefinition pointer must be heap-allocated: it will be stored/deleted via a shared_ptr. FastJets(const FinalState& fsp, fastjet::JetAlgorithm type, fastjet::RecombinationScheme recom, double rparameter, - JetAlg::MuonsStrategy usemuons=JetAlg::ALL_MUONS, - JetAlg::InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES, + JetAlg::Muons usemuons=JetAlg::Muons::ALL, + JetAlg::Invisibles useinvis=JetAlg::Invisibles::NONE, fastjet::AreaDefinition* adef=nullptr) : FastJets(fsp, fastjet::JetDefinition(type, rparameter, recom), usemuons, useinvis, adef) { } /// Native argument constructor with reordered args for easier specification of jet area definition /// /// @warning The AreaDefinition pointer must be heap-allocated: it will be stored/deleted via a shared_ptr. 
FastJets(const FinalState& fsp, fastjet::JetAlgorithm type, fastjet::RecombinationScheme recom, double rparameter, fastjet::AreaDefinition* adef, - JetAlg::MuonsStrategy usemuons=JetAlg::ALL_MUONS, - JetAlg::InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES) + JetAlg::Muons usemuons=JetAlg::Muons::ALL, + JetAlg::Invisibles useinvis=JetAlg::Invisibles::NONE) : FastJets(fsp, type, recom, rparameter, usemuons, useinvis, adef) { } /// @brief Explicitly pass in an externally-constructed plugin /// /// @warning Provided plugin and area definition pointers must be heap-allocated; Rivet will store/delete via a shared_ptr FastJets(const FinalState& fsp, fastjet::JetDefinition::Plugin* plugin, - JetAlg::MuonsStrategy usemuons=JetAlg::ALL_MUONS, - JetAlg::InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES, + JetAlg::Muons usemuons=JetAlg::Muons::ALL, + JetAlg::Invisibles useinvis=JetAlg::Invisibles::NONE, fastjet::AreaDefinition* adef=nullptr) : FastJets(fsp, fastjet::JetDefinition(plugin), usemuons, useinvis, adef) { _plugin.reset(plugin); } /// @brief Explicitly pass in an externally-constructed plugin, with reordered args for easier specification of jet area definition /// /// @warning Provided plugin and area definition pointers must be heap-allocated; Rivet will store/delete via a shared_ptr FastJets(const FinalState& fsp, fastjet::JetDefinition::Plugin* plugin, fastjet::AreaDefinition* adef, - JetAlg::MuonsStrategy usemuons=JetAlg::ALL_MUONS, - JetAlg::InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES) + JetAlg::Muons usemuons=JetAlg::Muons::ALL, + JetAlg::Invisibles useinvis=JetAlg::Invisibles::NONE) : FastJets(fsp, plugin, usemuons, useinvis, adef) { } /// @brief Convenience constructor using Rivet enums for most common jet algs (including some plugins). /// /// For the built-in algs, E-scheme recombination is used. For full control /// of FastJet built-in jet algs, use the constructors from native-args or a /// plugin pointer. /// /// @warning Provided area definition pointer must be heap-allocated; Rivet will store/delete via a shared_ptr FastJets(const FinalState& fsp, - JetAlgName alg, double rparameter, - JetAlg::MuonsStrategy usemuons=JetAlg::ALL_MUONS, - JetAlg::InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES, + Algo alg, double rparameter, + JetAlg::Muons usemuons=JetAlg::Muons::ALL, + JetAlg::Invisibles useinvis=JetAlg::Invisibles::NONE, fastjet::AreaDefinition* adef=nullptr, double seed_threshold=1.0) : JetAlg(fsp, usemuons, useinvis) { _initBase(); _initJdef(alg, rparameter, seed_threshold); } // /// Same thing as above, but without an FS (for when we want to pass the particles directly to the calc method) // /// @todo Does this work properly, without internal HeavyQuarks etc.? - // FastJets(JetAlgName alg, double rparameter, double seed_threshold=1.0) { _initJdef(alg, rparameter, seed_threshold); } + // FastJets(Algo alg, double rparameter, double seed_threshold=1.0) { _initJdef(alg, rparameter, seed_threshold); } // /// Same thing as above, but without an FS (for when we want to pass the particles directly to the calc method) // /// @todo Does this work properly, without internal HeavyQuarks etc.? // FastJets(fastjet::JetAlgorithm type, fastjet::RecombinationScheme recom, double rparameter) { _initJdef(type, recom, rparameter); } // /// Same thing as above, but without an FS (for when we want to pass the particles directly to the calc method) // /// @todo Does this work properly, without internal HeavyQuarks etc.? 
// FastJets(fastjet::JetDefinition::Plugin* plugin) : _jdef(plugin), _plugin(plugin) { // // _plugin.reset(plugin); // // _jdef = fastjet::JetDefinition(plugin); // } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(FastJets); //@} /// @name Static helper functions for FastJet interaction, with tagging //@{ /// Make PseudoJets for input to a ClusterSequence, with user_index codes for constituent- and tag-particle linking static PseudoJets mkClusterInputs(const Particles& fsparticles, const Particles& tagparticles=Particles()); /// Make a Rivet Jet from a PseudoJet holding a user_index code for lookup of Rivet fsparticle or tagparticle links static Jet mkJet(const PseudoJet& pj, const Particles& fsparticles, const Particles& tagparticles=Particles()); /// Convert a whole list of PseudoJets to a list of Jets, with mkJet-style unpacking static Jets mkJets(const PseudoJets& pjs, const Particles& fsparticles, const Particles& tagparticles=Particles()); //@} /// Reset the projection. Jet def, etc. are unchanged. void reset(); /// @brief Use provided jet area definition /// /// @warning The provided pointer must be heap-allocated: it will be stored/deleted via a shared_ptr. /// @note Provide an adef null pointer to re-disable jet area calculation void useJetArea(fastjet::AreaDefinition* adef) { _adef.reset(adef); } /// @name Access to the jets //@{ /// Get the jets (unordered) with pT > ptmin. Jets _jets() const; /// Get the pseudo jets (unordered). PseudoJets pseudoJets(double ptmin=0.0) const; /// Alias PseudoJets pseudojets(double ptmin=0.0) const { return pseudoJets(ptmin); } /// Get the pseudo jets, ordered by \f$ p_T \f$. PseudoJets pseudoJetsByPt(double ptmin=0.0) const { return sorted_by_pt(pseudoJets(ptmin)); } /// Alias PseudoJets pseudojetsByPt(double ptmin=0.0) const { return pseudoJetsByPt(ptmin); } /// Get the pseudo jets, ordered by \f$ E \f$. PseudoJets pseudoJetsByE(double ptmin=0.0) const { return sorted_by_E(pseudoJets(ptmin)); } /// Alias PseudoJets pseudojetsByE(double ptmin=0.0) const { return pseudoJetsByE(ptmin); } /// Get the pseudo jets, ordered by rapidity. PseudoJets pseudoJetsByRapidity(double ptmin=0.0) const { return sorted_by_rapidity(pseudoJets(ptmin)); } /// Alias PseudoJets pseudojetsByRapidity(double ptmin=0.0) const { return pseudoJetsByRapidity(ptmin); } /// Trim (filter) a jet, keeping tag and constituent info in the resulting jet Jet trimJet(const Jet& input, const fastjet::Filter& trimmer) const; //@} /// @name Access to the FastJet clustering objects such as jet def, area def, and cluster //@{ /// Return the cluster sequence. /// @todo Care needed re. const shared_ptr vs. shared_ptr const shared_ptr clusterSeq() const { return _cseq; } /// Return the area-enabled cluster sequence (if an area defn exists, otherwise returns a null ptr). /// @todo Care needed re. const shared_ptr vs. shared_ptr const shared_ptr clusterSeqArea() const { return areaDef() ? dynamic_pointer_cast(_cseq) : nullptr; } /// Return the jet definition. const fastjet::JetDefinition& jetDef() const { return _jdef; } /// @brief Return the area definition. /// /// @warning May be null! /// @todo Care needed re. const shared_ptr vs. shared_ptr const shared_ptr areaDef() const { return _adef; } //@} private: /// Shared utility functions to implement constructor behaviour void _initBase(); - void _initJdef(JetAlgName alg, double rparameter, double seed_threshold); + void _initJdef(Algo alg, double rparameter, double seed_threshold); protected: /// Perform the projection on the Event. 
void project(const Event& e); /// Compare projections. int compare(const Projection& p) const; public: /// Do the calculation locally (no caching). void calc(const Particles& fsparticles, const Particles& tagparticles=Particles()); private: /// Jet definition fastjet::JetDefinition _jdef; /// Pointer to user-handled area definition std::shared_ptr _adef; /// Cluster sequence std::shared_ptr _cseq; /// FastJet external plugin std::shared_ptr _plugin; /// Map of vectors of y scales. This is mutable so we can use caching/lazy evaluation. mutable std::map > _yscales; /// Particles used for constituent and tag lookup Particles _fsparticles, _tagparticles; }; } #endif diff --git a/include/Rivet/Projections/JetAlg.hh b/include/Rivet/Projections/JetAlg.hh --- a/include/Rivet/Projections/JetAlg.hh +++ b/include/Rivet/Projections/JetAlg.hh @@ -1,232 +1,218 @@ // -*- C++ -*- #ifndef RIVET_JetAlg_HH #define RIVET_JetAlg_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Jet.hh" namespace Rivet { /// Abstract base class for projections which can return a set of {@link Jet}s. class JetAlg : public Projection { public: /// Enum for the treatment of muons: whether to include all, some, or none in jet-finding - enum MuonsStrategy { NO_MUONS, DECAY_MUONS, ALL_MUONS }; + enum class Muons { NONE, DECAY, ALL }; /// Enum for the treatment of invisible particles: whether to include all, some, or none in jet-finding - enum InvisiblesStrategy { NO_INVISIBLES, DECAY_INVISIBLES, ALL_INVISIBLES }; + enum class Invisibles { NONE, DECAY, ALL }; /// Constructor - JetAlg(const FinalState& fs, MuonsStrategy usemuons=JetAlg::ALL_MUONS, InvisiblesStrategy useinvis=JetAlg::NO_INVISIBLES); + JetAlg(const FinalState& fs, + Muons usemuons = Muons::ALL, + Invisibles useinvis = Invisibles::NONE); /// Default constructor - JetAlg() {}; + JetAlg() = default; /// Clone on the heap. virtual unique_ptr clone() const = 0; /// Destructor - virtual ~JetAlg() { } + virtual ~JetAlg() = default; /// @name Control the treatment of muons and invisible particles /// /// Since MC-based jet calibration (and/or particle flow) can add back in /// particles that weren't seen in calorimeters/trackers. //@{ /// @brief Include (some) muons in jet construction. /// /// The default behaviour is that jets are only constructed from visible /// particles. Some jet studies, including those from ATLAS, use a definition /// in which neutrinos from hadron decays are included via MC-based calibrations. /// Setting this flag to true avoids the automatic restriction to a VisibleFinalState. - void useMuons(MuonsStrategy usemuons=ALL_MUONS) { + void useMuons(Muons usemuons = Muons::ALL) { _useMuons = usemuons; } /// @brief Include (some) invisible particles in jet construction. /// /// The default behaviour is that jets are only constructed from visible /// particles. Some jet studies, including those from ATLAS, use a definition /// in which neutrinos from hadron decays are included via MC-based calibrations. /// Setting this flag to true avoids the automatic restriction to a VisibleFinalState. - void useInvisibles(InvisiblesStrategy useinvis=DECAY_INVISIBLES) { + void useInvisibles(Invisibles useinvis = Invisibles::DECAY) { _useInvisibles = useinvis; } - /// @brief Include (some) invisible particles in jet construction. - /// - /// The default behaviour is that jets are only constructed from visible - /// particles. 
Some jet studies, including those from ATLAS, use a definition - /// in which neutrinos from hadron decays are included via MC-based calibrations. - /// Setting this flag to true avoids the automatic restriction to a VisibleFinalState. - /// - /// @deprecated Use the enum-arg version instead. Will be removed in Rivet v3 + /// @brief obsolete chooser + DEPRECATED("make an explicit choice from Invisibles::{NONE,DECAY,ALL}. This boolean call does not allow for ALL") void useInvisibles(bool useinvis) { - _useInvisibles = useinvis ? DECAY_INVISIBLES : NO_INVISIBLES; + _useInvisibles = useinvis ? Invisibles::DECAY : Invisibles::NONE; } //@} /// @name Access to jet objects //@{ /// Get jets in no guaranteed order, with an optional Cut /// @note Returns a copy rather than a reference, due to cuts virtual Jets jets(const Cut& c=Cuts::open()) const { return filter_select(_jets(), c); - // const Jets rawjets = _jets(); - // // Just return a copy of rawjets if the cut is open - // if (c == Cuts::open()) return rawjets; - // // If there is a non-trivial cut... - // /// @todo Use an STL erase(remove_if) and lambda function for this - // Jets rtn; - // rtn.reserve(size()); - // for (const Jet& j, rawjets) - // if (c->accept(j)) rtn.push_back(j); - // return rtn; } /// Get jets in no guaranteed order, with a selection functor /// @note Returns a copy rather than a reference, due to cuts virtual Jets jets(const JetSelector& selector) const { return filter_select(_jets(), selector); } /// Get the jets with a Cut applied, and ordered by supplied sorting functor /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const Cut& c, const JetSorter& sorter) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return sortBy(jets(c), sorter); } /// Get the jets, ordered by supplied sorting functor, with an optional Cut /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const JetSorter& sorter, const Cut& c=Cuts::open()) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return jets(c, sorter); } /// Get the jets, ordered by supplied sorting function object, with optional cuts on \f$ p_\perp \f$ and rapidity. /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const JetSelector& selector, const JetSorter& sorter) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return sortBy(jets(selector), sorter); } /// Get the jets, ordered by supplied sorting functor and with a selection functor applied /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const JetSorter& sorter, const JetSelector selector) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return jets(selector, sorter); } /// Get the jets, ordered by \f$ p_T \f$, with optional cuts. /// /// @note Returns a copy rather than a reference, due to cuts and sorting /// /// This is a very common use-case, so is available as syntatic sugar for jets(c, cmpMomByPt). /// @todo The other sorted accessors should be removed in a cleanup. Jets jetsByPt(const Cut& c=Cuts::open()) const { return jets(c, cmpMomByPt); } /// Get the jets, ordered by \f$ p_T \f$, with cuts via a selection functor. 
/// /// @note Returns a copy rather than a reference, due to cuts and sorting /// /// This is a very common use-case, so is available as syntatic sugar for jets(c, cmpMomByPt). /// @todo The other sorted accessors should be removed in a cleanup. Jets jetsByPt(const JetSelector& selector) const { return jets(selector, cmpMomByPt); } /// Get the jets, ordered by \f$ p_T \f$, with a cut on \f$ p_\perp \f$. /// /// @deprecated Use the version with a Cut argument /// @note Returns a copy rather than a reference, due to cuts and sorting /// /// This is a very common use-case, so is available as syntatic sugar for jets(Cuts::pT >= ptmin, cmpMomByPt). /// @todo The other sorted accessors should be removed in a cleanup. Jets jetsByPt(double ptmin) const { return jets(Cuts::pT >= ptmin, cmpMomByPt); } //@} protected: /// @brief Internal pure virtual method for getting jets in no guaranteed order. virtual Jets _jets() const = 0; public: /// Count the jets size_t size() const { return jets().size(); } /// Count the jets after a Cut is applied. size_t size(const Cut& c) const { return jets(c).size(); } /// Count the jets after a selection functor is applied. size_t size(const JetSelector& s) const { return jets(s).size(); } /// Is this jet finder empty? bool empty() const { return size() == 0; } /// Is this jet finder empty after a Cut is applied? bool empty(const Cut& c) const { return size(c) == 0; } /// Is this jet finder empty after a selection functor is applied? bool empty(const JetSelector& s) const { return size(s) == 0; } /// Clear the projection. virtual void reset() = 0; typedef Jet entity_type; typedef Jets collection_type; /// Template-usable interface common to FinalState. collection_type entities() const { return jets(); } // /// Do the calculation locally (no caching). // virtual void calc(const Particles& constituents, const Particles& tagparticles=Particles()) = 0; protected: /// Perform the projection on the Event. virtual void project(const Event& e) = 0; /// Compare projections. virtual int compare(const Projection& p) const = 0; protected: /// Flag to determine whether or not to exclude (some) muons from the would-be constituents. - MuonsStrategy _useMuons; + Muons _useMuons; /// Flag to determine whether or not to exclude (some) invisible particles from the would-be constituents. - InvisiblesStrategy _useInvisibles; + Invisibles _useInvisibles; }; /// Compatibility typedef, for equivalence with ParticleFinder /// @todo Should we make this the canonical name? Would "require" a header filename change -> breakage or ugly. using JetFinder = JetAlg; } #endif diff --git a/include/Rivet/Projections/PartonicTops.hh b/include/Rivet/Projections/PartonicTops.hh --- a/include/Rivet/Projections/PartonicTops.hh +++ b/include/Rivet/Projections/PartonicTops.hh @@ -1,107 +1,116 @@ // -*- C++ -*- #ifndef RIVET_PartonicTops_HH #define RIVET_PartonicTops_HH #include "Rivet/Projections/ParticleFinder.hh" namespace Rivet { /// @brief Convenience finder of partonic top quarks /// /// @warning Requires there to be tops in the event record. A fiducial pseudo-top /// analysis approach is strongly recommended instead of this. class PartonicTops : public ParticleFinder { public: /// @brief Enum for categorising top quark decay modes /// /// More specifically, the decay mode of the W from the top. We presume top decay to a W and b quark. 
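A minimal call-site sketch for the renamed jet enums above (illustrative; the analysis context, final-state cut, jet radius and pT threshold are assumed rather than taken from this patch):

    // Inside a hypothetical Analysis::init() -- cuts and names are placeholders
    const FinalState fs(Cuts::abseta < 4.9);
    declare(FastJets(fs, FastJets::ANTIKT, 0.4,
                     JetAlg::Muons::NONE, JetAlg::Invisibles::NONE), "Jets");
    // ... and in analyze(), pT-ordered jets above an assumed threshold:
    // const Jets jets = apply<FastJets>(event, "Jets").jetsByPt(Cuts::pT > 20*GeV);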
- enum DecayMode { ALL = 0, ANY = 0, ELECTRON, MUON, TAU, E_MU, E_MU_TAU, HADRONIC }; + enum class DecayMode { + ANY = 0, + ALL = 0, + ELECTRON, + MUON, + TAU, + E_MU, + E_MU_TAU, + HADRONIC + }; /// @name Constructors //@{ /// Constructor optionally taking cuts object PartonicTops(const Cut& c=Cuts::OPEN) - : ParticleFinder(c), _decaymode(ALL), _emu_from_prompt_tau(true), _include_hadronic_taus(false) + : ParticleFinder(c), _decaymode(DecayMode::ALL), _emu_from_prompt_tau(true), _include_hadronic_taus(false) { } /// Constructor taking decay mode details (and an optional cuts object) PartonicTops(DecayMode decaymode, bool emu_from_prompt_tau=true, bool include_hadronic_taus=false, const Cut& c=Cuts::OPEN) : ParticleFinder(c), _decaymode(decaymode), _emu_from_prompt_tau(emu_from_prompt_tau), _include_hadronic_taus(include_hadronic_taus) { } /// Constructor taking decay mode details (and an optional cuts object) PartonicTops(DecayMode decaymode, const Cut& c, bool emu_from_prompt_tau=true, bool include_hadronic_taus=false) : ParticleFinder(c), _decaymode(decaymode), _emu_from_prompt_tau(emu_from_prompt_tau), _include_hadronic_taus(include_hadronic_taus) { } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(PartonicTops); //@} /// Access to the found partonic tops const Particles& tops() const { return _theParticles; } /// Clear the projection void clear() { _theParticles.clear(); } protected: /// Apply the projection on the supplied event. void project(const Event& event) { // Find partonic tops _theParticles = filter_select(event.allParticles(_cuts), lastParticleWith(isTop)); // Filtering by decay mode - if (_decaymode != ALL) { + if (_decaymode != DecayMode::ALL) { const auto fn = [&](const Particle& t) { const Particles descendants = t.allDescendants(); const bool prompt_e = any(descendants, [&](const Particle& p){ return p.abspid() == PID::ELECTRON && p.isPrompt(_emu_from_prompt_tau) && !p.hasAncestor(PID::PHOTON, false); }); const bool prompt_mu = any(descendants, [&](const Particle& p){ return p.abspid() == PID::MUON && p.isPrompt(_emu_from_prompt_tau) && !p.hasAncestor(PID::PHOTON, false); }); - if (prompt_e && (_decaymode == ELECTRON || _decaymode == E_MU || _decaymode == E_MU_TAU)) return true; - if (prompt_mu && (_decaymode == MUON || _decaymode == E_MU || _decaymode == E_MU_TAU)) return true; + if (prompt_e && (_decaymode == DecayMode::ELECTRON || _decaymode == DecayMode::E_MU || _decaymode == DecayMode::E_MU_TAU)) return true; + if (prompt_mu && (_decaymode == DecayMode::MUON || _decaymode == DecayMode::E_MU || _decaymode == DecayMode::E_MU_TAU)) return true; const bool prompt_tau = any(descendants, [&](const Particle& p){ return p.abspid() == PID::TAU && p.isPrompt() && !p.hasAncestor(PID::PHOTON, false); }); const bool prompt_hadronic_tau = any(descendants, [&](const Particle& p){ return p.abspid() == PID::TAU && p.isPrompt() && !p.hasAncestor(PID::PHOTON, false) && none(p.children(), isChargedLepton); }); - if (prompt_tau && (_decaymode == TAU || _decaymode == E_MU_TAU)) return (_include_hadronic_taus || !prompt_hadronic_tau); - if (_decaymode == HADRONIC && (!prompt_e && !prompt_mu && (!prompt_tau || (_include_hadronic_taus && prompt_hadronic_tau)))) return true; //< logical hairiness... 
+ if (prompt_tau && (_decaymode == DecayMode::TAU || _decaymode == DecayMode::E_MU_TAU)) return (_include_hadronic_taus || !prompt_hadronic_tau); + if (_decaymode == DecayMode::HADRONIC && (!prompt_e && !prompt_mu && (!prompt_tau || (_include_hadronic_taus && prompt_hadronic_tau)))) return true; //< logical hairiness... return false; }; ifilter_select(_theParticles, fn); } } /// Compare projections. int compare(const Projection& p) const { const PartonicTops& other = dynamic_cast(p); return cmp(_cuts, other._cuts) || cmp(_decaymode, other._decaymode) || cmp(_emu_from_prompt_tau, other._emu_from_prompt_tau) || cmp(_include_hadronic_taus, other._include_hadronic_taus); } private: DecayMode _decaymode; bool _emu_from_prompt_tau, _include_hadronic_taus; }; } #endif diff --git a/include/Rivet/Projections/TauFinder.hh b/include/Rivet/Projections/TauFinder.hh --- a/include/Rivet/Projections/TauFinder.hh +++ b/include/Rivet/Projections/TauFinder.hh @@ -1,67 +1,72 @@ #ifndef RIVET_TauFinder_HH #define RIVET_TauFinder_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" namespace Rivet { /// @brief Convenience finder of unstable taus /// /// @todo Convert to a general ParticleFinder, since it's not a true final state? Needs some care... class TauFinder : public FinalState { public: - enum DecayType { ANY=0, LEPTONIC=1, HADRONIC }; + enum class DecayMode { + ANY = 0, + ALL = 0, + LEPTONIC, + HADRONIC + }; static bool isHadronic(const Particle& tau) { assert(tau.abspid() == PID::TAU); return any(tau.stableDescendants(), isHadron); } static bool isLeptonic(const Particle& tau) { return !isHadronic(tau); } - TauFinder(DecayType decaytype, const Cut& cut=Cuts::open()) { + TauFinder(DecayMode decaymode, const Cut& cut=Cuts::open()) { /// @todo What about directness/promptness? setName("TauFinder"); - _dectype = decaytype; + _decmode = decaymode; addProjection(UnstableFinalState(cut), "UFS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(TauFinder); const Particles& taus() const { return _theParticles; } protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare with other projections. virtual int compare(const Projection& p) const; private: - /// The decaytype enum - DecayType _dectype; + /// The decaymode enum + DecayMode _decmode; }; /// @todo Make this the canonical name in future using Taus = TauFinder; } #endif diff --git a/include/Rivet/Projections/WFinder.hh b/include/Rivet/Projections/WFinder.hh --- a/include/Rivet/Projections/WFinder.hh +++ b/include/Rivet/Projections/WFinder.hh @@ -1,179 +1,179 @@ // -*- C++ -*- #ifndef RIVET_WFinder_HH #define RIVET_WFinder_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief Convenience finder of leptonically decaying W /// /// Chain together different projections as convenience for finding one W /// from one lepton and the missing E 4-vector in the final state, including photon clustering. 
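A corresponding sketch for the scoped decay-mode enums above (illustrative; the choice of decay modes and the eta cut are assumptions):

    // Inside a hypothetical Analysis::init() -- mode/cut choices are placeholders
    declare(PartonicTops(PartonicTops::DecayMode::E_MU), "LeptonicPartonTops");
    declare(TauFinder(TauFinder::DecayMode::HADRONIC, Cuts::abseta < 2.5), "HadronicTaus");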
class WFinder : public ParticleFinder { public: - enum ChargedLeptons { PROMPTCHLEPTONS=0, ALLCHLEPTONS }; - enum ClusterPhotons { NOCLUSTER=0, CLUSTERNODECAY=1, CLUSTERALL }; - enum PhotonTracking { NOTRACK=0, TRACK=1 }; - enum MassWindow { MASS=0, TRANSMASS=1 }; + enum class ChargedLeptons { PROMPT, ALL }; + enum class ClusterPhotons { NONE, NODECAY, ALL }; + enum class AddPhotons { NO, YES }; + enum class MassWindow { M, MT }; /// @name Constructors //@{ /// Constructor taking cuts object /// @param inputfs Input final state /// @param leptoncuts Charged lepton cuts /// @param pid Type of the charged lepton /// @param minmass,maxmass (Transverse) mass window /// @param missingET Minimal amount of missing ET (neutrinos) required /// @param dRmax Maximum dR of photons around charged lepton to take into account /// for W reconstruction (only relevant if one of the following are true) /// @param chLeptons Only use prompt charged leptons, or any charged leptons? /// @param clusterPhotons Whether such photons are supposed to be /// clustered to the lepton object and thus W mom /// @param trackPhotons Whether such photons should be added to _theParticles /// @param masstype Whether mass window should be applied using m or mT /// - /// @todo Revisit NOTRACK as default? + /// @todo Revisit AddPhotons::NO as default? WFinder(const FinalState& inputfs, const Cut& leptoncuts, PdgId pid, double minmass, double maxmass, double missingET, double dRmax=0.1, - ChargedLeptons chLeptons=PROMPTCHLEPTONS, - ClusterPhotons clusterPhotons=CLUSTERNODECAY, - PhotonTracking trackPhotons=NOTRACK, - MassWindow masstype=MASS, + ChargedLeptons chLeptons=ChargedLeptons::PROMPT, + ClusterPhotons clusterPhotons=ClusterPhotons::NODECAY, + AddPhotons trackPhotons=AddPhotons::NO, + MassWindow masstype=MassWindow::M, double masstarget=80.4*GeV); - /// Backward-compatible constructor with implicit chLeptons mode = PROMPTCHLEPTONS + /// Backward-compatible constructor with implicit chLeptons mode = ChargedLeptons::PROMPT /// @deprecated Remove this and always use the constructor with chLeptons argument. WFinder(const FinalState& inputfs, const Cut& leptoncuts, PdgId pid, double minmass, double maxmass, double missingET, double dRmax, ClusterPhotons clusterPhotons, - PhotonTracking trackPhotons=NOTRACK, - MassWindow masstype=MASS, + AddPhotons trackPhotons=AddPhotons::NO, + MassWindow masstype=MassWindow::M, double masstarget=80.4*GeV) : WFinder(inputfs, leptoncuts, pid, minmass, maxmass, missingET, - dRmax, PROMPTCHLEPTONS, clusterPhotons, trackPhotons, masstype, masstarget) + dRmax, ChargedLeptons::PROMPT, clusterPhotons, trackPhotons, masstype, masstarget) { } // /// Constructor with more convenient argument ordering and default args // /// - // /// @todo Revisit NOTRACK as default? + // /// @todo Revisit AddPhotons::NO as default? // WFinder(const FinalState& inputfs, // const Cut& leptoncuts, // PdgId pid, // double minmass, double maxmass, // double missingET, // MassWindow masstype, // double masstarget=80.4*GeV, - // ClusterPhotons clusterPhotons=CLUSTERNODECAY, + // ClusterPhotons clusterPhotons=ClusterPhotons::NODECAY, // double dRmax=0.1, - // PhotonTracking trackPhotons=NOTRACK) + // AddPhotons trackPhotons=AddPhotons::NO) // : WFinder(inputfs, leptoncuts, pid, minmass, maxmass, missingET, // dRmax, clusterPhotons, trackPhotons, masstype, masstarget) // { } /// Clone on the heap. 
DEFAULT_RIVET_PROJ_CLONE(WFinder); //@} /// @brief Access to the found bosons, equivalent to constituents() /// @note Currently either 0 or 1 boson can be found. const Particles& bosons() const { return particles(); } /// Access to the found boson (assuming it exists) /// @todo C++17 std::optional... const Particle& boson() const { return particles().front(); } /// @brief Access to the Ws' constituent clustered leptons /// @note Either size 0 if no boson was found or 1 if one boson was found const Particles& constituentLeptons() const { return _leptons; } /// @brief Access to the W's constituent clustered lepton (assuming it exists) /// @todo C++17 std::optional... const Particle& constituentLepton() const { return _leptons.front(); } /// Access to the Ws' constituent neutrinos /// /// @note Either size 0 if no boson was found or 1 if one boson was found /// @note The neutrino can't be perfectly reconstructed -- this is a pseudo-nu from the MET. const Particles& constituentNeutrinos() const { return _neutrinos; } /// Access to the W's constituent neutrino (assuming it exists) /// @note The neutrino can't be perfectly reconstructed -- this is a pseudo-nu from the MET. const Particle& constituentNeutrino() const { return _neutrinos.front(); } /// Access to the particles other than the W leptons and clustered photons /// /// Useful for e.g. input to a jet finder const VetoedFinalState& remainingFinalState() const; /// Access to the missing momentum projection used to find the "neutrino" const MissingMomentum& missingMom() const; /// @brief Calculate the transverse mass of the W, from the charged lepton and neutrino /// /// Defined as sqrt(2 pT_l pT_nu (1.0 - cos(dphi_lnu))). Return -1 if no boson found. double mT() const { if (bosons().empty()) return -1; return Rivet::mT(constituentLepton().mom(), constituentNeutrino().mom()); } protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. int compare(const Projection& p) const; public: /// Clear the projection void clear() { _theParticles.clear(); } private: /// (Transverse) mass cuts double _minmass, _maxmass, _masstarget; /// Use transverse or complete mass? bool _useTransverseMass; /// Missing ET cut double _etMissMin; /// Switch for tracking of photons (whether to include them in the W particle) /// This is relevant when the clustered photons need to be excluded from e.g. a jet finder - PhotonTracking _trackPhotons; + AddPhotons _trackPhotons; /// Charged lepton flavour PdgId _pid; /// Result caches. Will be filled by project() Particles _leptons, _neutrinos; }; } #endif diff --git a/include/Rivet/Projections/ZFinder.hh b/include/Rivet/Projections/ZFinder.hh --- a/include/Rivet/Projections/ZFinder.hh +++ b/include/Rivet/Projections/ZFinder.hh @@ -1,124 +1,124 @@ // -*- C++ -*- #ifndef RIVET_ZFinder_HH #define RIVET_ZFinder_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief Convenience finder of leptonically decaying Zs /// /// Chain together different projections as convenience for finding Z's /// from two leptons in the final state, including photon clustering.
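A call-site sketch for the renamed WFinder enums above (illustrative; lepton flavour, cuts, mass window and MET threshold are assumptions, not values from this patch):

    // Inside a hypothetical Analysis::init() -- all numbers are placeholders
    FinalState fs;
    WFinder wfinder(fs, Cuts::abseta < 2.5 && Cuts::pT > 25*GeV, PID::MUON,
                    0*GeV, 1000*GeV, 25*GeV, 0.1,
                    WFinder::ChargedLeptons::PROMPT,
                    WFinder::ClusterPhotons::NODECAY,
                    WFinder::AddPhotons::NO,
                    WFinder::MassWindow::MT);
    declare(wfinder, "WFinder");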
/// /// @todo Alias then rename as Dileptons class ZFinder : public ParticleFinder { public: - enum ChargedLeptons { PROMPTCHLEPTONS=0, ALLCHLEPTONS }; - enum ClusterPhotons { NOCLUSTER=0, CLUSTERNODECAY=1, CLUSTERALL }; - enum PhotonTracking { NOTRACK=0, TRACK=1 }; + enum class ChargedLeptons { PROMPT, ALL }; + enum class ClusterPhotons { NONE, NODECAY, ALL }; + enum class AddPhotons { NO, YES }; /// @name Constructors //@{ /// Constructor taking cuts object /// @param inputfs Input final state /// @param cuts lepton cuts /// @param pid type of the leptons /// @param minmass,maxmass mass window /// @param dRmax maximum dR of photons around leptons to take into account /// for Z reconstruction (only relevant if one of the following are true) /// @param clusterPhotons whether such photons are supposed to be /// clustered to the lepton objects and thus Z mom /// @param trackPhotons whether such photons should be added to _theParticles /// (cf. _trackPhotons) ZFinder(const FinalState& inputfs, const Cut& cuts, PdgId pid, double minmass, double maxmass, double dRmax=0.1, - ChargedLeptons chLeptons=PROMPTCHLEPTONS, - ClusterPhotons clusterPhotons=CLUSTERNODECAY, - PhotonTracking trackPhotons=NOTRACK, + ChargedLeptons chLeptons=ChargedLeptons::PROMPT, + ClusterPhotons clusterPhotons=ClusterPhotons::NODECAY, + AddPhotons trackPhotons=AddPhotons::NO, double masstarget=91.2*GeV); /// Backward-compatible constructor with implicit chLeptons mode = PROMPTCHLEPTONS /// @deprecated Remove this and always use the constructor with chLeptons argument. ZFinder(const FinalState& inputfs, const Cut& cuts, PdgId pid, double minmass, double maxmass, double dRmax, ClusterPhotons clusterPhotons, - PhotonTracking trackPhotons=NOTRACK, + AddPhotons trackPhotons=AddPhotons::NO, double masstarget=91.2*GeV) : ZFinder(inputfs, cuts, pid, minmass, maxmass, - dRmax, PROMPTCHLEPTONS, clusterPhotons, trackPhotons, masstarget) + dRmax, ChargedLeptons::PROMPT, clusterPhotons, trackPhotons, masstarget) { } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(ZFinder); //@} /// Access to the found bosons /// /// @note Currently either 0 or 1 boson can be found. const Particles& bosons() const { return particles(); } /// Access to the found boson (assuming it exists). const Particle& boson() const { return bosons().front(); } /// Access to the Z constituent clustered leptons /// /// For example, to make more fine-grained cuts on the clustered leptons. /// The positive charge constituent is first in the list (if not empty), and /// the negative one second. const Particles & constituentLeptons() const; const Particles & constituents() const { return constituentLeptons(); } /// Access to the particles other than the Z leptons and clustered photons /// /// Useful for e.g. input to a jet finder const VetoedFinalState& remainingFinalState() const; protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. int compare(const Projection& p) const; public: /// Clear the projection void clear() { _theParticles.clear(); } private: /// Mass cuts to apply to clustered leptons (cf. InvMassFinalState) double _minmass, _maxmass, _masstarget; /// Switch for tracking of photons (whether to include them in the Z particle) /// This is relevant when the clustered photons need to be excluded from e.g. 
a jet finder - PhotonTracking _trackPhotons; + AddPhotons _trackPhotons; /// Lepton flavour PdgId _pid; }; } #endif diff --git a/src/Projections/DISFinalState.cc b/src/Projections/DISFinalState.cc --- a/src/Projections/DISFinalState.cc +++ b/src/Projections/DISFinalState.cc @@ -1,33 +1,33 @@ // -*- C++ -*- #include "Rivet/Projections/DISFinalState.hh" namespace Rivet { void DISFinalState::project(const Event& e) { const DISKinematics& diskin = apply(e, "Kinematics"); LorentzTransform hcmboost; //< Null boost = LAB frame by default - if (_boosttype == HCM) hcmboost = diskin.boostHCM(); - else if (_boosttype == BREIT) hcmboost = diskin.boostBreit(); + if (_boosttype == BoostFrame::HCM) hcmboost = diskin.boostHCM(); + else if (_boosttype == BoostFrame::BREIT) hcmboost = diskin.boostBreit(); const DISLepton& dislep = diskin.apply(e, "Lepton"); const FinalState& fs = apply(e, "FS"); // Fill the particle list with all particles _other_ than the DIS scattered // lepton, with momenta boosted into the appropriate frame. _theParticles.clear(); _theParticles.reserve(fs.particles().size()-1); const GenParticle* dislepGP = dislep.out().genParticle(); // const GenParticle* dislepIN = dislep.in().genParticle(); for (const Particle& p : fs.particles()) { ///< Ensure that we skip the DIS lepton Particle temp = p; - if (_boosttype != LAB) temp.setMomentum(hcmboost.transform(temp.momentum())); + if (_boosttype != BoostFrame::LAB) temp.setMomentum(hcmboost.transform(temp.momentum())); if (p.genParticle() != dislepGP) _theParticles.push_back(temp); } } } diff --git a/src/Projections/FastJets.cc b/src/Projections/FastJets.cc --- a/src/Projections/FastJets.cc +++ b/src/Projections/FastJets.cc @@ -1,214 +1,214 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" #include "Rivet/Projections/TauFinder.hh" namespace Rivet { void FastJets::_initBase() { setName("FastJets"); addProjection(HeavyHadrons(), "HFHadrons"); - addProjection(TauFinder(TauFinder::HADRONIC), "Taus"); + addProjection(TauFinder(TauFinder::DecayMode::HADRONIC), "Taus"); } - void FastJets::_initJdef(JetAlgName alg, double rparameter, double seed_threshold) { - MSG_DEBUG("JetAlg = " << alg); + void FastJets::_initJdef(Algo alg, double rparameter, double seed_threshold) { + MSG_DEBUG("JetAlg = " << static_cast(alg)); MSG_DEBUG("R parameter = " << rparameter); MSG_DEBUG("Seed threshold = " << seed_threshold); if (alg == KT) { _jdef = fastjet::JetDefinition(fastjet::kt_algorithm, rparameter, fastjet::E_scheme); } else if (alg == CAM) { _jdef = fastjet::JetDefinition(fastjet::cambridge_algorithm, rparameter, fastjet::E_scheme); } else if (alg == ANTIKT) { _jdef = fastjet::JetDefinition(fastjet::antikt_algorithm, rparameter, fastjet::E_scheme); } else if (alg == DURHAM) { _jdef = fastjet::JetDefinition(fastjet::ee_kt_algorithm, fastjet::E_scheme); } else if (alg == GENKTEE) { _jdef = fastjet::JetDefinition(fastjet::ee_genkt_algorithm, rparameter, -1); } else { // Plugins: if (alg == SISCONE) { const double OVERLAP_THRESHOLD = 0.75; _plugin.reset(new fastjet::SISConePlugin(rparameter, OVERLAP_THRESHOLD)); // } else if (alg == PXCONE) { // string msg = "PxCone currently not supported, since FastJet doesn't install it by default. 
"; // msg += "Please notify the Rivet authors if this behaviour should be changed."; // throw Error(msg); // _plugin.reset(new fastjet::PxConePlugin(rparameter)); } else if (alg == ATLASCONE) { const double OVERLAP_THRESHOLD = 0.5; _plugin.reset(new fastjet::ATLASConePlugin(rparameter, seed_threshold, OVERLAP_THRESHOLD)); } else if (alg == CMSCONE) { _plugin.reset(new fastjet::CMSIterativeConePlugin(rparameter, seed_threshold)); } else if (alg == CDFJETCLU) { const double OVERLAP_THRESHOLD = 0.75; _plugin.reset(new fastjet::CDFJetCluPlugin(rparameter, OVERLAP_THRESHOLD, seed_threshold)); } else if (alg == CDFMIDPOINT) { const double OVERLAP_THRESHOLD = 0.5; _plugin.reset(new fastjet::CDFMidPointPlugin(rparameter, OVERLAP_THRESHOLD, seed_threshold)); } else if (alg == D0ILCONE) { const double min_jet_Et = 6.0; _plugin.reset(new fastjet::D0RunIIConePlugin(rparameter, min_jet_Et)); } else if (alg == JADE) { _plugin.reset(new fastjet::JadePlugin()); } else if (alg == TRACKJET) { _plugin.reset(new fastjet::TrackJetPlugin(rparameter)); } _jdef = fastjet::JetDefinition(_plugin.get()); } } int FastJets::compare(const Projection& p) const { const FastJets& other = dynamic_cast(p); return \ cmp(_useMuons, other._useMuons) || cmp(_useInvisibles, other._useInvisibles) || mkNamedPCmp(other, "FS") || cmp(_jdef.jet_algorithm(), other._jdef.jet_algorithm()) || cmp(_jdef.recombination_scheme(), other._jdef.recombination_scheme()) || cmp(_jdef.plugin(), other._jdef.plugin()) || cmp(_jdef.R(), other._jdef.R()) || cmp(_adef, other._adef); } // STATIC PseudoJets FastJets::mkClusterInputs(const Particles& fsparticles, const Particles& tagparticles) { PseudoJets pjs; /// @todo Use FastJet3's UserInfo system to store Particle pointers directly? // Store 4 vector data about each particle into FastJet's PseudoJets for (size_t i = 0; i < fsparticles.size(); ++i) { fastjet::PseudoJet pj = fsparticles[i]; pj.set_user_index(i+1); pjs.push_back(pj); } // And the same for ghost tagging particles (with negative user indices) for (size_t i = 0; i < tagparticles.size(); ++i) { fastjet::PseudoJet pj = tagparticles[i]; pj *= 1e-20; ///< Ghostify the momentum pj.set_user_index(-i-1); pjs.push_back(pj); } return pjs; } // STATIC Jet FastJets::mkJet(const PseudoJet& pj, const Particles& fsparticles, const Particles& tagparticles) { const PseudoJets pjconstituents = pj.constituents(); Particles constituents, tags; constituents.reserve(pjconstituents.size()); for (const fastjet::PseudoJet& pjc : pjconstituents) { // Pure ghosts don't have corresponding particles if (pjc.has_area() && pjc.is_pure_ghost()) continue; // Default user index = 0 doesn't give valid particle lookup if (pjc.user_index() == 0) continue; // Split by index sign into constituent & tag lookup if (pjc.user_index() > 0) { // Find constituents if index > 0 const size_t i = pjc.user_index() - 1; if (i >= fsparticles.size()) throw RangeError("FS particle lookup failed in jet construction"); constituents.push_back(fsparticles.at(i)); } else if (!tagparticles.empty()) { // Find tags if index < 0 const size_t i = abs(pjc.user_index()) - 1; if (i >= tagparticles.size()) throw RangeError("Tag particle lookup failed in jet construction"); tags.push_back(tagparticles.at(i)); } } return Jet(pj, constituents, tags); } // STATIC Jets FastJets::mkJets(const PseudoJets& pjs, const Particles& fsparticles, const Particles& tagparticles) { Jets rtn; rtn.reserve(pjs.size()); for (const PseudoJet pj : pjs) { rtn.push_back(FastJets::mkJet(pj, fsparticles, tagparticles)); } return 
rtn; } void FastJets::project(const Event& e) { // Assemble final state particles - const string fskey = (_useInvisibles == JetAlg::NO_INVISIBLES) ? "VFS" : "FS"; + const string fskey = (_useInvisibles == JetAlg::Invisibles::NONE) ? "VFS" : "FS"; Particles fsparticles = applyProjection(e, fskey).particles(); // Remove prompt invisibles if needed (already done by VFS if using NO_INVISIBLES) - if (_useInvisibles == JetAlg::DECAY_INVISIBLES) { + if (_useInvisibles == JetAlg::Invisibles::DECAY) { ifilter_discard(fsparticles, [](const Particle& p) { return !(p.isVisible() || p.fromDecay()); }); } // Remove prompt/all muons if needed - if (_useMuons == JetAlg::DECAY_MUONS) { + if (_useMuons == JetAlg::Muons::DECAY) { ifilter_discard(fsparticles, [](const Particle& p) { return isMuon(p) && !p.fromDecay(); }); - } else if (_useMuons == JetAlg::NO_MUONS) { + } else if (_useMuons == JetAlg::Muons::NONE) { ifilter_discard(fsparticles, isMuon); } // Tagging particles const Particles chadrons = applyProjection(e, "HFHadrons").cHadrons(); const Particles bhadrons = applyProjection(e, "HFHadrons").bHadrons(); const Particles taus = applyProjection(e, "Taus").particles(); calc(fsparticles, chadrons+bhadrons+taus); } void FastJets::calc(const Particles& fsparticles, const Particles& tagparticles) { MSG_DEBUG("Finding jets from " << fsparticles.size() << " input particles + " << tagparticles.size() << " tagging particles"); _fsparticles = fsparticles; _tagparticles = tagparticles; // Make pseudojets, with mapping info to Rivet FS and tag particles PseudoJets pjs = mkClusterInputs(_fsparticles, _tagparticles); // Run either basic or area-calculating cluster sequence as reqd. if (_adef) { _cseq.reset(new fastjet::ClusterSequenceArea(pjs, _jdef, *_adef)); } else { _cseq.reset(new fastjet::ClusterSequence(pjs, _jdef)); } MSG_DEBUG("ClusterSequence constructed; Njets_tot = " << _cseq->inclusive_jets().size() << ", Njets(pT > 10 GeV) = " << _cseq->inclusive_jets(10*GeV).size()); } void FastJets::reset() { _yscales.clear(); _fsparticles.clear(); _tagparticles.clear(); /// @todo _cseq = fastjet::ClusterSequence(); } Jets FastJets::_jets() const { /// @todo Cache? return mkJets(pseudojets(), _fsparticles, _tagparticles); } Jet FastJets::trimJet(const Jet& input, const fastjet::Filter& trimmer) const { if (input.pseudojet().associated_cluster_sequence() != clusterSeq().get()) throw Error("To trim a Rivet::Jet, its associated PseudoJet must have come from this FastJets' ClusterSequence"); PseudoJet pj = trimmer(input); return mkJet(pj, _fsparticles, _tagparticles); } PseudoJets FastJets::pseudoJets(double ptmin) const { return clusterSeq() ? 
clusterSeq()->inclusive_jets(ptmin) : PseudoJets(); } } diff --git a/src/Projections/JetAlg.cc b/src/Projections/JetAlg.cc --- a/src/Projections/JetAlg.cc +++ b/src/Projections/JetAlg.cc @@ -1,18 +1,18 @@ // -*- C++ -*- #include "Rivet/Projections/JetAlg.hh" namespace Rivet { - JetAlg::JetAlg(const FinalState& fs, MuonsStrategy usemuons, InvisiblesStrategy useinvis) + JetAlg::JetAlg(const FinalState& fs, Muons usemuons, Invisibles useinvis) : _useMuons(usemuons), _useInvisibles(useinvis) { setName("JetAlg"); addProjection(fs, "FS"); VisibleFinalState vfs(fs); // MSG_DEBUG("Making visible final state from provided FS"); addProjection(vfs, "VFS"); } } diff --git a/src/Projections/TauFinder.cc b/src/Projections/TauFinder.cc --- a/src/Projections/TauFinder.cc +++ b/src/Projections/TauFinder.cc @@ -1,28 +1,28 @@ // -*- C++ -*- #include "Rivet/Projections/TauFinder.hh" #include "Rivet/Projections/UnstableFinalState.hh" namespace Rivet { void TauFinder::project(const Event& e) { _theParticles.clear(); const UnstableFinalState& ufs = applyProjection(e, "UFS"); for (const Particle& p : ufs.particles()) { if (p.abspid() != PID::TAU) continue; - if (_dectype == ANY || (_dectype == LEPTONIC && isLeptonic(p)) || (_dectype == HADRONIC && isHadronic(p)) ) + if (_decmode == DecayMode::ANY || (_decmode == DecayMode::LEPTONIC && isLeptonic(p)) || (_decmode == DecayMode::HADRONIC && isHadronic(p)) ) _theParticles.push_back(p); } } int TauFinder::compare(const Projection& p) const { const PCmp fscmp = mkNamedPCmp(p, "UFS"); if (fscmp != EQUIVALENT) return fscmp; const TauFinder& other = dynamic_cast(p); - return cmp(_dectype, other._dectype); + return cmp(_decmode, other._decmode); } } diff --git a/src/Projections/WFinder.cc b/src/Projections/WFinder.cc --- a/src/Projections/WFinder.cc +++ b/src/Projections/WFinder.cc @@ -1,163 +1,163 @@ // -*- C++ -*- #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { WFinder::WFinder(const FinalState& inputfs, const Cut& leptoncuts, PdgId pid, double minmass, double maxmass, double missingET, double dRmax, ChargedLeptons chLeptons, ClusterPhotons clusterPhotons, - PhotonTracking trackPhotons, + AddPhotons trackPhotons, MassWindow masstype, double masstarget) { setName("WFinder"); _etMissMin = missingET; _minmass = minmass; _maxmass = maxmass; _masstarget = masstarget; _pid = abs(pid); _trackPhotons = trackPhotons; - _useTransverseMass = (masstype == TRANSMASS); + _useTransverseMass = (masstype == MassWindow::MT); // Check that the arguments are legal if (_pid != PID::ELECTRON && _pid != PID::MUON) throw Error("Invalid charged lepton PID given to WFinder"); // Identify bare leptons for dressing // Bit of a code nightmare -- FS projection copy constructors don't work? /// @todo Fix FS copy constructors!! 
- if (chLeptons == PROMPTCHLEPTONS) { + if (chLeptons == ChargedLeptons::PROMPT) { PromptFinalState inputfs_prompt(inputfs); IdentifiedFinalState bareleptons(inputfs_prompt); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } else { IdentifiedFinalState bareleptons(inputfs); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } // Dress the bare leptons - const bool doClustering = (clusterPhotons != NOCLUSTER); - const bool useDecayPhotons = (clusterPhotons == CLUSTERALL); + const bool doClustering = (clusterPhotons != ClusterPhotons::NONE); + const bool useDecayPhotons = (clusterPhotons == ClusterPhotons::ALL); DressedLeptons leptons(inputfs, get("BareLeptons"), (doClustering ? dRmax : -1.), leptoncuts, useDecayPhotons); addProjection(leptons, "DressedLeptons"); // Add MissingMomentum proj to calc MET MissingMomentum vismom(inputfs); addProjection(vismom, "MissingET"); // Identify the non-Z part of the event VetoedFinalState remainingFS; remainingFS.addVetoOnThisFinalState(*this); addProjection(remainingFS, "RFS"); } ///////////////////////////////////////////////////// const VetoedFinalState& WFinder::remainingFinalState() const { return getProjection("RFS"); } const MissingMomentum& WFinder::missingMom() const { return getProjection("MissingET"); } int WFinder::compare(const Projection& p) const { PCmp dlcmp = mkNamedPCmp(p, "DressedLeptons"); if (dlcmp != EQUIVALENT) return dlcmp; const WFinder& other = dynamic_cast(p); return (cmp(_minmass, other._minmass) || cmp(_maxmass, other._maxmass) || cmp(_useTransverseMass, other._useTransverseMass) || cmp(_etMissMin, other._etMissMin) || cmp(_pid, other._pid) || cmp(_trackPhotons, other._trackPhotons)); } void WFinder::project(const Event& e) { clear(); _leptons.clear(); _neutrinos.clear(); // Check missing ET const MissingMomentum& missmom = applyProjection(e, "MissingET"); const double met = missmom.vectorEt().mod(); MSG_TRACE("MET = " << met/GeV << " GeV vs. required > " << _etMissMin/GeV << " GeV"); if (met < _etMissMin) { MSG_DEBUG("Not enough missing ET: " << met/GeV << " GeV vs. 
required > " << _etMissMin/GeV << " GeV"); return; } // Get lepton const DressedLeptons& leptons = applyProjection(e, "DressedLeptons"); if ( leptons.dressedLeptons().empty() ) { MSG_DEBUG("No dressed leptons"); return; } MSG_DEBUG("Found at least one dressed lepton: " << leptons.dressedLeptons().front().momentum() ); // Get missing momentum 4-vector, assuming a massless invisible particle const FourMomentum pmiss = missmom.missingMomentum(0*GeV); MSG_DEBUG("Found missing 4-momentum: " << pmiss); // Compute an invariant mass final state for the W decay leptons (using pseudo-neutrinos from ETmiss) PdgId _nu_pid = _pid + 1; assert(_nu_pid == PID::NU_E || _nu_pid == PID::NU_MU); vector > l_nu_ids; l_nu_ids += make_pair(_pid, -_nu_pid); l_nu_ids += make_pair(-_pid, _nu_pid); InvMassFinalState imfs(l_nu_ids, _minmass, _maxmass, _masstarget); imfs.useTransverseMass(_useTransverseMass); Particles tmp = leptons.particles(); tmp += { Particle( _nu_pid, pmiss), Particle(-_nu_pid, pmiss) }; // fake (anti)neutrinos from ETmiss vector imfs.calc(tmp); if (imfs.particlePairs().size() < 1) return; // Assemble a pseudo-W particle const ParticlePair Wconstituents = imfs.particlePairs().front(); const Particle& p1(Wconstituents.first), p2(Wconstituents.second); const FourMomentum pW = p1.momentum() + p2.momentum(); const int wcharge3 = p1.charge3() + p2.charge3(); assert(abs(wcharge3) == 3); const int wcharge = wcharge3/3; const PdgId wpid = (wcharge == 1) ? PID::WPLUSBOSON : PID::WMINUSBOSON; Particle w(wpid, pW); MSG_DEBUG(w << " reconstructed from: " << p1 << " + " << p2); // Add (dressed) lepton constituents to the W (skipping photons if requested) /// @todo Do we need to add all used invisibles to _theParticles ? const Particle l = p1.isChargedLepton() ? p1 : p2; - _leptons += (_trackPhotons == TRACK) ? l : l.constituents().front(); + _leptons += (_trackPhotons == AddPhotons::YES) ? l : l.constituents().front(); w.addConstituent(_leptons.back()); const Particle nu = p1.isNeutrino() ? p1 : p2; _neutrinos += nu; w.addConstituent(nu); // Register the completed W _theParticles.push_back(w); } } diff --git a/src/Projections/ZFinder.cc b/src/Projections/ZFinder.cc --- a/src/Projections/ZFinder.cc +++ b/src/Projections/ZFinder.cc @@ -1,118 +1,118 @@ // -*- C++ -*- #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { ZFinder::ZFinder(const FinalState& inputfs, const Cut & fsCut, PdgId pid, double minmass, double maxmass, double dRmax, ChargedLeptons chLeptons, ClusterPhotons clusterPhotons, - PhotonTracking trackPhotons, + AddPhotons trackPhotons, double masstarget) { setName("ZFinder"); _minmass = minmass; _maxmass = maxmass; _masstarget = masstarget; _pid = abs(pid); _trackPhotons = trackPhotons; // Identify bare leptons for dressing // Bit of a code nightmare -- FS projection copy constructors don't work? /// @todo Fix FS copy constructors!! 
- if (chLeptons == PROMPTCHLEPTONS) { + if (chLeptons == ChargedLeptons::PROMPT) { PromptFinalState inputfs_prompt(inputfs); IdentifiedFinalState bareleptons = IdentifiedFinalState(inputfs_prompt); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } else { IdentifiedFinalState bareleptons = IdentifiedFinalState(inputfs); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } // Dress the bare leptons - const bool doClustering = (clusterPhotons != NOCLUSTER); - const bool useDecayPhotons = (clusterPhotons == CLUSTERALL); + const bool doClustering = (clusterPhotons != ClusterPhotons::NONE); + const bool useDecayPhotons = (clusterPhotons == ClusterPhotons::ALL); DressedLeptons leptons(inputfs, get("BareLeptons"), (doClustering ? dRmax : -1.0), fsCut, useDecayPhotons); addProjection(leptons, "DressedLeptons"); // Identify the non-Z part of the event VetoedFinalState remainingFS; remainingFS.addVetoOnThisFinalState(*this); addProjection(remainingFS, "RFS"); } ///////////////////////////////////////////////////// const Particles & ZFinder::constituentLeptons() const { static const Particles none; if (empty()) return none; return boson().constituents(); } const VetoedFinalState& ZFinder::remainingFinalState() const { return getProjection("RFS"); } int ZFinder::compare(const Projection& p) const { PCmp LCcmp = mkNamedPCmp(p, "DressedLeptons"); if (LCcmp != EQUIVALENT) return LCcmp; const ZFinder& other = dynamic_cast(p); return (cmp(_minmass, other._minmass) || cmp(_maxmass, other._maxmass) || cmp(_pid, other._pid) || cmp(_trackPhotons, other._trackPhotons)); } void ZFinder::project(const Event& e) { clear(); // Get leptons and find an acceptable invariant mass OSSF pair const DressedLeptons& leptons = applyProjection(e, "DressedLeptons"); InvMassFinalState imfs({_pid, -_pid}, _minmass, _maxmass, _masstarget); imfs.calc(leptons.particles()); if (imfs.particlePairs().empty()) { MSG_TRACE("No acceptable inv-mass lepton/antilepton pairs found"); return; } // Assemble a pseudo-Z particle const ParticlePair& Zconstituents = imfs.particlePairs().front(); const Particle& p1(Zconstituents.first), p2(Zconstituents.second); const FourMomentum pZ = p1.momentum() + p2.momentum(); assert(p1.charge3() + p2.charge3() == 0); Particle z(PID::Z0BOSON, pZ); MSG_DEBUG(z << " reconstructed from: " << p1 << " + " << p2); // Add (dressed) lepton constituents to the Z (skipping photons if requested) // Keep the DressedLeptons found by the ZFinder const Particle& l1 = p1.charge() > 0 ? p1 : p2; const Particle& l2 = p2.charge() < 0 ? p2 : p1; MSG_TRACE("l1 = " << l1.constituents()); MSG_TRACE("l2 = " << l2.constituents()); - z.addConstituent(_trackPhotons == TRACK ? l1 : l1.constituents().front()); - z.addConstituent(_trackPhotons == TRACK ? l2 : l2.constituents().front()); + z.addConstituent(_trackPhotons == AddPhotons::YES ? l1 : l1.constituents().front()); + z.addConstituent(_trackPhotons == AddPhotons::YES ? l2 : l2.constituents().front()); MSG_DEBUG("Number of stored raw Z constituents = " << z.rawConstituents().size() << " " << z.rawConstituents()); // Register the completed Z _theParticles.push_back(z); } }
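Finally, a matching sketch for the ZFinder and DISFinalState renames (illustrative; lepton flavour, cuts, mass window and the choice of Breit frame are assumptions):

    // Inside a hypothetical Analysis::init() -- cuts and window are placeholders
    FinalState fs;
    ZFinder zfinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::ELECTRON,
                    66*GeV, 116*GeV, 0.1,
                    ZFinder::ChargedLeptons::PROMPT,
                    ZFinder::ClusterPhotons::NODECAY,
                    ZFinder::AddPhotons::NO);
    declare(zfinder, "ZFinder");
    declare(DISFinalState(DISFinalState::BoostFrame::BREIT), "DISFS");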