diff --git a/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc b/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc --- a/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I928289_W.cc @@ -1,147 +1,147 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { class ATLAS_2011_I928289_W : public Analysis { public: /// Constructor ATLAS_2011_I928289_W() : Analysis("ATLAS_2011_I928289_W") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { ///Initialise and register projections here FinalState fs; Cut cut = (Cuts::pT >= 20*GeV); - WFinder wfinder_el_bare( fs, cut, PID::ELECTRON, 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); - WFinder wfinder_el_dressed(fs, cut, PID::ELECTRON, 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); - WFinder wfinder_mu_bare (fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); - WFinder wfinder_mu_dressed(fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_el_bare( fs, cut, PID::ELECTRON, 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_el_dressed(fs, cut, PID::ELECTRON, 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_mu_bare (fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.0, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_mu_dressed(fs, cut, PID::MUON , 40.0*GeV, 7000.0*GeV, 25.0*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder_el_bare , "WFinder_el_bare"); declare(wfinder_el_dressed, "WFinder_el_dressed"); declare(wfinder_mu_bare , "WFinder_mu_bare"); declare(wfinder_mu_dressed, "WFinder_mu_dressed"); /// Book histograms here book(_h_Wminus_lepton_eta_el_bare ,3, 1, 1); book(_h_Wminus_lepton_eta_el_dressed ,3, 1, 2); book(_h_Wminus_lepton_eta_mu_bare ,3, 1, 3); book(_h_Wminus_lepton_eta_mu_dressed ,3, 1, 4); book(_h_Wplus_lepton_eta_el_bare ,5, 1, 1); book(_h_Wplus_lepton_eta_el_dressed ,5, 1, 2); book(_h_Wplus_lepton_eta_mu_bare ,5, 1, 3); book(_h_Wplus_lepton_eta_mu_dressed ,5, 1, 4); book(_h_W_asym_eta_el_bare ,7, 1, 1); book(_h_W_asym_eta_el_dressed ,7, 1, 2); book(_h_W_asym_eta_mu_bare ,7, 1, 3); book(_h_W_asym_eta_mu_dressed ,7, 1, 4); } /// Perform the per-event analysis void analyze(const Event& event) { const WFinder& wfinder_el_bare = apply(event, "WFinder_el_bare"); const WFinder& wfinder_el_dressed = apply(event, "WFinder_el_dressed"); const WFinder& wfinder_mu_bare = apply(event, "WFinder_mu_bare"); const WFinder& wfinder_mu_dressed = apply(event, "WFinder_mu_dressed"); fillPlots1D(wfinder_el_bare , _h_Wplus_lepton_eta_el_bare , _h_Wminus_lepton_eta_el_bare); fillPlots1D(wfinder_el_dressed, _h_Wplus_lepton_eta_el_dressed, _h_Wminus_lepton_eta_el_dressed); fillPlots1D(wfinder_mu_bare , 
_h_Wplus_lepton_eta_mu_bare , _h_Wminus_lepton_eta_mu_bare); fillPlots1D(wfinder_mu_dressed, _h_Wplus_lepton_eta_mu_dressed, _h_Wminus_lepton_eta_mu_dressed); } void fillPlots1D(const WFinder& wfinder, Histo1DPtr hist_plus, Histo1DPtr hist_minus) { if (wfinder.bosons().size() != 1) return; const Particle l = wfinder.constituentLeptons()[0]; const FourMomentum miss = wfinder.constituentNeutrinos()[0]; if (l.pT() > 20*GeV && miss.Et() > 25*GeV && wfinder.mT() > 40*GeV) (l.charge3() > 0 ? hist_plus : hist_minus)->fill(l.abseta()); } /// Normalise histograms etc., after the run void finalize() { // Construct asymmetry: (dsig+/deta - dsig-/deta) / (dsig+/deta + dsig-/deta) divide(*_h_Wplus_lepton_eta_el_bare - *_h_Wminus_lepton_eta_el_bare, *_h_Wplus_lepton_eta_el_bare + *_h_Wminus_lepton_eta_el_bare, _h_W_asym_eta_el_bare); divide(*_h_Wplus_lepton_eta_el_dressed - *_h_Wminus_lepton_eta_el_dressed, *_h_Wplus_lepton_eta_el_dressed + *_h_Wminus_lepton_eta_el_dressed, _h_W_asym_eta_el_dressed); divide(*_h_Wplus_lepton_eta_mu_bare - *_h_Wminus_lepton_eta_mu_bare, *_h_Wplus_lepton_eta_mu_bare + *_h_Wminus_lepton_eta_mu_bare, _h_W_asym_eta_mu_bare); divide(*_h_Wplus_lepton_eta_mu_dressed - *_h_Wminus_lepton_eta_mu_dressed, *_h_Wplus_lepton_eta_mu_dressed + *_h_Wminus_lepton_eta_mu_dressed, _h_W_asym_eta_mu_dressed); // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_DEBUG( "Cross-section/pb : " << xs_pb ); MSG_DEBUG( "Sum of weights : " << sumw ); MSG_DEBUG( "nEvents : " << numEvents() ); /// Normalise, scale and otherwise manipulate histograms here const double sf = 0.5 * xs_pb / sumw; // 0.5 accounts for rapidity bin width scale(_h_Wminus_lepton_eta_el_bare , sf); scale(_h_Wminus_lepton_eta_el_dressed, sf); scale(_h_Wminus_lepton_eta_mu_bare , sf); scale(_h_Wminus_lepton_eta_mu_dressed, sf); scale(_h_Wplus_lepton_eta_el_bare , sf); scale(_h_Wplus_lepton_eta_el_dressed , sf); scale(_h_Wplus_lepton_eta_mu_bare , sf); scale(_h_Wplus_lepton_eta_mu_dressed , sf); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_Wminus_lepton_eta_el_bare; Histo1DPtr _h_Wminus_lepton_eta_el_dressed; Histo1DPtr _h_Wminus_lepton_eta_mu_bare; Histo1DPtr _h_Wminus_lepton_eta_mu_dressed; Histo1DPtr _h_Wplus_lepton_eta_el_bare; Histo1DPtr _h_Wplus_lepton_eta_el_dressed; Histo1DPtr _h_Wplus_lepton_eta_mu_bare; Histo1DPtr _h_Wplus_lepton_eta_mu_dressed; Scatter2DPtr _h_W_asym_eta_el_bare; Scatter2DPtr _h_W_asym_eta_el_dressed; Scatter2DPtr _h_W_asym_eta_mu_bare; Scatter2DPtr _h_W_asym_eta_mu_dressed; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I928289_W); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I954993.cc b/analyses/pluginATLAS/ATLAS_2011_I954993.cc --- a/analyses/pluginATLAS/ATLAS_2011_I954993.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I954993.cc @@ -1,117 +1,117 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief WZ fiducial cross-section measurement class ATLAS_2011_I954993 : public Analysis { public: /// Default constructor ATLAS_2011_I954993() : Analysis("ATLAS_2011_I954993") { } /// @name Analysis methods //@{ /// Projection and histogram setup void init() { FinalState fs; Cut cuts = Cuts::abseta < 2.5 && Cuts::pT > 15*GeV; ZFinder zfinder_e(fs, cuts, PID::ELECTRON, 81.1876*GeV, 101.1876*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_e, "ZFinder_e"); ZFinder 
zfinder_mu(fs, cuts, PID::MUON, 81.1876*GeV, 101.1876*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder_mu, "ZFinder_mu"); VetoedFinalState weinput; weinput.addVetoOnThisFinalState(zfinder_e); - WFinder wfinder_e(weinput, cuts, PID::ELECTRON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::ClusterPhotons::NODECAY); + WFinder wfinder_e(weinput, cuts, PID::ELECTRON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY); declare(wfinder_e, "WFinder_e"); VetoedFinalState wminput; wminput.addVetoOnThisFinalState(zfinder_mu); - WFinder wfinder_mu(wminput,cuts, PID::MUON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::ClusterPhotons::NODECAY); + WFinder wfinder_mu(wminput,cuts, PID::MUON, 0*GeV, 1000*GeV, 25*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY); declare(wfinder_mu, "WFinder_mu"); // Histograms book(_h_fiducial ,1,1,1); } /// Do the analysis void analyze(const Event& e) { const ZFinder& zfinder_e = apply(e, "ZFinder_e"); const ZFinder& zfinder_mu = apply(e, "ZFinder_mu"); const WFinder& wfinder_e = apply(e, "WFinder_e"); const WFinder& wfinder_mu = apply(e, "WFinder_mu"); // Looking for a Z, exit if not found if (zfinder_e.bosons().size() != 1 && zfinder_mu.bosons().size() != 1) { MSG_DEBUG("No Z boson found, vetoing event"); vetoEvent; } // Looking for a W, exit if not found if (wfinder_e.bosons().size()!= 1 && wfinder_mu.bosons().size() != 1) { MSG_DEBUG("No W boson found, vetoing event"); vetoEvent; } // If we find a W, make fiducial acceptance cuts and exit if not found if (wfinder_e.bosons().size() == 1) { const FourMomentum We = wfinder_e.constituentLeptons()[0]; const FourMomentum Wenu = wfinder_e.constituentNeutrinos()[0]; const double mT = wfinder_e.mT(); if (Wenu.pT() < 25*GeV || We.pT() < 20*GeV || mT < 20*GeV) { MSG_DEBUG("Wnu pT = " << Wenu.pT()/GeV << " GeV, Wl pT = " << We.pT()/GeV << " GeV, mT = " << mT/GeV << " GeV"); vetoEvent; } } else if (wfinder_mu.bosons().size() == 1) { const FourMomentum Wmu = wfinder_mu.constituentLeptons()[0]; const FourMomentum Wmunu = wfinder_mu.constituentNeutrinos()[0]; const double mT = wfinder_mu.mT(); if (Wmunu.pT() < 25*GeV || Wmu.pT() < 20*GeV || mT < 20*GeV) { MSG_DEBUG("Wnu pT = " << Wmunu.pT()/GeV << ", Wl pT = " << Wmu.pT()/GeV << " GeV, mT = " << mT/GeV << " GeV"); vetoEvent; } } else { MSG_DEBUG("No W boson found: vetoing event"); vetoEvent; } // Update the fiducial cross-section histogram _h_fiducial->fill(7000); } /// Finalize void finalize() { scale(_h_fiducial, crossSection()/femtobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_fiducial; //@} }; //// The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I954993); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1216670.cc b/analyses/pluginATLAS/ATLAS_2013_I1216670.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1216670.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1216670.cc @@ -1,120 +1,120 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief MPI sensitive di-jet balance variables for W->ejj or W->mujj events. 
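/// The balance variables filled in analyze() below are the dijet pT balance, i.e. the pT of the
/// vector sum of the two selected jets |pT(j1)+pT(j2)|, and its normalised version, which divides
/// that quantity by the scalar sum pT(j1)+pT(j2).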
class ATLAS_2013_I1216670 : public Analysis { public: /// @name Constructor ATLAS_2013_I1216670() : Analysis("ATLAS_2013_I1216670") { } /// @name Analysis methods //@{ /// Book histograms, set up projections for W and jets void init() { book(_h_delta_jets_n ,1, 1, 1); book(_h_delta_jets ,2, 1, 1); FinalState fs; Cut cuts = Cuts::abseta < 2.5 && Cuts::pT >= 20*GeV; - WFinder w_e_finder(fs, cuts, PID::ELECTRON, 40*GeV, DBL_MAX, 0.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, + WFinder w_e_finder(fs, cuts, PID::ELECTRON, 40*GeV, DBL_MAX, 0.0*GeV, 0.0, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(w_e_finder, "W_E_FINDER"); - WFinder w_mu_finder(fs, cuts, PID::MUON, 40*GeV, DBL_MAX, 0.0*GeV, 0.0, WFinder::ClusterPhotons::NODECAY, + WFinder w_mu_finder(fs, cuts, PID::MUON, 40*GeV, DBL_MAX, 0.0*GeV, 0.0, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(w_mu_finder, "W_MU_FINDER"); VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("W_E_FINDER")); jet_fs.addVetoOnThisFinalState(getProjection("W_MU_FINDER")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); declare(jets, "JETS"); } /// Do the analysis void analyze(const Event &e) { const WFinder& w_e_finder = apply(e, "W_E_FINDER" ); const WFinder& w_mu_finder = apply(e, "W_MU_FINDER"); Particle lepton, neutrino; Jets all_jets, jets; // Find exactly 1 W->e or W->mu boson if(w_e_finder.bosons().size() == 1 && w_mu_finder.bosons().size() == 0) { MSG_DEBUG(" Event identified as W->e nu."); if( !(w_e_finder.mT() > 40*GeV && w_e_finder.constituentNeutrino().Et() > 25.0*GeV) ) vetoEvent; lepton = w_e_finder.constituentLepton(); } else if(w_mu_finder.bosons().size() == 1 && w_e_finder.bosons().size() == 0) { MSG_DEBUG(" Event identified as W->mu nu."); if( !(w_mu_finder.mT() > 40*GeV && w_mu_finder.constituentNeutrino().Et() > 25.0*GeV) ) vetoEvent; lepton = w_mu_finder.constituentLepton(); } else { MSG_DEBUG(" No W found passing cuts."); vetoEvent; } all_jets = apply(e, "JETS").jetsByPt(Cuts::pt>20.0*GeV && Cuts::absrap<2.8); // Remove jets DeltaR < 0.5 from W lepton for(Jets::iterator it = all_jets.begin(); it != all_jets.end(); ++it) { double distance = deltaR( lepton, (*it) ); if(distance < 0.5) { MSG_DEBUG(" Veto jet DeltaR " << distance << " from W lepton"); } else { jets.push_back(*it); } } // Exactly two jets required if( jets.size() != 2 ) vetoEvent; // Calculate analysis quantities from the two jets double delta_jets = (jets.front().momentum() + jets.back().momentum()).pT(); double total_pt = jets.front().momentum().pT() + jets.back().momentum().pT(); double delta_jets_n = delta_jets / total_pt; _h_delta_jets->fill( delta_jets); // Jet pT balance _h_delta_jets_n->fill( delta_jets_n); // Jet pT balance, normalised by scalar dijet pT } /// Finalize void finalize() { // Data is normalised to 0.03 and 3 normalize(_h_delta_jets_n, 0.03); normalize(_h_delta_jets , 3.0 ); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_delta_jets_n; Histo1DPtr _h_delta_jets; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1216670); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc b/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1217863_W.cc @@ -1,198 +1,198 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include 
"Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { class ATLAS_2013_I1217863_W : public Analysis { public: /// Constructor ATLAS_2013_I1217863_W(string name="ATLAS_2013_I1217863_W") : Analysis(name) { // the electron mode is used by default _mode = 1; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; declare(fs, "FS"); Cut cuts = Cuts::abseta < 2.47 && Cuts::pT > 25*GeV; // W finder for electrons and muons WFinder wf(fs, cuts, _mode==3? PID::MUON : PID::ELECTRON, 0.0*GeV, 1000.0*GeV, 35.0*GeV, 0.1, - WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wf, "WF"); // leading photon LeadingParticlesFinalState photonfs(FinalState(Cuts::abseta < 2.37 && Cuts::pT > 15*GeV)); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("WF")); jet_fs.addVetoOnThisFinalState(getProjection("LeadingPhoton")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); declare(jets, "Jets"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "isolatedFS"); // Book histograms book(_hist_EgammaT_incl , 7, 1, _mode); // dSigma / dE^gamma_T for Njet >= 0 book(_hist_EgammaT_excl , 8, 1, _mode); // dSigma / dE^gamma_T for Njet = 0 book(_hist_Njet_EgammaT15 ,15, 1, _mode); // dSigma / dNjet for E^gamma_T > 15 book(_hist_Njet_EgammaT60 ,16, 1, _mode); // dSigma / dNjet for E^gamma_T > 60 book(_hist_mWgammaT ,19, 1, _mode); // dSigma / dm^{Wgamma}_T } /// Perform the per-event analysis void analyze(const Event& event) { // retrieve leading photon Particles photons = apply(event, "LeadingPhoton").particles(); if (photons.size() != 1) vetoEvent; const Particle& leadingPhoton = photons[0]; if (leadingPhoton.Et() < 15.0*GeV) vetoEvent; if (leadingPhoton.abseta() > 2.37) vetoEvent; // check photon isolation double coneEnergy(0.0); Particles fs = apply(event, "isolatedFS").particles(); for (const Particle& p : fs) { if ( deltaR(leadingPhoton, p) < 0.4 ) coneEnergy += p.E(); } if ( coneEnergy / leadingPhoton.E() >= 0.5 ) vetoEvent; // retrieve W boson candidate const WFinder& wf = apply(event, "WF"); if ( wf.bosons().size() != 1 ) vetoEvent; // only one W boson candidate //const Particle& Wboson = wf.boson(); // retrieve constituent neutrino const Particle& neutrino = wf.constituentNeutrino(); if ( !(neutrino.pT() > 35.0*GeV) ) vetoEvent; // retrieve constituent lepton const Particle& lepton = wf.constituentLepton(); if ( !(lepton.pT() > 25.0*GeV && lepton.abseta() < 2.47) ) vetoEvent; // check photon-lepton overlap if ( !(deltaR(leadingPhoton, lepton) > 0.7) ) vetoEvent; // count jets const FastJets& jetfs = apply(event, "Jets"); Jets jets = jetfs.jets(cmpMomByEt); int goodJets = 0; for (const Jet& j : jets) { if ( !(j.Et() > 30.0*GeV) ) break; if ( (j.abseta() < 4.4) && \ (deltaR(leadingPhoton, j) > 0.3) && \ (deltaR(lepton, j) > 0.3) ) ++goodJets; } double Njets = double(goodJets) + 0.5; double photonEt = leadingPhoton.Et()*GeV; const FourMomentum& lep_gamma = lepton.momentum() + leadingPhoton.momentum(); double term1 = sqrt(lep_gamma.mass2() + lep_gamma.pT2()) + 
neutrino.Et(); double term2 = (lep_gamma + neutrino.momentum()).pT2(); double mWgammaT = sqrt(term1 * term1 - term2) * GeV; _hist_EgammaT_incl->fill(photonEt); _hist_Njet_EgammaT15->fill(Njets); if ( !goodJets ) _hist_EgammaT_excl->fill(photonEt); if (photonEt > 40.0*GeV) { _hist_mWgammaT->fill(mWgammaT); if (photonEt > 60.0*GeV) _hist_Njet_EgammaT60->fill(Njets); } } /// Normalise histograms etc., after the run void finalize() { const double xs_fb = crossSection()/femtobarn; const double sumw = sumOfWeights(); const double sf = xs_fb / sumw; scale(_hist_EgammaT_excl, sf); scale(_hist_EgammaT_incl, sf); normalize(_hist_Njet_EgammaT15); normalize(_hist_Njet_EgammaT60); normalize(_hist_mWgammaT); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _hist_EgammaT_incl; Histo1DPtr _hist_EgammaT_excl; Histo1DPtr _hist_Njet_EgammaT15; Histo1DPtr _hist_Njet_EgammaT60; Histo1DPtr _hist_mWgammaT; //@} }; class ATLAS_2013_I1217863_W_EL : public ATLAS_2013_I1217863_W { public: ATLAS_2013_I1217863_W_EL() : ATLAS_2013_I1217863_W("ATLAS_2013_I1217863_W_EL") { _mode = 2; } }; class ATLAS_2013_I1217863_W_MU : public ATLAS_2013_I1217863_W { public: ATLAS_2013_I1217863_W_MU() : ATLAS_2013_I1217863_W("ATLAS_2013_I1217863_W_MU") { _mode = 3; } }; DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_W); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_W_EL); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_W_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc b/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1217863_Z.cc @@ -1,193 +1,193 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { class ATLAS_2013_I1217863_Z : public Analysis { public: /// Constructor ATLAS_2013_I1217863_Z(string name="ATLAS_2013_I1217863_Z") : Analysis(name) { // the electron mode is used by default _mode = 1; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; declare(fs, "FS"); Cut cuts = Cuts::abseta < 2.47 && Cuts::pT > 25*GeV; // Z finder ZFinder zf(fs, cuts, _mode==3? 
PID::MUON : PID::ELECTRON, 40.0*GeV, 1000.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zf, "ZF"); // leading photon LeadingParticlesFinalState photonfs(FinalState(Cuts::abseta < 2.37 && Cuts::pT > 15*GeV)); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("ZF")); jet_fs.addVetoOnThisFinalState(getProjection("LeadingPhoton")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); declare(jets, "Jets"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "isolatedFS"); // Book histograms book(_hist_EgammaT_incl ,11, 1, _mode); // dSigma / dE^gamma_T for Njet >= 0 book(_hist_EgammaT_excl ,12, 1, _mode); // dSigma / dE^gamma_T for Njet = 0 book(_hist_Njet_EgammaT15 ,17, 1, _mode); // dSigma / dNjet for E^gamma_T >= 15 book(_hist_Njet_EgammaT60 ,18, 1, _mode); // dSigma / dNjet for E^gamma_T >= 60 book(_hist_mZgamma ,20, 1, _mode); // dSigma / dm^{Zgamma} } /// Perform the per-event analysis void analyze(const Event& event) { // retrieve leading photon Particles photons = apply(event, "LeadingPhoton").particles(); if (photons.size() != 1) vetoEvent; const Particle& leadingPhoton = photons[0]; if (leadingPhoton.Et() < 15.0*GeV) vetoEvent; if (leadingPhoton.abseta() > 2.37) vetoEvent; // check photon isolation double coneEnergy(0.0); Particles fs = apply(event, "isolatedFS").particles(); for (const Particle& p : fs) { if ( deltaR(leadingPhoton, p) < 0.4 ) coneEnergy += p.E(); } if (coneEnergy / leadingPhoton.E() >= 0.5 ) vetoEvent; // retrieve Z boson candidate const ZFinder& zf = apply(event, "ZF"); if ( zf.bosons().size() != 1 ) vetoEvent; // only one Z boson candidate const Particle& Zboson = zf.boson(); if ( !(Zboson.mass() > 40.0*GeV) ) vetoEvent; // check charge of constituent leptons - const ParticleVector& leptons = zf.constituents(); + const Particles& leptons = zf.constituents(); if (leptons.size() != 2 || leptons[0].charge() * leptons[1].charge() > 0.) 
vetoEvent; // check photon-lepton overlap for (const Particle& p : leptons) { if ( !(p.pT() > 25.0*GeV && p.abseta() < 2.47 && deltaR(leadingPhoton, p) > 0.7) ) vetoEvent; } // count jets const FastJets& jetfs = apply(event, "Jets"); Jets jets = jetfs.jets(cmpMomByEt); int goodJets = 0; for (const Jet& j : jets) { if ( !(j.Et() > 30.0*GeV) ) break; if ( (j.abseta() < 4.4) && \ (deltaR(leadingPhoton, j) > 0.3) && \ (deltaR(leptons[0], j) > 0.3) && \ (deltaR(leptons[1], j) > 0.3) ) ++goodJets; } double Njets = double(goodJets) + 0.5; double photonEt = leadingPhoton.Et()*GeV; double mZgamma = (Zboson.momentum() + leadingPhoton.momentum()).mass() * GeV; _hist_EgammaT_incl->fill(photonEt); _hist_Njet_EgammaT15->fill(Njets); if ( !goodJets ) _hist_EgammaT_excl->fill(photonEt); if (photonEt >= 40.0*GeV) { _hist_mZgamma->fill(mZgamma); if (photonEt >= 60.0*GeV) _hist_Njet_EgammaT60->fill(Njets); } } /// Normalise histograms etc., after the run void finalize() { const double xs_fb = crossSection()/femtobarn; const double sumw = sumOfWeights(); const double sf = xs_fb / sumw; scale(_hist_EgammaT_excl, sf); scale(_hist_EgammaT_incl, sf); normalize(_hist_Njet_EgammaT15); normalize(_hist_Njet_EgammaT60); normalize(_hist_mZgamma); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _hist_EgammaT_incl; Histo1DPtr _hist_EgammaT_excl; Histo1DPtr _hist_Njet_EgammaT15; Histo1DPtr _hist_Njet_EgammaT60; Histo1DPtr _hist_mZgamma; //@} }; class ATLAS_2013_I1217863_Z_EL : public ATLAS_2013_I1217863_Z { public: ATLAS_2013_I1217863_Z_EL() : ATLAS_2013_I1217863_Z("ATLAS_2013_I1217863_Z_EL") { _mode = 2; } }; class ATLAS_2013_I1217863_Z_MU : public ATLAS_2013_I1217863_Z { public: ATLAS_2013_I1217863_Z_MU() : ATLAS_2013_I1217863_Z("ATLAS_2013_I1217863_Z_MU") { _mode = 3; } }; DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_Z); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_Z_EL); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1217863_Z_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1219109.cc b/analyses/pluginATLAS/ATLAS_2013_I1219109.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1219109.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1219109.cc @@ -1,157 +1,157 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" namespace Rivet { /// @brief ATLAS W+b measurement class ATLAS_2013_I1219109: public Analysis { public: ATLAS_2013_I1219109(string name = "ATLAS_2013_I1219109") : Analysis(name) { // the electron mode is used by default _mode = 1; } void init() { FinalState fs; declare(fs, "FinalState"); Cut cuts = Cuts::abseta < 2.5 && Cuts::pT >= 25*GeV; // W finder for electrons and muons WFinder wf(fs, cuts, _mode==3? 
PID::MUON : PID::ELECTRON, 0.0*GeV, DBL_MAX, 0.0, 0.1, - WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wf, "WF"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("WF")); FastJets fj(jet_fs, FastJets::ANTIKT, 0.4); fj.useInvisibles(); declare(fj, "Jets"); declare(HeavyHadrons(Cuts::abseta < 2.5 && Cuts::pT > 5*GeV), "BHadrons"); // book histograms book(_njet ,1, 1, _mode); // dSigma / dNjet book(_jet1_bPt ,2, 1, _mode); // dSigma / dBjetPt for Njet = 1 book(_jet2_bPt ,2, 2, _mode); // dSigma / dBjetPt for Njet = 2 } void analyze(const Event& event) { // retrieve W boson candidate const WFinder& wf = apply(event, "WF"); if( wf.bosons().size() != 1 ) vetoEvent; // only one W boson candidate if( !(wf.mT() > 60.0*GeV) ) vetoEvent; //const Particle& Wboson = wf.boson(); // retrieve constituent neutrino const Particle& neutrino = wf.constituentNeutrino(); if( !(neutrino.pT() > 25.0*GeV) ) vetoEvent; // retrieve constituent lepton const Particle& lepton = wf.constituentLepton(); // count good jets, check if good jet contains B hadron const Particles& bHadrons = apply(event, "BHadrons").bHadrons(); const Jets& jets = apply(event, "Jets").jetsByPt(25*GeV); int goodjets = 0, bjets = 0; double bPt = 0.; for(const Jet& j : jets) { if( (j.abseta() < 2.1) && (deltaR(lepton, j) > 0.5) ) { // this jet passes the selection! ++goodjets; // j.bTagged() uses ghost association which is // more elegant, but not what has been used in // this analysis originally, will match B had- // rons in eta-phi space instead for(const Particle& b : bHadrons) { if( deltaR(j, b) < 0.3 ) { // jet matched to B hadron! 
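// (the input jets are pT-ordered, so the first matched jet sets the leading b-jet pT below)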
if(!bPt) bPt = j.pT() * GeV; // leading b-jet pT ++bjets; // count number of b-jets break; } } } } if( goodjets > 2 ) vetoEvent; // at most two jets if( !bjets ) vetoEvent; // at least one of them b-tagged double njets = double(goodjets); double ncomb = 3.0; _njet->fill(njets); _njet->fill(ncomb); if( goodjets == 1) _jet1_bPt->fill(bPt); else if(goodjets == 2) _jet2_bPt->fill(bPt); } void finalize() { // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_INFO("Cross-Section/pb: " << xs_pb ); MSG_INFO("Sum of weights : " << sumw ); MSG_INFO("nEvents : " << numEvents()); const double sf(xs_pb / sumw); scale(_njet, sf); scale(_jet1_bPt, sf); scale(_jet2_bPt, sf); } protected: size_t _mode; private: Histo1DPtr _njet; Histo1DPtr _jet1_bPt; Histo1DPtr _jet2_bPt; //bool _isMuon; }; class ATLAS_2013_I1219109_EL : public ATLAS_2013_I1219109 { public: ATLAS_2013_I1219109_EL() : ATLAS_2013_I1219109("ATLAS_2013_I1219109_EL") { _mode = 2; } }; class ATLAS_2013_I1219109_MU : public ATLAS_2013_I1219109 { public: ATLAS_2013_I1219109_MU() : ATLAS_2013_I1219109("ATLAS_2013_I1219109_MU") { _mode = 3; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1219109); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1219109_EL); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1219109_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1243871.cc b/analyses/pluginATLAS/ATLAS_2013_I1243871.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1243871.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1243871.cc @@ -1,278 +1,278 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Tools/ParticleIdUtils.hh" #include "Rivet/Particle.hh" namespace Rivet { class ATLAS_2013_I1243871 : public Analysis { public: /// Constructor ATLAS_2013_I1243871() : Analysis("ATLAS_2013_I1243871") { } /// Book histograms and initialise projections before the run void init() { // Set up projections const FinalState fs(-4.5, 4.5); declare(fs, "ALL_FS"); /// Get electrons from truth record IdentifiedFinalState elec_fs(Cuts::abseta < 2.47 && Cuts::pT > 25*GeV); elec_fs.acceptIdPair(PID::ELECTRON); declare(elec_fs, "ELEC_FS"); /// Get muons which pass the initial kinematic cuts: IdentifiedFinalState muon_fs(Cuts::abseta < 2.5 && Cuts::pT > 20*GeV); muon_fs.acceptIdPair(PID::MUON); declare(muon_fs, "MUON_FS"); // Final state used as input for jet-finding. 
// We include everything except the muons and neutrinos VetoedFinalState jet_input(fs); jet_input.vetoNeutrinos(); jet_input.addVetoPairId(PID::MUON); declare(jet_input, "JET_INPUT"); // Get the jets FastJets jets(jet_input, FastJets::ANTIKT, 0.4); declare(jets, "JETS"); // Book histograms for (size_t d = 0; d < 5; ++d) { book(_p_b_rho[d] ,d+1, 1, 1); book(_p_l_rho[d] ,d+1, 2, 1); book(_p_b_Psi[d] ,d+1, 1, 2); book(_p_l_Psi[d] ,d+1, 2, 2); } } /// Perform the per-event analysis void analyze(const Event& event) { /// Get the various sets of final state particles - const ParticleVector& elecFS = apply(event, "ELEC_FS").particlesByPt(); - const ParticleVector& muonFS = apply(event, "MUON_FS").particlesByPt(); + const Particles& elecFS = apply(event, "ELEC_FS").particlesByPt(); + const Particles& muonFS = apply(event, "MUON_FS").particlesByPt(); // Get all jets with pT > 7 GeV (ATLAS standard jet collection) /// @todo Why rewrite the jets collection as a vector of pointers? const Jets& jets = apply(event, "JETS").jetsByPt(7*GeV); vector allJets; for(const Jet& j : jets) allJets.push_back(&j); // Keep any jets that pass the pt cut vector pt_jets; for (const Jet* j : allJets) { /// @todo Use direct kinematics access const double pt = j->momentum().pT(); const double eta = j->momentum().eta(); if (pt > 25*GeV && fabs(eta) < 2.5) pt_jets.push_back(j); } // Remove jets too close to an electron vector good_jets; for (const Jet* j : pt_jets) { bool isElectron = 0; for (const Particle& e : elecFS) { const double elec_jet_dR = deltaR(e.momentum(), j->momentum()); if (elec_jet_dR < 0.2) { isElectron = true; break; } } if (!isElectron) good_jets.push_back(j); } // Classify the event type const size_t nElec = elecFS.size(); const size_t nMuon = muonFS.size(); bool isSemilepton = false, isDilepton = false; if (nElec == 1 && nMuon == 0) { isSemilepton = true; } else if (nElec == 0 && nMuon == 1) { isSemilepton = true; } else if (nElec == 2 && nMuon == 0) { if (charge(elecFS[0]) != charge(elecFS[1])) isDilepton = true; } else if (nElec == 1 && nMuon == 1) { if (charge(elecFS[0]) != charge(muonFS[0])) isDilepton = true; } else if (nElec == 0 && nMuon == 2) { if (charge(muonFS[0]) != charge(muonFS[1])) isDilepton = true; } const bool isGoodEvent = (isSemilepton && good_jets.size() >= 4) || (isDilepton && good_jets.size() >= 2); if (!isGoodEvent) vetoEvent; // Select b-hadrons /// @todo Use built-in identification on Particle, avoid HepMC vector b_hadrons; vector allParticles = particles(event.genEvent()); for (size_t i = 0; i < allParticles.size(); i++) { const GenParticle* p = allParticles.at(i); if ( !(PID::isHadron( p->pdg_id() ) && PID::hasBottom( p->pdg_id() )) ) continue; if (p->momentum().perp() < 5*GeV) continue; b_hadrons.push_back(p); } // Select b-jets as those containing a b-hadron /// @todo Use built-in dR < 0.3 Jet tagging, avoid HepMC vector b_jets; for (const Jet* j : good_jets) { bool isbJet = false; for (const GenParticle* b : b_hadrons) { /// @todo Use direct momentum accessor / delta functions const FourMomentum hadron = b->momentum(); const double hadron_jet_dR = deltaR(j->momentum(), hadron); if (hadron_jet_dR < 0.3) { isbJet = true; break; } } // Check if it is overlapped to any other jet bool isOverlapped = false; for (const Jet* k : allJets) { if (j == k) continue; double dRjj = deltaR(j->momentum(), k->momentum()); if (dRjj < 0.8) { isOverlapped = true; break; } } if (isbJet && !isOverlapped) b_jets.push_back(j); } MSG_DEBUG(b_jets.size() << " b-jets selected"); // Select light-jets 
as the pair of non-b-jets with invariant mass closest to the W mass /// @todo Use built-in b-tagging (dR < 0.3 defn), avoid HepMC const double nominalW = 80.4*GeV; double deltaM = 500*GeV; const Jet* light1 = NULL; const Jet* light2 = NULL; // NB: const Jets, not const pointers! for (const Jet* i : good_jets) { bool isbJet1 = false; for (const GenParticle* b : b_hadrons) { /// @todo Use direct momentum accessor / delta functions const FourMomentum hadron = b->momentum(); const double hadron_jet_dR = deltaR(i->momentum(), hadron); if (hadron_jet_dR < 0.3) { isbJet1 = true; break; } } if (isbJet1) continue; for (const Jet* j : good_jets) { bool isbJet2 = false; for (const GenParticle* b : b_hadrons) { FourMomentum hadron = b->momentum(); double hadron_jet_dR = deltaR(j->momentum(), hadron); if (hadron_jet_dR < 0.3) { isbJet2 = true; break; } } if (isbJet2) continue; double invMass = (i->momentum()+j->momentum()).mass(); if (fabs(invMass-nominalW) < deltaM){ deltaM = fabs(invMass - nominalW); light1 = i; light2 = j; } } } // Check that both jets are not overlapped, and populate the light jets list vector light_jets; const bool hasGoodLight = light1 != NULL && light2 != NULL && light1 != light2; if (hasGoodLight) { bool isOverlap1 = false, isOverlap2 = false; for (const Jet* j : allJets) { if (light1 == j) continue; const double dR1j = deltaR(light1->momentum(), j->momentum()); if (dR1j < 0.8) { isOverlap1 = true; break; } } for (const Jet* j : allJets) { if (light2 == j) continue; const double dR2j = deltaR(light2->momentum(), j->momentum()); if (dR2j < 0.8) { isOverlap2 = true; break; } } if (!isOverlap1 && !isOverlap2) { light_jets.push_back(light1); light_jets.push_back(light2); } } MSG_DEBUG(light_jets.size() << " light jets selected"); // Calculate the jet shapes /// @todo Use C++11 vector/array initialization const double binWidth = 0.04; // -> 10 bins from 0.0-0.4 vector ptEdges; ptEdges += {{ 30, 40, 50, 70, 100, 150 }}; // b-jet shapes MSG_DEBUG("Filling b-jet shapes"); for (const Jet* bJet : b_jets) { // Work out jet pT bin and skip this jet if out of range const double jetPt = bJet->momentum().pT(); MSG_DEBUG("Jet pT = " << jetPt/GeV << " GeV"); if (!inRange(jetPt/GeV, 30., 150.)) continue; /// @todo Use YODA bin index lookup tools size_t ipt; for (ipt = 0; ipt < 5; ++ipt) if (inRange(jetPt/GeV, ptEdges[ipt], ptEdges[ipt+1])) break; MSG_DEBUG("Jet pT index = " << ipt); // Calculate jet shape vector rings(10, 0); for (const Particle& p : bJet->particles()) { const double dR = deltaR(bJet->momentum(), p.momentum()); const size_t idR = (size_t) floor(dR/binWidth); for (size_t i = idR; i < 10; ++i) rings[i] += p.pT(); } // Fill each dR bin of the histos for this jet pT for (int iBin = 0; iBin < 10; ++iBin) { const double rcenter = 0.02 + iBin*binWidth; const double rhoval = (iBin != 0 ? 
(rings[iBin]-rings[iBin-1]) : rings[iBin]) / binWidth / rings[9]; const double psival = rings[iBin] / rings[9]; MSG_DEBUG(rcenter << ", " << rhoval << ", " << psival); _p_b_rho[ipt]->fill(rcenter, rhoval); _p_b_Psi[ipt]->fill(rcenter, psival); } } // Light jet shapes MSG_DEBUG("Filling light jet shapes"); for (const Jet* lJet : light_jets) { // Work out jet pT bin and skip this jet if out of range const double jetPt = lJet->momentum().pT(); MSG_DEBUG("Jet pT = " << jetPt/GeV << " GeV"); if (!inRange(jetPt/GeV, 30., 150.)) continue; /// @todo Use YODA bin index lookup tools size_t ipt; for (ipt = 0; ipt < 5; ++ipt) if (inRange(jetPt/GeV, ptEdges[ipt], ptEdges[ipt+1])) break; MSG_DEBUG("Jet pT index = " << ipt); // Calculate jet shape vector<double> rings(10, 0.); for (const Particle& p : lJet->particles()) { const double dR = deltaR(lJet->momentum(), p.momentum()); const size_t idR = (size_t) floor(dR/binWidth); for (size_t i = idR; i < 10; ++i) rings[i] += p.pT(); } // Fill each dR bin of the histos for this jet pT for (int iBin = 0; iBin < 10; ++iBin) { const double rcenter = 0.02 + iBin*binWidth; const double rhoval = (iBin != 0 ? (rings[iBin]-rings[iBin-1]) : rings[iBin]) / binWidth / rings[9]; const double psival = rings[iBin] / rings[9]; _p_l_rho[ipt]->fill(rcenter, rhoval); _p_l_Psi[ipt]->fill(rcenter, psival); } } } private: Profile1DPtr _p_b_rho[5]; Profile1DPtr _p_l_rho[5]; Profile1DPtr _p_b_Psi[5]; Profile1DPtr _p_l_Psi[5]; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1243871); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1282447.cc b/analyses/pluginATLAS/ATLAS_2014_I1282447.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1282447.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1282447.cc @@ -1,595 +1,595 @@ // -*- C++ -*- // ATLAS W+c analysis ////////////////////////////////////////////////////////////////////////// /* Description of Rivet analysis ATLAS_2014_I1282447 (W+c production) This Rivet routine implements the ATLAS W+c analysis. In addition to the histograms described and published on HEPData, some helper histograms are defined: d02-x01-y01, d02-x01-y02 and d08-x01-y01 are ratios; the numerator ("_plus") and denominator ("_minus") histograms are also given, so that the ratios can be reconstructed if need be (e.g. when running on separate samples). d05 and d06 are ratios over inclusive W production. The routine has to be run on an inclusive W-production sample in order to make sure the denominator ("_winc") is correctly filled.
The ratios can be constructed using the following sample code: python divideWCharm.py import yoda hists_wc = yoda.read("Rivet_Wc.yoda") hists_winc = yoda.read("Rivet_Winc.yoda") ## division histograms --> ONLY for different plus minus runs # (merge before using yodamerge Rivet_plus.yoda Rivet_minus.yoda > Rivet_Wc.yoda) d02y01_plus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y01_plus"] d02y01_minus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y01_minus"] ratio_d02y01 = d02y01_plus.divide(d02y01_minus) ratio_d02y01.path = "/ATLAS_2014_I1282447/d02-x01-y01" d02y02_plus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y02_plus"] d02y02_minus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y02_minus"] ratio_d02y02 = d02y02_plus.divide(d02y02_minus) ratio_d02y02.path = "/ATLAS_2014_I1282447/d02-x01-y02" d08y01_plus = hists_wc["/ATLAS_2014_I1282447/d08-x01-y01_plus"] d08y01_minus = hists_wc["/ATLAS_2014_I1282447/d08-x01-y01_minus"] ratio_d08y01 = d08y01_plus.divide(d08y01_minus) ratio_d08y01.path = "/ATLAS_2014_I1282447/d08-x01-y01" # inclusive cross section h_winc = hists_winc["/ATLAS_2014_I1282447/d05-x01-y01"] h_d = hists_wc["/ATLAS_2014_I1282447/d01-x01-y02"] h_dstar= hists_wc["/ATLAS_2014_I1282447/d01-x01-y03"] ratio_wd = h_d.divide(h_winc) ratio_wd.path = "/ATLAS_2014_I1282447/d05-x01-y02" ratio_wdstar = h_dstar.divide(h_winc) ratio_wdstar.path = "/ATLAS_2014_I1282447/d05-x01-y03" # pT differential h_winc_plus = hists_winc["/ATLAS_2014_I1282447/d06-x01-y01_winc"] h_winc_minus = hists_winc["/ATLAS_2014_I1282447/d06-x01-y02_winc"] h_wd_plus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y01_wplus"] h_wd_minus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y02_wminus"] h_wdstar_plus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y03_wplus"] h_wdstar_minus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y04_wminus"] ratio_wd_plus = h_wd_plus.divide(h_winc_plus) ratio_wd_plus.path = "/ATLAS_2014_I1282447/d06-x01-y01" ratio_wd_minus = h_wd_minus.divide(h_winc_minus) ratio_wd_minus.path = "/ATLAS_2014_I1282447/d06-x01-y02" ratio_wdstar_plus = h_wdstar_plus.divide(h_winc_plus) ratio_wdstar_plus.path = "/ATLAS_2014_I1282447/d06-x01-y03" ratio_wdstar_minus = h_wdstar_minus.divide(h_winc_minus) ratio_wdstar_minus.path = "/ATLAS_2014_I1282447/d06-x01-y04" ## copy other histograms for plotting d01x01y01= hists_wc["/ATLAS_2014_I1282447/d01-x01-y01"] d01x01y01.path = "/ATLAS_2014_I1282447/d01-x01-y01" d01x01y02= hists_wc["/ATLAS_2014_I1282447/d01-x01-y02"] d01x01y02.path = "/ATLAS_2014_I1282447/d01-x01-y02" d01x01y03= hists_wc["/ATLAS_2014_I1282447/d01-x01-y03"] d01x01y03.path = "/ATLAS_2014_I1282447/d01-x01-y03" d03x01y01= hists_wc["/ATLAS_2014_I1282447/d03-x01-y01"] d03x01y01.path = "/ATLAS_2014_I1282447/d03-x01-y01" d03x01y02= hists_wc["/ATLAS_2014_I1282447/d03-x01-y02"] d03x01y02.path = "/ATLAS_2014_I1282447/d03-x01-y02" d04x01y01= hists_wc["/ATLAS_2014_I1282447/d04-x01-y01"] d04x01y01.path = "/ATLAS_2014_I1282447/d04-x01-y01" d04x01y02= hists_wc["/ATLAS_2014_I1282447/d04-x01-y02"] d04x01y02.path = "/ATLAS_2014_I1282447/d04-x01-y02" d04x01y03= hists_wc["/ATLAS_2014_I1282447/d04-x01-y03"] d04x01y03.path = "/ATLAS_2014_I1282447/d04-x01-y03" d04x01y04= hists_wc["/ATLAS_2014_I1282447/d04-x01-y04"] d04x01y04.path =
"/ATLAS_2014_I1282447/d04-x01-y04" d07x01y01= hists_wc["/ATLAS_2014_I1282447/d07-x01-y01"] d07x01y01.path = "/ATLAS_2014_I1282447/d07-x01-y01" yoda.write([ratio_d02y01,ratio_d02y02,ratio_d08y01, ratio_wd ,ratio_wdstar,ratio_wd_plus,ratio_wd_minus ,ratio_wdstar_plus,ratio_wdstar_minus,d01x01y01,d01x01y02,d01x01y03,d03x01y01,d03x01y02,d04x01y01,d04x01y02,d04x01y03,d04x01y04,d07x01y01],"validation.yoda") */ ////////////////////////////////////////////////////////////////////////// #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class ATLAS_2014_I1282447 : public Analysis { public: /// Constructor ATLAS_2014_I1282447() : Analysis("ATLAS_2014_I1282447") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// @todo Initialise and register projections here UnstableFinalState fs; Cut cuts = Cuts::etaIn(-2.5, 2.5) & (Cuts::pT > 20*GeV); /// should use sample WITHOUT QED radiation off the electron - WFinder wfinder_born_el(fs, cuts, PID::ELECTRON, 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::ClusterPhotons::ALL, WFinder::AddPhotons::YES); + WFinder wfinder_born_el(fs, cuts, PID::ELECTRON, 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::ALL, WFinder::AddPhotons::YES); declare(wfinder_born_el, "WFinder_born_el"); - WFinder wfinder_born_mu(fs, cuts, PID::MUON , 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::ClusterPhotons::ALL, WFinder::AddPhotons::YES); + WFinder wfinder_born_mu(fs, cuts, PID::MUON , 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::ALL, WFinder::AddPhotons::YES); declare(wfinder_born_mu, "WFinder_born_mu"); // all hadrons that could be coming from a charm decay -- // -- for safety, use region -3.5 - 3.5 declare(UnstableFinalState(Cuts::abseta <3.5), "hadrons"); // Input for the jets: no neutrinos, no muons, and no electron which passed the electron cuts // also: NO electron, muon or tau (needed due to ATLAS jet truth reconstruction feature) VetoedFinalState veto; veto.addVetoOnThisFinalState(wfinder_born_el); veto.addVetoOnThisFinalState(wfinder_born_mu); veto.addVetoPairId(PID::ELECTRON); veto.addVetoPairId(PID::MUON); veto.addVetoPairId(PID::TAU); FastJets jets(veto, FastJets::ANTIKT, 0.4); declare(jets, "jets"); // Book histograms // charge separated integrated cross sections book(_hist_wcjet_charge ,"d01-x01-y01"); book(_hist_wd_charge ,"d01-x01-y02"); book(_hist_wdstar_charge ,"d01-x01-y03"); // charge integrated total cross sections book(_hist_wcjet_ratio,"d02-x01-y01"); book(_hist_wd_ratio ,"d02-x01-y02"); book(_hist_wcjet_minus ,"d02-x01-y01_minus"); book(_hist_wd_minus ,"d02-x01-y02_minus"); book(_hist_wcjet_plus ,"d02-x01-y01_plus"); book(_hist_wd_plus ,"d02-x01-y02_plus"); // eta distributions book(_hist_wplus_wcjet_eta_lep ,"d03-x01-y01"); book(_hist_wminus_wcjet_eta_lep ,"d03-x01-y02"); book(_hist_wplus_wdminus_eta_lep ,"d04-x01-y01"); book(_hist_wminus_wdplus_eta_lep ,"d04-x01-y02"); book(_hist_wplus_wdstar_eta_lep ,"d04-x01-y03"); book(_hist_wminus_wdstar_eta_lep ,"d04-x01-y04"); // ratio of cross section (WD over W inclusive) // postprocess! 
book(_hist_w_inc ,"d05-x01-y01"); book(_hist_wd_winc_ratio ,"d05-x01-y02"); book(_hist_wdstar_winc_ratio,"d05-x01-y03"); // ratio of cross section (WD over W inclusive -- function of pT of D meson) book(_hist_wplusd_wplusinc_pt_ratio ,"d06-x01-y01"); book(_hist_wminusd_wminusinc_pt_ratio ,"d06-x01-y02"); book(_hist_wplusdstar_wplusinc_pt_ratio ,"d06-x01-y03"); book(_hist_wminusdstar_wminusinc_pt_ratio,"d06-x01-y04"); // could use for postprocessing! book(_hist_wplusd_wplusinc_pt ,"d06-x01-y01_wplus"); book(_hist_wminusd_wminusinc_pt ,"d06-x01-y02_wminus"); book(_hist_wplusdstar_wplusinc_pt ,"d06-x01-y03_wplus"); book(_hist_wminusdstar_wminusinc_pt ,"d06-x01-y04_wminus"); book(_hist_wplus_winc ,"d06-x01-y01_winc"); book(_hist_wminus_winc ,"d06-x01-y02_winc"); // jet multiplicity of charge integrated W+cjet cross section (+0 or +1 jet in addition to the charm jet) book(_hist_wcjet_jets ,"d07-x01-y01"); // jet multiplicity of W+cjet cross section ratio (+0 or +1 jet in addition to the charm jet) book(_hist_wcjet_jets_ratio ,"d08-x01-y01"); book(_hist_wcjet_jets_plus ,"d08-x01-y01_plus"); book(_hist_wcjet_jets_minus ,"d08-x01-y01_minus"); } /// Perform the per-event analysis void analyze(const Event& event) { double charge_weight = 0; // account for OS/SS events int lepton_charge = 0; double lepton_eta = 0.; /// Find leptons const WFinder& wfinder_born_el = apply(event, "WFinder_born_el"); const WFinder& wfinder_born_mu = apply(event, "WFinder_born_mu"); if (wfinder_born_el.empty() && wfinder_born_mu.empty()) { MSG_DEBUG("No W bosons found"); vetoEvent; } bool keepevent = false; //check electrons if (!wfinder_born_el.empty()) { const FourMomentum nu = wfinder_born_el.constituentNeutrinos()[0]; if (wfinder_born_el.mT() > 40*GeV && nu.pT() > 25*GeV) { keepevent = true; lepton_charge = wfinder_born_el.constituentLeptons()[0].charge(); lepton_eta = fabs(wfinder_born_el.constituentLeptons()[0].pseudorapidity()); } } //check muons if (!wfinder_born_mu.empty()) { const FourMomentum nu = wfinder_born_mu.constituentNeutrinos()[0]; if (wfinder_born_mu.mT() > 40*GeV && nu.pT() > 25*GeV) { keepevent = true; lepton_charge = wfinder_born_mu.constituentLeptons()[0].charge(); lepton_eta = fabs(wfinder_born_mu.constituentLeptons()[0].pseudorapidity()); } } if (!keepevent) { MSG_DEBUG("Event does not pass mT and MET cuts"); vetoEvent; } if (lepton_charge > 0) { _hist_wplus_winc->fill(10.); _hist_wplus_winc->fill(16.); _hist_wplus_winc->fill(30.); _hist_wplus_winc->fill(60.); _hist_w_inc->fill(+1); } else if (lepton_charge < 0) { _hist_wminus_winc->fill(10.); _hist_wminus_winc->fill(16.); _hist_wminus_winc->fill(30.); _hist_wminus_winc->fill(60.); _hist_w_inc->fill(-1); } // Find hadrons in the event const UnstableFinalState& fs = apply(event, "hadrons"); /// FIND Different channels // 1: wcjet // get jets const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT>25.0*GeV && Cuts::abseta<2.5); // loop over jets to select jets used to match to charm Jets js; int matched_charmHadron = 0; double charm_charge = 0.; int njets = 0; int nj = 0; bool mat_jet = false; double ptcharm = 0; if (matched_charmHadron > -1) { for (const Jet& j : jets) { mat_jet = false; njets += 1; for (const Particle& p : fs.particles()) { /// @todo Avoid touching HepMC! 
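// Charm-hadron matching: keep charm hadrons with pT > 5 GeV that do not come from a b decay and
// have no charmed children (i.e. weakly-decaying charm), match them to the jet within dR < 0.3,
// and remember the PDG id of the hardest match so that the OS/SS charge weight can be assigned below.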
const GenParticle* part = p.genParticle(); if (p.hasCharm()) { //if(isFromBDecay(p)) continue; if (p.fromBottom()) continue; if (p.pT() < 5*GeV ) continue; if (hasCharmedChildren(part)) continue; if (deltaR(p, j) < 0.3) { mat_jet = true; if (p.pT() > ptcharm) { charm_charge = part->pdg_id(); ptcharm = p.pT(); } } } } if (mat_jet) nj++; } if (charm_charge * lepton_charge > 0) charge_weight = -1; else charge_weight = +1; if (nj == 1) { if (lepton_charge > 0) { _hist_wcjet_charge ->fill( 1, charge_weight); _hist_wcjet_plus ->fill( 0, charge_weight); _hist_wplus_wcjet_eta_lep ->fill(lepton_eta, charge_weight); _hist_wcjet_jets_plus ->fill(njets-1 , charge_weight); } else if (lepton_charge < 0) { _hist_wcjet_charge ->fill( -1, charge_weight); _hist_wcjet_minus ->fill( 0, charge_weight); _hist_wminus_wcjet_eta_lep->fill(lepton_eta, charge_weight); _hist_wcjet_jets_minus ->fill(njets-1 , charge_weight); } _hist_wcjet_jets->fill(njets-1, charge_weight); } } // // 1/2: w+d(*) meson for (const Particle& p : fs.particles()) { /// @todo Avoid touching HepMC! const GenParticle* part = p.genParticle(); if (p.pT() < 8*GeV) continue; if (fabs(p.eta()) > 2.2) continue; // W+D if (abs(part->pdg_id()) == 411) { if (lepton_charge * part->pdg_id() > 0) charge_weight = -1; else charge_weight = +1; // fill histos if (lepton_charge > 0) { _hist_wd_charge ->fill( 1, charge_weight); _hist_wd_plus ->fill( 0, charge_weight); _hist_wplus_wdminus_eta_lep->fill(lepton_eta, charge_weight); _hist_wplusd_wplusinc_pt ->fill( p.pT(), charge_weight); } else if (lepton_charge < 0) { _hist_wd_charge ->fill( -1, charge_weight); _hist_wd_minus ->fill( 0, charge_weight); _hist_wminus_wdplus_eta_lep->fill(lepton_eta, charge_weight); _hist_wminusd_wminusinc_pt ->fill(p.pT() , charge_weight); } } // W+Dstar if ( abs(part->pdg_id()) == 413 ) { if (lepton_charge*part->pdg_id() > 0) charge_weight = -1; else charge_weight = +1; if (lepton_charge > 0) { _hist_wdstar_charge->fill(+1, charge_weight); _hist_wd_plus->fill( 0, charge_weight); _hist_wplus_wdstar_eta_lep->fill( lepton_eta, charge_weight); _hist_wplusdstar_wplusinc_pt->fill( p.pT(), charge_weight); } else if (lepton_charge < 0) { _hist_wdstar_charge->fill(-1, charge_weight); _hist_wd_minus->fill(0, charge_weight); _hist_wminus_wdstar_eta_lep->fill(lepton_eta, charge_weight); _hist_wminusdstar_wminusinc_pt->fill(p.pT(), charge_weight); } } } } /// Normalise histograms etc., after the run void finalize() { const double sf = crossSection() / sumOfWeights(); // norm to cross section // d01 scale(_hist_wcjet_charge, sf); scale(_hist_wd_charge, sf); scale(_hist_wdstar_charge, sf); //d02 scale(_hist_wcjet_plus, sf); scale(_hist_wcjet_minus, sf); scale(_hist_wd_plus, sf); scale(_hist_wd_minus, sf); divide(_hist_wcjet_plus, _hist_wcjet_minus, _hist_wcjet_ratio); divide(_hist_wd_plus, _hist_wd_minus, _hist_wd_ratio ); //d03 scale(_hist_wplus_wcjet_eta_lep, sf); scale(_hist_wminus_wcjet_eta_lep, sf); //d04 scale(_hist_wplus_wdminus_eta_lep, crossSection()/sumOfWeights()); scale(_hist_wminus_wdplus_eta_lep, crossSection()/sumOfWeights()); scale(_hist_wplus_wdstar_eta_lep , crossSection()/sumOfWeights()); scale(_hist_wminus_wdstar_eta_lep, crossSection()/sumOfWeights()); //d05 scale(_hist_w_inc, 0.01 * sf); // in percent --> /100 divide(_hist_wd_charge, _hist_w_inc, _hist_wd_winc_ratio ); divide(_hist_wdstar_charge, _hist_w_inc, _hist_wdstar_winc_ratio); //d06, in percentage! 
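// (Same postprocessing pattern as d05: the per-pT W+D(*) histograms are divided by the
//  inclusive-W denominators, which carry a factor 0.01 so the resulting ratios come out in percent.)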
scale(_hist_wplusd_wplusinc_pt, sf); scale(_hist_wminusd_wminusinc_pt, sf); scale(_hist_wplusdstar_wplusinc_pt, sf); scale(_hist_wminusdstar_wminusinc_pt, sf); scale(_hist_wplus_winc, 0.01 * sf); // in percent --> /100 scale(_hist_wminus_winc, 0.01 * sf); // in percent --> /100 divide(_hist_wplusd_wplusinc_pt, _hist_wplus_winc , _hist_wplusd_wplusinc_pt_ratio ); divide(_hist_wminusd_wminusinc_pt, _hist_wminus_winc, _hist_wminusd_wminusinc_pt_ratio ); divide(_hist_wplusdstar_wplusinc_pt, _hist_wplus_winc , _hist_wplusdstar_wplusinc_pt_ratio ); divide(_hist_wminusdstar_wminusinc_pt, _hist_wminus_winc, _hist_wminusdstar_wminusinc_pt_ratio); //d07 scale(_hist_wcjet_jets, sf); //d08 scale(_hist_wcjet_jets_minus, sf); scale(_hist_wcjet_jets_plus, sf); divide(_hist_wcjet_jets_plus, _hist_wcjet_jets_minus , _hist_wcjet_jets_ratio); } //@} private: // Data members like post-cuts event weight counters go here // Check whether particle comes from b-decay /// @todo Use built-in method and avoid HepMC bool isFromBDecay(const Particle& p) { bool isfromB = false; if (p.genParticle() == nullptr) return false; const GenParticle* part = p.genParticle(); const GenVertex* ivtx = const_cast(part->production_vertex()); while (ivtx) { if (ivtx->particles_in_size() < 1) { isfromB = false; break; } const HepMC::GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin(); part = (*iPart_invtx); if (!part) { isfromB = false; break; } isfromB = PID::hasBottom(part->pdg_id()); if (isfromB == true) break; ivtx = const_cast(part->production_vertex()); if ( part->pdg_id() == 2212 || !ivtx ) break; // reached beam } return isfromB; } // Check whether particle has charmed children /// @todo Use built-in method and avoid HepMC! bool hasCharmedChildren(const GenParticle *part) { bool hasCharmedChild = false; if (part == nullptr) return false; const GenVertex* ivtx = const_cast(part->end_vertex()); if (ivtx == nullptr) return false; // if (ivtx->particles_out_size() < 2) return false; HepMC::GenVertex::particles_out_const_iterator iPart_invtx = ivtx->particles_out_const_begin(); HepMC::GenVertex::particles_out_const_iterator end_invtx = ivtx->particles_out_const_end(); for ( ; iPart_invtx != end_invtx; iPart_invtx++ ) { const GenParticle* p2 = (*iPart_invtx); if (p2 == part) continue; hasCharmedChild = PID::hasCharm(p2->pdg_id()); if (hasCharmedChild == true) break; hasCharmedChild = hasCharmedChildren(p2); if (hasCharmedChild == true) break; } return hasCharmedChild; } private: /// @name Histograms //@{ //d01-x01- Histo1DPtr _hist_wcjet_charge; Histo1DPtr _hist_wd_charge; Histo1DPtr _hist_wdstar_charge; //d02-x01- Scatter2DPtr _hist_wcjet_ratio; Scatter2DPtr _hist_wd_ratio; Histo1DPtr _hist_wcjet_plus; Histo1DPtr _hist_wd_plus; Histo1DPtr _hist_wcjet_minus; Histo1DPtr _hist_wd_minus; //d03-x01- Histo1DPtr _hist_wplus_wcjet_eta_lep; Histo1DPtr _hist_wminus_wcjet_eta_lep; //d04-x01- Histo1DPtr _hist_wplus_wdminus_eta_lep; Histo1DPtr _hist_wminus_wdplus_eta_lep; //d05-x01- Histo1DPtr _hist_wplus_wdstar_eta_lep; Histo1DPtr _hist_wminus_wdstar_eta_lep; // postprocessing histos //d05-x01 Histo1DPtr _hist_w_inc; Scatter2DPtr _hist_wd_winc_ratio; Scatter2DPtr _hist_wdstar_winc_ratio; //d06-x01 Histo1DPtr _hist_wplus_winc; Histo1DPtr _hist_wminus_winc; Scatter2DPtr _hist_wplusd_wplusinc_pt_ratio; Scatter2DPtr _hist_wminusd_wminusinc_pt_ratio; Scatter2DPtr _hist_wplusdstar_wplusinc_pt_ratio; Scatter2DPtr _hist_wminusdstar_wminusinc_pt_ratio; Histo1DPtr _hist_wplusd_wplusinc_pt ; Histo1DPtr 
_hist_wminusd_wminusinc_pt; Histo1DPtr _hist_wplusdstar_wplusinc_pt; Histo1DPtr _hist_wminusdstar_wminusinc_pt; // d07-x01 Histo1DPtr _hist_wcjet_jets ; //d08-x01 Scatter2DPtr _hist_wcjet_jets_ratio ; Histo1DPtr _hist_wcjet_jets_plus ; Histo1DPtr _hist_wcjet_jets_minus; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1282447); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1306294.cc b/analyses/pluginATLAS/ATLAS_2014_I1306294.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1306294.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1306294.cc @@ -1,230 +1,230 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class ATLAS_2014_I1306294 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructors ATLAS_2014_I1306294(std::string name="ATLAS_2014_I1306294") : Analysis(name) { _mode = 1; } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; Cut cuts = Cuts::etaIn(-2.5,2.5) & (Cuts::pT > 20.0*GeV); ZFinder zfinder(fs, cuts, _mode==1? PID::ELECTRON : PID::MUON, 76.0*GeV, 106.0*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder, "ZFinder"); //FastJets jetpro1( getProjection("ZFinder").remainingFinalState(), FastJets::ANTIKT, 0.4); VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("ZFinder")); FastJets jetpro1(jet_fs, FastJets::ANTIKT, 0.4); jetpro1.useInvisibles(); declare(jetpro1, "AntiKtJets04"); declare(HeavyHadrons(), "BHadrons"); //Histograms with data binning book(_h_bjet_Pt , 3, 1, 1); book(_h_bjet_Y , 5, 1, 1); book(_h_bjet_Yboost , 7, 1, 1); book(_h_bjet_DY20 , 9, 1, 1); book(_h_bjet_ZdPhi20 ,11, 1, 1); book(_h_bjet_ZdR20 ,13, 1, 1); book(_h_bjet_ZPt ,15, 1, 1); book(_h_bjet_ZY ,17, 1, 1); book(_h_2bjet_dR ,21, 1, 1); book(_h_2bjet_Mbb ,23, 1, 1); book(_h_2bjet_ZPt ,25, 1, 1); book(_h_2bjet_ZY ,27, 1, 1); } //========================================================================================== /// Perform the per-event analysis void analyze(const Event& e) { //--------------------------- // -- check we have a Z: const ZFinder& zfinder = apply(e, "ZFinder"); if(zfinder.bosons().size() != 1) vetoEvent; - const ParticleVector boson_s = zfinder.bosons(); + const Particles boson_s = zfinder.bosons(); const Particle boson_f = boson_s[0] ; - const ParticleVector zleps = zfinder.constituents(); + const Particles zleps = zfinder.constituents(); //--------------------------- //--------------------------- //------------- stop processing the event if no true b-partons or hadrons are found const Particles& allBs = apply(e, "BHadrons").bHadrons(5.0*GeV); Particles stableBs; for(Particle p : allBs) { if(p.abseta() < 2.5) stableBs += p; } if( stableBs.empty() ) vetoEvent; //--------------------------- // -- get the b-jets: const Jets& jets = apply(e, "AntiKtJets04").jetsByPt(Cuts::pT >20.0*GeV && Cuts::abseta <2.4); Jets b_jets; for(const Jet& jet : jets) { //veto overlaps with Z leptons: bool veto = false; for(const Particle& zlep : zleps) { if(deltaR(jet, zlep) < 0.5) veto = true; } if(veto) continue; for(const Particle& bhadron : stableBs) { if( deltaR(jet, bhadron) <= 0.3 ) { b_jets.push_back(jet); break; // match } } // end loop on b-hadrons } //and make sure we have at least 1: if(b_jets.empty()) 
vetoEvent; //--------------------------- // fill the plots: const double ZpT = boson_f.pT()/GeV; const double ZY = boson_f.absrap(); _h_bjet_ZPt->fill(ZpT); _h_bjet_ZY ->fill(ZY); for(const Jet& jet : b_jets) { _h_bjet_Pt->fill(jet.pT()/GeV); _h_bjet_Y ->fill(jet.absrap()); const double Yboost = 0.5 * fabs(boson_f.rapidity() + jet.rapidity()); _h_bjet_Yboost->fill(Yboost); if(ZpT > 20.) { const double ZBDY = fabs( boson_f.rapidity() - jet.rapidity() ); const double ZBDPHI = fabs( deltaPhi(jet.phi(), boson_f.phi()) ); const double ZBDR = deltaR(jet, boson_f, RAPIDITY); _h_bjet_DY20->fill( ZBDY); _h_bjet_ZdPhi20->fill(ZBDPHI); _h_bjet_ZdR20->fill( ZBDR); } } //loop over b-jets if (b_jets.size() < 2) return; _h_2bjet_ZPt->fill(ZpT); _h_2bjet_ZY ->fill(ZY); const double BBDR = deltaR(b_jets[0], b_jets[1], RAPIDITY); const double Mbb = (b_jets[0].momentum() + b_jets[1].momentum()).mass(); _h_2bjet_dR ->fill(BBDR); _h_2bjet_Mbb->fill(Mbb); } // end of analysis loop /// Normalise histograms etc., after the run void finalize() { const double normfac = crossSection() / sumOfWeights(); scale( _h_bjet_Pt, normfac); scale( _h_bjet_Y, normfac); scale( _h_bjet_Yboost, normfac); scale( _h_bjet_DY20, normfac); scale( _h_bjet_ZdPhi20, normfac); scale( _h_bjet_ZdR20, normfac); scale( _h_bjet_ZPt, normfac); scale( _h_bjet_ZY, normfac); scale( _h_2bjet_dR, normfac); scale( _h_2bjet_Mbb, normfac); scale( _h_2bjet_ZPt, normfac); scale( _h_2bjet_ZY, normfac); } //@} protected: // Data members like post-cuts event weight counters go here size_t _mode; private: Histo1DPtr _h_bjet_Pt; Histo1DPtr _h_bjet_Y; Histo1DPtr _h_bjet_Yboost; Histo1DPtr _h_bjet_DY20; Histo1DPtr _h_bjet_ZdPhi20; Histo1DPtr _h_bjet_ZdR20; Histo1DPtr _h_bjet_ZPt; Histo1DPtr _h_bjet_ZY; Histo1DPtr _h_2bjet_dR; Histo1DPtr _h_2bjet_Mbb; Histo1DPtr _h_2bjet_ZPt; Histo1DPtr _h_2bjet_ZY; }; class ATLAS_2014_I1306294_EL : public ATLAS_2014_I1306294 { public: ATLAS_2014_I1306294_EL() : ATLAS_2014_I1306294("ATLAS_2014_I1306294_EL") { _mode = 1; } }; class ATLAS_2014_I1306294_MU : public ATLAS_2014_I1306294 { public: ATLAS_2014_I1306294_MU() : ATLAS_2014_I1306294("ATLAS_2014_I1306294_MU") { _mode = 2; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306294); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306294_MU); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306294_EL); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1306615.cc b/analyses/pluginATLAS/ATLAS_2014_I1306615.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1306615.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1306615.cc @@ -1,484 +1,484 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief ATLAS H->yy differential cross-sections measurement /// /// @author Michaela Queitsch-Maitland // // arXiv: http://arxiv.org/abs/ARXIV:1407.4222 // HepData: http://hepdata.cedar.ac.uk/view/ins1306615 class ATLAS_2014_I1306615 : public Analysis { public: // Constructor ATLAS_2014_I1306615() : Analysis("ATLAS_2014_I1306615") { } // Book histograms and initialise projections before the run void init() { // Final state // All particles within |eta| < 5.0 const FinalState FS(Cuts::abseta<5.0); declare(FS,"FS"); // Project photons with pT > 25 GeV and |eta| < 2.37 IdentifiedFinalState ph_FS(Cuts::abseta<2.37 && Cuts::pT>25.0*GeV); ph_FS.acceptIdPair(PID::PHOTON); declare(ph_FS, "PH_FS"); // Project photons for dressing IdentifiedFinalState ph_dressing_FS(FS); 
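// (These photons are used only for lepton dressing: each bare electron or muon
//  declared below is combined with photons within Delta R = 0.1 of it, giving
//  dressed leptons with pT > 15 GeV and |eta| < 2.47.)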
ph_dressing_FS.acceptIdPair(PID::PHOTON); // Project bare electrons IdentifiedFinalState el_bare_FS(FS); el_bare_FS.acceptIdPair(PID::ELECTRON); declare(el_bare_FS,"el_bare_FS"); // Project dressed electrons with pT > 15 GeV and |eta| < 2.47 DressedLeptons el_dressed_FS(ph_dressing_FS, el_bare_FS, 0.1, Cuts::abseta < 2.47 && Cuts::pT > 15*GeV); declare(el_dressed_FS,"EL_DRESSED_FS"); // Project bare muons IdentifiedFinalState mu_bare_FS(FS); mu_bare_FS.acceptIdPair(PID::MUON); // Project dressed muons with pT > 15 GeV and |eta| < 2.47 //DressedLeptons mu_dressed_FS(ph_dressing_FS, mu_bare_FS, 0.1, true, -2.47, 2.47, 15.0*GeV, false); DressedLeptons mu_dressed_FS(ph_dressing_FS, mu_bare_FS, 0.1, Cuts::abseta < 2.47 && Cuts::pT > 15*GeV); declare(mu_dressed_FS,"MU_DRESSED_FS"); // Final state excluding muons and neutrinos (for jet building and photon isolation) VetoedFinalState veto_mu_nu_FS(FS); veto_mu_nu_FS.vetoNeutrinos(); veto_mu_nu_FS.addVetoPairId(PID::MUON); declare(veto_mu_nu_FS, "VETO_MU_NU_FS"); // Build the anti-kT R=0.4 jets, using FinalState particles (vetoing muons and neutrinos) FastJets jets(veto_mu_nu_FS, FastJets::ANTIKT, 0.4); declare(jets, "JETS"); // Book histograms // 1D distributions book(_h_pT_yy ,1,1,1); book(_h_y_yy ,2,1,1); book(_h_Njets30 ,3,1,1); book(_h_Njets50 ,4,1,1); book(_h_pT_j1 ,5,1,1); book(_h_y_j1 ,6,1,1); book(_h_HT ,7,1,1); book(_h_pT_j2 ,8,1,1); book(_h_Dy_jj ,9,1,1); book(_h_Dphi_yy_jj ,10,1,1); book(_h_cosTS_CS ,11,1,1); book(_h_cosTS_CS_5bin ,12,1,1); book(_h_Dphi_jj ,13,1,1); book(_h_pTt_yy ,14,1,1); book(_h_Dy_yy ,15,1,1); book(_h_tau_jet ,16,1,1); book(_h_sum_tau_jet ,17,1,1); book(_h_y_j2 ,18,1,1); book(_h_pT_j3 ,19,1,1); book(_h_m_jj ,20,1,1); book(_h_pT_yy_jj ,21,1,1); // 2D distributions of cosTS_CS x pT_yy book(_h_cosTS_pTyy_low ,22,1,1); book(_h_cosTS_pTyy_high ,22,1,2); book(_h_cosTS_pTyy_rest ,22,1,3); // 2D distributions of Njets x pT_yy book(_h_pTyy_Njets0 ,23,1,1); book(_h_pTyy_Njets1 ,23,1,2); book(_h_pTyy_Njets2 ,23,1,3); book(_h_pTj1_excl ,24,1,1); // Fiducial regions book(_h_fidXSecs ,29,1,1); } // Perform the per-event analysis void analyze(const Event& event) { // Get final state particles - const ParticleVector& FS_ptcls = apply(event, "FS").particles(); - const ParticleVector& ptcls_veto_mu_nu = apply(event, "VETO_MU_NU_FS").particles(); - const ParticleVector& photons = apply(event, "PH_FS").particlesByPt(); + const Particles& FS_ptcls = apply(event, "FS").particles(); + const Particles& ptcls_veto_mu_nu = apply(event, "VETO_MU_NU_FS").particles(); + const Particles& photons = apply(event, "PH_FS").particlesByPt(); const vector& el_dressed = apply(event, "EL_DRESSED_FS").dressedLeptons(); const vector& mu_dressed = apply(event, "MU_DRESSED_FS").dressedLeptons(); // For isolation calculation float dR_iso = 0.4; float ETcut_iso = 14.0; FourMomentum ET_iso; // Fiducial selection: pT > 25 GeV, |eta| < 2.37 and isolation (in cone deltaR = 0.4) is < 14 GeV vector fid_photons; for (const Particle& ph : photons) { // Veto photons from hadron or tau decay if ( fromHadronDecay(ph) ) continue; // Calculate isolation ET_iso = - ph.momentum(); // Loop over fs truth particles (excluding muons and neutrinos) for (const Particle& p : ptcls_veto_mu_nu) { // Check if the truth particle is in a cone of 0.4 if ( deltaR(ph.momentum(), p.momentum()) < dR_iso ) ET_iso += p.momentum(); } // Check isolation if ( ET_iso.Et() > ETcut_iso ) continue; // Fill vector of photons passing fiducial selection fid_photons.push_back(&ph); } 
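// (ET_iso is the transverse energy inside a Delta R = 0.4 cone around the photon,
//  with the photon's own momentum subtracted via the initial "-ph.momentum()" term;
//  photons with more than 14 GeV of such cone activity are rejected as non-isolated.)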
if(fid_photons.size() < 2) vetoEvent; const FourMomentum& y1 = fid_photons[0]->momentum(); const FourMomentum& y2 = fid_photons[1]->momentum(); double m_yy = (y1 + y2).mass(); // Relative pT cuts if ( y1.pT() < 0.35 * m_yy || y2.pT() < 0.25 * m_yy ) vetoEvent; // Mass window cut if ( m_yy < 105 || m_yy > 160 ) vetoEvent; // -------------------------------------------- // // Passed diphoton baseline fiducial selection! // // -------------------------------------------- // // Electron selection vector good_el; for(const DressedLepton& els : el_dressed) { const Particle& el = els.constituentLepton(); if ( el.momentum().pT() < 15 ) continue; if ( fabs(el.momentum().eta()) > 2.47 ) continue; if ( deltaR(el.momentum(), y1) < 0.4 ) continue; if ( deltaR(el.momentum(), y2) < 0.4 ) continue; if ( fromHadronDecay(el) ) continue; // Veto electrons from hadron or tau decay good_el.push_back(&el); } // Muon selection vector good_mu; for(const DressedLepton& mus : mu_dressed) { const Particle& mu = mus.constituentLepton(); if ( mu.momentum().pT() < 15 ) continue; if ( fabs(mu.momentum().eta()) > 2.47 ) continue; if ( deltaR(mu.momentum(), y1) < 0.4 ) continue; if ( deltaR(mu.momentum(), y2) < 0.4 ) continue; if ( fromHadronDecay(mu) ) continue; // Veto muons from hadron or tau decay good_mu.push_back(&mu); } // Find prompt, invisible particles for missing ET calculation // Based on VisibleFinalState projection FourMomentum invisible(0,0,0,0); for (const Particle& p : FS_ptcls) { // Veto non-prompt particles (from hadron or tau decay) if ( fromHadronDecay(p) ) continue; // Charged particles are visible if ( PID::charge3( p.pid() ) != 0 ) continue; // Neutral hadrons are visible if ( PID::isHadron( p.pid() ) ) continue; // Photons are visible if ( p.pid() == PID::PHOTON ) continue; // Gluons are visible (for parton level analyses) if ( p.pid() == PID::GLUON ) continue; // Everything else is invisible invisible += p.momentum(); } double MET = invisible.Et(); // Jet selection // Get jets with pT > 25 GeV and |rapidity| < 4.4 //const Jets& jets = apply(event, "JETS").jetsByPt(25.0*GeV, DBL_MAX, -4.4, 4.4, RAPIDITY); const Jets& jets = apply(event, "JETS").jetsByPt(Cuts::pT>25.0*GeV && Cuts::absrap <4.4); vector jets_25; vector jets_30; vector jets_50; for (const Jet& jet : jets) { bool passOverlap = true; // Overlap with leading photons if ( deltaR(y1, jet.momentum()) < 0.4 ) passOverlap = false; if ( deltaR(y2, jet.momentum()) < 0.4 ) passOverlap = false; // Overlap with good electrons for (const Particle* el : good_el) if ( deltaR(el->momentum(), jet.momentum()) < 0.2 ) passOverlap = false; if ( ! passOverlap ) continue; if ( fabs(jet.momentum().eta()) < 2.4 || ( fabs(jet.momentum().eta()) > 2.4 && jet.momentum().pT() > 30 ) ) jets_25.push_back(&jet); if ( jet.momentum().pT() > 30 ) jets_30.push_back(&jet); if ( jet.momentum().pT() > 50 ) jets_50.push_back(&jet); } // Fiducial regions _h_fidXSecs->fill(1); if ( jets_30.size() >= 1 ) _h_fidXSecs->fill(2); if ( jets_30.size() >= 2 ) _h_fidXSecs->fill(3); if ( jets_30.size() >= 3 ) _h_fidXSecs->fill(4); if ( jets_30.size() >= 2 && passVBFCuts(y1 + y2, jets_30.at(0)->momentum(), jets_30.at(1)->momentum()) ) _h_fidXSecs->fill(5); if ( (good_el.size() + good_mu.size()) > 0 ) _h_fidXSecs->fill(6); if ( MET > 80 ) _h_fidXSecs->fill(7); // Fill histograms // Inclusive variables _pT_yy = (y1 + y2).pT(); _y_yy = fabs( (y1 + y2).rapidity() ); _cosTS_CS = cosTS_CS(y1, y2); _pTt_yy = pTt(y1, y2); _Dy_yy = fabs( deltaRap(y1, y2) ); _Njets30 = jets_30.size() > 3 ? 
3 : jets_30.size(); _Njets50 = jets_50.size() > 3 ? 3 : jets_50.size(); _h_Njets30->fill(_Njets30); _h_Njets50->fill(_Njets50); _pT_j1 = jets_30.size() > 0 ? jets_30.at(0)->momentum().pT() : 0.; _pT_j2 = jets_30.size() > 1 ? jets_30.at(1)->momentum().pT() : 0.; _pT_j3 = jets_30.size() > 2 ? jets_30.at(2)->momentum().pT() : 0.; _HT = 0.0; for (const Jet* jet : jets_30) _HT += jet->momentum().pT(); _tau_jet = tau_jet_max(y1 + y2, jets_25); _sum_tau_jet = sum_tau_jet(y1 + y2, jets_25); _h_pT_yy ->fill(_pT_yy); _h_y_yy ->fill(_y_yy); _h_pT_j1 ->fill(_pT_j1); _h_cosTS_CS ->fill(_cosTS_CS); _h_cosTS_CS_5bin->fill(_cosTS_CS); _h_HT ->fill(_HT); _h_pTt_yy ->fill(_pTt_yy); _h_Dy_yy ->fill(_Dy_yy); _h_tau_jet ->fill(_tau_jet); _h_sum_tau_jet ->fill(_sum_tau_jet); // >=1 jet variables if ( jets_30.size() >= 1 ) { FourMomentum j1 = jets_30[0]->momentum(); _y_j1 = fabs( j1.rapidity() ); _h_pT_j2->fill(_pT_j2); _h_y_j1 ->fill(_y_j1); } // >=2 jet variables if ( jets_30.size() >= 2 ) { FourMomentum j1 = jets_30[0]->momentum(); FourMomentum j2 = jets_30[1]->momentum(); _Dy_jj = fabs( deltaRap(j1, j2) ); _Dphi_jj = fabs( deltaPhi(j1, j2) ); _Dphi_yy_jj = fabs( deltaPhi(y1 + y2, j1 + j2) ); _m_jj = (j1 + j2).mass(); _pT_yy_jj = (y1 + y2 + j1 + j2).pT(); _y_j2 = fabs( j2.rapidity() ); _h_Dy_jj ->fill(_Dy_jj); _h_Dphi_jj ->fill(_Dphi_jj); _h_Dphi_yy_jj ->fill(_Dphi_yy_jj); _h_m_jj ->fill(_m_jj); _h_pT_yy_jj ->fill(_pT_yy_jj); _h_pT_j3 ->fill(_pT_j3); _h_y_j2 ->fill(_y_j2); } // 2D distributions of cosTS_CS x pT_yy if ( _pT_yy < 80 ) _h_cosTS_pTyy_low->fill(_cosTS_CS); else if ( _pT_yy > 80 && _pT_yy < 200 ) _h_cosTS_pTyy_high->fill(_cosTS_CS); else if ( _pT_yy > 200 ) _h_cosTS_pTyy_rest->fill(_cosTS_CS); // 2D distributions of pT_yy x Njets if ( _Njets30 == 0 ) _h_pTyy_Njets0->fill(_pT_yy); else if ( _Njets30 == 1 ) _h_pTyy_Njets1->fill(_pT_yy); else if ( _Njets30 >= 2 ) _h_pTyy_Njets2->fill(_pT_yy); if ( _Njets30 == 1 ) _h_pTj1_excl->fill(_pT_j1); } // Normalise histograms after the run void finalize() { const double xs = crossSectionPerEvent()/femtobarn; scale(_h_pT_yy, xs); scale(_h_y_yy, xs); scale(_h_pT_j1, xs); scale(_h_y_j1, xs); scale(_h_HT, xs); scale(_h_pT_j2, xs); scale(_h_Dy_jj, xs); scale(_h_Dphi_yy_jj, xs); scale(_h_cosTS_CS, xs); scale(_h_cosTS_CS_5bin, xs); scale(_h_Dphi_jj, xs); scale(_h_pTt_yy, xs); scale(_h_Dy_yy, xs); scale(_h_tau_jet, xs); scale(_h_sum_tau_jet, xs); scale(_h_y_j2, xs); scale(_h_pT_j3, xs); scale(_h_m_jj, xs); scale(_h_pT_yy_jj, xs); scale(_h_cosTS_pTyy_low, xs); scale(_h_cosTS_pTyy_high, xs); scale(_h_cosTS_pTyy_rest, xs); scale(_h_pTyy_Njets0, xs); scale(_h_pTyy_Njets1, xs); scale(_h_pTyy_Njets2, xs); scale(_h_pTj1_excl, xs); scale(_h_Njets30, xs); scale(_h_Njets50, xs); scale(_h_fidXSecs, xs); } // Trace event record to see if particle came from a hadron (or a tau from a hadron decay) // Based on fromDecay() function bool fromHadronDecay(const Particle& p ) { const GenParticle * gp = p.genParticle(); if (!gp) return false; /// TODO: something weird to make this necessary const GenVertex* prodVtx = p.genParticle()->production_vertex(); if (prodVtx == NULL) return false; for (const GenParticle* ancestor : particles(prodVtx, HepMC::ancestors)) { const PdgId pid = ancestor->pdg_id(); if (ancestor->status() == 2 && PID::isHadron(pid)) return true; if (ancestor->status() == 2 && (abs(pid) == PID::TAU && fromHadronDecay(ancestor))) return true; } return false; } // VBF-enhanced dijet topology selection cuts bool passVBFCuts(const FourMomentum &H, const FourMomentum &j1, const 
FourMomentum &j2) { return ( fabs(deltaRap(j1, j2)) > 2.8 && (j1 + j2).mass() > 400 && fabs(deltaPhi(H, j1 + j2)) > 2.6 ); } // Cosine of the decay angle in the Collins-Soper frame double cosTS_CS(const FourMomentum &y1, const FourMomentum &y2) { return fabs( ( (y1.E() + y1.pz())* (y2.E() - y2.pz()) - (y1.E() - y1.pz()) * (y2.E() + y2.pz()) ) / ((y1 + y2).mass() * sqrt(pow((y1 + y2).mass(), 2) + pow((y1 + y2).pt(), 2)) ) ); } // Diphoton pT along thrust axis double pTt(const FourMomentum &y1, const FourMomentum &y2) { return fabs(y1.px() * y2.py() - y2.px() * y1.py()) / (y1 - y2).pT()*2; } // Tau of jet (see paper for description) // tau_jet = mT/(2*cosh(y*)), where mT = pT (+) m, and y* = rapidty in Higgs rest frame double tau_jet( const FourMomentum &H, const FourMomentum &jet ) { return sqrt( pow(jet.pT(),2) + pow(jet.mass(),2) ) / (2.0 * cosh( jet.rapidity() - H.rapidity() ) ); } // Maximal (leading) tau_jet (see paper for description) double tau_jet_max( const FourMomentum &H, const vector jets, double tau_jet_cut = 8. ) { double max_tj = 0; for (size_t i=0; i < jets.size(); ++i) { FourMomentum jet = jets[i]->momentum(); if ( tau_jet(H, jet) > tau_jet_cut ) max_tj = max( tau_jet(H, jet), max_tj ); } return max_tj; } // Scalar sum of tau for all jets (see paper for description) double sum_tau_jet( const FourMomentum &H, const vector jets, double tau_jet_cut = 8. ) { double sum_tj = 0; for (size_t i=0; i < jets.size(); ++i) { FourMomentum jet = jets[i]->momentum(); if ( tau_jet(H, jet) > tau_jet_cut ) sum_tj += tau_jet(H, jet); } return sum_tj; } private: Histo1DPtr _h_pT_yy; Histo1DPtr _h_y_yy; Histo1DPtr _h_Njets30; Histo1DPtr _h_Njets50; Histo1DPtr _h_pT_j1; Histo1DPtr _h_y_j1; Histo1DPtr _h_HT; Histo1DPtr _h_pT_j2; Histo1DPtr _h_Dy_jj; Histo1DPtr _h_Dphi_yy_jj; Histo1DPtr _h_cosTS_CS; Histo1DPtr _h_cosTS_CS_5bin; Histo1DPtr _h_Dphi_jj; Histo1DPtr _h_pTt_yy; Histo1DPtr _h_Dy_yy; Histo1DPtr _h_tau_jet; Histo1DPtr _h_sum_tau_jet; Histo1DPtr _h_y_j2; Histo1DPtr _h_pT_j3; Histo1DPtr _h_m_jj; Histo1DPtr _h_pT_yy_jj; Histo1DPtr _h_cosTS_pTyy_low; Histo1DPtr _h_cosTS_pTyy_high; Histo1DPtr _h_cosTS_pTyy_rest; Histo1DPtr _h_pTyy_Njets0; Histo1DPtr _h_pTyy_Njets1; Histo1DPtr _h_pTyy_Njets2; Histo1DPtr _h_pTj1_excl; Histo1DPtr _h_fidXSecs; int _Njets30; int _Njets50; double _pT_yy; double _y_yy; double _cosTS_CS; double _pT_j1; double _m_jj; double _y_j1; double _HT; double _pT_j2; double _y_j2; double _Dphi_yy_jj; double _pT_yy_jj; double _Dphi_jj; double _Dy_jj; double _pT_j3; double _pTt_yy; double _Dy_yy; double _tau_jet; double _sum_tau_jet; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306615); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1315949.cc b/analyses/pluginATLAS/ATLAS_2014_I1315949.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1315949.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1315949.cc @@ -1,225 +1,225 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class ATLAS_2014_I1315949 : public Analysis { public: /// Constructor ATLAS_2014_I1315949() : Analysis("ATLAS_2014_I1315949") { } void init() { FinalState fs; ZFinder zfinder(fs, Cuts::abseta<2.4 && Cuts::pT>20.0*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY); declare(zfinder, "ZFinder"); ChargedFinalState cfs( zfinder.remainingFinalState() ); declare(cfs, "cfs"); book(_h_pTsum_tow , 1, 1, 1); book(_h_pTsum_trv , 1, 1, 2); 
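// (The tow/trv/away suffixes refer to azimuthal regions defined with respect to the
//  Z boson: towards |dphi| < pi/3, transverse pi/3 < |dphi| < 2pi/3, away |dphi| > 2pi/3;
//  tmin/tmax are the less/more active transverse halves and tdif their difference.)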
book(_h_pTsum_away , 1, 1, 3); book(_h_pTsum_tmin , 1, 1, 4); book(_h_pTsum_tmax , 1, 1, 5); book(_h_pTsum_tdif , 1, 1, 6); book(_h_Nchg_tow , 2, 1, 1); book(_h_Nchg_trv , 2, 1, 2); book(_h_Nchg_away , 2, 1, 3); book(_h_Nchg_tmin , 2, 1, 4); book(_h_Nchg_tmax , 2, 1, 5); book(_h_Nchg_tdif , 2, 1, 6); book(_h_pTavg_tow , 3, 1, 1); book(_h_pTavg_trv , 3, 1, 2); book(_h_pTavg_away , 3, 1, 3); book(_h_pTavgvsmult_tow , 4, 1, 1); book(_h_pTavgvsmult_trv , 4, 1, 2); book(_h_pTavgvsmult_away , 4, 1, 3); // Book sumpt and nch histos for (int i_reg = 0; i_reg < 4; i_reg++) { for (int i_bin = 0; i_bin < 6.; i_bin++) { book(_h_ptSum_1D[i_reg][i_bin], 5, i_reg+1 , i_bin+1); book( _h_Nchg_1D[i_reg][i_bin], 6, i_reg+1 , i_bin+1); } } } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; double Zpt = zfinder.bosons()[0].momentum().pT()/GeV; double Zphi = zfinder.bosons()[0].momentum().phi(); double Zmass = zfinder.bosons()[0].momentum().mass()/GeV; if(Zmass < 66. || Zmass > 116.) vetoEvent; // Initialise counters for Nch and sumPt for all regions int nTowards(0), nTransverse(0), nLeft(0), nRight(0), nTrmin(0), nTrmax(0), nAway(0); double ptSumTowards(0.0), ptSumTransverse(0.0), ptSumLeft(0.0), ptSumRight(0.0), ptSumTrmin(0.0), ptSumTrmax(0.0), ptSumAway(0.0); // The charged particles - ParticleVector particles = apply(event, "cfs").particlesByPt( + Particles particles = apply(event, "cfs").particlesByPt( Cuts::pT > 0.5*GeV && Cuts::abseta <2.5); // Loop over charged particles with pT>500 MeV and |eta|<2.5 for(const Particle& p : particles) { double dphi = p.momentum().phi() - Zphi, pT = p.momentum().pT(); // Get multiples of 2pi right for(; std::fabs(dphi) > M_PI; dphi += (dphi > 0. ? -2.*M_PI : 2.*M_PI) ); // Towards region if( std::fabs(dphi) < M_PI/3. ) { nTowards++; ptSumTowards += pT; } // Transverse region else if( std::fabs(dphi) < 2.*M_PI/3. ) { nTransverse++; ptSumTransverse += pT; if(dphi > 0.) { nRight++; ptSumRight += pT; } else { nLeft++; ptSumLeft += pT; } } // Away region else { nAway++; ptSumAway += pT; } } // TransMAX, TransMIN regions if (ptSumLeft > ptSumRight) { ptSumTrmax = ptSumLeft; ptSumTrmin = ptSumRight; nTrmax = nLeft; nTrmin = nRight; } else { ptSumTrmax = ptSumRight; ptSumTrmin = ptSumLeft; nTrmax = nRight; nTrmin = nLeft; } // min max regions have difference are than all other regions const double area = 5.*2./3.*M_PI; // Fill sumPt vs. Zpt region profiles _h_pTsum_tow->fill( Zpt, ptSumTowards/area); _h_pTsum_trv->fill( Zpt, ptSumTransverse/area); _h_pTsum_away->fill(Zpt, ptSumAway/area); _h_pTsum_tmin->fill(Zpt, ptSumTrmin/(0.5*area)); _h_pTsum_tmax->fill(Zpt, ptSumTrmax/(0.5*area)); _h_pTsum_tdif->fill(Zpt, (ptSumTrmax - ptSumTrmin)/(0.5*area)); // Fill Nch vs. Zpt region profiles _h_Nchg_tow->fill( Zpt, nTowards/area); _h_Nchg_trv->fill( Zpt, nTransverse/area); _h_Nchg_away->fill(Zpt, nAway/area); _h_Nchg_tmin->fill(Zpt, nTrmin/(0.5*area)); _h_Nchg_tmax->fill(Zpt, nTrmax/(0.5*area)); _h_Nchg_tdif->fill(Zpt, (nTrmax - nTrmin)/(0.5*area)); // Fill vs. ZpT profiles _h_pTavg_tow->fill( Zpt, nTowards > 0.? ptSumTowards/nTowards : 0.); _h_pTavg_trv->fill( Zpt, nTransverse > 0.? ptSumTransverse/nTransverse : 0.); _h_pTavg_away->fill(Zpt, nAway > 0.? ptSumAway/nAway : 0.); // Fill vs. ZpT profiles _h_pTavgvsmult_tow->fill( nTowards, nTowards > 0.? ptSumTowards/nTowards : 0.); _h_pTavgvsmult_trv->fill( nTransverse, nTransverse > 0.? 
ptSumTransverse/nTransverse : 0.); _h_pTavgvsmult_away->fill(nAway, nAway > 0.? ptSumAway/nAway : 0.); // Determine Zpt region histo to fill int i_bin(0); if (inRange(Zpt,0,5) ) i_bin=0; if (inRange(Zpt,5,10) ) i_bin=1; if (inRange(Zpt,10,20) ) i_bin=2; if (inRange(Zpt,20,50) ) i_bin=3; if (inRange(Zpt,50,110) ) i_bin=4; if (Zpt>110) i_bin=5; // SumPt histos for Zpt region _h_ptSum_1D[0][i_bin]->fill(ptSumTowards/area); _h_ptSum_1D[1][i_bin]->fill(ptSumTransverse/area); _h_ptSum_1D[2][i_bin]->fill(ptSumTrmin/(0.5*area)); _h_ptSum_1D[3][i_bin]->fill(ptSumTrmax/(0.5*area)); // Nch histos for Zpt region _h_Nchg_1D[0][i_bin]->fill(nTowards/area); _h_Nchg_1D[1][i_bin]->fill(nTransverse/area); _h_Nchg_1D[2][i_bin]->fill(nTrmin/(0.5*area)); _h_Nchg_1D[3][i_bin]->fill(nTrmax/(0.5*area)); } /// Normalise histograms etc., after the run void finalize() { for(int i_reg = 0; i_reg < 4; i_reg++) { for(int i_bin = 0; i_bin < 6; i_bin++) { normalize( _h_ptSum_1D[i_reg][i_bin] ); normalize( _h_Nchg_1D[ i_reg][i_bin] ); } } } private: Profile1DPtr _h_pTsum_tow, _h_pTsum_trv, _h_pTsum_away, _h_pTsum_tmin, _h_pTsum_tmax, _h_pTsum_tdif, _h_Nchg_tow, _h_Nchg_trv, _h_Nchg_away, _h_Nchg_tmin, _h_Nchg_tmax, _h_Nchg_tdif, _h_pTavg_tow, _h_pTavg_trv, _h_pTavg_away, _h_pTavgvsmult_tow, _h_pTavgvsmult_trv, _h_pTavgvsmult_away; Histo1DPtr _h_ptSum_1D[4][6], _h_Nchg_1D[4][6]; }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1315949); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1319490.cc b/analyses/pluginATLAS/ATLAS_2014_I1319490.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1319490.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1319490.cc @@ -1,232 +1,232 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class ATLAS_2014_I1319490 : public Analysis { public: ATLAS_2014_I1319490(string name = "ATLAS_2014_I1319490") : Analysis(name) { _mode = 0; // using electron channel for combined data by default } // Book histograms and initialise projections before the run void init() { FinalState fs; Cut cuts; if (_mode == 2) { // muon channel cuts = (Cuts::pT > 25.0*GeV) & Cuts::etaIn(-2.4, 2.4); } else if (_mode) { // electron channel cuts = (Cuts::pT > 25.0*GeV) & ( Cuts::etaIn(-2.47, -1.52) | Cuts::etaIn(-1.37, 1.37) | Cuts::etaIn(1.52, 2.47) ); } else { // combined data extrapolated to common phase space cuts = (Cuts::pT > 25.0*GeV) & Cuts::etaIn(-2.5, 2.5); } // bosons WFinder wfinder(fs, cuts, _mode > 1? 
PID::MUON : PID::ELECTRON, 40.0*GeV, DBL_MAX, 0.0*GeV, 0.1, - WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder, "WF"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(getProjection("WF")); FastJets jets(jet_fs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); declare(jets, "Jets"); // book histograms book(histos["h_N_incl"] ,1,1,_mode+1); book(histos["h_N"] ,4,1,_mode+1); book(histos["h_pt_jet1_1jet"] ,5,1,_mode+1); book(histos["h_pt_jet1_1jet_excl"] ,6,1,_mode+1); book(histos["h_pt_jet1_2jet"] ,7,1,_mode+1); book(histos["h_pt_jet1_3jet"] ,8,1,_mode+1); book(histos["h_pt_jet2_2jet"] ,9,1,_mode+1); book(histos["h_pt_jet3_3jet"] ,10,1,_mode+1); book(histos["h_pt_jet4_4jet"] ,11,1,_mode+1); book(histos["h_pt_jet5_5jet"] ,12,1,_mode+1); book(histos["h_y_jet1_1jet"] ,13,1,_mode+1); book(histos["h_y_jet2_2jet"] ,14,1,_mode+1); book(histos["h_HT_1jet"] ,15,1,_mode+1); book(histos["h_HT_1jet_excl"] ,16,1,_mode+1); book(histos["h_HT_2jet"] ,17,1,_mode+1); book(histos["h_HT_2jet_excl"] ,18,1,_mode+1); book(histos["h_HT_3jet"] ,19,1,_mode+1); book(histos["h_HT_3jet_excl"] ,20,1,_mode+1); book(histos["h_HT_4jet"] ,21,1,_mode+1); book(histos["h_HT_5jet"] ,22,1,_mode+1); book(histos["h_deltaPhi_jet12"] ,23,1,_mode+1); book(histos["h_deltaRap_jet12"] ,24,1,_mode+1); book(histos["h_deltaR_jet12"] ,25,1,_mode+1); book(histos["h_M_Jet12_2jet"] ,26,1,_mode+1); book(histos["h_y_jet3_3jet"] ,27,1,_mode+1); book(histos["h_y_jet4_4jet"] ,28,1,_mode+1); book(histos["h_y_jet5_5jet"] ,29,1,_mode+1); book(histos["h_ST_1jet"] ,30,1,_mode+1); book(histos["h_ST_2jet"] ,31,1,_mode+1); book(histos["h_ST_2jet_excl"] ,32,1,_mode+1); book(histos["h_ST_3jet"] ,33,1,_mode+1); book(histos["h_ST_3jet_excl"] ,34,1,_mode+1); book(histos["h_ST_4jet"] ,35,1,_mode+1); book(histos["h_ST_5jet"] ,36,1,_mode+1); } void fillPlots(const Particle& lepton, const double& missET, Jets& all_jets) { // do jet-lepton overlap removal Jets jets; double ST = 0.0; // scalar pT sum of all selected jets for (const Jet &j : all_jets) { if (deltaR(j, lepton) > 0.5) { jets += j; ST += j.pT() / GeV; } } const size_t njets = jets.size(); const double HT = ST + lepton.pT() / GeV + missET; histos["h_N"]->fill(njets + 0.5); for (size_t i = 0; i <= njets; ++i) { histos["h_N_incl"]->fill(i + 0.5); } if (njets) { const double pT1 = jets[0].pT() / GeV; const double rap1 = jets[0].absrap(); histos["h_pt_jet1_1jet" ]->fill(pT1); histos["h_y_jet1_1jet"]->fill(rap1); histos["h_HT_1jet"]->fill(HT); histos["h_ST_1jet"]->fill(ST); if (njets == 1) { histos["h_pt_jet1_1jet_excl"]->fill(pT1); histos["h_HT_1jet_excl"]->fill(HT); } else { const double pT2 = jets[1].pT() / GeV; const double rap2 = jets[1].absrap(); const double dR = deltaR(jets[0], jets[1]); const double dRap = deltaRap(jets[0], jets[1]); const double dPhi = deltaPhi(jets[0], jets[1]); const double mjj = (jets[0].momentum() + jets[1].momentum()).mass() / GeV; histos["h_pt_jet1_2jet"]->fill(pT1); histos["h_pt_jet2_2jet"]->fill(pT2); histos["h_y_jet2_2jet"]->fill(rap2); histos["h_M_Jet12_2jet"]->fill(mjj); histos["h_HT_2jet"]->fill(HT); histos["h_ST_2jet"]->fill(ST); histos["h_deltaPhi_jet12"]->fill(dPhi); histos["h_deltaRap_jet12"]->fill(dRap); histos["h_deltaR_jet12"]->fill(dR); if (njets == 2) { histos["h_ST_2jet_excl"]->fill(ST); histos["h_HT_2jet_excl"]->fill(HT); } else { const double pT3 = jets[2].pT() / GeV; const 
double rap3 = jets[2].absrap(); histos["h_pt_jet1_3jet"]->fill(pT1); histos["h_pt_jet3_3jet"]->fill(pT3); histos["h_y_jet3_3jet"]->fill(rap3); histos["h_HT_3jet"]->fill(HT); histos["h_ST_3jet"]->fill(ST); if(njets == 3) { histos["h_ST_3jet_excl"]->fill(ST); histos["h_HT_3jet_excl"]->fill(HT); } else { const double pT4 = jets[3].pT() / GeV; const double rap4 = jets[3].absrap(); histos["h_pt_jet4_4jet"]->fill(pT4); histos["h_y_jet4_4jet"]->fill(rap4); histos["h_HT_4jet"]->fill(HT); histos["h_ST_4jet"]->fill(ST); if (njets > 4) { const double pT5 = jets[4].pT() / GeV; const double rap5 = jets[4].absrap(); histos["h_pt_jet5_5jet"]->fill(pT5); histos["h_y_jet5_5jet"]->fill(rap5); histos["h_HT_5jet"]->fill(HT); histos["h_ST_5jet"]->fill(ST); } } } } } } // Perform the per-event analysis void analyze(const Event& event) { // Retrieve boson candidate const WFinder& wf = apply(event, "WF"); if (wf.empty()) vetoEvent; // Retrieve jets const JetAlg& jetfs = apply(event, "Jets"); Jets all_jets = jetfs.jetsByPt(Cuts::pT > 30.0*GeV && Cuts::absrap < 4.4); const Particles& leptons = wf.constituentLeptons(); const double missET = wf.constituentNeutrino().pT() / GeV; if (leptons.size() == 1 && missET > 25.0 && wf.mT() > 40.0*GeV) { const Particle& lep = leptons[0]; fillPlots(lep, missET, all_jets); } } void finalize() { const double scalefactor(crossSection() / sumOfWeights()); /// @todo Update to use C++11 range-for for (map::iterator hit = histos.begin(); hit != histos.end(); ++hit) { scale(hit->second, scalefactor); } } protected: size_t _mode; private: map histos; }; class ATLAS_2014_I1319490_EL : public ATLAS_2014_I1319490 { public: ATLAS_2014_I1319490_EL() : ATLAS_2014_I1319490("ATLAS_2014_I1319490_EL") { _mode = 1; } }; class ATLAS_2014_I1319490_MU : public ATLAS_2014_I1319490 { public: ATLAS_2014_I1319490_MU() : ATLAS_2014_I1319490("ATLAS_2014_I1319490_MU") { _mode = 2; } }; // The hooks for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1319490); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1319490_EL); DECLARE_RIVET_PLUGIN(ATLAS_2014_I1319490_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1408516.cc b/analyses/pluginATLAS/ATLAS_2015_I1408516.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1408516.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1408516.cc @@ -1,242 +1,242 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class ATLAS_2015_I1408516 : public Analysis { public: /// Constructor ATLAS_2015_I1408516(string name="ATLAS_2015_I1408516", size_t mode=0) : Analysis(name), _mode(mode) // using electron channel for combined data { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Configure projections FinalState fs; Cut cuts = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; ZFinder zfinder(fs, cuts, (_mode ? PID::MUON : PID::ELECTRON), 12*GeV, 150*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder, _mode ? "ZFinder_mu" : "ZFinder_el"); // Book histograms const size_t offset = _mode ? 
4 : 1; book(_h["phistar_lo_00_08"] , 2, 1, offset); book(_h["phistar_lo_08_16"] , 3, 1, offset); book(_h["phistar_lo_16_24"] , 4, 1, offset); book(_h["phistar_me_00_04"] , 5, 1, offset); book(_h["phistar_me_04_08"] , 6, 1, offset); book(_h["phistar_me_08_12"] , 7, 1, offset); book(_h["phistar_me_12_16"] , 8, 1, offset); book(_h["phistar_me_16_20"] , 9, 1, offset); book(_h["phistar_me_20_24"] ,10, 1, offset); book(_h["phistar_hi_00_08"] ,11, 1, offset); book(_h["phistar_hi_08_16"] ,12, 1, offset); book(_h["phistar_hi_16_24"] ,13, 1, offset); book(_h["phistar_mll_46_66" ] ,14, 1, offset); book(_h["phistar_mll_66_116" ] ,15, 1, offset); book(_h["phistar_mll_116_150"] ,16, 1, offset); book(_h["zpt_00_04"] ,17, 1, offset); book(_h["zpt_04_08"] ,18, 1, offset); book(_h["zpt_08_12"] ,19, 1, offset); book(_h["zpt_12_16"] ,20, 1, offset); book(_h["zpt_16_20"] ,21, 1, offset); book(_h["zpt_20_24"] ,22, 1, offset); book(_h["zpt_mll_12_20" ] ,23, 1, offset); book(_h["zpt_mll_20_30" ] ,24, 1, offset); book(_h["zpt_mll_30_46" ] ,25, 1, offset); book(_h["zpt_mll_46_66" ] ,26, 1, offset); book(_h["zpt_mll_66_116" ] ,27, 1, offset); book(_h["zpt_mll_116_150"] ,28, 1, offset); book(_h["zpt_00_04_xsec"] ,29, 1, offset); book(_h["zpt_04_08_xsec"] ,30, 1, offset); book(_h["zpt_08_12_xsec"] ,31, 1, offset); book(_h["zpt_12_16_xsec"] ,32, 1, offset); book(_h["zpt_16_20_xsec"] ,33, 1, offset); book(_h["zpt_20_24_xsec"] ,34, 1, offset); book(_h["zpt_mll_12_20_xsec" ] ,35, 1, offset); book(_h["zpt_mll_20_30_xsec" ] ,36, 1, offset); book(_h["zpt_mll_30_46_xsec" ] ,37, 1, offset); book(_h["zpt_mll_46_66_xsec" ] ,38, 1, offset); book(_h["zpt_mll_66_116_xsec" ] ,39, 1, offset); book(_h["zpt_mll_116_150_xsec"] ,40, 1, offset); book(_h["mll_xsec"] ,41, 1, 1 + _mode); } /// Perform the per-event analysis void analyze(const Event& event) { // Get leptonic Z boson const ZFinder& zfinder = apply(event, _mode ? "ZFinder_mu" : "ZFinder_el"); if (zfinder.bosons().size() != 1 ) vetoEvent; const Particle& Zboson = zfinder.boson(); // Get/cut on heavily used Z boson properties const double zpt = Zboson.pT(); const double zrap = Zboson.absrap(); const double zmass = Zboson.mass(); if (zrap > 2.4) vetoEvent; // Get/cut on Z boson leptons - const ParticleVector& leptons = zfinder.constituents(); + const Particles& leptons = zfinder.constituents(); if (leptons.size() != 2 || leptons[0].charge3() * leptons[1].charge3() > 0) vetoEvent; const Particle& lminus = leptons[0].charge() < 0 ? leptons[0] : leptons[1]; const Particle& lplus = leptons[0].charge() < 0 ? leptons[1] : leptons[0]; // Compute phi* const double phi_acop = M_PI - deltaPhi(lminus, lplus); const double costhetastar = tanh( 0.5 * (lminus.eta() - lplus.eta()) ); const double sin2thetastar = (costhetastar > 1) ? 
0.0 : (1.0 - sqr(costhetastar)); const double phistar = tan(0.5 * phi_acop) * sqrt(sin2thetastar); // Inclusive mll if (zmass > 46*GeV || zpt > 45*GeV) { // 46 GeV < mll < 150 GeV OR (12 GeV < mll < 46 GeV AND ZpT >45 GeV) _h["mll_xsec"]->fill(zmass); } // 12 GeV < mll < 150 GeV observables if (zmass < 20*GeV) { // 12 GeV < mll < 20 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_12_20_xsec"]->fill(zpt); _h["zpt_mll_12_20" ]->fill(zpt); } } else if (zmass < 30*GeV) { // 20 GeV < mll < 30 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_20_30_xsec"]->fill(zpt); _h["zpt_mll_20_30" ]->fill(zpt); } } else if (zmass < 46*GeV) { // 30 GeV < mll < 46 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_30_46_xsec"]->fill(zpt); _h["zpt_mll_30_46" ]->fill(zpt); } } else if (zmass < 66*GeV) { // 46 GeV < mll < 66 GeV _h["zpt_mll_46_66_xsec"]->fill(zpt); _h["zpt_mll_46_66" ]->fill(zpt); _h["phistar_mll_46_66"]->fill(phistar); if (zrap < 0.8) _h["phistar_lo_00_08"]->fill(phistar); else if (zrap < 1.6) _h["phistar_lo_08_16"]->fill(phistar); else _h["phistar_lo_16_24"]->fill(phistar); } else if (zmass < 116*GeV) { // 66 GeV < mll < 116 GeV _h["zpt_mll_66_116_xsec"]->fill(zpt); _h["zpt_mll_66_116" ]->fill(zpt); if (zrap < 0.4) { _h["zpt_00_04_xsec"]->fill(zpt); _h["zpt_00_04"]->fill(zpt); } else if (zrap < 0.8) { _h["zpt_04_08_xsec"]->fill(zpt); _h["zpt_04_08"]->fill(zpt); } else if (zrap < 1.2) { _h["zpt_08_12_xsec"]->fill(zpt); _h["zpt_08_12"]->fill(zpt); } else if (zrap < 1.6) { _h["zpt_12_16_xsec"]->fill(zpt); _h["zpt_12_16"]->fill(zpt); } else if (zrap < 2.0) { _h["zpt_16_20_xsec"]->fill(zpt); _h["zpt_16_20"]->fill(zpt); } else { _h["zpt_20_24_xsec"]->fill(zpt); _h["zpt_20_24"]->fill(zpt); } _h["phistar_mll_66_116"]->fill(phistar); if (zrap < 0.4) _h["phistar_me_00_04"]->fill(phistar); else if (zrap < 0.8) _h["phistar_me_04_08"]->fill(phistar); else if (zrap < 1.2) _h["phistar_me_08_12"]->fill(phistar); else if (zrap < 1.6) _h["phistar_me_12_16"]->fill(phistar); else if (zrap < 2.0) _h["phistar_me_16_20"]->fill(phistar); else _h["phistar_me_20_24"]->fill(phistar); } else { // 116 GeV < mll < 150 GeV _h["zpt_mll_116_150_xsec"]->fill(zpt); _h["zpt_mll_116_150" ]->fill(zpt); _h["phistar_mll_116_150"]->fill(phistar); if (zrap < 0.8) _h["phistar_hi_00_08"]->fill(phistar); else if (zrap < 1.6) _h["phistar_hi_08_16"]->fill(phistar); else _h["phistar_hi_16_24"]->fill(phistar); } } /// Normalise histograms etc., after the run void finalize() { // Scale non-xsec plots to cross-section const double sf = crossSection() / picobarn / sumOfWeights(); for (const auto& key_hist : _h) { scale(key_hist.second, sf); if (!contains(key_hist.first, "_xsec")) normalize(key_hist.second); } // M(ll) plot isn't a differential cross section so shouldn't be divided by bin width for (size_t i = 0; i < 6; ++i) { double bw = _h["mll_xsec"]->bin(i).xWidth(); _h["mll_xsec"]->bin(i).scaleW(bw); } } //@} protected: size_t _mode; private: /// @name Histograms //@{ map _h; //@} }; class ATLAS_2015_I1408516_EL : public ATLAS_2015_I1408516 { public: ATLAS_2015_I1408516_EL() : ATLAS_2015_I1408516("ATLAS_2015_I1408516_EL", 0) { } }; class ATLAS_2015_I1408516_MU : public ATLAS_2015_I1408516 { public: ATLAS_2015_I1408516_MU() : ATLAS_2015_I1408516("ATLAS_2015_I1408516_MU", 1) { } }; DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516_EL); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc 
b/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1502620_W.cc @@ -1,137 +1,138 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { /// @brief inclusive W cross sections at 7 TeV class ATLAS_2016_I1502620_W : public Analysis { public: /// Constructor ATLAS_2016_I1502620_W(string name="ATLAS_2016_I1502620_W") : Analysis(name) { // using electron channel by default _mode = 0; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { ///Initialise and register projections here const FinalState fs; Cut cut = Cuts::pT >= 25*GeV; // minimum lepton pT WFinder wfinder_dressed(fs, cut, _mode? PID::MUON : PID::ELECTRON, 40*GeV, 13*TeV, 25*GeV, 0.1, + WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder_dressed, "WFinder_dressed"); /// Book histograms here book(_h_Wp_eta, 9, 1, _mode + 1); book(_h_Wm_eta, 10, 1, _mode + 1); book(_h_W_asym, 35, 1, _mode + 1); } /// Perform the per-event analysis void analyze(const Event& event) { const WFinder& wfinder = apply(event, "WFinder_dressed"); if (wfinder.bosons().size() != 1) vetoEvent; const Particle lep = wfinder.constituentLeptons()[0]; if (lep.charge3() > 0) _h_Wp_eta->fill(lep.abseta()); else _h_Wm_eta->fill(lep.abseta()); } /// Normalise histograms etc., after the run void finalize() { // Construct asymmetry: (dsig+/deta - dsig-/deta) / (dsig+/deta + dsig-/deta) //divide(*_h_Wp_eta - *_h_Wm_eta, *_h_Wp_eta + *_h_Wm_eta, _h_W_asym); for (size_t i = 0; i < _h_Wp_eta->numBins(); ++i) { YODA::HistoBin1D& bp = _h_Wp_eta->bin(i); YODA::HistoBin1D& bm = _h_Wm_eta->bin(i); const double sum = bp.height() + bm.height(); //const double xerr = 0.5 * bp.xWidth(); double val = 0., yerr = 0.; if (sum) { const double pos2 = bp.height() * bp.height(); const double min2 = bm.height() * bm.height(); const double errp2 = bp.heightErr() * bp.heightErr(); const double errm2 = bm.heightErr() * bm.heightErr(); val = (bp.height() - bm.height()) / sum; yerr = 2. 
* sqrt(errm2 * pos2 + errp2 * min2) / (sum * sum); } _h_W_asym->addPoint(bp.midpoint(), val, 0.5*bp.xWidth(), yerr); } // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_DEBUG( "Cross-section/pb : " << xs_pb ); MSG_DEBUG( "Sum of weights : " << sumw ); MSG_DEBUG( "nEvents : " << numEvents() ); /// Normalise, scale and otherwise manipulate histograms here const double sf = 0.5 * xs_pb / sumw; // 0.5 accounts for rapidity bin width scale(_h_Wp_eta, sf); scale(_h_Wm_eta, sf); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _h_Wp_eta, _h_Wm_eta; Scatter2DPtr _h_W_asym; //@} }; class ATLAS_2016_I1502620_W_EL : public ATLAS_2016_I1502620_W { public: ATLAS_2016_I1502620_W_EL() : ATLAS_2016_I1502620_W("ATLAS_2016_I1502620_W_EL") { _mode = 0; } }; class ATLAS_2016_I1502620_W_MU : public ATLAS_2016_I1502620_W { public: ATLAS_2016_I1502620_W_MU() : ATLAS_2016_I1502620_W("ATLAS_2016_I1502620_W_MU") { _mode = 1; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_W); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_W_EL); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_W_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc b/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1502620_Z.cc @@ -1,134 +1,134 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { /// @brief inclusive Z cross sections at 7 TeV class ATLAS_2016_I1502620_Z : public Analysis { public: /// Constructor ATLAS_2016_I1502620_Z(string name="ATLAS_2016_I1502620_Z") : Analysis(name) { // using electron channel by default _mode = 0; } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { const FinalState fs; Cut cuts = Cuts::pT >= 20.0*GeV; ZFinder zfinder(fs, cuts, (_mode ? 
PID::MUON : PID::ELECTRON), 46.0*GeV, 150*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder, "ZFinder"); // Book histograms book(_h_Zcenlow_y_dressed , 11, 1, _mode + 1); book(_h_Zcenpeak_y_dressed, 12, 1, _mode + 1); book(_h_Zcenhigh_y_dressed, 13, 1, _mode + 1); book(_h_Zfwdpeak_y_dressed, 14, 1, _mode + 1); book(_h_Zfwdhigh_y_dressed, 15, 1, _mode + 1); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1 ) vetoEvent; const Particle& Zboson = zfinder.boson(); const double zrap = Zboson.absrap(); const double zmass = Zboson.mass(); // Get/cut on Z boson leptons - const ParticleVector& leptons = zfinder.constituents(); + const Particles& leptons = zfinder.constituents(); const double eta1 = leptons[0].abseta(); const double eta2 = leptons[1].abseta(); // separation into central/forward and three mass bins if (eta1 < 2.5 && eta2 < 2.5) { if (zmass < 66.0*GeV) _h_Zcenlow_y_dressed->fill(zrap); else if (zmass < 116.0*GeV) _h_Zcenpeak_y_dressed->fill(zrap); else _h_Zcenhigh_y_dressed->fill(zrap); } else if ((eta1 < 2.5 && 2.5 < eta2 && eta2 < 4.9) || (eta2 < 2.5 && 2.5 < eta1 && eta1 < 4.9)) { if (zmass < 66.0*GeV) vetoEvent; if (zmass < 116.0*GeV) _h_Zfwdpeak_y_dressed->fill(zrap); else _h_Zfwdhigh_y_dressed->fill(zrap); } } /// Normalise histograms etc., after the run void finalize() { // Print summary info const double xs_pb(crossSection() / picobarn); const double sumw(sumOfWeights()); MSG_DEBUG("Cross-Section/pb: " << xs_pb ); MSG_DEBUG("Sum of weights : " << sumw ); MSG_DEBUG("nEvents : " << numEvents()); // Normalise, scale and otherwise manipulate histograms here const double sf(0.5 * xs_pb / sumw); // 0.5 accounts for rapidity bin width scale(_h_Zcenlow_y_dressed, sf); scale(_h_Zcenpeak_y_dressed, sf); scale(_h_Zcenhigh_y_dressed, sf); scale(_h_Zfwdpeak_y_dressed, sf); scale(_h_Zfwdhigh_y_dressed, sf); } //@} protected: size_t _mode; private: /// @name Histograms //@{ Histo1DPtr _h_Zcenlow_y_dressed; Histo1DPtr _h_Zcenpeak_y_dressed; Histo1DPtr _h_Zcenhigh_y_dressed; Histo1DPtr _h_Zfwdpeak_y_dressed; Histo1DPtr _h_Zfwdhigh_y_dressed; //@} }; class ATLAS_2016_I1502620_Z_EL : public ATLAS_2016_I1502620_Z { public: ATLAS_2016_I1502620_Z_EL() : ATLAS_2016_I1502620_Z("ATLAS_2016_I1502620_Z_EL") { _mode = 0; } }; class ATLAS_2016_I1502620_Z_MU : public ATLAS_2016_I1502620_Z { public: ATLAS_2016_I1502620_Z_MU() : ATLAS_2016_I1502620_Z("ATLAS_2016_I1502620_Z_MU") { _mode = 1; } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_Z); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_Z_EL); DECLARE_RIVET_PLUGIN(ATLAS_2016_I1502620_Z_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1624693.cc b/analyses/pluginATLAS/ATLAS_2017_I1624693.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1624693.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1624693.cc @@ -1,435 +1,435 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" /// @todo Include more projections as required, e.g. ChargedFinalState, FastJets, ZFinder... 
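// Charged-particle pairs are characterised throughout by the variable
//   Q12 = sqrt( (p1 + p2)^2 - 4*m_pi^2 ),
// where both tracks are assigned the charged-pion mass (see the qq() helper below);
// same-sign pairs enter the Delta(Q) distribution with weight -1 and
// opposite-sign pairs with weight +1.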
namespace Rivet { class ATLAS_2017_I1624693 : public Analysis { public: /// Constructor /// @brief Study of ordered hadron chains at 7 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1624693); /// @name Analysis methods //@{ struct usedX { int locMin; int locMax; std::vector > chains; // Constructor usedX(int min, int max, int ic, float mass) { locMin=min; locMax=max; chains.clear(); chains.push_back(std::pair(ic,mass)); } // Constructor usedX(int min, int max) { locMin=min; locMax=max; chains.clear(); } void add(int jc, float mass) { if (chains.size()) { std::vector >::iterator it=chains.begin(); while ( it!=chains.end() && mass>(*it).second ) ++it; chains.insert(it,std::pair(jc,mass)); } else { chains.push_back(std::pair(jc,mass)); } } }; /// Book histograms and initialise projections before the run void init() { /// @todo Initialise and register projections here ChargedFinalState cfs(-2.5, 2.5, 0.1*GeV); declare(cfs,"CFS"); // pion mass; pim = 0.1396; /// @todo Book histograms here, e.g.: book(_DeltaQ , 1, 1, 1); book(_Delta3h, 2, 1, 1); book(_dalitz , 3, 1, 1); // auxiliary book(_h_nch, "_nch", 200, -0.5, 199.5); } /// Perform the per-event analysis void analyze(const Event& event) { //const double weight = event.weight(); bool match =false; /// @todo Do the event by event analysis here const ChargedFinalState& had = applyProjection(event, "CFS"); - ParticleVector hs=had.particles(); + Particles hs=had.particles(); int nch = hs.size(); if (nch < 3) return; _h_nch->fill(1.*nch,1.); for (unsigned int i=0; i < hs.size() - 1; ++i) { for (unsigned int j=i+1; j < hs.size(); ++j) { double q12 = qq(hs[i],hs[j],match); if (match) _DeltaQ->fill(q12,-1.); else _DeltaQ->fill(q12,1.); } } // chain selection std::vector wchain; std::vector< std::vector > rchains; std::vector< std::vector > mchains; wchain.clear(); rchains.clear(); mchains.clear(); for (unsigned int ip1 = 0; ip1< hs.size(); ++ip1 ) { wchain.push_back(1.); std::vector cc(1,ip1); std::vector mc; double qlmin=10000.; int ilmin=-1; for (unsigned ip2 = 0; ip2 < hs.size(); ++ip2) { if (ip2==ip1) continue; double ql = qq(hs[ip1],hs[ip2],match); if (!match) continue; // looking for closest like-sign match if (ql ilmin && rchains[ilmin][1]==ip1) { // std::cout <<"exclusive match:"<< std::endl; wchain.back()=0.5; wchain[ilmin]=0.5; } double m3min=10000.; int ixmin=-1; for (unsigned ip2 = 0; ip2< hs.size(); ++ip2) { if (ip2==ip1 || int(ip2)==ilmin ) continue; double qx = qq(hs[ip1],hs[ip2],match); if (match) continue; double qxl = qq(hs[ip2],hs[ilmin],match); double m3 = sqrt(9*pim*pim+qxl*qxl+qlmin*qlmin+qx*qx); if (m3 assoc(hs.size(),0.); // cache for association rate std::vector accept(rchains.size(), false); // loop over chains and accept lowest masses while watching the association rate int inext = 0; while ( inext>-1 ) { inext = -1; float cMin = 100000.; // find non-accepted chain with lowest Q_ls; dissolve chains if association count over 2 for (unsigned int ic=0; ic < rchains.size(); ++ic) { if (rchains[ic].size() < 2) continue; if (accept[ic]) continue; if (mchains[ic][0] < cMin) { cMin = mchains[ic][0]; inext=ic; } } if (inext>-1 ) { unsigned int cloc0 = rchains[inext][0]; unsigned int cloc1 = rchains[inext][1]; if ( (assoc[cloc0] + 1. <= 2.) && (assoc[cloc1] + 1. <= 2.) 
) { // chain can be accepted accept[inext]=true; assoc[cloc0]+=1.; assoc[cloc1]+=1.; if (wchain[inext]==0.5) { // accept the identical chain, too for (unsigned int ic=0; ic1 ) { // association count filled up, discard chain accept[inext]=true; wchain[inext]=0.; } else { // dissolve chain and find new association unsigned int i1 = rchains[inext][0]; float mMn = 1000000.; int ipn = -1; for (unsigned int i2=0; i2 1.) continue; if (m > 0. && m = 0) { rchains[inext][1]=ipn; mchains[inext][0]=mMn; // resolve chain weight : by default, it is 1. wchain[inext]=1.; // check exclusivity of pairing for (unsigned int ico=0; ico0.) continue; float q12 = qq(hs[ipn],hs[ij],match); double m3 = sqrt(9*pim*pim+q02*q02+mMn*mMn+q12*q12); if (m3>0. && m3 =0) { rchains[inext].push_back(ipnn); rchains[inext][2]=ipnn; mchains[inext][1]=mMnn; } else {accept[inext]=true; wchain[inext]=0.;} } else { // chain not recovered wchain[inext]=0.; accept[inext]=true; } } } } // end loop over chains // cleanup: association rate for unlike-sign pairs // third member verification std::vector accept3(rchains.size(),false); // watch unlike-sign combinations used std::vector used; // loop over chains and accept lowest masses while watching the association rate inext = 0; while ( inext>-1 ) { inext = -1; float cMin = 100000.; // find non-accepted chain with lowest mass; dissolve chains if association count over 3 for (unsigned int ic=0; ic < rchains.size(); ++ic) { if (rchains[ic].size() < 3 || !wchain[ic] || !accept[ic]) continue; if (accept3[ic]) continue; if (mchains[ic][1]-1 ) { unsigned int cloc0 = rchains[inext][0]; unsigned int cloc1 = rchains[inext][1]; unsigned int cloc2 = rchains[inext][2]; // map use of unlike sign pairs int iu0 = -1; float w0=0.; for (unsigned int iu=0; iu 0) for (unsigned int iw=0; iw0) for (unsigned int iw=0; iw 0.) continue; if (assoc[i3] > 3-wchain[inext]) continue; // check pair association w0=0.; w1=0.; for (unsigned int iu=0; iu 0) for (unsigned int iw=0; iw0) for (unsigned int iw=0; iw2. || w1+wchain[inext]>2.) continue; float q12 = qq(hs[i2],hs[i3],match); float q01 = qq(hs[i1],hs[i2],match); float m = sqrt(9*pim*pim+q02*q02+q01*q01+q12*q12); if (m>0. 
&& m =0) { rchains[inext].push_back(ipn); rchains[inext][2]=iploc; mchains[inext][1]=mMn; } else { // chain not recovered wchain[inext]=0.; } } } } // end loop over chains // end 3rd member optimization for (unsigned int ip=0; ip < wchain.size(); ++ip) { if (!wchain[ip]) continue; if (rchains[ip].size() < 3) continue; float m3min = mchains[ip][1]; if (m3min > 0.59) continue; // dalitz plot std::pair dd = dalitz3(hs[rchains[ip][0]], hs[rchains[ip][1]], hs[rchains[ip][2]]); _dalitz->fill(dd.first,dd.second,1.*wchain[ip]); // Delta(Q) spectra float qlmin = mchains[ip][0]; float qxmin = qq(hs[rchains[ip][0]], hs[rchains[ip][2]], match); float xlmin = qq(hs[rchains[ip][1]], hs[rchains[ip][2]], match); _Delta3h->fill(qxmin, 0.5*wchain[ip]); _Delta3h->fill(xlmin, 0.5*wchain[ip]); _Delta3h->fill(qlmin, -1.*wchain[ip]); } } /// Normalise histograms etc., after the run void finalize() { // normalize by the number of charged particles // counter automatic division by bin size double norm = 0.01 / (_h_nch->xMean()*_h_nch->numEntries()); _dalitz->scaleW(norm); _DeltaQ->scaleW(norm); _Delta3h->scaleW(norm); } //@} double qq(const Particle& gp1, const Particle& gp2, bool& match) { match = gp1.charge() * gp2.charge() > 0; FourMomentum p1, p2; p1.setPM(gp1.px(), gp1.py(), gp1.pz(), pim); p2.setPM(gp2.px(), gp2.py(), gp2.pz(), pim); return sqrt(fmax(0., (p1 + p2).mass2() - 4*pim*pim)); } std::pair dalitz3(const Particle& gp1, const Particle& gp2, const Particle& gp3) const { float p1= gp1.pt(); float p2= gp2.pt(); float p3= gp3.pt(); float th1 = gp1.theta(); float th2 = gp2.theta(); float th3 = gp3.theta(); float ph1 = gp1.phi(); float ph2 = gp2.phi(); float ph3 = gp3.phi(); float e1 = sqrt(p1*p1+pim*pim); float e2 = sqrt(p2*p2+pim*pim); float e3 = sqrt(p3*p3+pim*pim); float p1x = p1*cos(ph1)*sin(th1); float p1y = p1*sin(ph1)*sin(th1); float p1z = p1*cos(th1); float p2x = p2*cos(ph2)*sin(th2); float p2y = p2*sin(ph2)*sin(th2); float p2z = p2*cos(th2); float p3x = p3*cos(ph3)*sin(th3); float p3y = p3*sin(ph3)*sin(th3); float p3z = p3*cos(th3); float px = p1x+p2x+p3x; float py = p1y+p2y+p3y; float pz = p1z+p2z+p3z; float ap = sqrt(px*px+py*py+pz*pz); float e=e1+e2+e3; float beta = ap/e; float gamma = 1./sqrt(1-beta*beta); float p1l = (p1x*px+p1y*py+p1z*pz)/ap; float p2l = (p2x*px+p2y*py+p2z*pz)/ap; float p3l = (p3x*px+p3y*py+p3z*pz)/ap; float e1_boost = gamma*e1-gamma*beta*p1l; float e2_boost = gamma*e2-gamma*beta*p2l; float e3_boost = gamma*e3-gamma*beta*p3l; float Q = sqrt(e*e-ap*ap)-3*pim; return std::pair(sqrt(3.)*(e1_boost-e2_boost)/Q , 3*(e3_boost-pim)/Q-1.); } private: // Data members like post-cuts event weight counters go here float pim; private: /// @name Histograms Histo1DPtr _DeltaQ; Histo1DPtr _Delta3h; Histo1DPtr _h_nch; Histo2DPtr _dalitz; //@} }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1624693); } diff --git a/analyses/pluginCMS/CMS_2013_I1209721.cc b/analyses/pluginCMS/CMS_2013_I1209721.cc --- a/analyses/pluginCMS/CMS_2013_I1209721.cc +++ b/analyses/pluginCMS/CMS_2013_I1209721.cc @@ -1,173 +1,173 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/Thrust.hh" namespace Rivet { /// CMS Z+jets delta(phi) and jet thrust measurement at 7 TeV class CMS_2013_I1209721 : public Analysis { public: CMS_2013_I1209721() : Analysis("CMS_2013_I1209721") { } /// Book projections and histograms void init() { // Full final state 
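// (|eta| < 5.0, all particles; it feeds the electron and muon Z finders and the
//  anti-kT R = 0.5 jet clustering declared below)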
const FinalState fs(-5.0,5.0); declare(fs, "FS"); // Z finders for electrons and muons Cut cuts = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; const ZFinder zfe(fs, cuts, PID::ELECTRON, 71*GeV, 111*GeV); const ZFinder zfm(fs, cuts, PID::MUON, 71*GeV, 111*GeV); declare(zfe, "ZFE"); declare(zfm, "ZFM"); // Jets const FastJets jets(fs, FastJets::ANTIKT, 0.5); declare(jets, "JETS"); // Book histograms from data for (size_t i = 0; i < 2; ++i) { book(_histDeltaPhiZJ1_1[i] ,1+i*9, 1, 1); book(_histDeltaPhiZJ1_2[i] ,2+i*9, 1, 1); book(_histDeltaPhiZJ1_3[i] ,4+i*9, 1, 1); book(_histDeltaPhiZJ2_3[i] ,5+i*9, 1, 1); book(_histDeltaPhiZJ3_3[i] ,3+i*9, 1, 1); book(_histDeltaPhiJ1J2_3[i] ,6+i*9, 1, 1); book(_histDeltaPhiJ1J3_3[i] ,7+i*9, 1, 1); book(_histDeltaPhiJ2J3_3[i] ,8+i*9, 1, 1); book(_histTransvThrust[i] ,9+i*9, 1, 1); } } void analyze(const Event& event) { const double weight = 1.0; // Apply the Z finders const ZFinder& zfe = apply(event, "ZFE"); const ZFinder& zfm = apply(event, "ZFM"); // Choose the Z candidate (there must be one) if (zfe.empty() && zfm.empty()) vetoEvent; - const ParticleVector& z = !zfm.empty() ? zfm.bosons() : zfe.bosons(); - const ParticleVector& leptons = !zfm.empty() ? zfm.constituents() : zfe.constituents(); + const Particles& z = !zfm.empty() ? zfm.bosons() : zfe.bosons(); + const Particles& leptons = !zfm.empty() ? zfm.constituents() : zfe.constituents(); // Determine whether we are in the boosted regime const bool is_boosted = (z[0].pT() > 150*GeV); // Build the jets const FastJets& jetfs = apply(event, "JETS"); const Jets& jets = jetfs.jetsByPt(Cuts::pT > 50*GeV && Cuts::abseta < 2.5); // Clean the jets against the lepton candidates, as in the paper, with a deltaR cut of 0.4 against the clustered leptons vector cleanedJets; for (size_t i = 0; i < jets.size(); ++i) { bool isolated = true; for (size_t j = 0; j < 2; ++j) { if (deltaR(leptons[j], jets[i]) < 0.4) { isolated = false; break; } } if (isolated) cleanedJets.push_back(&jets[i]); } // Require at least 1 jet const unsigned int Njets = cleanedJets.size(); if (Njets < 1) vetoEvent; // Now compute the thrust // Collect Z and jets transverse momenta to calculate transverse thrust vector momenta; momenta.clear(); Vector3 mom = z[0].p3(); mom.setZ(0); momenta.push_back(mom); for (size_t i = 0; i < cleanedJets.size(); ++i) { Vector3 mj = cleanedJets[i]->momentum().p3(); mj.setZ(0); momenta.push_back(mj); } if (momenta.size() <= 2){ // We need to use a ghost so that Thrust.calc() doesn't return 1. 
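// (Transverse thrust is roughly T = max_n sum_i |pT_i . n| / sum_i |pT_i|, maximised
//  over unit vectors n in the transverse plane. With only the Z and one recoiling jet
//  the two momenta are essentially back-to-back, so T -> 1 and ln(1-T) is ill-defined;
//  the tiny ghost momentum added below regularises this, and the histogram is filled
//  with ln(max(1-T, 1e-6)).)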
momenta.push_back(Vector3(0.0000001,0.0000001,0.)); } Thrust thrust; thrust.calc(momenta); const double T = thrust.thrust(); FILLx2(_histTransvThrust, is_boosted, log(max(1-T, 1e-6)), weight); const double dphiZJ1 = deltaPhi(z[0], *cleanedJets[0]); FILLx2(_histDeltaPhiZJ1_1, is_boosted, dphiZJ1, weight); if (Njets > 1) { FILLx2(_histDeltaPhiZJ1_2, is_boosted, dphiZJ1, weight); if (Njets > 2) { FILLx2(_histDeltaPhiZJ1_3, is_boosted, dphiZJ1, weight); FILLx2(_histDeltaPhiZJ2_3, is_boosted, deltaPhi(z[0], *cleanedJets[1]), weight); FILLx2(_histDeltaPhiZJ3_3, is_boosted, deltaPhi(z[0], *cleanedJets[2]), weight); FILLx2(_histDeltaPhiJ1J2_3, is_boosted, deltaPhi(*cleanedJets[0], *cleanedJets[1]), weight); FILLx2(_histDeltaPhiJ1J3_3, is_boosted, deltaPhi(*cleanedJets[0], *cleanedJets[2]), weight); FILLx2(_histDeltaPhiJ2J3_3, is_boosted, deltaPhi(*cleanedJets[1], *cleanedJets[2]), weight); } } } /// Normalizations /// @note Most of these data normalizations neglect the overflow bins void finalize() { for (size_t i = 0; i < 2; ++i) { normalize(_histDeltaPhiZJ1_1[i], 1, false); normalize(_histDeltaPhiZJ1_2[i], 1, false); normalize(_histDeltaPhiZJ1_3[i], 1, false); normalize(_histDeltaPhiZJ2_3[i], 1, false); normalize(_histDeltaPhiZJ3_3[i], 1, false); normalize(_histDeltaPhiJ1J2_3[i], 1, false); normalize(_histDeltaPhiJ1J3_3[i], 1, false); normalize(_histDeltaPhiJ2J3_3[i], 1, false); normalize(_histTransvThrust[i]); } } private: // Define a helper to appropriately fill both unboosted and boosted histo versions void FILLx2(Histo1DPtr* HNAME, bool is_boosted, double VAL, double weight) { double x = VAL; for (size_t i = 0; i < 2; ++i) { if (i == 0 || is_boosted) HNAME[i]->fill(x, weight); } } // Arrays of unboosted/boosted histos Histo1DPtr _histDeltaPhiZJ1_1[2]; Histo1DPtr _histDeltaPhiZJ1_2[2]; Histo1DPtr _histDeltaPhiZJ1_3[2]; Histo1DPtr _histDeltaPhiZJ2_3[2]; Histo1DPtr _histDeltaPhiZJ3_3[2]; Histo1DPtr _histDeltaPhiJ1J2_3[2]; Histo1DPtr _histDeltaPhiJ1J3_3[2]; Histo1DPtr _histDeltaPhiJ2J3_3[2]; Histo1DPtr _histTransvThrust[2]; }; DECLARE_RIVET_PLUGIN(CMS_2013_I1209721); } diff --git a/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc b/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc --- a/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc +++ b/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc @@ -1,193 +1,193 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ZFinder.hh" #include "fastjet/tools/Filter.hh" #include "fastjet/tools/Pruner.hh" namespace Rivet { class CMS_2013_I1224539_WJET : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CMS_2013_I1224539_WJET() : Analysis("CMS_2013_I1224539_WJET"), _filter(fastjet::Filter(fastjet::JetDefinition(fastjet::cambridge_algorithm, 0.3), fastjet::SelectorNHardest(3))), _trimmer(fastjet::Filter(fastjet::JetDefinition(fastjet::kt_algorithm, 0.2), fastjet::SelectorPtFractionMin(0.03))), _pruner(fastjet::Pruner(fastjet::cambridge_algorithm, 0.1, 0.5)) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs(-2.4, 2.4, 0*GeV); declare(fs, "FS"); // Find W's with pT > 120, MET > 50 WFinder wfinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 80*GeV, PID::ELECTRON, 50*GeV, 1000*GeV, 50.0*GeV, - 0.2, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + 0.2, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder, "WFinder"); // W+jet jet collections declare(FastJets(wfinder.remainingFinalState(), FastJets::ANTIKT, 0.7), "JetsAK7_wj"); declare(FastJets(wfinder.remainingFinalState(), FastJets::CAM, 0.8), "JetsCA8_wj"); declare(FastJets(wfinder.remainingFinalState(), FastJets::CAM, 1.2), "JetsCA12_wj"); // Histograms /// @note These are 2D histos rendered into slices const int wjetsOffset = 51; for (size_t i = 0; i < N_PT_BINS_vj; ++i) { book(_h_ungroomedJetMass_AK7_wj[i] ,wjetsOffset+i+1+0*N_PT_BINS_vj, 1, 1); book(_h_filteredJetMass_AK7_wj[i] ,wjetsOffset+i+1+1*N_PT_BINS_vj, 1, 1); book(_h_trimmedJetMass_AK7_wj[i] ,wjetsOffset+i+1+2*N_PT_BINS_vj, 1, 1); book(_h_prunedJetMass_AK7_wj[i] ,wjetsOffset+i+1+3*N_PT_BINS_vj, 1, 1); book(_h_prunedJetMass_CA8_wj[i] ,wjetsOffset+i+1+4*N_PT_BINS_vj, 1, 1); if (i > 0) book(_h_filteredJetMass_CA12_wj[i] ,wjetsOffset+i+5*N_PT_BINS_vj, 1, 1); } } bool isBackToBack_wj(const WFinder& wf, const fastjet::PseudoJet& psjet) { const FourMomentum w = wf.bosons()[0]; const FourMomentum l1 = wf.constituentLeptons()[0]; const FourMomentum l2 = wf.constituentNeutrinos()[0]; /// @todo We should make FourMomentum know how to construct itself from a PseudoJet const FourMomentum jmom(psjet.e(), psjet.px(), psjet.py(), psjet.pz()); return (deltaPhi(w, jmom) > 2.0 && deltaR(l1, jmom) > 1.0 && deltaPhi(l2, jmom) > 0.4); } // Find the pT histogram bin index for value pt (in GeV), to hack a 2D histogram equivalent /// @todo Use a YODA axis/finder alg when available size_t findPtBin(double ptJ) { const double ptBins_vj[N_PT_BINS_vj+1] = { 125.0, 150.0, 220.0, 300.0, 450.0 }; for (size_t ibin = 0; ibin < N_PT_BINS_vj; ++ibin) { if (inRange(ptJ, ptBins_vj[ibin], ptBins_vj[ibin+1])) return ibin; } return N_PT_BINS_vj; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Get the W const WFinder& wfinder = apply(event, "WFinder"); if (wfinder.bosons().size() != 1) vetoEvent; const Particle w = wfinder.bosons()[0]; const Particle l = wfinder.constituentLeptons()[0]; // Require a fairly high-pT W and charged lepton if (l.pT() < 80*GeV || w.pT() < 120*GeV) vetoEvent; // Get the pseudojets. 
const PseudoJets psjetsCA8_wj = apply(event, "JetsCA8_wj").pseudoJetsByPt( 50.0*GeV ); const PseudoJets psjetsCA12_wj = apply(event, "JetsCA12_wj").pseudoJetsByPt( 50.0*GeV ); // AK7 jets const PseudoJets psjetsAK7_wj = apply(event, "JetsAK7_wj").pseudoJetsByPt( 50.0*GeV ); if (!psjetsAK7_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsAK7_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj) { fastjet::PseudoJet filtered0 = _filter(j0); fastjet::PseudoJet trimmed0 = _trimmer(j0); fastjet::PseudoJet pruned0 = _pruner(j0); _h_ungroomedJetMass_AK7_wj[njetBin]->fill(j0.m()/GeV, weight); _h_filteredJetMass_AK7_wj[njetBin]->fill(filtered0.m()/GeV, weight); _h_trimmedJetMass_AK7_wj[njetBin]->fill(trimmed0.m()/GeV, weight); _h_prunedJetMass_AK7_wj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA8 jets if (!psjetsCA8_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsCA8_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj) { fastjet::PseudoJet pruned0 = _pruner(j0); _h_prunedJetMass_CA8_wj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA12 jets if (!psjetsCA12_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsCA12_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj&&njetBin>0) { fastjet::PseudoJet filtered0 = _filter(j0); _h_filteredJetMass_CA12_wj[njetBin]->fill( filtered0.m() / GeV, weight); } } } } /// Normalise histograms etc., after the run void finalize() { const double normalizationVal = 1000; for (size_t i = 0; i < N_PT_BINS_vj; ++i) { normalize(_h_ungroomedJetMass_AK7_wj[i], normalizationVal); normalize(_h_filteredJetMass_AK7_wj[i], normalizationVal); normalize(_h_trimmedJetMass_AK7_wj[i], normalizationVal); normalize(_h_prunedJetMass_AK7_wj[i], normalizationVal); normalize(_h_prunedJetMass_CA8_wj[i], normalizationVal); if (i > 0) normalize( _h_filteredJetMass_CA12_wj[i], normalizationVal); } } //@} private: /// @name FastJet grooming tools (configured in constructor init list) //@{ const fastjet::Filter _filter; const fastjet::Filter _trimmer; const fastjet::Pruner _pruner; //@} /// @name Histograms //@{ enum BINS_vj { PT_125_150_vj=0, PT_150_220_vj, PT_220_300_vj, PT_300_450_vj, N_PT_BINS_vj }; Histo1DPtr _h_ungroomedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_trimmedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_CA8_wj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_CA12_wj[N_PT_BINS_vj]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1224539_WJET); } diff --git a/analyses/pluginCMS/CMS_2013_I1258128.cc b/analyses/pluginCMS/CMS_2013_I1258128.cc --- a/analyses/pluginCMS/CMS_2013_I1258128.cc +++ b/analyses/pluginCMS/CMS_2013_I1258128.cc @@ -1,165 +1,165 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { /// CMS Z rapidity measurement class CMS_2013_I1258128 : public Analysis { public: // Constructor 
DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1258128); void init() { // Full final state const FinalState fs(Cuts::abseta < 5); declare(fs, "FS"); // Z finders for electrons and muons Cut cuts = Cuts::abseta < 2.1 && Cuts::pT > 20*GeV; const ZFinder zfe(fs, cuts, PID::ELECTRON, 76*GeV, 106*GeV); const ZFinder zfm(fs, cuts, PID::MUON, 76*GeV, 106*GeV); declare(zfe, "ZFE"); declare(zfm, "ZFM"); // Try to get the leading photon LeadingParticlesFinalState photonfs(FinalState(-2.5, 2.5, 40.0*GeV)); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // Jets const FastJets jets(fs, FastJets::ANTIKT, 0.5); declare(jets, "JETS"); // Histograms book(_hist1YZ ,1, 1, 1); book(_hist1YJet ,2, 1, 1); book(_hist1YSum ,3, 1, 1); book( _hist1YDif ,4, 1, 1); book( _hist2YPhoton ,5, 1, 1); book( _hist2YJet ,6, 1, 1); book( _hist2YSum ,7, 1, 1); book( _hist2YDif ,8, 1, 1); } void makeZCut(const Event& event) { // Apply the Z finders and veto if no Z found const ZFinder& zfe = apply(event, "ZFE"); const ZFinder& zfm = apply(event, "ZFM"); if (zfe.empty() && zfm.empty()) vetoEvent; // Choose the Z candidate - const ParticleVector& z = (!zfm.empty()) ? zfm.bosons() : zfe.bosons(); - const ParticleVector& clusteredConstituents = (!zfm.empty()) ? zfm.constituents() : zfe.constituents(); + const Particles& z = (!zfm.empty()) ? zfm.bosons() : zfe.bosons(); + const Particles& clusteredConstituents = (!zfm.empty()) ? zfm.constituents() : zfe.constituents(); // Insist that the Z is in a high-pT (boosted) regime if (z[0].pT() < 40*GeV) return; // Build the jets const FastJets& jetfs = apply(event, "JETS"); Jets jets = jetfs.jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 2.4); if (jets.empty()) return; // Clean the jets against the lepton candidates with a DeltaR cut of 0.5 vector cleanedJets; for (const Jet& j : jets) { bool isolated = true; for (const Particle& p : clusteredConstituents) { if (deltaR(p, j) < 0.5) { isolated = false; break; } } if (isolated) cleanedJets.push_back(&j); } // Require exactly 1 isolated jet if (cleanedJets.size() != 1) return; // Fill histos const double weight = 1.0; const double yz = z[0].rapidity(); const double yjet = cleanedJets[0]->momentum().rapidity(); _hist1YZ->fill(fabs(yz), weight); _hist1YJet->fill(fabs(yjet), weight); _hist1YSum->fill(0.5*fabs(yz + yjet), weight); _hist1YDif->fill(0.5*fabs(yz - yjet), weight); } void makePhotonCut(const Event& event) { // Get the photon const FinalState& photonfs = apply(event, "LeadingPhoton"); if (photonfs.particles().size() < 1) return; const Particle& photon = photonfs.particles().front(); if (photon.pT() < 40*GeV) return; if (fabs(photon.eta()) > 1.4442 ) return; // Build the jets const FastJets& jetfs = apply(event, "JETS"); Jets jets = jetfs.jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 2.4); if (jets.empty()) return; // Clean the jets against the photon candidate with a DeltaR cut of 0.5 vector cleanedJets; for (const Jet& j : jets) if (deltaR(photon, j) > 0.5) cleanedJets.push_back(&j); // Require exactly 1 jet if (cleanedJets.size() != 1) return; // Fill histos const double weight = 1.0; const double ypho = photon.rapidity(); const double yjet = cleanedJets[0]->momentum().rapidity(); _hist2YPhoton->fill(fabs(ypho), weight); _hist2YJet->fill(fabs(yjet), weight); _hist2YSum->fill(0.5*fabs(ypho + yjet), weight); _hist2YDif->fill(0.5*fabs(ypho - yjet), weight); } void analyze(const Event& event) { makeZCut(event); makePhotonCut(event); } void finalize() { normalizeByContents(_hist1YZ); normalizeByContents(_hist1YJet); 
normalizeByContents(_hist1YSum); normalizeByContents(_hist1YDif); normalizeByContents(_hist2YPhoton); normalizeByContents(_hist2YJet); normalizeByContents(_hist2YSum); normalizeByContents(_hist2YDif); } // The CMS normalization in this analysis is that the sum over bin contents // is equal to 1. This function normalizes to area = area*bin_width. / // @note This is a strange definition... why? void normalizeByContents(Histo1DPtr h) { normalize(h, h->bin(0).xWidth()); } private: Histo1DPtr _hist1YZ, _hist1YJet, _hist1YSum, _hist1YDif; Histo1DPtr _hist2YPhoton, _hist2YJet, _hist2YSum, _hist2YDif; }; // Plugin system hook DECLARE_RIVET_PLUGIN(CMS_2013_I1258128); } diff --git a/analyses/pluginCMS/CMS_2015_I1356998.cc b/analyses/pluginCMS/CMS_2015_I1356998.cc --- a/analyses/pluginCMS/CMS_2015_I1356998.cc +++ b/analyses/pluginCMS/CMS_2015_I1356998.cc @@ -1,143 +1,143 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { class CMS_2015_I1356998 : public Analysis { public: CMS_2015_I1356998() : Analysis("CMS_2015_I1356998"), edge(4.7) { } void init() { declare(FinalState(),"FS"); book(_h_noCASTORtag ,1, 1, 1); book(_h_CASTORtag ,2, 1, 1); book(_h_centralGap ,3, 1, 1); book(_h_sigmaVis ,4, 1, 1); book(_h_maxFwdGap ,5, 1, 1); } void analyze(const Event& event) { const double weight = 1.0; const FinalState& fs = apply(event, "FS"); // A vector containing a lot of eta values vector detparticles; detparticles.push_back(-edge); for (const Particle& p : fs.particles(Cuts::pT > 0.2*GeV && Cuts::abseta::iterator iter; vector detgaps; for (iter = detparticles.begin()+1; iter != detparticles.end(); ++iter) { const double detgap = *iter - *(iter-1); detgaps.push_back(detgap); } double detgapbwd = detgaps.front(); double detgapfwd = detgaps.back(); double detfmax = max(detgapbwd, detgapfwd); // Fill rapidity gap histo if (detfmax != 2*edge ) { _h_maxFwdGap->fill(detfmax, weight); } // Everything that follows has to do with the cross-section measurements if (fs.size() < 2) vetoEvent; // Gap center calculations - const ParticleVector particlesByRapidity = fs.particles(cmpMomByRap); //ByRapidity(); + const Particles particlesByRapidity = fs.particles(cmpMomByRap); //ByRapidity(); vector gaps; vector midpoints; for (size_t ip = 1; ip < particlesByRapidity.size(); ++ip) { const Particle& p1 = particlesByRapidity[ip-1]; const Particle& p2 = particlesByRapidity[ip]; const double gap = p2.momentum().rapidity() - p1.momentum().rapidity(); const double mid = (p2.momentum().rapidity() + p1.momentum().rapidity()) / 2.; gaps.push_back(gap); midpoints.push_back(mid); } int imid = std::distance(gaps.begin(), max_element(gaps.begin(), gaps.end())); double gapcenter = midpoints[imid]; // Calculations for cross-sections FourMomentum MxFourVector(0.,0.,0.,0.); FourMomentum MyFourVector(0.,0.,0.,0.); for(const Particle& p : fs.particles(cmpMomByEta)) { if (p.momentum().rapidity() > gapcenter) { MxFourVector += p.momentum(); } else { MyFourVector += p.momentum(); } } double Mx = MxFourVector.mass(); double My = MyFourVector.mass(); const double xix = (Mx*Mx)/(sqrtS()/GeV * sqrtS()/GeV); if (log10(My) < 0.5) { _h_noCASTORtag->fill(log10(xix), weight); if (log10(xix) > -5.5 && log10(xix) < -2.5) _h_sigmaVis->fill(0.5, weight); } else if (log10(My) < 1.1) { _h_CASTORtag->fill(log10(xix), weight); if (log10(xix) > -5.5 && log10(xix) < -2.5) _h_sigmaVis->fill(1.5, weight); } // Central gap x-section double xigen = (Mx*Mx) * (My*My) / (sqrtS()/GeV * sqrtS()/GeV * 0.93827 * 0.93827); 
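// i.e. xi_gen = Mx^2 * My^2 / (s * m_p^2), with m_p = 0.93827 GeV the proton mass, so the dy0 = -log(xi_gen) computed next approximates the size of the central rapidity gap between the two dissociated systems.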
// Proton masses... double dy0 = -log(xigen); if (dy0 > 3.) { if (log10(My) > 1.1 && log10(Mx) > 1.1) { _h_centralGap->fill(dy0, weight); _h_sigmaVis->fill(2.5, weight); } } } void finalize() { double xs = crossSection()/millibarn/sumOfWeights(); scale(_h_noCASTORtag, xs); scale(_h_CASTORtag , xs); scale(_h_centralGap , xs); scale(_h_sigmaVis , xs); scale(_h_maxFwdGap , xs); } private: Histo1DPtr _h_noCASTORtag; Histo1DPtr _h_CASTORtag; Histo1DPtr _h_centralGap; Histo1DPtr _h_sigmaVis; Histo1DPtr _h_maxFwdGap; double edge; }; DECLARE_RIVET_PLUGIN(CMS_2015_I1356998); } diff --git a/analyses/pluginCMS/CMS_2015_I1370682.cc b/analyses/pluginCMS/CMS_2015_I1370682.cc --- a/analyses/pluginCMS/CMS_2015_I1370682.cc +++ b/analyses/pluginCMS/CMS_2015_I1370682.cc @@ -1,604 +1,604 @@ #include "Rivet/Analysis.hh" #include "Rivet/Math/LorentzTrans.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { namespace { //< only visible in this compilation unit /// @brief Pseudo top finder /// /// Find top quark in the particle level. /// The definition is based on the agreement at the LHC working group. class PseudoTop : public FinalState { public: /// @name Standard constructors and destructors. //@{ /// The default constructor. May specify the minimum and maximum /// pseudorapidity \f$ \eta \f$ and the min \f$ p_T \f$ (in GeV). PseudoTop(double lepR = 0.1, double lepMinPt = 20, double lepMaxEta = 2.4, double jetR = 0.4, double jetMinPt = 30, double jetMaxEta = 4.7) : FinalState(-DBL_MAX, DBL_MAX, 0*GeV), _lepR(lepR), _lepMinPt(lepMinPt), _lepMaxEta(lepMaxEta), _jetR(jetR), _jetMinPt(jetMinPt), _jetMaxEta(jetMaxEta) { setName("PseudoTop"); } enum TTbarMode {CH_NONE=-1, CH_FULLHADRON = 0, CH_SEMILEPTON, CH_FULLLEPTON}; enum DecayMode {CH_HADRON = 0, CH_MUON, CH_ELECTRON}; TTbarMode mode() const { if (!_isValid) return CH_NONE; if (_mode1 == CH_HADRON && _mode2 == CH_HADRON) return CH_FULLHADRON; else if ( _mode1 != CH_HADRON && _mode2 != CH_HADRON) return CH_FULLLEPTON; else return CH_SEMILEPTON; } DecayMode mode1() const {return _mode1;} DecayMode mode2() const {return _mode2;} /// Clone on the heap. 
virtual unique_ptr clone() const { return unique_ptr(new PseudoTop(*this)); } //@} public: Particle t1() const {return _t1;} Particle t2() const {return _t2;} Particle b1() const {return _b1;} Particle b2() const {return _b2;} - ParticleVector wDecays1() const {return _wDecays1;} - ParticleVector wDecays2() const {return _wDecays2;} + Particles wDecays1() const {return _wDecays1;} + Particles wDecays2() const {return _wDecays2;} Jets jets() const {return _jets;} Jets bjets() const {return _bjets;} Jets ljets() const {return _ljets;} protected: // Apply the projection to the event void project(const Event& e); // override; ///< @todo Re-enable when C++11 allowed void cleanup(std::map >& v, const bool doCrossCleanup=false) const; private: const double _lepR, _lepMinPt, _lepMaxEta; const double _jetR, _jetMinPt, _jetMaxEta; //constexpr ///< @todo Re-enable when C++11 allowed static double _tMass; // = 172.5*GeV; ///< @todo Re-enable when C++11 allowed //constexpr ///< @todo Re-enable when C++11 allowed static double _wMass; // = 80.4*GeV; ///< @todo Re-enable when C++11 allowed private: bool _isValid; DecayMode _mode1, _mode2; Particle _t1, _t2; Particle _b1, _b2; - ParticleVector _wDecays1, _wDecays2; + Particles _wDecays1, _wDecays2; Jets _jets, _bjets, _ljets; }; // More implementation below the analysis code } /// Pseudo-top analysis from CMS class CMS_2015_I1370682 : public Analysis { public: CMS_2015_I1370682() : Analysis("CMS_2015_I1370682"), _applyCorrection(true), _doShapeOnly(true) { } void init() { declare(PseudoTop(0.1, 20, 2.4, 0.5, 30, 2.4), "ttbar"); // Lepton + Jet channel book(_hSL_topPt ,"d15-x01-y01"); // 1/sigma dsigma/dpt(top) book(_hSL_topPtTtbarSys ,"d16-x01-y01"); // 1/sigma dsigma/dpt*(top) book(_hSL_topY ,"d17-x01-y01"); // 1/sigma dsigma/dy(top) book(_hSL_ttbarDelPhi ,"d18-x01-y01"); // 1/sigma dsigma/ddeltaphi(t,tbar) book(_hSL_topPtLead ,"d19-x01-y01"); // 1/sigma dsigma/dpt(t1) book(_hSL_topPtSubLead ,"d20-x01-y01"); // 1/sigma dsigma/dpt(t2) book(_hSL_ttbarPt ,"d21-x01-y01"); // 1/sigma dsigma/dpt(ttbar) book(_hSL_ttbarY ,"d22-x01-y01"); // 1/sigma dsigma/dy(ttbar) book(_hSL_ttbarMass ,"d23-x01-y01"); // 1/sigma dsigma/dm(ttbar) // Dilepton channel book(_hDL_topPt ,"d24-x01-y01"); // 1/sigma dsigma/dpt(top) book(_hDL_topPtTtbarSys ,"d25-x01-y01"); // 1/sigma dsigma/dpt*(top) book(_hDL_topY ,"d26-x01-y01"); // 1/sigma dsigma/dy(top) book(_hDL_ttbarDelPhi ,"d27-x01-y01"); // 1/sigma dsigma/ddeltaphi(t,tbar) book(_hDL_topPtLead ,"d28-x01-y01"); // 1/sigma dsigma/dpt(t1) book(_hDL_topPtSubLead ,"d29-x01-y01"); // 1/sigma dsigma/dpt(t2) book(_hDL_ttbarPt ,"d30-x01-y01"); // 1/sigma dsigma/dpt(ttbar) book(_hDL_ttbarY ,"d31-x01-y01"); // 1/sigma dsigma/dy(ttbar) book(_hDL_ttbarMass ,"d32-x01-y01"); // 1/sigma dsigma/dm(ttbar) } void analyze(const Event& event) { // Get the ttbar candidate const PseudoTop& ttbar = apply(event, "ttbar"); if ( ttbar.mode() == PseudoTop::CH_NONE ) vetoEvent; const FourMomentum& t1P4 = ttbar.t1().momentum(); const FourMomentum& t2P4 = ttbar.t2().momentum(); const double pt1 = std::max(t1P4.pT(), t2P4.pT()); const double pt2 = std::min(t1P4.pT(), t2P4.pT()); const double dPhi = deltaPhi(t1P4, t2P4); const FourMomentum ttP4 = t1P4 + t2P4; const FourMomentum t1P4AtCM = LorentzTransform::mkFrameTransformFromBeta(ttP4.betaVec()).transform(t1P4); const double weight = 1.0; if ( ttbar.mode() == PseudoTop::CH_SEMILEPTON ) { const Particle lCand1 = ttbar.wDecays1()[0]; // w1 dau0 is the lepton in the PseudoTop if (lCand1.pT() < 33*GeV || 
lCand1.abseta() > 2.1) vetoEvent; _hSL_topPt->fill(t1P4.pT(), weight); _hSL_topPt->fill(t2P4.pT(), weight); _hSL_topPtTtbarSys->fill(t1P4AtCM.pT(), weight); _hSL_topY->fill(t1P4.rapidity(), weight); _hSL_topY->fill(t2P4.rapidity(), weight); _hSL_ttbarDelPhi->fill(dPhi, weight); _hSL_topPtLead->fill(pt1, weight); _hSL_topPtSubLead->fill(pt2, weight); _hSL_ttbarPt->fill(ttP4.pT(), weight); _hSL_ttbarY->fill(ttP4.rapidity(), weight); _hSL_ttbarMass->fill(ttP4.mass(), weight); } else if ( ttbar.mode() == PseudoTop::CH_FULLLEPTON ) { const Particle lCand1 = ttbar.wDecays1()[0]; // dau0 are the lepton in the PseudoTop const Particle lCand2 = ttbar.wDecays2()[0]; // dau0 are the lepton in the PseudoTop if (lCand1.pT() < 20*GeV || lCand1.abseta() > 2.4) vetoEvent; if (lCand2.pT() < 20*GeV || lCand2.abseta() > 2.4) vetoEvent; _hDL_topPt->fill(t1P4.pT(), weight); _hDL_topPt->fill(t2P4.pT(), weight); _hDL_topPtTtbarSys->fill(t1P4AtCM.pT(), weight); _hDL_topY->fill(t1P4.rapidity(), weight); _hDL_topY->fill(t2P4.rapidity(), weight); _hDL_ttbarDelPhi->fill(dPhi, weight); _hDL_topPtLead->fill(pt1, weight); _hDL_topPtSubLead->fill(pt2, weight); _hDL_ttbarPt->fill(ttP4.pT(), weight); _hDL_ttbarY->fill(ttP4.rapidity(), weight); _hDL_ttbarMass->fill(ttP4.mass(), weight); } } void finalize() { if ( _applyCorrection ) { // Correction functions for TOP-12-028 paper, (parton bin height)/(pseudotop bin height) const double ch15[] = { 5.473609, 4.941048, 4.173346, 3.391191, 2.785644, 2.371346, 2.194161, 2.197167, }; const double ch16[] = { 5.470905, 4.948201, 4.081982, 3.225532, 2.617519, 2.239217, 2.127878, 2.185918, }; const double ch17[] = { 10.003667, 4.546519, 3.828115, 3.601018, 3.522194, 3.524694, 3.600951, 3.808553, 4.531891, 9.995370, }; const double ch18[] = { 4.406683, 4.054041, 3.885393, 4.213646, }; const double ch19[] = { 6.182537, 5.257703, 4.422280, 3.568402, 2.889408, 2.415878, 2.189974, 2.173210, }; const double ch20[] = { 5.199874, 4.693318, 3.902882, 3.143785, 2.607877, 2.280189, 2.204124, 2.260829, }; const double ch21[] = { 6.053523, 3.777506, 3.562251, 3.601356, 3.569347, 3.410472, }; const double ch22[] = { 11.932351, 4.803773, 3.782709, 3.390775, 3.226806, 3.218982, 3.382678, 3.773653, 4.788191, 11.905338, }; const double ch23[] = { 7.145255, 5.637595, 4.049882, 3.025917, 2.326430, 1.773824, 1.235329, }; const double ch24[] = { 2.268193, 2.372063, 2.323975, 2.034655, 1.736793, }; const double ch25[] = { 2.231852, 2.383086, 2.341894, 2.031318, 1.729672, 1.486993, }; const double ch26[] = { 3.993526, 2.308249, 2.075136, 2.038297, 2.036302, 2.078270, 2.295817, 4.017713, }; const double ch27[] = { 2.205978, 2.175010, 2.215376, 2.473144, }; const double ch28[] = { 2.321077, 2.371895, 2.338871, 2.057821, 1.755382, }; const double ch29[] = { 2.222707, 2.372591, 2.301688, 1.991162, 1.695343, }; const double ch30[] = { 2.599677, 2.026855, 2.138620, 2.229553, }; const double ch31[] = { 5.791779, 2.636219, 2.103642, 1.967198, 1.962168, 2.096514, 2.641189, 5.780828, }; const double ch32[] = { 2.006685, 2.545525, 2.477745, 2.335747, 2.194226, 2.076500, }; applyCorrection(_hSL_topPt, ch15); applyCorrection(_hSL_topPtTtbarSys, ch16); applyCorrection(_hSL_topY, ch17); applyCorrection(_hSL_ttbarDelPhi, ch18); applyCorrection(_hSL_topPtLead, ch19); applyCorrection(_hSL_topPtSubLead, ch20); applyCorrection(_hSL_ttbarPt, ch21); applyCorrection(_hSL_ttbarY, ch22); applyCorrection(_hSL_ttbarMass, ch23); applyCorrection(_hDL_topPt, ch24); applyCorrection(_hDL_topPtTtbarSys, ch25); applyCorrection(_hDL_topY, 
ch26); applyCorrection(_hDL_ttbarDelPhi, ch27); applyCorrection(_hDL_topPtLead, ch28); applyCorrection(_hDL_topPtSubLead, ch29); applyCorrection(_hDL_ttbarPt, ch30); applyCorrection(_hDL_ttbarY, ch31); applyCorrection(_hDL_ttbarMass, ch32); } if ( _doShapeOnly ) { normalize(_hSL_topPt ); normalize(_hSL_topPtTtbarSys); normalize(_hSL_topY ); normalize(_hSL_ttbarDelPhi ); normalize(_hSL_topPtLead ); normalize(_hSL_topPtSubLead ); normalize(_hSL_ttbarPt ); normalize(_hSL_ttbarY ); normalize(_hSL_ttbarMass ); normalize(_hDL_topPt ); normalize(_hDL_topPtTtbarSys); normalize(_hDL_topY ); normalize(_hDL_ttbarDelPhi ); normalize(_hDL_topPtLead ); normalize(_hDL_topPtSubLead ); normalize(_hDL_ttbarPt ); normalize(_hDL_ttbarY ); normalize(_hDL_ttbarMass ); } else { const double s = 1./sumOfWeights(); scale(_hSL_topPt , s); scale(_hSL_topPtTtbarSys, s); scale(_hSL_topY , s); scale(_hSL_ttbarDelPhi , s); scale(_hSL_topPtLead , s); scale(_hSL_topPtSubLead , s); scale(_hSL_ttbarPt , s); scale(_hSL_ttbarY , s); scale(_hSL_ttbarMass , s); scale(_hDL_topPt , s); scale(_hDL_topPtTtbarSys, s); scale(_hDL_topY , s); scale(_hDL_ttbarDelPhi , s); scale(_hDL_topPtLead , s); scale(_hDL_topPtSubLead , s); scale(_hDL_ttbarPt , s); scale(_hDL_ttbarY , s); scale(_hDL_ttbarMass , s); } } void applyCorrection(Histo1DPtr h, const double* cf) { vector& bins = h->bins(); for (size_t i=0, n=bins.size(); i >& v, const bool doCrossCleanup) const { vector >::iterator> toErase; set usedLeg1, usedLeg2; if ( !doCrossCleanup ) { /// @todo Reinstate when C++11 allowed: for (auto key = v.begin(); key != v.end(); ++key) { for (map >::iterator key = v.begin(); key != v.end(); ++key) { const size_t leg1 = key->second.first; const size_t leg2 = key->second.second; if (usedLeg1.find(leg1) == usedLeg1.end() and usedLeg2.find(leg2) == usedLeg2.end()) { usedLeg1.insert(leg1); usedLeg2.insert(leg2); } else { toErase.push_back(key); } } } else { /// @todo Reinstate when C++11 allowed: for (auto key = v.begin(); key != v.end(); ++key) { for (map >::iterator key = v.begin(); key != v.end(); ++key) { const size_t leg1 = key->second.first; const size_t leg2 = key->second.second; if (usedLeg1.find(leg1) == usedLeg1.end() and usedLeg1.find(leg2) == usedLeg1.end()) { usedLeg1.insert(leg1); usedLeg1.insert(leg2); } else { toErase.push_back(key); } } } /// @todo Reinstate when C++11 allowed: for (auto& key : toErase) v.erase(key); for (size_t i = 0; i < toErase.size(); ++i) v.erase(toErase[i]); } void PseudoTop::project(const Event& e) { // Leptons : do the lepton clustering anti-kt R=0.1 using stable photons and leptons not from hadron decay // Neutrinos : neutrinos not from hadron decay // MET : vector sum of all invisible particles in x-y plane // Jets : anti-kt R=0.4 using all particles excluding neutrinos and particles used in lepton clustering // add ghost B hadrons during the jet clustering to identify B jets. 
// W->lv : dressed lepton and neutrino pairs // W->jj : light flavored dijet // W candidate : select lv or jj pairs which minimise |mW1-80.4|+|mW2-80.4| // lepton-neutrino pair will be selected with higher priority // t->Wb : W candidate + b jet // t candidate : select Wb pairs which minimise |mtop1-172.5|+|mtop2-172.5| _isValid = false; _theParticles.clear(); _wDecays1.clear(); _wDecays2.clear(); _jets.clear(); _bjets.clear(); _ljets.clear(); _mode1 = _mode2 = CH_HADRON; // Collect final state particles Particles pForLep, pForJet; Particles neutrinos; // Prompt neutrinos /// @todo Avoid this unsafe jump into HepMC -- all this can be done properly via VisibleFS and HeavyHadrons projections for (const GenParticle* p : Rivet::particles(e.genEvent())) { const int status = p->status(); const int pid = p->pdg_id(); if (status == 1) { Particle rp = *p; if (!PID::isHadron(pid) && !rp.fromHadron()) { // Collect particles not from hadron decay if (rp.isNeutrino()) { // Prompt neutrinos are kept in separate collection neutrinos.push_back(rp); } else if (pid == 22 || rp.isLepton()) { // Leptons and photons for the dressing pForLep.push_back(rp); } } else if (!rp.isNeutrino()) { // Use all particles from hadron decay pForJet.push_back(rp); } } else if (PID::isHadron(pid) && PID::hasBottom(pid)) { // NOTE: Consider B hadrons with pT > 5GeV - not in CMS proposal //if ( p->momentum().perp() < 5 ) continue; // Do unstable particles, to be used in the ghost B clustering // Use last B hadrons only bool isLast = true; for (const GenParticlePtr pp : Rivet::particles(p->end_vertex(), HepMC::children)) { if (PID::hasBottom(pp->pdg_id())) { isLast = false; break; } } if (!isLast) continue; // Rescale momentum by 10^-20 Particle ghost(pid, FourMomentum(p->momentum())*1e-20/p->momentum().rho()); pForJet.push_back(ghost); } } // Start object building from trivial thing - prompt neutrinos sortByPt(neutrinos); // Proceed to lepton dressing FastJets fjLep(FinalState(), FastJets::ANTIKT, _lepR); fjLep.calc(pForLep); Jets leptons; vector leptonsId; set dressedIdxs; for (const Jet& lep : fjLep.jetsByPt(_lepMinPt)) { if (lep.abseta() > _lepMaxEta) continue; double leadingPt = -1; int leptonId = 0; for (const Particle& p : lep.particles()) { /// @warning Barcodes aren't future-proof in HepMC dressedIdxs.insert(p.genParticle()->barcode()); if (p.isLepton() && p.pT() > leadingPt) { leadingPt = p.pT(); leptonId = p.pid(); } } if (leptonId == 0) continue; leptons.push_back(lep); leptonsId.push_back(leptonId); } // Re-use particles not used in lepton dressing for (const Particle& rp : pForLep) { /// @warning Barcodes aren't future-proof in HepMC const int barcode = rp.genParticle()->barcode(); // Skip if the particle is used in dressing if (dressedIdxs.find(barcode) != dressedIdxs.end()) continue; // Put back to be used in jet clustering pForJet.push_back(rp); } // Then do the jet clustering FastJets fjJet(FinalState(), FastJets::ANTIKT, _jetR); //fjJet.useInvisibles(); // NOTE: CMS proposal to remove neutrinos (AB: wouldn't work anyway, since they were excluded from clustering inputs) fjJet.calc(pForJet); for (const Jet& jet : fjJet.jetsByPt(_jetMinPt)) { if (jet.abseta() > _jetMaxEta) continue; _jets.push_back(jet); bool isBJet = false; for (const Particle& rp : jet.particles()) { if (PID::hasBottom(rp.pid())) { isBJet = true; break; } } if ( isBJet ) _bjets.push_back(jet); else _ljets.push_back(jet); } // Every building blocks are ready. 
Continue to pseudo-W and pseudo-top combination if (_bjets.size() < 2) return; // Ignore single top for now map > wLepCandIdxs; map > wHadCandIdxs; // Collect leptonic-decaying W's for (size_t iLep = 0, nLep = leptons.size(); iLep < nLep; ++iLep) { const Jet& lep = leptons.at(iLep); for (size_t iNu = 0, nNu = neutrinos.size(); iNu < nNu; ++iNu) { const Particle& nu = neutrinos.at(iNu); const double m = (lep.momentum()+nu.momentum()).mass(); const double dm = std::abs(m-_wMass); wLepCandIdxs[dm] = make_pair(iLep, iNu); } } // Continue to hadronic decaying W's for (size_t i = 0, nLjet = _ljets.size(); i < nLjet; ++i) { const Jet& ljet1 = _ljets[i]; for (size_t j = i+1; j < nLjet; ++j) { const Jet& ljet2 = _ljets[j]; const double m = (ljet1.momentum()+ljet2.momentum()).mass(); const double dm = std::abs(m-_wMass); wHadCandIdxs[dm] = make_pair(i, j); } } // Cleanup W candidate, choose pairs with minimum dm if they share decay products cleanup(wLepCandIdxs); cleanup(wHadCandIdxs, true); const size_t nWLepCand = wLepCandIdxs.size(); const size_t nWHadCand = wHadCandIdxs.size(); if (nWLepCand + nWHadCand < 2) return; // We skip single top int w1Q = 1, w2Q = -1; int w1dau1Id = 1, w2dau1Id = -1; FourMomentum w1dau1LVec, w1dau2LVec; FourMomentum w2dau1LVec, w2dau2LVec; if (nWLepCand == 0) { // Full hadronic case const pair& idPair1 = wHadCandIdxs.begin()->second; const pair& idPair2 = (++wHadCandIdxs.begin())->second; ///< @todo Reinstate std::next const Jet& w1dau1 = _ljets[idPair1.first]; const Jet& w1dau2 = _ljets[idPair1.second]; const Jet& w2dau1 = _ljets[idPair2.first]; const Jet& w2dau2 = _ljets[idPair2.second]; w1dau1LVec = w1dau1.momentum(); w1dau2LVec = w1dau2.momentum(); w2dau1LVec = w2dau1.momentum(); w2dau2LVec = w2dau2.momentum(); } else if (nWLepCand == 1) { // Semi-leptonic case const pair& idPair1 = wLepCandIdxs.begin()->second; const pair& idPair2 = wHadCandIdxs.begin()->second; const Jet& w1dau1 = leptons[idPair1.first]; const Particle& w1dau2 = neutrinos[idPair1.second]; const Jet& w2dau1 = _ljets[idPair2.first]; const Jet& w2dau2 = _ljets[idPair2.second]; w1dau1LVec = w1dau1.momentum(); w1dau2LVec = w1dau2.momentum(); w2dau1LVec = w2dau1.momentum(); w2dau2LVec = w2dau2.momentum(); w1dau1Id = leptonsId[idPair1.first]; w1Q = w1dau1Id > 0 ? -1 : 1; w2Q = -w1Q; switch (w1dau1Id) { case 13: case -13: _mode1 = CH_MUON; break; case 11: case -11: _mode1 = CH_ELECTRON; break; } } else { // Full leptonic case const pair& idPair1 = wLepCandIdxs.begin()->second; const pair& idPair2 = (++wLepCandIdxs.begin())->second; ///< @todo Reinstate std::next const Jet& w1dau1 = leptons[idPair1.first]; const Particle& w1dau2 = neutrinos[idPair1.second]; const Jet& w2dau1 = leptons[idPair2.first]; const Particle& w2dau2 = neutrinos[idPair2.second]; w1dau1LVec = w1dau1.momentum(); w1dau2LVec = w1dau2.momentum(); w2dau1LVec = w2dau1.momentum(); w2dau2LVec = w2dau2.momentum(); w1dau1Id = leptonsId[idPair1.first]; w2dau1Id = leptonsId[idPair2.first]; w1Q = w1dau1Id > 0 ? -1 : 1; w2Q = w2dau1Id > 0 ? 
-1 : 1; switch (w1dau1Id) { case 13: case -13: _mode1 = CH_MUON; break; case 11: case -11: _mode1 = CH_ELECTRON; break; } switch (w2dau1Id) { case 13: case -13: _mode2 = CH_MUON; break; case 11: case -11: _mode2 = CH_ELECTRON; break; } } const FourMomentum w1LVec = w1dau1LVec+w1dau2LVec; const FourMomentum w2LVec = w2dau1LVec+w2dau2LVec; // Combine b jets double sumDm = 1e9; FourMomentum b1LVec, b2LVec; for (size_t i = 0, n = _bjets.size(); i < n; ++i) { const Jet& bjet1 = _bjets[i]; const double mtop1 = (w1LVec+bjet1.momentum()).mass(); const double dmtop1 = std::abs(mtop1-_tMass); for (size_t j=0; j= 1e9) return; // Failed to make top, but this should not happen. const FourMomentum t1LVec = w1LVec + b1LVec; const FourMomentum t2LVec = w2LVec + b2LVec; // Put all of them into candidate collection _t1 = Particle(w1Q*6, t1LVec); _b1 = Particle(w1Q*5, b1LVec); _wDecays1.push_back(Particle(w1dau1Id, w1dau1LVec)); _wDecays1.push_back(Particle(-w1dau1Id+w1Q, w1dau2LVec)); _t2 = Particle(w2Q*6, t2LVec); _b2 = Particle(w2Q*5, b2LVec); _wDecays2.push_back(Particle(w2dau1Id, w2dau1LVec)); _wDecays2.push_back(Particle(-w2dau1Id+w2Q, w2dau2LVec)); _isValid = true; } } } diff --git a/analyses/pluginCMS/CMS_2016_I1491953.cc b/analyses/pluginCMS/CMS_2016_I1491953.cc --- a/analyses/pluginCMS/CMS_2016_I1491953.cc +++ b/analyses/pluginCMS/CMS_2016_I1491953.cc @@ -1,331 +1,331 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { /// @brief Differential cross sections for associated production of a W boson and jets at 8 TeV class CMS_2016_I1491953 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1491953); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections FinalState fs; WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, - 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); + 0*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); declare(wfinder_mu, "WFinder_mu"); // Define veto FS VetoedFinalState vfs; vfs.addVetoOnThisFinalState(wfinder_mu); vfs.addVetoPairId(PID::MUON); vfs.vetoNeutrinos(); FastJets fastjets(vfs, FastJets::ANTIKT, 0.5); declare(fastjets, "Jets"); book(_hist_Mult_exc ,1,1,1); book(_hist_inc_WJetMult ,2,1,1); book(_hist_addJetPt1j ,3,1,1); book(_hist_addJetPt2j ,4,1,1); book(_hist_addJetPt3j ,5,1,1); book(_hist_addJetPt4j ,6,1,1); book(_hist_addHt_1j ,7,1,1); book(_hist_addHt_2j ,8,1,1); book(_hist_addHt_3j ,9,1,1); book(_hist_addHt_4j ,10,1,1); book(_hist_diJetPt_2j ,11,1,1); book(_hist_diJetPt_3j ,12,1,1); book(_hist_diJetPt_4j ,13,1,1); book(_hist_dijetM_2j ,14,1,1); book(_hist_dijetM_3j ,15,1,1); book(_hist_dijetM_4j ,16,1,1); book(_hist_Jeteta1j ,17,1,1); book(_hist_Jeteta2j ,18,1,1); book(_hist_Jeteta3j ,19,1,1); book(_hist_Jeteta4j ,20,1,1); book(_hist_dyj1j2_2j ,21,1,1); book(_hist_dyj1j2_3j ,22,1,1); book(_hist_dyj1j2_4j ,23,1,1); book(_hist_dyj1j3_3j ,24,1,1); book(_hist_dyj2j3_3j ,25,1,1); book(_hist_dyjFjB_2j ,26,1,1); book(_hist_dyjFjB_3j ,27,1,1); book(_hist_dyjFjB_4j ,28,1,1); book(_hist_dphij1j2_2j ,29,1,1); book(_hist_dphijFjB_2j ,30,1,1); book(_hist_dRj1j2_2j ,31,1,1); book(_hist_dphij1mu_1j ,32,1,1); 
book(_hist_dphij2mu_2j ,33,1,1); book(_hist_dphij3mu_3j ,34,1,1); book(_hist_dphij4mu_4j ,35,1,1); book(_hist_MeanNJht_1j ,36,1,1); book(_hist_MeanNJht_2j ,37,1,1); book(_hist_MeanNJdyj1j2_2j ,38,1,1); book(_hist_MeanNJdyjFjB_2j ,39,1,1); } // Define function used for filling inc Njets histo void _fill(Histo1DPtr& _histJetMult, vector<FourMomentum>& finaljet_list) { _histJetMult->fill(0); for (size_t i = 0 ; i < finaljet_list.size() ; ++i) { if (i == 7) break; _histJetMult->fill(i+1); // inclusive multiplicity } } /// Perform the per-event analysis void analyze(const Event& event) { const WFinder& wfinder_mu = apply<WFinder>(event, "WFinder_mu"); if (wfinder_mu.bosons().size() != 1) vetoEvent; //const FourMomentum& lepton0 = wfinder_mu.constituentLeptons()[0].momentum(); //const FourMomentum& neutrino = wfinder_mu.constituentNeutrinos()[0].momentum(); //double WmT = sqrt( 2 * lepton0.pT() * neutrino.pT() * (1 - cos(deltaPhi(lepton0, neutrino))) ); const FourMomentum& lepton0 = wfinder_mu.constituentLepton().momentum(); double WmT = wfinder_mu.mT(); if (WmT < 50.0*GeV) vetoEvent; if (lepton0.abseta() > 2.1 || lepton0.pT() < 25.0*GeV) vetoEvent; // Select final jets, ordered by decreasing pT vector<FourMomentum> finaljet_list; double HT = 0.0; const Jets jListAll = apply<FastJets>(event, "Jets").jetsByPt(30.0*GeV); for (const Jet& j : jListAll) { if (j.abseta() < 2.4 && j.pT() > 30.0*GeV && deltaR(lepton0, j) > 0.5) { finaljet_list.push_back(j.momentum()); HT += j.pT(); } } // Another jet list, sorted by increasing rapidity vector<FourMomentum> jListRap = finaljet_list; std::sort(jListRap.begin(), jListRap.end(), cmpMomByRap); // Multiplicity exc plot. if (finaljet_list.size()<=7) { _hist_Mult_exc->fill(finaljet_list.size()); } else if (finaljet_list.size()>7){ _hist_Mult_exc->fill(7.); } // Multiplicity inc plot.
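// (_fill puts every selected event into bin 0 and then into bins 1..n, capped at 7, so bin k of the inclusive distribution counts events with at least k jets.)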
_fill(_hist_inc_WJetMult, finaljet_list); if (finaljet_list.size()>=1) { _hist_addJetPt1j->fill(finaljet_list[0].pT()); _hist_Jeteta1j->fill(fabs(finaljet_list[0].eta())); _hist_addHt_1j->fill(HT); _hist_dphij1mu_1j->fill( deltaPhi(finaljet_list[0].phi(), lepton0.phi()) ); _hist_MeanNJht_1j->fill( HT, finaljet_list.size()); } if (finaljet_list.size()>=2) { _hist_addJetPt2j->fill(finaljet_list[1].pT()); _hist_Jeteta2j->fill(fabs(finaljet_list[1].eta())); _hist_addHt_2j->fill(HT); _hist_dyj1j2_2j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyjFjB_2j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dphij1j2_2j ->fill( deltaPhi(finaljet_list[0].phi(), finaljet_list[1].phi())); _hist_dphijFjB_2j ->fill( deltaPhi(jListRap[0].phi(), jListRap[jListRap.size()-1].phi()) ); _hist_dijetM_2j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_2j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dRj1j2_2j ->fill( deltaR(finaljet_list[0].rapidity(), finaljet_list[0].phi(), finaljet_list[1].rapidity(), finaljet_list[1].phi())); _hist_dphij2mu_2j ->fill( deltaPhi(finaljet_list[1].phi(), lepton0.phi()) ); _hist_MeanNJht_2j->fill( HT, finaljet_list.size()); _hist_MeanNJdyj1j2_2j->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity()), finaljet_list.size()); _hist_MeanNJdyjFjB_2j->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity()), finaljet_list.size()); } if (finaljet_list.size()>=3) { _hist_addJetPt3j->fill(finaljet_list[2].pT()); _hist_Jeteta3j->fill(fabs(finaljet_list[2].eta())); _hist_addHt_3j->fill(HT); _hist_dyj1j2_3j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyj1j3_3j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[2].rapidity())); _hist_dyj2j3_3j ->fill( fabs(finaljet_list[1].rapidity() - finaljet_list[2].rapidity())); _hist_dyjFjB_3j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dijetM_3j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_3j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dphij3mu_3j->fill( deltaPhi(finaljet_list[2].phi(), lepton0.phi()) ); } if (finaljet_list.size()>=4) { _hist_addJetPt4j->fill(finaljet_list[3].pT()); _hist_Jeteta4j->fill(fabs(finaljet_list[3].eta())); _hist_addHt_4j->fill(HT); _hist_dyj1j2_4j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyjFjB_4j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dijetM_4j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_4j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dphij4mu_4j->fill( deltaPhi(finaljet_list[3].phi(), lepton0.phi()) ); } } //void loop /// Normalise histograms etc., after the run void finalize() { const double crossec = !std::isnan(crossSectionPerEvent()) ? 
crossSection() : 36703*picobarn; if (std::isnan(crossSectionPerEvent())){ MSG_INFO("No valid cross-section given, using NNLO xsec calculated by FEWZ " << crossec/picobarn << " pb"); } scale(_hist_Mult_exc, crossec/picobarn/sumOfWeights()); scale(_hist_inc_WJetMult, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt1j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt2j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt3j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt4j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta1j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta2j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta3j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta4j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_1j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_2j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_3j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_4j, crossec/picobarn/sumOfWeights()); //------------------------------------- scale(_hist_dyj1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j2_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j2_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j3_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj2j3_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphijFjB_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dRj1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_4j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_2j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_3j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1mu_1j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij2mu_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij3mu_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij4mu_4j, crossec/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_inc_WJetMult; Histo1DPtr _hist_Mult_exc; Histo1DPtr _hist_addJetPt1j; Histo1DPtr _hist_addJetPt2j; Histo1DPtr _hist_addJetPt3j; Histo1DPtr _hist_addJetPt4j; Histo1DPtr _hist_Jeteta1j; Histo1DPtr _hist_Jeteta2j; Histo1DPtr _hist_Jeteta3j; Histo1DPtr _hist_Jeteta4j; Histo1DPtr _hist_addHt_1j; Histo1DPtr _hist_addHt_2j; Histo1DPtr _hist_addHt_3j; Histo1DPtr _hist_addHt_4j; //------------------------------------- Histo1DPtr _hist_dyj1j2_2j; Histo1DPtr _hist_dyj1j2_3j; Histo1DPtr _hist_dyj1j2_4j; Histo1DPtr _hist_dyjFjB_2j; Histo1DPtr _hist_dyjFjB_3j; Histo1DPtr _hist_dyjFjB_4j; Histo1DPtr _hist_dyj1j3_3j; Histo1DPtr _hist_dyj2j3_3j; Histo1DPtr _hist_dphij1j2_2j; Histo1DPtr _hist_dphijFjB_2j; Histo1DPtr _hist_dRj1j2_2j; Histo1DPtr _hist_dijetM_2j; Histo1DPtr _hist_dijetM_3j; Histo1DPtr _hist_dijetM_4j; Histo1DPtr _hist_diJetPt_2j; Histo1DPtr _hist_diJetPt_3j; Histo1DPtr _hist_diJetPt_4j; Histo1DPtr _hist_dphij1mu_1j; Histo1DPtr _hist_dphij2mu_2j; Histo1DPtr _hist_dphij3mu_3j; Histo1DPtr _hist_dphij4mu_4j; //------------------------------------- Profile1DPtr _hist_MeanNJht_1j; Profile1DPtr _hist_MeanNJht_2j; Profile1DPtr _hist_MeanNJdyj1j2_2j; Profile1DPtr _hist_MeanNJdyjFjB_2j; //@} }; // The hook for the plugin system 
DECLARE_RIVET_PLUGIN(CMS_2016_I1491953); } diff --git a/analyses/pluginCMS/CMS_2017_I1610623.cc b/analyses/pluginCMS/CMS_2017_I1610623.cc --- a/analyses/pluginCMS/CMS_2017_I1610623.cc +++ b/analyses/pluginCMS/CMS_2017_I1610623.cc @@ -1,261 +1,261 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/AnalysisLoader.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Tools/RivetYODA.hh" #include namespace Rivet { /// @brief Differential cross sections for a W boson produced in association with jets at 13 TeV class CMS_2017_I1610623 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1610623); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections FinalState fs; - WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); - //WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); + WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); + //WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder_mu, "WFinder_mu"); // Define veto FS VetoedFinalState vfs; vfs.addVetoOnThisFinalState(wfinder_mu); vfs.addVetoPairId(PID::MUON); vfs.vetoNeutrinos(); FastJets fastjets(vfs, FastJets::ANTIKT, 0.4); declare(fastjets, "Jets"); //------------- book(_hist_Mult_exc ,"d01-x01-y01"); book(_hist_inc_WJetMult ,"d02-x01-y01"); //------------- book(_hist_JetPt1j ,"d03-x01-y01"); book(_hist_JetPt2j ,"d04-x01-y01"); book(_hist_JetPt3j ,"d05-x01-y01"); book(_hist_JetPt4j ,"d06-x01-y01"); //------------- book(_hist_JetRap1j ,"d07-x01-y01"); book(_hist_JetRap2j ,"d08-x01-y01"); book(_hist_JetRap3j ,"d09-x01-y01"); book(_hist_JetRap4j ,"d10-x01-y01"); //------------- book(_hist_Ht_1j ,"d11-x01-y01"); book(_hist_Ht_2j ,"d12-x01-y01"); book(_hist_Ht_3j ,"d13-x01-y01"); book(_hist_Ht_4j ,"d14-x01-y01"); //------------- book(_hist_dphij1mu_1j , "d15-x01-y01"); book(_hist_dphij2mu_2j , "d16-x01-y01"); book(_hist_dphij3mu_3j , "d17-x01-y01"); book(_hist_dphij4mu_4j , "d18-x01-y01"); //------------- book(_hist_dRmuj_1j , "d19-x01-y01"); } // define function used for filling inc Njets histo void Fill(Histo1DPtr& _histJetMult, std::vector<FourMomentum>& finaljet_list){ _histJetMult->fill(0); for (size_t i=0 ; i<finaljet_list.size() ; ++i) { if (i==7) break; _histJetMult->fill(i+1); // inclusive multiplicity } } /// Perform the per-event analysis void analyze(const Event& event) { /// @todo Do the event by event analysis here const WFinder& wfinder_mu = applyProjection<WFinder>(event, "WFinder_mu"); if (wfinder_mu.bosons().size() != 1) { vetoEvent; } if (wfinder_mu.bosons().size() == 1) { const FourMomentum lepton0 = wfinder_mu.constituentLepton().momentum(); const FourMomentum neutrino = wfinder_mu.constituentNeutrino().momentum(); double WmT = wfinder_mu.mT(); if (WmT < 50.0*GeV) vetoEvent; double pt0 = lepton0.pT(); double eta0
= lepton0.eta(); if ( (fabs(eta0) > 2.4) || (pt0 < 25.0*GeV) ) vetoEvent; // Obtain the jets. vector finaljet_list; vector jet100_list; double HT = 0.0; // loop over jets in an event, pushback in finaljet_list collection for (const Jet& j : applyProjection(event, "Jets").jetsByPt(30.0*GeV)) { const double jrap = j.momentum().rap(); const double jpt = j.momentum().pT(); if ( (fabs(jrap) < 2.4) && (deltaR(lepton0, j.momentum()) > 0.4) ) { if(jpt > 30.0*GeV) { finaljet_list.push_back(j.momentum()); HT += j.momentum().pT(); } if(jpt > 100.0*GeV) { jet100_list.push_back(j.momentum()); } } } // end looping over jets //---------------------- FILL HISTOGRAMS ------------------ // Multiplicity exc plot. _hist_Mult_exc->fill(finaljet_list.size()); // Multiplicity inc plot. Fill(_hist_inc_WJetMult, finaljet_list); // dRmuj plot. double mindR(99999); if(jet100_list.size()>=1) { for (unsigned ji = 0; ji < jet100_list.size(); ji++){ double dr_(9999); dr_ = fabs(deltaR(lepton0, jet100_list[ji])); if (dr_ < mindR){ mindR = dr_; } } if(jet100_list[0].pT() > 300.0*GeV){ _hist_dRmuj_1j->fill(mindR); } } if(finaljet_list.size()>=1) { _hist_JetPt1j->fill(finaljet_list[0].pT()); _hist_JetRap1j->fill(fabs(finaljet_list[0].rap())); _hist_Ht_1j->fill(HT); _hist_dphij1mu_1j->fill(deltaPhi(finaljet_list[0].phi(), lepton0.phi())); } if(finaljet_list.size()>=2) { _hist_JetPt2j->fill(finaljet_list[1].pT()); _hist_JetRap2j->fill(fabs(finaljet_list[1].rap())); _hist_Ht_2j->fill(HT); _hist_dphij2mu_2j->fill(deltaPhi(finaljet_list[1].phi(), lepton0.phi())); } if(finaljet_list.size()>=3) { _hist_JetPt3j->fill(finaljet_list[2].pT()); _hist_JetRap3j->fill(fabs(finaljet_list[2].rap())); _hist_Ht_3j->fill(HT); _hist_dphij3mu_3j->fill(deltaPhi(finaljet_list[2].phi(), lepton0.phi())); } if(finaljet_list.size()>=4) { _hist_JetPt4j->fill(finaljet_list[3].pT()); _hist_JetRap4j->fill(fabs(finaljet_list[3].rap())); _hist_Ht_4j->fill(HT); _hist_dphij4mu_4j->fill(deltaPhi(finaljet_list[3].phi(), lepton0.phi())); } } // close the Wboson loop } //void loop /// Normalise histograms etc., after the run void finalize() { const double crossec = !std::isnan(crossSectionPerEvent()) ? 
crossSection() : 61526.7*picobarn; if (std::isnan(crossSectionPerEvent())){ MSG_INFO("No valid cross-section given, using NNLO xsec calculated by FEWZ " << crossec/picobarn << " pb"); } scale(_hist_Mult_exc, crossec/picobarn/sumOfWeights()); scale(_hist_inc_WJetMult, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt1j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt2j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt3j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt4j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap1j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap2j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap3j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap4j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_1j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_2j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_3j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1mu_1j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij2mu_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij3mu_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij4mu_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dRmuj_1j, crossec/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_Mult_exc; Histo1DPtr _hist_inc_WJetMult; Histo1DPtr _hist_JetPt1j; Histo1DPtr _hist_JetPt2j; Histo1DPtr _hist_JetPt3j; Histo1DPtr _hist_JetPt4j; Histo1DPtr _hist_JetRap1j; Histo1DPtr _hist_JetRap2j; Histo1DPtr _hist_JetRap3j; Histo1DPtr _hist_JetRap4j; Histo1DPtr _hist_Ht_1j; Histo1DPtr _hist_Ht_2j; Histo1DPtr _hist_Ht_3j; Histo1DPtr _hist_Ht_4j; Histo1DPtr _hist_dphij1mu_1j; Histo1DPtr _hist_dphij2mu_2j; Histo1DPtr _hist_dphij3mu_3j; Histo1DPtr _hist_dphij4mu_4j; Histo1DPtr _hist_dRmuj_1j; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1610623); } diff --git a/analyses/pluginLEP/OPAL_2004_I631361.cc b/analyses/pluginLEP/OPAL_2004_I631361.cc --- a/analyses/pluginLEP/OPAL_2004_I631361.cc +++ b/analyses/pluginLEP/OPAL_2004_I631361.cc @@ -1,117 +1,117 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class OPAL_2004_I631361 : public Analysis { public: /// Constructor OPAL_2004_I631361() : Analysis("OPAL_2004_I631361"), _sumW(0.0) { } /// @name Analysis methods //@{ void init() { declare(FinalState(), "FS"); declare(ChargedFinalState(), "CFS"); int ih(0), iy(0); if (inRange(0.5*sqrtS()/GeV, 5.0, 5.5)) { ih = 1; iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 5.5, 6.5)) { ih = 1; iy = 2; } else if (inRange(0.5*sqrtS()/GeV, 6.5, 7.5)) { ih = 1; iy = 3; } else if (inRange(0.5*sqrtS()/GeV, 7.5, 9.5)) { ih = 2; iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 9.5, 13.0)) { ih = 2; iy = 2; } else if (inRange(0.5*sqrtS()/GeV, 13.0, 16.0)) { ih = 3; iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 16.0, 20.0)) { ih = 3; iy = 2; } assert(ih>0); book(_h_chMult,ih,1,iy); if(ih==3) book(_h_chFragFunc,5,1,iy); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // find the initial gluons - ParticleVector initial; + Particles initial; for (const GenParticle* p : Rivet::particles(event.genEvent())) { const GenVertex* pv = p->production_vertex(); const PdgId pid = p->pdg_id(); if(pid!=21) continue; bool passed = false; for (const GenParticle* pp : particles_in(pv)) { const PdgId ppid = abs(pp->pdg_id()); passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS || 
ppid == PID::ZBOSON || ppid == PID::GAMMA); if(passed) break; } if(passed) initial.push_back(Particle(*p)); } if(initial.size()!=2) vetoEvent; // use the direction for the event axis Vector3 axis = initial[0].momentum().p3().unit(); // fill histograms const Particles& chps = applyProjection(event, "CFS").particles(); unsigned int nMult[2] = {0,0}; _sumW += 2.*weight; // distribution for(const Particle& p : chps) { double xE = 2.*p.E()/sqrtS(); if(_h_chFragFunc) _h_chFragFunc->fill(xE, weight); if(p.momentum().p3().dot(axis)>0.) ++nMult[0]; else ++nMult[1]; } // multiplcities in jet _h_chMult->fill(nMult[0],weight); _h_chMult->fill(nMult[1],weight); } /// Normalise histograms etc., after the run void finalize() { normalize(_h_chMult); if(_h_chFragFunc) scale(_h_chFragFunc, 1./_sumW); } //@} private: double _sumW; /// @name Histograms //@{ Histo1DPtr _h_chMult; Histo1DPtr _h_chFragFunc; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_I631361); } diff --git a/analyses/pluginLEP/OPAL_2004_I648738.cc b/analyses/pluginLEP/OPAL_2004_I648738.cc --- a/analyses/pluginLEP/OPAL_2004_I648738.cc +++ b/analyses/pluginLEP/OPAL_2004_I648738.cc @@ -1,118 +1,118 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class OPAL_2004_I648738 : public Analysis { public: /// Constructor OPAL_2004_I648738() : Analysis("OPAL_2004_I648738"), _sumW(3), _histo_xE(3) { } /// @name Analysis methods //@{ void init() { declare(FinalState(), "FS"); declare(ChargedFinalState(), "CFS"); unsigned int ih=0; if (inRange(0.5*sqrtS()/GeV, 4.0, 9.0)) { ih = 1; } else if (inRange(0.5*sqrtS()/GeV, 9.0, 19.0)) { ih = 2; } else if (inRange(0.5*sqrtS()/GeV, 19.0, 30.0)) { ih = 3; } else if (inRange(0.5*sqrtS()/GeV, 45.5, 45.7)) { ih = 5; } else if (inRange(0.5*sqrtS()/GeV, 30.0, 70.0)) { ih = 4; } else if (inRange(0.5*sqrtS()/GeV, 91.5, 104.5)) { ih = 6; } assert(ih>0); // book the histograms book(_histo_xE[0], ih+5,1,1); book(_histo_xE[1], ih+5,1,2); if(ih<5) book(_histo_xE[2] ,ih+5,1,3); book(_sumW[0], "sumW_0"); book(_sumW[1], "sumW_1"); book(_sumW[2], "sumW_2"); } /// Perform the per-event analysis void analyze(const Event& event) { // find the initial quarks/gluons - ParticleVector initial; + Particles initial; for (const GenParticle* p : Rivet::particles(event.genEvent())) { const GenVertex* pv = p->production_vertex(); const PdgId pid = abs(p->pdg_id()); if(!( (pid>=1&&pid<=5) || pid ==21) ) continue; bool passed = false; for (const GenParticle* pp : particles_in(pv)) { const PdgId ppid = abs(pp->pdg_id()); passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS || ppid == PID::ZBOSON || ppid == PID::GAMMA); if(passed) break; } if(passed) initial.push_back(Particle(*p)); } if(initial.size()!=2) { vetoEvent; } // type of event unsigned int itype=2; if(initial[0].pid()==-initial[1].pid()) { PdgId pid = abs(initial[0].pid()); if(pid>=1&&pid<=4) itype=0; else itype=1; } assert(itype<_histo_xE.size()); // fill histograms _sumW[itype]->fill(2.); const Particles& chps = applyProjection(event, "CFS").particles(); for(const Particle& p : chps) { double xE = 2.*p.E()/sqrtS(); _histo_xE[itype]->fill(xE); } } /// Normalise histograms etc., after the run void finalize() { for(unsigned int ix=0;ix<_histo_xE.size();++ix) { if(_sumW[ix]->val()>0.) 
scale(_histo_xE[ix],1./ *_sumW[ix]); } } //@} private: vector _sumW; /// @name Histograms //@{ vector _histo_xE; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_I648738); } diff --git a/analyses/pluginLHCb/LHCB_2014_I1262703.cc b/analyses/pluginLHCb/LHCB_2014_I1262703.cc --- a/analyses/pluginLHCb/LHCB_2014_I1262703.cc +++ b/analyses/pluginLHCb/LHCB_2014_I1262703.cc @@ -1,105 +1,105 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Study of forward Z + jet production at 7 TeV at LHCb /// @author W. Barter, A. Bursche, M. Sirendi (Rivet implementation) class LHCB_2014_I1262703 : public Analysis { public: /// Default constructor DEFAULT_RIVET_ANALYSIS_CTOR(LHCB_2014_I1262703); /// Initialise histograms and projections void init() { // Projections const Cut mycut = Cuts::eta >= 2.0 && Cuts::eta <= 4.5 && Cuts::pT > 20*GeV; ZFinder zfinder(FinalState(), mycut, PID::MUON, 60*GeV, 120*GeV, 0., ZFinder::ClusterPhotons::NONE); declare(zfinder, "ZFinder"); FastJets jetpro(zfinder.remainingFinalState(), FastJets::ANTIKT, 0.5); declare(jetpro, "Jets"); // Histograms book(_h_jet_pT , 3, 1, 1); book(_h_jet_eta20, 4, 1, 1); book(_h_jet_eta10, 4, 1, 2); book(_h_Z_y20 , 5, 1, 1); book(_h_Z_y10 , 5, 1, 2); book(_h_Z_pT20 , 6, 1, 1); book(_h_Z_pT10 , 6, 1, 2); book(_h_dphi20 , 7, 1, 1); book(_h_dphi10 , 7, 1, 2); book(_h_dy20 , 8, 1, 1); book(_h_dy10 , 8, 1, 2); } /// Do the analysis void analyze(const Event & e) { const ZFinder& zfinder = apply(e, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; - const ParticleVector leptons = zfinder.constituents(); + const Particles leptons = zfinder.constituents(); const Cut jetSelector = Cuts::eta >= 2.0 && Cuts::eta <= 4.5 && Cuts::pT > 10*GeV; const Jets jets = apply(e, "Jets").jetsByPt(jetSelector); // Clean the jets against the lepton candidates with a deltaR cut of 0.4 const Jets cleanedJets = filter_discard(jets, [&](const Jet& j) { return any(leptons, deltaRLess(j, 0.4)); }); // vector cleanedJets; // for (size_t i = 0; i < jets.size(); i++) { // bool isolated = true; // for (size_t j = 0; j < 2; j++) { // if (deltaR(leptons[j], jets[i]) < 0.4) { // isolated = false; // break; // } // } // if (isolated) cleanedJets.push_back(&jets[i]); // } // Require at least 1 survivor and note if it is above a 20 GeV jet pT threshold if (cleanedJets.empty()) vetoEvent; const bool above20 = cleanedJets[0].pT() > 20*GeV; const double dphi = deltaPhi(zfinder.boson(), cleanedJets[0]); const double drap = zfinder.boson().rap() - cleanedJets[0].rap(); // Fill histograms _h_jet_pT->fill(cleanedJets[0].pT()/GeV); _h_jet_eta10->fill(cleanedJets[0].eta()); _h_Z_y10->fill(zfinder.boson().rap()); _h_Z_pT10->fill(zfinder.boson().pT()/GeV); _h_dphi10->fill(dphi); _h_dy10->fill(drap); if (above20) { _h_jet_eta20->fill(cleanedJets[0].eta()); _h_Z_y20->fill(zfinder.boson().rap()); _h_Z_pT20->fill(zfinder.boson().pT()/GeV); _h_dphi20->fill(dphi); _h_dy20->fill(drap); } } /// Finalize void finalize() { normalize({_h_jet_pT, _h_jet_eta20, _h_jet_eta10, _h_Z_y20, _h_Z_y10, _h_Z_pT20, _h_Z_pT10, _h_dphi20, _h_dphi10, _h_dy20, _h_dy10}); } /// Histograms Histo1DPtr _h_jet_pT, _h_jet_eta20, _h_jet_eta10, _h_Z_y20, _h_Z_y10, _h_Z_pT20, _h_Z_pT10, _h_dphi20, _h_dphi10, _h_dy20, _h_dy10; }; DECLARE_RIVET_PLUGIN(LHCB_2014_I1262703); } diff --git a/include/Rivet/Particle.fhh b/include/Rivet/Particle.fhh --- a/include/Rivet/Particle.fhh +++ 
b/include/Rivet/Particle.fhh @@ -1,65 +1,62 @@ // -*- C++ -*- #ifndef RIVET_Particle_FHH #define RIVET_Particle_FHH #include "Rivet/Tools/RivetSTL.hh" #include "Rivet/Math/Vectors.hh" namespace Rivet { /// @name Particle declarations //@{ // Forward declaration class Particle; /// Typedefs for a vector of Particle objects. class Particles : public std::vector { public: using base = std::vector; //< using-declarations don't like template syntax using base::base; //< import base-class constructors /// Copy constructor from vector Particles(const std::vector& vps); operator FourMomenta () const; //< implementation in Particle.cc /// @todo Add conversion to PseudoJets? }; Particles operator + (const Particles& a, const Particles& b); - /// @deprecated Old name: use Particles instead. Will be removed - typedef Particles ParticleVector; - /// Typedef for a pair of Particle objects. typedef std::pair ParticlePair; //@} /// @name Particle function/functor declarations //@{ /// std::function instantiation for functors taking a Particle and returning a bool using ParticleSelector = function; /// std::function instantiation for functors taking two Particles and returning a bool using ParticleSorter = function; //@} /// @name PdgId declarations //@{ /// Typedefs for a PDG ID code. typedef int PdgId; //typedef PdgId PID; //< can't do this, because it's also a (sub)namespace /// Typedef for a pair of particle names. typedef std::pair PdgIdPair; //@} } #endif diff --git a/include/Rivet/Particle.hh b/include/Rivet/Particle.hh --- a/include/Rivet/Particle.hh +++ b/include/Rivet/Particle.hh @@ -1,700 +1,700 @@ // -*- C++ -*- #ifndef RIVET_Particle_HH #define RIVET_Particle_HH #include "Rivet/Particle.fhh" #include "Rivet/ParticleBase.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Tools/Utils.hh" #include "Rivet/Math/LorentzTrans.hh" // NOTE: Rivet/Tools/ParticleUtils.hh included at the end #include "fastjet/PseudoJet.hh" namespace Rivet { /// Particle representation, either from a HepMC::GenEvent or reconstructed. class Particle : public ParticleBase { public: /// @name Constructors //@{ /// Default constructor. /// @note A particle without info is useless. This only exists to keep STL containers happy. Particle() : ParticleBase(), _original(nullptr), _id(PID::ANY) { } /// Constructor without GenParticle. Particle(PdgId pid, const FourMomentum& mom, const FourVector& pos=FourVector()) : ParticleBase(), _original(nullptr), _id(pid), _momentum(mom), _origin(pos) { } /// Constructor from a HepMC GenParticle pointer. Particle(const GenParticle* gp) : ParticleBase(), _original(gp), _id(gp->pdg_id()), _momentum(gp->momentum()) { const GenVertex* vprod = gp->production_vertex(); if (vprod != nullptr) { setOrigin(vprod->position().t(), vprod->position().x(), vprod->position().y(), vprod->position().z()); } } /// Constructor from a HepMC GenParticle. Particle(const GenParticle& gp) : ParticleBase(), _original(&gp), _id(gp.pdg_id()), _momentum(gp.momentum()) { const GenVertex* vprod = gp.production_vertex(); if (vprod != nullptr) { setOrigin(vprod->position().t(), vprod->position().x(), vprod->position().y(), vprod->position().z()); } } //@} /// @name Kinematic properties //@{ /// The momentum. const FourMomentum& momentum() const { return _momentum; } /// Set the momentum. Particle& setMomentum(const FourMomentum& momentum) { _momentum = momentum; return *this; } /// Set the momentum via components. 
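  // Usage sketch (illustrative only): building a standalone Particle from a PID
  // code and a four-momentum via the constructors above, then resetting its
  // momentum with the component-wise setter declared below. Values are arbitrary.
  //
  //   Particle mu(PID::MUON, FourMomentum(50*GeV, 0., 0., 49.9*GeV));
  //   mu.setMomentum(60*GeV, 0., 0., 59.9*GeV);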
Particle& setMomentum(double E, double px, double py, double pz) { _momentum = FourMomentum(E, px, py, pz); return *this; } /// Apply an active Lorentz transform to this particle Particle& transformBy(const LorentzTransform& lt); //@ /// @name Positional properties //@{ /// The origin position. const FourVector& origin() const { return _origin; } /// Set the origin position. Particle& setOrigin(const FourVector& position) { _origin = position; return *this; } /// Set the origin position via components. Particle& setOrigin(double t, double x, double y, double z) { _origin = FourMomentum(t, x, y, z); return *this; } //@} /// @name Other representations and implicit casts to momentum-like objects //@{ /// Converter to FastJet3 PseudoJet virtual fastjet::PseudoJet pseudojet() const { return fastjet::PseudoJet(mom().px(), mom().py(), mom().pz(), mom().E()); } /// Cast operator to FastJet3 PseudoJet operator PseudoJet () const { return pseudojet(); } /// Get a const pointer to the original GenParticle const GenParticle* genParticle() const { return _original; } /// Cast operator for conversion to GenParticle* operator const GenParticle* () const { return genParticle(); } //@} /// @name Particle ID code accessors //@{ /// This Particle's PDG ID code. PdgId pid() const { return _id; } /// Absolute value of the PDG ID code. PdgId abspid() const { return std::abs(_id); } //@} /// @name Charge //@{ /// The charge of this Particle. double charge() const { return PID::charge(pid()); } /// The absolute charge of this Particle. double abscharge() const { return PID::abscharge(pid()); } /// Three times the charge of this Particle (i.e. integer multiple of smallest quark charge). int charge3() const { return PID::charge3(pid()); } /// Three times the absolute charge of this Particle (i.e. integer multiple of smallest quark charge). int abscharge3() const { return PID::abscharge3(pid()); } /// Is this Particle charged? bool isCharged() const { return charge3() != 0; } //@} /// @name Particle species //@{ /// Is this a hadron? bool isHadron() const { return PID::isHadron(pid()); } /// Is this a meson? bool isMeson() const { return PID::isMeson(pid()); } /// Is this a baryon? bool isBaryon() const { return PID::isBaryon(pid()); } /// Is this a lepton? bool isLepton() const { return PID::isLepton(pid()); } /// Is this a charged lepton? bool isChargedLepton() const { return PID::isChargedLepton(pid()); } /// Is this a neutrino? bool isNeutrino() const { return PID::isNeutrino(pid()); } /// Does this (hadron) contain a b quark? bool hasBottom() const { return PID::hasBottom(pid()); } /// Does this (hadron) contain a c quark? bool hasCharm() const { return PID::hasCharm(pid()); } // /// Does this (hadron) contain an s quark? // bool hasStrange() const { return PID::hasStrange(pid()); } /// Is this particle potentially visible in a detector? 
bool isVisible() const; //@} /// @name Constituents (for composite particles) //@{ /// Set direct constituents of this particle virtual void setConstituents(const Particles& cs, bool setmom=false); /// Add a single direct constituent to this particle virtual void addConstituent(const Particle& c, bool addmom=false); /// Add direct constituents to this particle virtual void addConstituents(const Particles& cs, bool addmom=false); /// Determine if this Particle is a composite of other Rivet Particles bool isComposite() const { return !constituents().empty(); } /// @brief Direct constituents of this particle, returned by reference /// /// The returned vector will be empty if this particle is non-composite, /// and its entries may themselves be composites. const Particles& constituents() const { return _constituents; } /// @brief Direct constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the sorting const Particles constituents(const ParticleSorter& sorter) const { return sortBy(constituents(), sorter); } /// @brief Direct constituents of this particle, filtered by a Cut /// @note Returns a copy, thanks to the filtering const Particles constituents(const Cut& c) const { return filter_select(constituents(), c); } /// @brief Direct constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the filtering and sorting const Particles constituents(const Cut& c, const ParticleSorter& sorter) const { return sortBy(constituents(c), sorter); } /// @brief Direct constituents of this particle, filtered by a selection functor /// @note Returns a copy, thanks to the filtering const Particles constituents(const ParticleSelector& selector) const { return filter_select(constituents(), selector); } /// @brief Direct constituents of this particle, filtered and sorted by functors /// @note Returns a copy, thanks to the filtering and sorting const Particles constituents(const ParticleSelector& selector, const ParticleSorter& sorter) const { return sortBy(constituents(selector), sorter); } /// @brief Fundamental constituents of this particle /// @note Returns {{*this}} if this particle is non-composite. 
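  // Usage sketch (illustrative only): filtering the direct constituents of a
  // composite particle with the Cut overloads above; "boson" is an assumed
  // composite Particle, e.g. from a Z/W finder.
  //
  //   const Particles muConstituents = boson.constituents(Cuts::abspid == PID::MUON);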
Particles rawConstituents() const; /// @brief Fundamental constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the sorting const Particles rawConstituents(const ParticleSorter& sorter) const { return sortBy(rawConstituents(), sorter); } /// @brief Fundamental constituents of this particle, filtered by a Cut /// @note Returns a copy, thanks to the filtering const Particles rawConstituents(const Cut& c) const { return filter_select(rawConstituents(), c); } /// @brief Fundamental constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the filtering and sorting const Particles rawConstituents(const Cut& c, const ParticleSorter& sorter) const { return sortBy(rawConstituents(c), sorter); } /// @brief Fundamental constituents of this particle, filtered by a selection functor /// @note Returns a copy, thanks to the filtering const Particles rawConstituents(const ParticleSelector& selector) const { return filter_select(rawConstituents(), selector); } /// @brief Fundamental constituents of this particle, filtered and sorted by functors /// @note Returns a copy, thanks to the filtering and sorting const Particles rawConstituents(const ParticleSelector& selector, const ParticleSorter& sorter) const { return sortBy(rawConstituents(selector), sorter); } //@} /// @name Ancestry (for fundamental particles with a HepMC link) //@{ /// Get a list of the direct parents of the current particle (with optional selection Cut) /// /// @note This is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! Particles parents(const Cut& c=Cuts::OPEN) const; /// Get a list of the direct parents of the current particle (with selector function) /// /// @note This is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! Particles parents(const ParticleSelector& f) const { return filter_select(parents(), f); } /// Check whether any particle in the particle's parent list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasParentWith(const ParticleSelector& f) const { return !parents(f).empty(); } /// Check whether any particle in the particle's parent list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasParentWith(const Cut& c) const; /// Check whether any particle in the particle's parent list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasParentWithout(const ParticleSelector& f) const { return hasParentWith([&](const Particle& p){ return !f(p); }); } /// Check whether any particle in the particle's parent list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
bool hasParentWithout(const Cut& c) const; /// Check whether a given PID is found in the particle's parent list /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! /// /// @deprecated Prefer e.g. hasParentWith(Cut::pid == 123) bool hasParent(PdgId pid) const; /// Get a list of the ancestors of the current particle (with optional selection Cut) /// /// @note By default only physical ancestors, with status=2, are returned. /// /// @note This is valid in MC, but may not be answerable experimentally -- /// use this function with care when replicating experimental analyses! Particles ancestors(const Cut& c=Cuts::OPEN, bool only_physical=true) const; /// Get a list of the direct parents of the current particle (with selector function) /// /// @note By default only physical ancestors, with status=2, are returned. /// /// @note This is valid in MC, but may not be answerable experimentally -- /// use this function with care when replicating experimental analyses! Particles ancestors(const ParticleSelector& f, bool only_physical=true) const { return filter_select(ancestors(Cuts::OPEN, only_physical), f); } /// Check whether any particle in the particle's ancestor list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasAncestorWith(const ParticleSelector& f, bool only_physical=true) const { return !ancestors(f, only_physical).empty(); } /// Check whether any particle in the particle's ancestor list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasAncestorWith(const Cut& c, bool only_physical=true) const; /// Check whether any particle in the particle's ancestor list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasAncestorWithout(const ParticleSelector& f, bool only_physical=true) const { return hasAncestorWith([&](const Particle& p){ return !f(p); }, only_physical); } /// Check whether any particle in the particle's ancestor list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasAncestorWithout(const Cut& c, bool only_physical=true) const; /// Check whether a given PID is found in the particle's ancestor list /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! /// /// @deprecated Prefer hasAncestorWith(Cuts::pid == pid) etc. bool hasAncestor(PdgId pid, bool only_physical=true) const; /// @brief Determine whether the particle is from a b-hadron decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromBottom() const; /// @brief Determine whether the particle is from a c-hadron decay /// /// @note If a hadron contains b and c quarks it is considered a bottom /// hadron and NOT a charm hadron. 
/// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromCharm() const; // /// @brief Determine whether the particle is from a s-hadron decay // /// // /// @note If a hadron contains b or c quarks as well as strange it is // /// considered a b or c hadron, but NOT a strange hadron. // /// // /// @note This question is valid in MC, but may not be perfectly answerable // /// experimentally -- use this function with care when replicating // /// experimental analyses! // bool fromStrange() const; /// @brief Determine whether the particle is from a hadron decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromHadron() const; /// @brief Determine whether the particle is from a tau decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromTau(bool prompt_taus_only=false) const; /// @brief Determine whether the particle is from a prompt tau decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromPromptTau() const { return fromTau(true); } /// @brief Determine whether the particle is from a tau which decayed hadronically /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromHadronicTau(bool prompt_taus_only=false) const; /// @brief Determine whether the particle is from a hadron or tau decay /// /// Specifically, walk up the ancestor chain until a status 2 hadron or /// tau is found, if at all. /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! /// - /// @deprecated Too vague: use fromHadron or fromHadronicTau + DEPRECATED("Too vague: use fromHadron or fromHadronicTau") bool fromDecay() const { return fromHadron() || fromPromptTau(); } /// @brief Shorthand definition of 'promptness' based on set definition flags /// /// A "direct" particle is one directly connected to the hard process. It is a /// preferred alias for "prompt", since it has no confusing implications about /// distinguishability by timing information. /// /// The boolean arguments allow a decay lepton to be considered direct if /// its parent was a "real" direct lepton. /// /// @note This one doesn't make any judgements about final-stateness bool isDirect(bool allow_from_direct_tau=false, bool allow_from_direct_mu=false) const; /// Alias for isDirect bool isPrompt(bool allow_from_prompt_tau=false, bool allow_from_prompt_mu=false) const { return isDirect(allow_from_prompt_tau, allow_from_prompt_mu); } //@} /// @name Decay info //@{ /// Whether this particle is stable according to the generator bool isStable() const; /// @todo isDecayed? How to restrict to physical particles? 
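  // Usage sketch (illustrative only): typical provenance queries with the
  // accessors documented above; "p" is an assumed Particle.
  //
  //   bool fromZ     = p.hasAncestorWith(Cuts::pid == PID::ZBOSON);
  //   bool promptMu  = p.abspid() == PID::MUON && p.isPrompt();
  //   bool heavyFlav = p.fromBottom() || p.fromCharm();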
/// Get a list of the direct descendants from the current particle (with optional selection Cut) Particles children(const Cut& c=Cuts::OPEN) const; /// Get a list of the direct descendants from the current particle (with selector function) Particles children(const ParticleSelector& f) const { return filter_select(children(), f); } /// Check whether any direct child of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasChildWith(const ParticleSelector& f) const { return !children(f).empty(); } /// Check whether any direct child of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasChildWith(const Cut& c) const; /// Check whether any direct child of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasChildWithout(const ParticleSelector& f) const { return hasChildWith([&](const Particle& p){ return !f(p); }); } /// Check whether any direct child of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasChildWithout(const Cut& c) const; /// Get a list of all the descendants from the current particle (with optional selection Cut) Particles allDescendants(const Cut& c=Cuts::OPEN, bool remove_duplicates=true) const; /// Get a list of all the descendants from the current particle (with selector function) Particles allDescendants(const ParticleSelector& f, bool remove_duplicates=true) const { return filter_select(allDescendants(Cuts::OPEN, remove_duplicates), f); } /// Check whether any descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasDescendantWith(const ParticleSelector& f, bool remove_duplicates=true) const { return !allDescendants(f, remove_duplicates).empty(); } /// Check whether any descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasDescendantWith(const Cut& c, bool remove_duplicates=true) const; /// Check whether any descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasDescendantWithout(const ParticleSelector& f, bool remove_duplicates=true) const { return hasDescendantWith([&](const Particle& p){ return !f(p); }, remove_duplicates); } /// Check whether any descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
bool hasDescendantWithout(const Cut& c, bool remove_duplicates=true) const; /// Get a list of all the stable descendants from the current particle (with optional selection Cut) /// /// @todo Use recursion through replica-avoiding MCUtils functions to avoid bookkeeping duplicates /// @todo Insist that the current particle is post-hadronization, otherwise throw an exception? Particles stableDescendants(const Cut& c=Cuts::OPEN) const; /// Get a list of all the stable descendants from the current particle (with selector function) Particles stableDescendants(const ParticleSelector& f) const { return filter_select(stableDescendants(), f); } /// Check whether any stable descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasStableDescendantWith(const ParticleSelector& f) const { return !stableDescendants(f).empty(); } /// Check whether any stable descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasStableDescendantWith(const Cut& c) const; /// Check whether any stable descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasStableDescendantWithout(const ParticleSelector& f) const { return hasStableDescendantWith([&](const Particle& p){ return !f(p); }); } /// Check whether any stable descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
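  // Usage sketch (illustrative only): child/descendant queries with the
  // accessors above; "tau" is an assumed (decayed) Particle.
  //
  //   bool radiatesPhoton    = tau.hasChildWith(Cuts::pid == PID::PHOTON);
  //   Particles centralKids  = tau.stableDescendants(Cuts::abseta < 2.5);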
bool hasStableDescendantWithout(const Cut& c) const; /// Flight length (divide by mm or cm to get the appropriate units) double flightLength() const; //@} /// @name Duplicate testing //@{ /// @brief Determine whether a particle is the first in a decay chain to meet the function requirement inline bool isFirstWith(const ParticleSelector& f) const { if (!f(*this)) return false; //< This doesn't even meet f, let alone being the last to do so if (any(parents(), f)) return false; //< If a direct parent has this property, this isn't the first return true; } /// @brief Determine whether a particle is the first in a decay chain not to meet the function requirement inline bool isFirstWithout(const ParticleSelector& f) const { return isFirstWith([&](const Particle& p){ return !f(p); }); } /// @brief Determine whether a particle is the last in a decay chain to meet the function requirement inline bool isLastWith(const ParticleSelector& f) const { if (!f(*this)) return false; //< This doesn't even meet f, let alone being the last to do so if (any(children(), f)) return false; //< If a child has this property, this isn't the last return true; } /// @brief Determine whether a particle is the last in a decay chain not to meet the function requirement inline bool isLastWithout(const ParticleSelector& f) const { return isLastWith([&](const Particle& p){ return !f(p); }); } //@} protected: /// A pointer to the original GenParticle from which this Particle is projected (may be null) const GenParticle* _original; /// Constituent particles if this is a composite (may be empty) Particles _constituents; /// The PDG ID code for this Particle. PdgId _id; /// The momentum of this particle. FourMomentum _momentum; /// The creation position of this particle. FourVector _origin; }; /// @name String representation and streaming support //@{ /// Allow a Particle to be passed to an ostream. std::ostream& operator << (std::ostream& os, const Particle& p); /// Allow ParticlePair to be passed to an ostream. std::ostream& operator << (std::ostream& os, const ParticlePair& pp); //@} } #include "Rivet/Tools/ParticleUtils.hh" #endif diff --git a/include/Rivet/Projections/FinalState.hh b/include/Rivet/Projections/FinalState.hh --- a/include/Rivet/Projections/FinalState.hh +++ b/include/Rivet/Projections/FinalState.hh @@ -1,56 +1,56 @@ // -*- C++ -*- #ifndef RIVET_FinalState_HH #define RIVET_FinalState_HH #include "Rivet/Projections/ParticleFinder.hh" namespace Rivet { /// @brief Project out all final-state particles in an event. /// Probably the most important projection in Rivet! class FinalState : public ParticleFinder { public: /// @name Standard constructors etc. //@{ /// Construction using Cuts object FinalState(const Cut& c=Cuts::open()); /// Construction using another FinalState and a Cuts object FinalState(const FinalState& fsp, const Cut& c); /// Old constructor with numeric cut arguments, retained for compatibility - /// @deprecated Use the versions with Cut arguments + //DEPRECATED("Use the versions with Cut arguments") FinalState(double mineta, double maxeta, double minpt=0.0*GeV); /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(FinalState); //@} /// Apply the projection to the event. virtual void project(const Event& e); /// Compare projections. virtual CmpState compare(const Projection& p) const; /// Decide if a particle is to be accepted or not. /// @todo Rename to _accept or acceptFinal? 
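  // Usage sketch (illustrative only): the Cut-based constructor is preferred
  // over the deprecated numeric form, e.g.
  //
  //   FinalState fs(Cuts::abseta < 2.5 && Cuts::pT > 0.5*GeV);  // preferred
  //   FinalState fsOld(-2.5, 2.5, 0.5*GeV);                     // deprecated equivalent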
virtual bool accept(const Particle& p) const; private: // Hide lossy copy constructors for all classes derived from FinalState template FinalState(const T& rhs); template FinalState const& operator=(T const& rhs); }; } #endif diff --git a/include/Rivet/Projections/InitialQuarks.hh b/include/Rivet/Projections/InitialQuarks.hh --- a/include/Rivet/Projections/InitialQuarks.hh +++ b/include/Rivet/Projections/InitialQuarks.hh @@ -1,59 +1,61 @@ // -*- C++ -*- #ifndef RIVET_InitialQuarks_HH #define RIVET_InitialQuarks_HH +#warning "This is a dangerous projection for a few specific old analyses. Not for general use!" + #include "Rivet/Projection.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Project out quarks from the hard process in \f$ e^+ e^- \to Z^0 \f$ events /// /// @deprecated We're not sure exactly when we'lll get rid of this, but it's going to happen... /// /// @warning This is a very dangerous and specific projection! class InitialQuarks : public Projection { public: /// @name Standard constructors and destructors. //@{ /// The default constructor. May specify the minimum and maximum /// pseudorapidity \f$ \eta \f$ and the min \f$ p_T \f$ (in GeV). InitialQuarks() { setName("InitialQuarks"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(InitialQuarks); //@} /// Access the projected final-state particles. virtual const Particles& particles() const { return _theParticles; } /// Is this final state empty? virtual bool empty() const { return _theParticles.empty(); } protected: /// Apply the projection to the event. virtual void project(const Event& e); /// Compare projections. virtual CmpState compare(const Projection& p) const; protected: /// The final-state particles. Particles _theParticles; }; } #endif diff --git a/include/Rivet/Projections/JetAlg.hh b/include/Rivet/Projections/JetAlg.hh --- a/include/Rivet/Projections/JetAlg.hh +++ b/include/Rivet/Projections/JetAlg.hh @@ -1,218 +1,219 @@ // -*- C++ -*- #ifndef RIVET_JetAlg_HH #define RIVET_JetAlg_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Jet.hh" namespace Rivet { /// Abstract base class for projections which can return a set of {@link Jet}s. class JetAlg : public Projection { public: /// Enum for the treatment of muons: whether to include all, some, or none in jet-finding enum class Muons { NONE, DECAY, ALL }; /// Enum for the treatment of invisible particles: whether to include all, some, or none in jet-finding enum class Invisibles { NONE, DECAY, ALL }; /// Constructor JetAlg(const FinalState& fs, Muons usemuons = Muons::ALL, Invisibles useinvis = Invisibles::NONE); /// Default constructor JetAlg() = default; /// Clone on the heap. virtual unique_ptr clone() const = 0; /// Destructor virtual ~JetAlg() = default; /// @name Control the treatment of muons and invisible particles /// /// Since MC-based jet calibration (and/or particle flow) can add back in /// particles that weren't seen in calorimeters/trackers. //@{ /// @brief Include (some) muons in jet construction. /// /// The default behaviour is that jets are only constructed from visible /// particles. Some jet studies, including those from ATLAS, use a definition /// in which neutrinos from hadron decays are included via MC-based calibrations. /// Setting this flag to true avoids the automatic restriction to a VisibleFinalState. 
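  // Usage sketch (illustrative only): MC-calibration-style jet inputs can be
  // emulated by adding decay neutrinos (and muons) back in via the setters
  // below; "fs" is an assumed FinalState projection.
  //
  //   FastJets jetpro(fs, FastJets::ANTIKT, 0.4);
  //   jetpro.useInvisibles(JetAlg::Invisibles::DECAY);
  //   jetpro.useMuons(JetAlg::Muons::ALL);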
void useMuons(Muons usemuons = Muons::ALL) { _useMuons = usemuons; } /// @brief Include (some) invisible particles in jet construction. /// /// The default behaviour is that jets are only constructed from visible /// particles. Some jet studies, including those from ATLAS, use a definition /// in which neutrinos from hadron decays are included via MC-based calibrations. /// Setting this flag to true avoids the automatic restriction to a VisibleFinalState. void useInvisibles(Invisibles useinvis = Invisibles::DECAY) { _useInvisibles = useinvis; } /// @brief obsolete chooser DEPRECATED("make an explicit choice from Invisibles::{NONE,DECAY,ALL}. This boolean call does not allow for ALL") void useInvisibles(bool useinvis) { _useInvisibles = useinvis ? Invisibles::DECAY : Invisibles::NONE; } //@} /// @name Access to jet objects //@{ /// Get jets in no guaranteed order, with an optional Cut /// @note Returns a copy rather than a reference, due to cuts virtual Jets jets(const Cut& c=Cuts::open()) const { return filter_select(_jets(), c); } /// Get jets in no guaranteed order, with a selection functor /// @note Returns a copy rather than a reference, due to cuts virtual Jets jets(const JetSelector& selector) const { return filter_select(_jets(), selector); } /// Get the jets with a Cut applied, and ordered by supplied sorting functor /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const Cut& c, const JetSorter& sorter) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return sortBy(jets(c), sorter); } /// Get the jets, ordered by supplied sorting functor, with an optional Cut /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const JetSorter& sorter, const Cut& c=Cuts::open()) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return jets(c, sorter); } /// Get the jets, ordered by supplied sorting function object, with optional cuts on \f$ p_\perp \f$ and rapidity. /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const JetSelector& selector, const JetSorter& sorter) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return sortBy(jets(selector), sorter); } /// Get the jets, ordered by supplied sorting functor and with a selection functor applied /// @note Returns a copy rather than a reference, due to cuts and sorting Jets jets(const JetSorter& sorter, const JetSelector selector) const { /// @todo Will the vector be efficiently std::move'd by value through this function chain? return jets(selector, sorter); } /// Get the jets, ordered by \f$ p_T \f$, with optional cuts. /// /// @note Returns a copy rather than a reference, due to cuts and sorting /// /// This is a very common use-case, so is available as syntatic sugar for jets(c, cmpMomByPt). /// @todo The other sorted accessors should be removed in a cleanup. Jets jetsByPt(const Cut& c=Cuts::open()) const { return jets(c, cmpMomByPt); } /// Get the jets, ordered by \f$ p_T \f$, with cuts via a selection functor. /// /// @note Returns a copy rather than a reference, due to cuts and sorting /// /// This is a very common use-case, so is available as syntatic sugar for jets(c, cmpMomByPt). /// @todo The other sorted accessors should be removed in a cleanup. 
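  // Usage sketch (illustrative only): retrieving pT-ordered jets with a Cut via
  // the accessors above; "jetpro" is an assumed JetAlg-derived projection.
  //
  //   const Jets jets = jetpro.jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 4.5);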
Jets jetsByPt(const JetSelector& selector) const { return jets(selector, cmpMomByPt); } /// Get the jets, ordered by \f$ p_T \f$, with a cut on \f$ p_\perp \f$. /// /// @deprecated Use the version with a Cut argument /// @note Returns a copy rather than a reference, due to cuts and sorting /// /// This is a very common use-case, so is available as syntatic sugar for jets(Cuts::pT >= ptmin, cmpMomByPt). /// @todo The other sorted accessors should be removed in a cleanup. + DEPRECATED("Use the version with a Cut argument") Jets jetsByPt(double ptmin) const { return jets(Cuts::pT >= ptmin, cmpMomByPt); } //@} protected: /// @brief Internal pure virtual method for getting jets in no guaranteed order. virtual Jets _jets() const = 0; public: /// Count the jets size_t size() const { return jets().size(); } /// Count the jets after a Cut is applied. size_t size(const Cut& c) const { return jets(c).size(); } /// Count the jets after a selection functor is applied. size_t size(const JetSelector& s) const { return jets(s).size(); } /// Is this jet finder empty? bool empty() const { return size() == 0; } /// Is this jet finder empty after a Cut is applied? bool empty(const Cut& c) const { return size(c) == 0; } /// Is this jet finder empty after a selection functor is applied? bool empty(const JetSelector& s) const { return size(s) == 0; } /// Clear the projection. virtual void reset() = 0; typedef Jet entity_type; typedef Jets collection_type; /// Template-usable interface common to FinalState. collection_type entities() const { return jets(); } // /// Do the calculation locally (no caching). // virtual void calc(const Particles& constituents, const Particles& tagparticles=Particles()) = 0; protected: /// Perform the projection on the Event. virtual void project(const Event& e) = 0; /// Compare projections. virtual CmpState compare(const Projection& p) const = 0; protected: /// Flag to determine whether or not to exclude (some) muons from the would-be constituents. Muons _useMuons; /// Flag to determine whether or not to exclude (some) invisible particles from the would-be constituents. Invisibles _useInvisibles; }; /// Compatibility typedef, for equivalence with ParticleFinder /// @todo Should we make this the canonical name? Would "require" a header filename change -> breakage or ugly. using JetFinder = JetAlg; } #endif diff --git a/include/Rivet/Projections/WFinder.hh b/include/Rivet/Projections/WFinder.hh --- a/include/Rivet/Projections/WFinder.hh +++ b/include/Rivet/Projections/WFinder.hh @@ -1,179 +1,146 @@ // -*- C++ -*- #ifndef RIVET_WFinder_HH #define RIVET_WFinder_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief Convenience finder of leptonically decaying W /// /// Chain together different projections as convenience for finding one W /// from one lepton and the missing E 4-vector in the final state, including photon clustering. 
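  ///
  /// Example (illustrative sketch; "fs" is an assumed FinalState and the cut
  /// values are arbitrary):
  ///
  ///   WFinder wf(fs, Cuts::pT > 25*GeV, PID::MUON, 40*GeV, 1000*GeV, 25*GeV, 0.1,
  ///              WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY,
  ///              WFinder::AddPhotons::NO, WFinder::MassWindow::MT);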
class WFinder : public ParticleFinder { public: enum class ChargedLeptons { PROMPT, ALL }; enum class ClusterPhotons { NONE, NODECAY, ALL }; enum class AddPhotons { NO, YES }; enum class MassWindow { M, MT }; /// @name Constructors //@{ /// Constructor taking cuts object /// @param inputfs Input final state /// @param leptoncuts Charged lepton cuts /// @param pid Type of the charged lepton /// @param minmass,maxmass (Transverse) mass window /// @param missingET Minimal amount of missing ET (neutrinos) required /// @param dRmax Maximum dR of photons around charged lepton to take into account /// for W reconstruction (only relevant if one of the following are true) /// @param chLeptons Only use prompt charged leptons, or any charged leptons? /// @param clusterPhotons Whether such photons are supposed to be /// clustered to the lepton object and thus W mom /// @param trackPhotons Whether such photons should be added to _theParticles /// @param masstype Whether mass window should be applied using m or mT /// /// @todo Revisit AddPhotons::NO as default? WFinder(const FinalState& inputfs, const Cut& leptoncuts, PdgId pid, double minmass, double maxmass, double missingET, double dRmax=0.1, ChargedLeptons chLeptons=ChargedLeptons::PROMPT, ClusterPhotons clusterPhotons=ClusterPhotons::NODECAY, AddPhotons trackPhotons=AddPhotons::NO, MassWindow masstype=MassWindow::M, double masstarget=80.4*GeV); - /// Backward-compatible constructor with implicit chLeptons mode = ChargedLeptons::PROMPT - /// @deprecated Remove this and always use the constructor with chLeptons argument. - WFinder(const FinalState& inputfs, - const Cut& leptoncuts, - PdgId pid, - double minmass, double maxmass, - double missingET, - double dRmax, - ClusterPhotons clusterPhotons, - AddPhotons trackPhotons=AddPhotons::NO, - MassWindow masstype=MassWindow::M, - double masstarget=80.4*GeV) - : WFinder(inputfs, leptoncuts, pid, minmass, maxmass, missingET, - dRmax, ChargedLeptons::PROMPT, clusterPhotons, trackPhotons, masstype, masstarget) - { } - - // /// Constructor with more convenient argument ordering and default args - // /// - // /// @todo Revisit AddPhotons::NO as default? - // WFinder(const FinalState& inputfs, - // const Cut& leptoncuts, - // PdgId pid, - // double minmass, double maxmass, - // double missingET, - // MassWindow masstype, - // double masstarget=80.4*GeV, - // ClusterPhotons clusterPhotons=ClusterPhotons::NODECAY, - // double dRmax=0.1, - // AddPhotons trackPhotons=AddPhotons::NO) - // : WFinder(inputfs, leptoncuts, pid, minmass, maxmass, missingET, - // dRmax, clusterPhotons, trackPhotons, masstype, masstarget) - // { } - /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(WFinder); //@} /// @brief Access to the found bosons, equivalent to constituents() /// @note Currently either 0 or 1 boson can be found. const Particles& bosons() const { return particles(); } /// Access to the found boson (assuming it exists) /// @todo C++17 std::optional... const Particle& boson() const { return particles().front(); } /// @brief Access to the Ws' constituent clustered leptons /// @note Either size 0 if no boson was found or 1 if one boson was found const Particles& constituentLeptons() const { return _leptons; } /// brief Access to the W's constituent clustered lepton (assuming it exists) /// @todo C++17 std::optional... 
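    // Usage sketch (illustrative only): typical per-event guard and lepton access
    // in an analysis, assuming a WFinder declared as "WF" and applied to "event".
    //
    //   const WFinder& wf = apply<WFinder>(event, "WF");
    //   if (wf.bosons().size() != 1) vetoEvent;
    //   const Particle& lep = wf.constituentLepton();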
const Particle& constituentLepton() const { return _leptons.front(); } /// Access to the Ws' constituent neutrinos /// /// @note Either size 0 if no boson was found or 1 if one boson was found /// @note The neutrino can't be perfecly reconstructed -- this is a pseudo-nu from the MET. const Particles& constituentNeutrinos() const { return _neutrinos; } /// Access to the W's constituent neutrino (assuming it exists) /// @note The neutrino can't be perfecly reconstructed -- this is a pseudo-nu from the MET. const Particle& constituentNeutrino() const { return _neutrinos.front(); } /// Access to the particles other than the W leptons and clustered photons /// /// Useful for e.g. input to a jet finder const VetoedFinalState& remainingFinalState() const; /// Access to the missing momentum projection used to find the "neutrino" const MissingMomentum& missingMom() const; /// @brief Calculate the transverse mass of the W, from the charged lepton and neutrino /// /// Defined as sqrt(2 pT_l pT_nu (1.0 - cos(dphi_lnu))). Return -1 if no boson found. double mT() const { if (bosons().empty()) return -1; return Rivet::mT(constituentLepton().mom(), constituentNeutrino().mom()); } protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; public: /// Clear the projection void clear() { _theParticles.clear(); } private: /// (Transverse) mass cuts double _minmass, _maxmass, _masstarget; /// Use transverse or complete mass? bool _useTransverseMass; /// Missing ET cut double _etMissMin; /// Switch for tracking of photons (whether to include them in the W particle) /// This is relevant when the clustered photons need to be excluded from e.g. a jet finder AddPhotons _trackPhotons; /// Charged lepton flavour PdgId _pid; /// Result caches. Will be filled by project() Particles _leptons, _neutrinos; }; } #endif diff --git a/include/Rivet/Tools/ParticleUtils.hh b/include/Rivet/Tools/ParticleUtils.hh --- a/include/Rivet/Tools/ParticleUtils.hh +++ b/include/Rivet/Tools/ParticleUtils.hh @@ -1,768 +1,768 @@ #ifndef RIVET_PARTICLEUTILS_HH #define RIVET_PARTICLEUTILS_HH #include "Rivet/Particle.hh" #include "Rivet/Tools/ParticleBaseUtils.hh" #include "Rivet/Tools/ParticleIdUtils.hh" // Macros to map Rivet::Particle functions to PID:: functions of the same name #define PARTICLE_TO_PID_BOOLFN(fname) inline bool fname (const Particle& p) { return PID:: fname (p.pid()); } #define PARTICLE_TO_PID_INTFN(fname) inline int fname (const Particle& p) { return PID:: fname (p.pid()); } #define PARTICLE_TO_PID_DBLFN(fname) inline double fname (const Particle& p) { return PID:: fname (p.pid()); } namespace Rivet { /// @name Particle classifier functions //@{ /// Unbound function access to PID code inline int pid(const Particle& p) { return p.pid(); } /// Unbound function access to abs PID code inline int abspid(const Particle& p) { return p.abspid(); } /// Is this particle species charged? PARTICLE_TO_PID_BOOLFN(isCharged) /// Is this particle species neutral? PARTICLE_TO_PID_BOOLFN(isNeutral) /// Is this a neutrino? 
PARTICLE_TO_PID_BOOLFN(isNeutrino) /// Determine if the PID is that of a charged lepton PARTICLE_TO_PID_BOOLFN(isChargedLepton) PARTICLE_TO_PID_BOOLFN(isChLepton) /// Determine if the PID is that of a lepton (charged or neutral) PARTICLE_TO_PID_BOOLFN(isLepton) /// Determine if the PID is that of a photon PARTICLE_TO_PID_BOOLFN(isPhoton) /// Determine if the PID is that of an electron or positron PARTICLE_TO_PID_BOOLFN(isElectron) /// Determine if the PID is that of an muon or antimuon PARTICLE_TO_PID_BOOLFN(isMuon) /// Determine if the PID is that of an tau or antitau PARTICLE_TO_PID_BOOLFN(isTau) /// Determine if the PID is that of a hadron PARTICLE_TO_PID_BOOLFN(isHadron) /// Determine if the PID is that of a meson PARTICLE_TO_PID_BOOLFN(isMeson) /// Determine if the PID is that of a baryon PARTICLE_TO_PID_BOOLFN(isBaryon) /// Determine if the PID is that of a quark PARTICLE_TO_PID_BOOLFN(isQuark) /// Determine if the PID is that of a parton (quark or gluon) PARTICLE_TO_PID_BOOLFN(isParton) /// Determine if the PID is that of a W+ PARTICLE_TO_PID_BOOLFN(isWplus) /// Determine if the PID is that of a W- PARTICLE_TO_PID_BOOLFN(isWminus) /// Determine if the PID is that of a W+- PARTICLE_TO_PID_BOOLFN(isW) /// Determine if the PID is that of a Z0 PARTICLE_TO_PID_BOOLFN(isZ) /// Determine if the PID is that of an SM/lightest SUSY Higgs PARTICLE_TO_PID_BOOLFN(isHiggs) /// Determine if the PID is that of an s/sbar PARTICLE_TO_PID_BOOLFN(isStrange) /// Determine if the PID is that of a c/cbar PARTICLE_TO_PID_BOOLFN(isCharm) /// Determine if the PID is that of a b/bbar PARTICLE_TO_PID_BOOLFN(isBottom) /// Determine if the PID is that of a t/tbar PARTICLE_TO_PID_BOOLFN(isTop) /// Determine if the particle is a heavy flavour hadron or parton PARTICLE_TO_PID_BOOLFN(isHeavyFlavour) /// Determine if the PID is that of a heavy parton (c,b,t) PARTICLE_TO_PID_BOOLFN(isHeavyParton) /// Determine if the PID is that of a light parton (u,d,s) PARTICLE_TO_PID_BOOLFN(isLightParton) /// Determine if the PID is that of a heavy flavour (b or c) meson PARTICLE_TO_PID_BOOLFN(isHeavyMeson) /// Determine if the PID is that of a heavy flavour (b or c) baryon PARTICLE_TO_PID_BOOLFN(isHeavyBaryon) /// Determine if the PID is that of a heavy flavour (b or c) hadron PARTICLE_TO_PID_BOOLFN(isHeavyHadron) /// Determine if the PID is that of a light flavour (not b or c) meson PARTICLE_TO_PID_BOOLFN(isLightMeson) /// Determine if the PID is that of a light flavour (not b or c) baryon PARTICLE_TO_PID_BOOLFN(isLightBaryon) /// Determine if the PID is that of a light flavour (not b or c) hadron PARTICLE_TO_PID_BOOLFN(isLightHadron) /// Determine if the PID is that of a b-meson. PARTICLE_TO_PID_BOOLFN(isBottomMeson) /// Determine if the PID is that of a b-baryon. PARTICLE_TO_PID_BOOLFN(isBottomBaryon) /// Determine if the PID is that of a b-hadron. PARTICLE_TO_PID_BOOLFN(isBottomHadron) /// @brief Determine if the PID is that of a c-meson. /// /// Specifically, the _heaviest_ quark is a c: a B_c is a b-meson and NOT a c-meson. /// Charmonia (closed charm) are counted as c-mesons here. PARTICLE_TO_PID_BOOLFN(isCharmMeson) /// @brief Determine if the PID is that of a c-baryon. /// /// Specifically, the _heaviest_ quark is a c: a baryon containing a b & c /// is a b-baryon and NOT a c-baryon. To test for the simpler case, just use /// a combination of hasCharm() and isBaryon(). PARTICLE_TO_PID_BOOLFN(isCharmBaryon) /// Determine if the PID is that of a c-hadron. 
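  // Usage sketch (illustrative only): these unbound classifiers act as
  // ParticleSelectors for the filter/any helpers; "parts" is an assumed
  // Particles collection.
  //
  //   const Particles leptons = filter_select(parts, isChargedLepton);
  //   const bool hasBHadron   = any(parts, isBottomHadron);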
PARTICLE_TO_PID_BOOLFN(isCharmHadron) // /// Determine if the PID is that of a strange meson // PARTICLE_TO_PID_BOOLFN(isStrangeMeson) // /// Determine if the PID is that of a strange baryon // PARTICLE_TO_PID_BOOLFN(isStrangeBaryon) // /// Determine if the PID is that of a strange hadron // PARTICLE_TO_PID_BOOLFN(isStrangeHadron) /// Is this a pomeron, odderon, or generic reggeon? PARTICLE_TO_PID_BOOLFN(isReggeon) /// Determine if the PID is that of a diquark (used in hadronization models) PARTICLE_TO_PID_BOOLFN(isDiquark) /// Determine if the PID is that of a pentaquark (hypothetical hadron) PARTICLE_TO_PID_BOOLFN(isPentaquark) /// Is this a fundamental SUSY particle? PARTICLE_TO_PID_BOOLFN(isSUSY) /// Is this an R-hadron? PARTICLE_TO_PID_BOOLFN(isRhadron) /// Is this a technicolor particle? PARTICLE_TO_PID_BOOLFN(isTechnicolor) /// Is this an excited (composite) quark or lepton? PARTICLE_TO_PID_BOOLFN(isExcited) /// Is this a Kaluza-Klein excitation? PARTICLE_TO_PID_BOOLFN(isKK) /// Is this a graviton? PARTICLE_TO_PID_BOOLFN(isGraviton) /// Is this a BSM particle (including graviton)? PARTICLE_TO_PID_BOOLFN(isBSM) /// Determine if the PID is in the generator-specific range PARTICLE_TO_PID_BOOLFN(isGenSpecific) /// Determine if the PID is that of an EW scale resonance PARTICLE_TO_PID_BOOLFN(isResonance) /// Check the PID for usability in transport codes like Geant4 PARTICLE_TO_PID_BOOLFN(isTransportable) /// Does this particle contain an up quark? PARTICLE_TO_PID_BOOLFN(hasUp) /// Does this particle contain a down quark? PARTICLE_TO_PID_BOOLFN(hasDown) /// Does this particle contain a strange quark? PARTICLE_TO_PID_BOOLFN(hasStrange) /// Does this particle contain a charm quark? PARTICLE_TO_PID_BOOLFN(hasCharm) /// Does this particle contain a bottom quark? PARTICLE_TO_PID_BOOLFN(hasBottom) /// Does this particle contain a top quark? PARTICLE_TO_PID_BOOLFN(hasTop) /// jSpin returns 2J+1, where J is the total spin PARTICLE_TO_PID_INTFN(jSpin) /// sSpin returns 2S+1, where S is the spin PARTICLE_TO_PID_INTFN(sSpin) /// lSpin returns 2L+1, where L is the orbital angular momentum PARTICLE_TO_PID_INTFN(lSpin) /// Return the charge PARTICLE_TO_PID_DBLFN(charge) /// Return 3 times the charge (3 x quark charge is an int) PARTICLE_TO_PID_INTFN(charge3) /// Return the absolute charge PARTICLE_TO_PID_DBLFN(abscharge) /// Return 3 times the abs charge (3 x quark charge is an int) PARTICLE_TO_PID_INTFN(abscharge3) /// Get the atomic number (number of protons) in a nucleus/ion PARTICLE_TO_PID_INTFN(nuclZ) /// Get the atomic weight (number of nucleons) in a nucleus/ion PARTICLE_TO_PID_INTFN(nuclA) /// If this is a nucleus (ion), get nLambda PARTICLE_TO_PID_INTFN(nuclNlambda) //@} /// @name Particle pair classifiers /// @todo Make versions that work on ParticlePair? 
//@{ inline bool isSameSign(const Particle& a, const Particle& b) { return PID::isSameSign(a.pid(), b.pid()); } inline bool isOppSign(const Particle& a, const Particle& b) { return PID::isOppSign(a.pid(), b.pid()); } inline bool isSameFlav(const Particle& a, const Particle& b) { return PID::isSameFlav(a.pid(), b.pid()); } inline bool isOppFlav(const Particle& a, const Particle& b) { return PID::isOppFlav(a.pid(), b.pid()); } inline bool isOSSF(const Particle& a, const Particle& b) { return PID::isOSSF(a.pid(), b.pid()); } inline bool isSSSF(const Particle& a, const Particle& b) { return PID::isSSSF(a.pid(), b.pid()); } inline bool isOSOF(const Particle& a, const Particle& b) { return PID::isOSOF(a.pid(), b.pid()); } inline bool isSSOF(const Particle& a, const Particle& b) { return PID::isSSOF(a.pid(), b.pid()); } //@} /// @name Particle charge/sign comparison functions //@{ /// @brief Return true if Particles @a a and @a b have the opposite charge sign /// @note Two neutrals returns false inline bool oppSign(const Particle& a, const Particle& b) { return sign(a.charge3()) == -sign(b.charge3()) && sign(a.charge3()) != ZERO; } /// Return true if Particles @a a and @a b have the same charge sign /// @note Two neutrals returns true inline bool sameSign(const Particle& a, const Particle& b) { return sign(a.charge3()) == sign(b.charge3()); } /// Return true if Particles @a a and @a b have the exactly opposite charge /// @note Two neutrals returns false inline bool oppCharge(const Particle& a, const Particle& b) { return a.charge3() == -b.charge3() && a.charge3() != 0; } /// Return true if Particles @a a and @a b have the same charge (including neutral) /// @note Two neutrals returns true inline bool sameCharge(const Particle& a, const Particle& b) { return a.charge3() == b.charge3(); } /// Return true if Particles @a a and @a b have a different (not necessarily opposite) charge inline bool diffCharge(const Particle& a, const Particle& b) { return a.charge3() != b.charge3(); } //@} ////////////////////////////////////// /// @name Non-PID particle properties, via unbound functions //@{ /// @brief Determine whether a particle is the first in a decay chain to meet the function requirement inline bool isFirstWith(const Particle& p, const ParticleSelector& f) { return p.isFirstWith(f); } /// @brief Determine whether a particle is the first in a decay chain not to meet the function requirement inline bool isFirstWithout(const Particle& p, const ParticleSelector& f) { return p.isFirstWithout(f); } /// @brief Determine whether a particle is the last in a decay chain to meet the function requirement inline bool isLastWith(const Particle& p, const ParticleSelector& f) { return p.isLastWith(f); } /// @brief Determine whether a particle is the last in a decay chain not to meet the function requirement inline bool isLastWithout(const Particle& p, const ParticleSelector& f) { return p.isLastWithout(f); } /// @brief Determine whether a particle has an ancestor which meets the function requirement inline bool hasAncestorWith(const Particle& p, const ParticleSelector& f) { return p.hasAncestorWith(f); } /// @brief Determine whether a particle has an ancestor which doesn't meet the function requirement inline bool hasAncestorWithout(const Particle& p, const ParticleSelector& f) { return p.hasAncestorWithout(f); } /// @brief Determine whether a particle has a parent which meets the function requirement inline bool hasParentWith(const Particle& p, const ParticleSelector& f) { return p.hasParentWith(f); } /// @brief 
Determine whether a particle has a parent which doesn't meet the function requirement inline bool hasParentWithout(const Particle& p, const ParticleSelector& f) { return p.hasParentWithout(f); } /// @brief Determine whether a particle has a child which meets the function requirement inline bool hasChildWith(const Particle& p, const ParticleSelector& f) { return p.hasChildWith(f); } /// @brief Determine whether a particle has a child which doesn't meet the function requirement inline bool hasChildWithout(const Particle& p, const ParticleSelector& f) { return p.hasChildWithout(f); } /// @brief Determine whether a particle has a descendant which meets the function requirement inline bool hasDescendantWith(const Particle& p, const ParticleSelector& f) { return p.hasDescendantWith(f); // return !p.allDescendants(f).empty(); } /// @brief Determine whether a particle has a descendant which doesn't meet the function requirement inline bool hasDescendantWithout(const Particle& p, const ParticleSelector& f) { return p.hasDescendantWithout(f); } /// @brief Determine whether a particle has a stable descendant which meets the function requirement inline bool hasStableDescendantWith(const Particle& p, const ParticleSelector& f) { return p.hasStableDescendantWith(f); } /// @brief Determine whether a particle has a stable descendant which doesn't meet the function requirement inline bool hasStableDescendantWithout(const Particle& p, const ParticleSelector& f) { return p.hasStableDescendantWithout(f); } /// Is this particle potentially visible in a detector? inline bool isVisible(const Particle& p) { return p.isVisible(); } /// @brief Decide if a given particle is direct, via Particle::isDirect() /// /// A "direct" particle is one directly connected to the hard process. It is a /// preferred alias for "prompt", since it has no confusing implications about /// distinguishability by timing information. /// /// The boolean arguments allow a decay lepton to be considered direct if /// its parent was a "real" direct lepton. inline bool isDirect(const Particle& p, bool allow_from_direct_tau=false, bool allow_from_direct_mu=false) { return p.isDirect(allow_from_direct_tau, allow_from_direct_mu); } /// @brief Decide if a given particle is prompt, via Particle::isPrompt() /// /// The boolean arguments allow a decay lepton to be considered prompt if /// its parent was a "real" prompt lepton. 
inline bool isPrompt(const Particle& p, bool allow_from_prompt_tau=false, bool allow_from_prompt_mu=false) { return p.isPrompt(allow_from_prompt_tau, allow_from_prompt_mu); } /// Decide if a given particle is stable, via Particle::isStable() inline bool isStable(const Particle& p) { return p.isStable(); } /// Decide if a given particle decays hadronically inline bool hasHadronicDecay(const Particle& p) { if (p.isStable()) return false; if (p.hasChildWith(isHadron)) return true; return false; } /// Decide if a given particle decays leptonically (decays, and no hadrons) inline bool hasLeptonicDecay(const Particle& p) { if (p.isStable()) return false; if (p.hasChildWith(isHadron)) return false; return true; } /// Check whether a given PID is found in the particle's ancestor list /// @deprecated Prefer hasAncestorWith inline bool hasAncestor(const Particle& p, PdgId pid) { return p.hasAncestor(pid); } /// Determine whether the particle is from a b-hadron decay inline bool fromBottom(const Particle& p) { return p.fromBottom(); } /// @brief Determine whether the particle is from a c-hadron decay inline bool fromCharm(const Particle& p) { return p.fromCharm(); } /// @brief Determine whether the particle is from a hadron decay inline bool fromHadron(const Particle& p) { return p.fromHadron(); } /// @brief Determine whether the particle is from a tau decay inline bool fromTau(const Particle& p, bool prompt_taus_only=false) { return p.fromTau(prompt_taus_only); } /// @brief Determine whether the particle is from a prompt tau decay inline bool fromPromptTau(const Particle& p) { return p.fromPromptTau(); } /// @brief Determine whether the particle is from a hadron or tau decay - /// @deprecated Too vague: use fromHadron or fromHadronicTau + DEPRECATED("Too vague: use fromHadron or fromHadronicTau") inline bool fromDecay(const Particle& p) { return p.fromDecay(); } //@} /// @name Particle classifier -> bool functors /// /// To be passed to any() or all() e.g. 
  /// @name Particle classifier -> bool functors
  ///
  /// To be passed to any() or all() e.g. any(p.children(), HasPID(PID::MUON))
  //@{

  /// Base type for Particle -> bool functors
  struct BoolParticleFunctor {
    virtual bool operator()(const Particle& p) const = 0;
    virtual ~BoolParticleFunctor() {}
  };

  /// Functor for and-combination of selector logic
  struct BoolParticleAND : public BoolParticleFunctor {
    BoolParticleAND(const std::vector<ParticleSelector>& sels) : selectors(sels) {}
    BoolParticleAND(const ParticleSelector& a, const ParticleSelector& b) : selectors({a,b}) {}
    BoolParticleAND(const ParticleSelector& a, const ParticleSelector& b, const ParticleSelector& c) : selectors({a,b,c}) {}
    bool operator()(const Particle& p) const {
      for (const ParticleSelector& sel : selectors) if (!sel(p)) return false;
      return true;
    }
    std::vector<ParticleSelector> selectors;
  };

  /// Operator syntactic sugar for AND construction
  inline BoolParticleAND operator && (const ParticleSelector& a, const ParticleSelector& b) { return BoolParticleAND(a, b); }

  /// Functor for or-combination of selector logic
  struct BoolParticleOR : public BoolParticleFunctor {
    BoolParticleOR(const std::vector<ParticleSelector>& sels) : selectors(sels) {}
    BoolParticleOR(const ParticleSelector& a, const ParticleSelector& b) : selectors({a,b}) {}
    BoolParticleOR(const ParticleSelector& a, const ParticleSelector& b, const ParticleSelector& c) : selectors({a,b,c}) {}
    bool operator()(const Particle& p) const {
      for (const ParticleSelector& sel : selectors) if (sel(p)) return true;
      return false;
    }
    std::vector<ParticleSelector> selectors;
  };

  /// Operator syntactic sugar for OR construction
  inline BoolParticleOR operator || (const ParticleSelector& a, const ParticleSelector& b) { return BoolParticleOR(a, b); }

  /// Functor for inverting selector logic
  struct BoolParticleNOT : public BoolParticleFunctor {
    BoolParticleNOT(const ParticleSelector& sel) : selector(sel) {}
    bool operator()(const Particle& p) const { return !selector(p); }
    ParticleSelector selector;
  };

  /// Operator syntactic sugar for NOT construction
  inline BoolParticleNOT operator ! (const ParticleSelector& a) { return BoolParticleNOT(a); }
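  // --- Illustrative usage sketch (editorial addition, not part of the original header) ---
  // A minimal example of the operator syntactic sugar above: build a composite
  // ParticleSelector for "prompt charged lepton, and not from a hadron decay".
  // The lambdas and the helper name "exampleCompositeSelector" are hypothetical.
  inline ParticleSelector exampleCompositeSelector() {
    const ParticleSelector isChLeptonSel = [](const Particle& p) {
      return p.abspid() == PID::ELECTRON || p.abspid() == PID::MUON;
    };
    const ParticleSelector isPromptSel   = [](const Particle& p) { return p.isPrompt(); };
    const ParticleSelector fromHadronSel = [](const Particle& p) { return p.fromHadron(); };
    // && builds a BoolParticleAND, ! a BoolParticleNOT; both convert back to ParticleSelector
    return isChLeptonSel && isPromptSel && !fromHadronSel;
  }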
  /// PID matching functor
  struct HasPID : public BoolParticleFunctor {
    HasPID(PdgId pid) : targetpids{pid} { }
    HasPID(vector<PdgId> pids) : targetpids{pids} { }
    HasPID(initializer_list<PdgId> pids) : targetpids{pids} { }
    bool operator()(const Particle& p) const { return contains(targetpids, p.pid()); }
    vector<PdgId> targetpids;
  };
  using hasPID = HasPID;

  /// |PID| matching functor
  struct HasAbsPID : public BoolParticleFunctor {
    HasAbsPID(PdgId pid) : targetapids{abs(pid)} { }
    HasAbsPID(vector<PdgId> pids) { for (PdgId pid : pids) targetapids.push_back(abs(pid)); }
    HasAbsPID(initializer_list<PdgId> pids) { for (PdgId pid : pids) targetapids.push_back(abs(pid)); }
    bool operator()(const Particle& p) const { return contains(targetapids, p.abspid()); }
    vector<PdgId> targetapids;
  };
  using hasAbsPID = HasAbsPID;

  /// Determine whether a particle is the first in a decay chain to meet the cut/function
  struct FirstParticleWith : public BoolParticleFunctor {
    FirstParticleWith(const ParticleSelector& f) : fn(f) { }
    FirstParticleWith(const Cut& c);
    bool operator()(const Particle& p) const { return isFirstWith(p, fn); }
    ParticleSelector fn;
  };
  using firstParticleWith = FirstParticleWith;

  /// Determine whether a particle is the first in a decay chain not to meet the cut/function
  struct FirstParticleWithout : public BoolParticleFunctor {
    FirstParticleWithout(const ParticleSelector& f) : fn(f) { }
    FirstParticleWithout(const Cut& c);
    bool operator()(const Particle& p) const { return isFirstWithout(p, fn); }
    ParticleSelector fn;
  };
  using firstParticleWithout = FirstParticleWithout;

  /// Determine whether a particle is the last in a decay chain to meet the cut/function
  struct LastParticleWith : public BoolParticleFunctor {
    template <typename FN>
    LastParticleWith(const FN& f) : fn(f) { }
    LastParticleWith(const Cut& c);
    bool operator()(const Particle& p) const { return isLastWith(p, fn); }
    std::function<bool(const Particle&)> fn;
  };
  using lastParticleWith = LastParticleWith;

  /// Determine whether a particle is the last in a decay chain not to meet the cut/function
  struct LastParticleWithout : public BoolParticleFunctor {
    LastParticleWithout(const ParticleSelector& f) : fn(f) { }
    LastParticleWithout(const Cut& c);
    bool operator()(const Particle& p) const { return isLastWithout(p, fn); }
    ParticleSelector fn;
  };
  using lastParticleWithout = LastParticleWithout;

  /// Determine whether a particle has an ancestor which meets the cut/function
  struct HasParticleAncestorWith : public BoolParticleFunctor {
    HasParticleAncestorWith(const ParticleSelector& f) : fn(f) { }
    HasParticleAncestorWith(const Cut& c);
    bool operator()(const Particle& p) const { return hasAncestorWith(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleAncestorWith = HasParticleAncestorWith;

  /// Determine whether a particle has an ancestor which doesn't meet the cut/function
  struct HasParticleAncestorWithout : public BoolParticleFunctor {
    HasParticleAncestorWithout(const ParticleSelector& f) : fn(f) { }
    HasParticleAncestorWithout(const Cut& c);
    bool operator()(const Particle& p) const { return hasAncestorWithout(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleAncestorWithout = HasParticleAncestorWithout;

  /// Determine whether a particle has a parent which meets the cut/function
  struct HasParticleParentWith : public BoolParticleFunctor {
    HasParticleParentWith(const ParticleSelector& f) : fn(f) { }
    HasParticleParentWith(const Cut& c);
    bool operator()(const Particle& p) const { return hasParentWith(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleParentWith = HasParticleParentWith;
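  // --- Illustrative usage sketch (editorial addition, not part of the original header) ---
  // A minimal example combining the PID-matching and ancestry functors above: a predicate
  // for "muon with a hadron somewhere in its ancestry", e.g. to flag non-prompt muon
  // candidates. The helper name "exampleIsMuonWithHadronAncestor" is hypothetical; the
  // unbound isHadron classifier is reused as the ParticleSelector, as in hasHadronicDecay above.
  inline bool exampleIsMuonWithHadronAncestor(const Particle& p) {
    const bool ismu = HasAbsPID(PID::MUON)(p);
    return ismu && HasParticleAncestorWith(isHadron)(p);
  }
  // The same functors can equally be passed to any()/all() over a particle collection,
  // as suggested by the group comment above.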
  /// Determine whether a particle has a parent which doesn't meet the cut/function
  struct HasParticleParentWithout : public BoolParticleFunctor {
    HasParticleParentWithout(const ParticleSelector& f) : fn(f) { }
    HasParticleParentWithout(const Cut& c);
    bool operator()(const Particle& p) const { return hasParentWithout(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleParentWithout = HasParticleParentWithout;

  /// Determine whether a particle has a child which meets the cut/function
  struct HasParticleChildWith : public BoolParticleFunctor {
    HasParticleChildWith(const ParticleSelector& f) : fn(f) { }
    HasParticleChildWith(const Cut& c);
    bool operator()(const Particle& p) const { return hasChildWith(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleChildWith = HasParticleChildWith;

  /// Determine whether a particle has a child which doesn't meet the cut/function
  struct HasParticleChildWithout : public BoolParticleFunctor {
    HasParticleChildWithout(const ParticleSelector& f) : fn(f) { }
    HasParticleChildWithout(const Cut& c);
    bool operator()(const Particle& p) const { return hasChildWithout(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleChildWithout = HasParticleChildWithout;

  /// Determine whether a particle has a descendant which meets the cut/function
  struct HasParticleDescendantWith : public BoolParticleFunctor {
    HasParticleDescendantWith(const ParticleSelector& f) : fn(f) { }
    HasParticleDescendantWith(const Cut& c);
    bool operator()(const Particle& p) const { return hasDescendantWith(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleDescendantWith = HasParticleDescendantWith;

  /// Determine whether a particle has a descendant which doesn't meet the cut/function
  struct HasParticleDescendantWithout : public BoolParticleFunctor {
    HasParticleDescendantWithout(const ParticleSelector& f) : fn(f) { }
    HasParticleDescendantWithout(const Cut& c);
    bool operator()(const Particle& p) const { return hasDescendantWithout(p, fn); }
    ParticleSelector fn;
  };
  using hasParticleDescendantWithout = HasParticleDescendantWithout;

  //@}


  /// @name Unbound functions for filtering particles
  //@{

  /// Filter a particle collection in-place to the subset that passes the supplied Cut
  Particles& ifilter_select(Particles& particles, const Cut& c);

  /// Alias for ifilter_select
  /// @deprecated Use ifilter_select
  inline Particles& ifilterBy(Particles& particles, const Cut& c) { return ifilter_select(particles, c); }

  /// Get a copy of a particle collection, filtered to the subset that passes the supplied Cut
  inline Particles filter_select(const Particles& particles, const Cut& c) {
    Particles rtn = particles;
    return ifilter_select(rtn, c);
  }

  /// Alias for filter_select
  /// @deprecated Use filter_select
  inline Particles filterBy(const Particles& particles, const Cut& c) { return filter_select(particles, c); }

  /// Filter a particle collection into the supplied output collection, keeping the subset that passes the supplied Cut
  inline Particles filter_select(const Particles& particles, const Cut& c, Particles& out) {
    out = filter_select(particles, c);
    return out;
  }

  /// Alias for filter_select
  /// @deprecated Use filter_select
  inline Particles filterBy(const Particles& particles, const Cut& c, Particles& out) { return filter_select(particles, c, out); }

  /// Filter a particle collection in-place to the subset that fails the supplied Cut
  Particles& ifilter_discard(Particles& particles, const Cut& c);

  /// Get a copy of a particle collection, filtered to the subset that fails the supplied Cut
  inline Particles filter_discard(const Particles& particles, const Cut& c) {
    Particles rtn = particles;
    return ifilter_discard(rtn, c);
  }
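  // --- Illustrative usage sketch (editorial addition, not part of the original header) ---
  // A minimal example of the Cut-based filters above. It assumes the Cuts::abseta and
  // Cuts::pT cut-makers and the GeV constant are visible at this point (they are in any
  // analysis that includes Rivet/Analysis.hh); the helper name and cut values are hypothetical.
  inline Particles exampleCentralHighPtSelection(Particles ps) {
    // In-place filter: strip particles failing the acceptance cut from the local copy
    ifilter_select(ps, Cuts::abseta < 2.5);
    // Copying filter: return the high-pT subset, leaving `ps` untouched
    return filter_select(ps, Cuts::pT > 25*GeV);
    // (The Kin::sumPt / Kin::sumP4 helpers defined further down could then be applied to the result.)
  }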
  /// Filter a particle collection into the supplied output collection, keeping the subset that fails the supplied Cut
  inline Particles filter_discard(const Particles& particles, const Cut& c, Particles& out) {
    out = filter_discard(particles, c);
    return out;
  }

  // inline void ifilterIsolateDeltaR(Particles& particles, const FourMomenta& vecs) {
  //   ifilter_discard(particles,
  // }

  // inline Particles filterIsolateDeltaR(const Particles& particles, const FourMomenta& vecs) {
  // }

  //@}


  /// @name Particle pair functions
  //@{

  /// Get the PDG ID codes of a ParticlePair
  /// @todo Make ParticlePair a custom class instead?
  inline PdgIdPair pids(const ParticlePair& pp) { return make_pair(pp.first.pid(), pp.second.pid()); }

  //@}


  /// @name Operations on collections of Particle
  /// @note This can't be done on generic collections of ParticleBase -- thanks, C++ :-/
  //@{
  namespace Kin {

    inline double sumPt(const Particles& ps) { return sum(ps, pT, 0.0); }

    inline FourMomentum sumP4(const Particles& ps) { return sum(ps, p4, FourMomentum()); }

    inline Vector3 sumP3(const Particles& ps) { return sum(ps, p3, Vector3()); }

    /// @todo Min dPhi, min dR?
    /// @todo Isolation routines?

  }
  //@}

  // Import Kin namespace into Rivet
  using namespace Kin;

}

#endif