diff --git a/analyses/pluginALICE/ALICE_2010_S8624100.cc b/analyses/pluginALICE/ALICE_2010_S8624100.cc --- a/analyses/pluginALICE/ALICE_2010_S8624100.cc +++ b/analyses/pluginALICE/ALICE_2010_S8624100.cc @@ -1,92 +1,92 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class ALICE_2010_S8624100 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor ALICE_2010_S8624100() : Analysis("ALICE_2010_S8624100") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - ChargedFinalState cfs05(-0.5, 0.5); - ChargedFinalState cfs10(-1.0, 1.0); - ChargedFinalState cfs13(-1.3, 1.3); + ChargedFinalState cfs05((Cuts::etaIn(-0.5, 0.5))); + ChargedFinalState cfs10((Cuts::etaIn(-1.0, 1.0))); + ChargedFinalState cfs13((Cuts::etaIn(-1.3, 1.3))); declare(cfs05, "CFS05"); declare(cfs10, "CFS10"); declare(cfs13, "CFS13"); if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { book(_h_dN_dNch_05 ,11, 1, 1); book(_h_dN_dNch_10 ,12, 1, 1); book(_h_dN_dNch_13 ,13, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 2360, 1E-3)) { book(_h_dN_dNch_05 ,17, 1, 1); book(_h_dN_dNch_10 ,18, 1, 1); book(_h_dN_dNch_13 ,19, 1, 1); } } /// Perform the per-event analysis void analyze(const Event& event) { const ChargedFinalState& charged_05 = apply(event, "CFS05"); const ChargedFinalState& charged_10 = apply(event, "CFS10"); const ChargedFinalState& charged_13 = apply(event, "CFS13"); _h_dN_dNch_05->fill(charged_05.size()); _h_dN_dNch_10->fill(charged_10.size()); _h_dN_dNch_13->fill(charged_13.size()); } /// Normalise histograms etc., after the run void finalize() { normalize(_h_dN_dNch_05); normalize(_h_dN_dNch_10); normalize(_h_dN_dNch_13); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_dN_dNch_05; Histo1DPtr _h_dN_dNch_10; Histo1DPtr _h_dN_dNch_13; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2010_S8624100); } diff --git 
a/analyses/pluginALICE/ALICE_2010_S8625980.cc b/analyses/pluginALICE/ALICE_2010_S8625980.cc --- a/analyses/pluginALICE/ALICE_2010_S8625980.cc +++ b/analyses/pluginALICE/ALICE_2010_S8625980.cc @@ -1,97 +1,97 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class ALICE_2010_S8625980 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor ALICE_2010_S8625980() : Analysis("ALICE_2010_S8625980") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - ChargedFinalState cfs(-1.0, 1.0); + ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0))); declare(cfs, "CFS"); if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { book(_h_dN_deta ,4, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 2360, 1E-3)) { book(_h_dN_deta ,5, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) { book(_h_dN_deta ,6, 1, 1); book(_h_dN_dNch ,3, 1, 1); } book(_Nevt_after_cuts, "Nevt_after_cuts"); } /// Perform the per-event analysis void analyze(const Event& event) { const ChargedFinalState& charged = apply(event, "CFS"); if (charged.size() < 1) { vetoEvent; } _Nevt_after_cuts->fill(); for (const Particle& p : charged.particles()) { const double eta = p.eta(); _h_dN_deta->fill(eta); } if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) { _h_dN_dNch->fill(charged.size()); } } /// Normalise histograms etc., after the run void finalize() { if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) { normalize(_h_dN_dNch); } scale(_h_dN_deta, 1.0/ *_Nevt_after_cuts); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_dN_deta; Histo1DPtr _h_dN_dNch; CounterPtr _Nevt_after_cuts; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2010_S8625980); } diff --git a/analyses/pluginALICE/ALICE_2010_S8706239.cc b/analyses/pluginALICE/ALICE_2010_S8706239.cc --- a/analyses/pluginALICE/ALICE_2010_S8706239.cc +++ b/analyses/pluginALICE/ALICE_2010_S8706239.cc @@ -1,100 +1,100 @@ // -*- 
C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class ALICE_2010_S8706239 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor ALICE_2010_S8706239() : Analysis("ALICE_2010_S8706239") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - ChargedFinalState cfs(-0.8, 0.8, 0.15); + ChargedFinalState cfs((Cuts::etaIn(-0.8, 0.8) && Cuts::pT >= 0.15)); declare(cfs, "CFS"); book(_h_pT ,4, 1, 1); book(_h_pT_Nch_015 ,11, 1, 1); book(_h_pT_Nch_05 ,12, 1, 1); book(_Nevt_after_cuts,"Nevt_after_cuts"); } /// Perform the per-event analysis void analyze(const Event& event) { const ChargedFinalState& charged = apply(event, "CFS"); _Nevt_after_cuts->fill(); // Get number of particles that fulfill certain pT requirements int Nch_015 = 0; int Nch_05 = 0; for (const Particle& p : charged.particles()) { double pT = p.pT()/GeV; if (pT < 4.0) Nch_015++; if (pT > 0.5 && pT < 4.0) Nch_05++; } // Now we can fill histograms for (const Particle& p : charged.particles()) { double pT = p.pT()/GeV; if (pT < 4.0) _h_pT_Nch_015 ->fill(Nch_015, pT); if (pT > 0.5 && pT < 4.0) _h_pT_Nch_05 ->fill(Nch_05, pT); // To get the Yield, fill appropriate weight 1/(2PI * pT * d eta) _h_pT->fill(pT, 1.0 /(TWOPI*pT*1.6) ); } } /// Normalise histograms etc., after the run void finalize() { scale(_h_pT, 1.0/ *_Nevt_after_cuts); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_pT; Profile1DPtr _h_pT_Nch_015 ; Profile1DPtr _h_pT_Nch_05 ; CounterPtr _Nevt_after_cuts; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2010_S8706239); } diff --git a/analyses/pluginALICE/ALICE_2011_S8945144.cc b/analyses/pluginALICE/ALICE_2011_S8945144.cc --- a/analyses/pluginALICE/ALICE_2011_S8945144.cc +++ b/analyses/pluginALICE/ALICE_2011_S8945144.cc @@ -1,104 +1,104 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include 
"Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class ALICE_2011_S8945144 : public Analysis { public: ALICE_2011_S8945144() : Analysis("ALICE_2011_S8945144") {} public: void init() { - const ChargedFinalState cfs(-15, 15); + const ChargedFinalState cfs((Cuts::etaIn(-15, 15))); declare(cfs, "CFS"); book(_histPtPions ,"d01-x01-y01"); book(_histPtAntiPions ,"d01-x01-y02"); book(_histPtKaons ,"d02-x01-y01"); book(_histPtAntiKaons ,"d02-x01-y02"); book(_histPtProtons ,"d03-x01-y01"); book(_histPtAntiProtons ,"d03-x01-y02"); book(_histAveragePt ,"d04-x01-y01"); } void analyze(const Event& event) { const ChargedFinalState& cfs = apply(event, "CFS"); for (const Particle& p : cfs.particles()) { if(p.absrap()<0.5) { switch (p.pid()) { case 211: _histPtPions->fill(p.pT()/GeV); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV); break; case -211: _histPtAntiPions->fill(p.pT()/GeV); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV); break; case 2212: if ( !(p.hasAncestor(3322) || // Xi0 p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigma+/- p.hasAncestor(3312) || p.hasAncestor(-3312) ) ) { // Xi-/+ _histPtProtons->fill(p.pT()/GeV); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV); } break; case -2212: if ( !(p.hasAncestor(3322) || // Xi0 p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigma+/- p.hasAncestor(3312) || p.hasAncestor(-3312) ) ) { // Xi-/+ _histPtAntiProtons->fill(p.pT()/GeV); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV); } break; case 321: _histPtKaons->fill(p.pT()/GeV); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV); break; case -321: _histPtAntiKaons->fill(p.pT()/GeV); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV); break; } } } } void finalize() { scale(_histPtPions, 1./sumOfWeights()); scale(_histPtProtons, 1./sumOfWeights()); scale(_histPtKaons, 1./sumOfWeights()); scale(_histPtAntiPions, 1./sumOfWeights()); scale(_histPtAntiProtons, 
1./sumOfWeights()); scale(_histPtAntiKaons, 1./sumOfWeights()); } private: Histo1DPtr _histPtPions; Histo1DPtr _histPtProtons; Histo1DPtr _histPtKaons; Histo1DPtr _histPtAntiPions; Histo1DPtr _histPtAntiProtons; Histo1DPtr _histPtAntiKaons; Profile1DPtr _histAveragePt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2011_S8945144); } diff --git a/analyses/pluginATLAS/ATLAS_2010_CONF_2010_049.cc b/analyses/pluginATLAS/ATLAS_2010_CONF_2010_049.cc --- a/analyses/pluginATLAS/ATLAS_2010_CONF_2010_049.cc +++ b/analyses/pluginATLAS/ATLAS_2010_CONF_2010_049.cc @@ -1,125 +1,125 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2010_CONF_2010_049 : public Analysis { public: ATLAS_2010_CONF_2010_049() : Analysis("ATLAS_2010_CONF_2010_049") { } void init() { - ChargedFinalState cfs(-1.5, 1.5, 0.5*GeV); + ChargedFinalState cfs((Cuts::etaIn(-1.5, 1.5) && Cuts::pT >= 0.5*GeV)); declare(cfs, "CFS"); FastJets jetsproj6(cfs, FastJets::ANTIKT, 0.6); declare(jetsproj6, "Jets6"); FastJets jetsproj4(cfs, FastJets::ANTIKT, 0.4); declare(jetsproj4, "Jets4"); // @todo tmp YOs for (size_t i=0 ; i<2 ; i++) { book(_h_xsec[i] ,1+i, 1, 1); book(_h_frag_04_06[i] ,3+i, 1, 1); book(_h_frag_06_10[i] ,3+i, 2, 1); book(_h_frag_10_15[i] ,3+i, 3, 1); book(_h_frag_15_24[i] ,3+i, 4, 1); book(_njets_04_06[i], "njets_04_06"); book(_njets_06_10[i], "njets_06_10"); book(_njets_10_15[i], "njets_10_15"); book(_njets_15_24[i], "njets_15_24"); } } void analyze(const Event& event) { const FastJets & jetsproj6 = apply(event, "Jets6"); const FastJets & jetsproj4 = apply(event, "Jets4"); Jets alljets[2]; alljets[0] = jetsproj6.jetsByPt(4.0*GeV); alljets[1] = jetsproj4.jetsByPt(4.0*GeV); for (size_t i=0 ; i<2 ; i++) { Jets jets; // First we want to make sure that we only use jets within |eta|<0.57 for (const Jet& jet : alljets[i]) { if (jet.abseta()<0.57) { jets.push_back(jet); } } for 
(const Jet& jet : jets) { const double pTjet = jet.pT(); const double pjet = jet.p3().mod(); _h_xsec[i]->fill(pTjet); if (pTjet > 24*GeV) continue; for (const Particle& p : jet.particles()) { double z = p.p3().mod()/pjet; if (z >= 1) z = 0.9999; // Make sure that z=1 doesn't go into overflow if (pTjet > 15*GeV) { _h_frag_15_24[i]->fill(z); } else if (pTjet > 10*GeV) { _h_frag_10_15[i]->fill(z); } else if (pTjet > 6*GeV) { _h_frag_06_10[i]->fill(z); } else { _h_frag_04_06[i]->fill(z); } } if (pTjet > 15*GeV) { _njets_15_24[i]->fill(); } else if (pTjet > 10*GeV) { _njets_10_15[i]->fill(); } else if (pTjet > 6*GeV) { _njets_06_10[i]->fill(); } else { _njets_04_06[i]->fill(); } } } } void finalize() { for (size_t i=0 ; i<2 ; i++) { // deta = 2*0.57 scale(_h_xsec[i], crossSection()/microbarn/sumOfWeights()/(2*0.57)); scale(_h_frag_04_06[i], 1./_njets_04_06[i]->val()); scale(_h_frag_06_10[i], 1./_njets_06_10[i]->val()); scale(_h_frag_10_15[i], 1./_njets_10_15[i]->val()); scale(_h_frag_15_24[i], 1./_njets_15_24[i]->val()); } } private: Histo1DPtr _h_xsec[2]; Histo1DPtr _h_frag_04_06[2]; Histo1DPtr _h_frag_06_10[2]; Histo1DPtr _h_frag_10_15[2]; Histo1DPtr _h_frag_15_24[2]; CounterPtr _njets_04_06[2]; CounterPtr _njets_06_10[2]; CounterPtr _njets_10_15[2]; CounterPtr _njets_15_24[2]; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2010_CONF_2010_049); } diff --git a/analyses/pluginATLAS/ATLAS_2010_S8591806.cc b/analyses/pluginATLAS/ATLAS_2010_S8591806.cc --- a/analyses/pluginATLAS/ATLAS_2010_S8591806.cc +++ b/analyses/pluginATLAS/ATLAS_2010_S8591806.cc @@ -1,71 +1,71 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// @brief ATLAS minimum bias analysis at 900 GeV /// @author Frank Siegert class ATLAS_2010_S8591806 : public Analysis { public: ATLAS_2010_S8591806() : Analysis("ATLAS_2010_S8591806") { } void init() { - ChargedFinalState cfs(-2.5, 2.5, 0.5*GeV); + ChargedFinalState 
cfs((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 0.5*GeV)); declare(cfs, "CFS"); book(_h_dNch_deta ,2, 1, 1); book(_h_dNch_dpT ,3, 1, 1); book(_h_dNevt_dNch ,4, 1, 1); book(_p_meanpT_Nch ,5, 1, 1); book(_Nevt_after_cuts, "nevt_pass"); } void analyze(const Event& event) { const ChargedFinalState& charged = apply(event, "CFS"); if (charged.size() < 1) { vetoEvent; } _Nevt_after_cuts->fill(); _h_dNevt_dNch->fill(charged.size()); for (const Particle& p : charged.particles()) { double pT = p.pT()/GeV; _h_dNch_deta->fill(p.eta()); _h_dNch_dpT->fill(pT, 1.0/pT); _p_meanpT_Nch->fill(charged.size(), pT); } } void finalize() { double deta = 5.0; scale(_h_dNch_deta, 1.0/_Nevt_after_cuts->val()); scale(_h_dNch_dpT, 1.0/_Nevt_after_cuts->val()/TWOPI/deta); scale(_h_dNevt_dNch, 1.0/_Nevt_after_cuts->val()); } private: Histo1DPtr _h_dNch_deta; Histo1DPtr _h_dNch_dpT; Histo1DPtr _h_dNevt_dNch; Profile1DPtr _p_meanpT_Nch; CounterPtr _Nevt_after_cuts; }; //DECLARE_RIVET_PLUGIN(ATLAS_2010_S8591806); DECLARE_ALIASED_RIVET_PLUGIN(ATLAS_2010_S8591806, ATLAS_2010_I849050); } diff --git a/analyses/pluginATLAS/ATLAS_2010_S8894728.cc b/analyses/pluginATLAS/ATLAS_2010_S8894728.cc --- a/analyses/pluginATLAS/ATLAS_2010_S8894728.cc +++ b/analyses/pluginATLAS/ATLAS_2010_S8894728.cc @@ -1,340 +1,340 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class ATLAS_2010_S8894728 : public Analysis { public: ATLAS_2010_S8894728() : Analysis("ATLAS_2010_S8894728") { } void init() { - const ChargedFinalState cfs100(-2.5, 2.5, 100*MeV); + const ChargedFinalState cfs100((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 100*MeV)); declare(cfs100, "CFS100"); - const ChargedFinalState cfs500(-2.5, 2.5, 500*MeV); + const ChargedFinalState cfs500((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 500*MeV)); declare(cfs500, "CFS500"); - const ChargedFinalState cfslead(-2.5, 2.5, 1.0*GeV); + const ChargedFinalState cfslead((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 1.0*GeV)); 
declare(cfslead, "CFSlead"); // Get an index for the beam energy int isqrts = -1; if (fuzzyEquals(sqrtS(), 900*GeV)) isqrts = 0; else if (fuzzyEquals(sqrtS(), 7*TeV)) isqrts = 1; assert(isqrts >= 0); // Nch profiles, 500 MeV track pT cut book(_hist_nch_transverse_500[0] ,1+isqrts, 1, 1); book(_hist_nch_toward_500 ,1+isqrts, 1, 2); book(_hist_nch_away_500 ,1+isqrts, 1, 3); // pTsum profiles, 500 MeV track pT cut book(_hist_ptsum_transverse_500[0] ,3+isqrts, 1, 1); book(_hist_ptsum_toward_500 ,3+isqrts, 1, 2); book(_hist_ptsum_away_500 ,3+isqrts, 1, 3); // Standard deviation profiles // First the higher moments of main profiles to calculate variance and error on variance... for (size_t i = 1; i < 4; ++i) { book(_hist_nch_transverse_500[i], "TMP/nch"+to_str(i), refData(1+isqrts, 1, 1)); book(_hist_ptsum_transverse_500[i], "TMP/ptsum"+to_str(i), refData(3+isqrts, 1, 1)); } // Then the data point sets into which the results will be inserted book(_dps_sdnch_transverse_500 , 5+isqrts, 1, 1); book(_dps_sdptsum_transverse_500, 7+isqrts, 1, 1); // profiles, 500 MeV track pT cut book(_hist_ptavg_transverse_500 ,9+isqrts, 1, 1); book(_hist_ptavg_toward_500 ,9+isqrts, 1, 2); book(_hist_ptavg_away_500 ,9+isqrts, 1, 3); // vs. Nch profiles, 500 MeV track pT cut book(_hist_dn_dpt_transverse_500 ,11+isqrts, 1, 1); book(_hist_dn_dpt_toward_500 ,11+isqrts, 1, 2); book(_hist_dn_dpt_away_500 ,11+isqrts, 1, 3); // Nch vs. Delta(phi) profiles, 500 MeV track pT cut book(_hist_N_vs_dPhi_1_500 ,13+isqrts, 1, 1); book(_hist_N_vs_dPhi_2_500 ,13+isqrts, 1, 2); book(_hist_N_vs_dPhi_3_500 ,13+isqrts, 1, 3); book(_hist_N_vs_dPhi_5_500 ,13+isqrts, 1, 4); // pT vs. 
Delta(phi) profiles, 500 MeV track pT cut book(_hist_pT_vs_dPhi_1_500 ,15+isqrts, 1, 1); book(_hist_pT_vs_dPhi_2_500 ,15+isqrts, 1, 2); book(_hist_pT_vs_dPhi_3_500 ,15+isqrts, 1, 3); book(_hist_pT_vs_dPhi_5_500 ,15+isqrts, 1, 4); // Nch and pTsum profiles, 100 MeV track pT cut book(_hist_nch_transverse_100 ,17+isqrts, 1, 1); book(_hist_nch_toward_100 ,17+isqrts, 1, 2); book(_hist_nch_away_100 ,17+isqrts, 1, 3); book(_hist_ptsum_transverse_100 ,19+isqrts, 1, 1); book(_hist_ptsum_toward_100 ,19+isqrts, 1, 2); book(_hist_ptsum_away_100 ,19+isqrts, 1, 3); // Profiles vs. eta (7 TeV only) if (isqrts == 1) { book(_hist_nch_vs_eta_transverse_100 ,21, 1, 1); book(_hist_ptsum_vs_eta_transverse_100 ,22, 1, 1); } } // Little helper function to identify Delta(phi) regions inline int region_index(double dphi) { assert(inRange(dphi, 0.0, PI, CLOSED, CLOSED)); if (dphi < PI/3.0) return 0; if (dphi < 2*PI/3.0) return 1; return 2; } void analyze(const Event& event) { // Require at least one track in the event with pT >= 1 GeV const ChargedFinalState& cfslead = apply(event, "CFSlead"); if (cfslead.size() < 1) { vetoEvent; } // These are the charged particles (tracks) with pT > 500 MeV const ChargedFinalState& charged500 = apply(event, "CFS500"); // Identify leading track and its phi and pT (this is the same for both the 100 MeV and 500 MeV track cuts) Particles particles500 = charged500.particlesByPt(); Particle p_lead = particles500[0]; const double philead = p_lead.phi(); const double etalead = p_lead.eta(); const double pTlead = p_lead.perp(); MSG_DEBUG("Leading track: pT = " << pTlead << ", eta = " << etalead << ", phi = " << philead); // Iterate over all > 500 MeV particles and count particles and scalar pTsum in the three regions vector num500(3, 0), ptSum500(3, 0.0); // Temporary histos that bin Nch and pT in dPhi. // NB. 
Only one of each needed since binnings are the same for the energies and pT cuts Histo1D hist_num_dphi_500(refData(13,1,1)); Histo1D hist_pt_dphi_500(refData(15,1,1)); for (const Particle& p : particles500) { const double pT = p.pT(); const double dPhi = deltaPhi(philead, p.phi()); const int ir = region_index(dPhi); num500[ir] += 1; ptSum500[ir] += pT; // Fill temp histos to bin Nch and pT in dPhi if (p.genParticle() != p_lead.genParticle()) { // We don't want to fill all those zeros from the leading track... hist_num_dphi_500.fill(dPhi, 1); hist_pt_dphi_500.fill(dPhi, pT); } } // Iterate over charged particles again for profiles against Nch // This is necessary since the Nch are region-specific and so are only known after the first loop for (const Particle& p : particles500) { const double pT = p.pT(); const double dPhi = deltaPhi(philead, p.phi()); const int ir = region_index(dPhi); switch (ir) { case 0: _hist_dn_dpt_toward_500->fill(num500[0], pT); break; case 1: _hist_dn_dpt_transverse_500->fill(num500[1], pT); break; case 2: _hist_dn_dpt_away_500->fill(num500[2], pT); break; default: assert(false && "How did we get here?"); } } // Now fill underlying event histograms // The densities are calculated by dividing the UE properties by dEta*dPhi // -- each region has a dPhi of 2*PI/3 and dEta is two times 2.5 const double dEtadPhi = (2*2.5 * 2*PI/3.0); // Transverse profiles need 4 orders of moments for stddev with errors for (int i = 0; i < 4; ++i) { _hist_nch_transverse_500[i]->fill(pTlead/GeV, intpow(num500[1]/dEtadPhi, i+1)); _hist_ptsum_transverse_500[i]->fill(pTlead/GeV, intpow(ptSum500[1]/GeV/dEtadPhi, i+1)); } // Toward and away profiles only need the first moment (the mean) _hist_nch_toward_500->fill(pTlead/GeV, num500[0]/dEtadPhi); _hist_nch_away_500->fill(pTlead/GeV, num500[2]/dEtadPhi); _hist_ptsum_toward_500->fill(pTlead/GeV, ptSum500[0]/GeV/dEtadPhi); _hist_ptsum_away_500->fill(pTlead/GeV, ptSum500[2]/GeV/dEtadPhi); // profiles //MSG_INFO("Trans pT1, 
pTsum, Nch, " << pTlead/GeV << ", " << ptSum500[1]/GeV << ", " << num500[1] << ", " << ptSum500[1]/GeV/num500[1]); if (num500[1] > 0) _hist_ptavg_transverse_500->fill(pTlead/GeV, ptSum500[1]/GeV/num500[1]); if (num500[0] > 0) _hist_ptavg_toward_500->fill(pTlead/GeV, ptSum500[0]/GeV/num500[0]); if (num500[2] > 0) _hist_ptavg_away_500->fill(pTlead/GeV, ptSum500[2]/GeV/num500[2]); // Update the "proper" dphi profile histograms // Note that we fill dN/dEtadPhi: dEta = 2*2.5, dPhi = 2*PI/nBins // The values tabulated in the note are for an (undefined) signed Delta(phi) rather than // |Delta(phi)| and so differ by a factor of 2: we have to actually norm for angular range = 2pi const size_t nbins = refData(13,1,1).numPoints(); std::vector ptcut; if (fuzzyEquals(sqrtS(), 900*GeV)) { ptcut += 1.0; ptcut += 1.5; ptcut += 2.0; ptcut += 2.5; } else if (fuzzyEquals(sqrtS(), 7*TeV)) { ptcut += 1.0; ptcut += 2.0; ptcut += 3.0; ptcut += 5.0; } assert(ptcut.size() == 4); for (size_t i = 0; i < nbins; ++i) { // First Nch double mean = hist_num_dphi_500.bin(i).xMid(); double value = 0.; if (hist_num_dphi_500.bin(i).numEntries() > 0) { mean = hist_num_dphi_500.bin(i).xMean(); value = hist_num_dphi_500.bin(i).area()/hist_num_dphi_500.bin(i).xWidth()/10.0; } if (pTlead/GeV >= ptcut[0]) _hist_N_vs_dPhi_1_500->fill(mean, value); if (pTlead/GeV >= ptcut[1]) _hist_N_vs_dPhi_2_500->fill(mean, value); if (pTlead/GeV >= ptcut[2]) _hist_N_vs_dPhi_3_500->fill(mean, value); if (pTlead/GeV >= ptcut[3]) _hist_N_vs_dPhi_5_500->fill(mean, value); // Then pT mean = hist_pt_dphi_500.bin(i).xMid(); value = 0.; if (hist_pt_dphi_500.bin(i).numEntries() > 0) { mean = hist_pt_dphi_500.bin(i).xMean(); value = hist_pt_dphi_500.bin(i).area()/hist_pt_dphi_500.bin(i).xWidth()/10.0; } if (pTlead/GeV >= ptcut[0]) _hist_pT_vs_dPhi_1_500->fill(mean, value); if (pTlead/GeV >= ptcut[1]) _hist_pT_vs_dPhi_2_500->fill(mean, value); if (pTlead/GeV >= ptcut[2]) _hist_pT_vs_dPhi_3_500->fill(mean, value); if (pTlead/GeV >= 
ptcut[3]) _hist_pT_vs_dPhi_5_500->fill(mean, value); } ////////////////////// // These are the charged particles (tracks) with pT > 100 MeV const ChargedFinalState& charged100 = apply(event, "CFS100"); // Iterate over all > 100 MeV particles and count particles and scalar pTsum in the three regions vector num100(3, 0), ptSum100(3, 0.0); for (const Particle& p : charged100.particles()) { const double pT = p.pT(); const double dPhi = deltaPhi(philead, p.phi()); const int ir = region_index(dPhi); num100[ir] += 1; ptSum100[ir] += pT; } // Now fill the two sets of 100 MeV underlying event histograms _hist_nch_transverse_100->fill(pTlead/GeV, num100[1]/dEtadPhi); _hist_nch_toward_100->fill(pTlead/GeV, num100[0]/dEtadPhi); _hist_nch_away_100->fill(pTlead/GeV, num100[2]/dEtadPhi); _hist_ptsum_transverse_100->fill(pTlead/GeV, ptSum100[1]/GeV/dEtadPhi); _hist_ptsum_toward_100->fill(pTlead/GeV, ptSum100[0]/GeV/dEtadPhi); _hist_ptsum_away_100->fill(pTlead/GeV, ptSum100[2]/GeV/dEtadPhi); // And finally the Nch and pT vs eta_lead profiles (again from > 100 MeV tracks, and only at 7 TeV) if (fuzzyEquals(sqrtS(), 7*TeV) && pTlead > 5*GeV) { // MSG_INFO(sqrtS() << " " << pTlead << " " << ptSum100[1]/dEtadPhi << " " << num100[1]/dEtadPhi); _hist_nch_vs_eta_transverse_100->fill(etalead, num100[1]/dEtadPhi); _hist_ptsum_vs_eta_transverse_100->fill(etalead, ptSum100[1]/GeV/dEtadPhi); } } void finalize() { // Convert the various moments of the 500 MeV trans pT and Nch distributions to std devs with correct error _moments_to_stddev(_hist_nch_transverse_500, _dps_sdnch_transverse_500); _moments_to_stddev(_hist_ptsum_transverse_500, _dps_sdptsum_transverse_500); } private: inline void _moments_to_stddev(Profile1DPtr moment_profiles[], Scatter2DPtr target_dps) { for (size_t b = 0; b < moment_profiles[0]->numBins(); ++b) { // loop over points /// @todo Assuming unit weights here! Should use N_effective = sumW**2/sumW2? 
const double numentries = moment_profiles[0]->bin(b).numEntries(); const double x = moment_profiles[0]->bin(b).xMid(); const double ex = moment_profiles[0]->bin(b).xWidth()/2.; double var = 0.; double sd = 0.; if (numentries > 0) { var = moment_profiles[1]->bin(b).mean() - intpow(moment_profiles[0]->bin(b).mean(), 2); sd = fuzzyLessEquals(var,0.) ? 0 : sqrt(var); ///< Numerical safety check } if (sd == 0 || numentries < 3) { MSG_WARNING("Need at least 3 bin entries and a non-zero central value to calculate " << "an error on standard deviation profiles (bin " << b << ")"); target_dps->addPoint(x, sd, ex, 0); continue; } // c2(y) = m4(x) - 4 m3(x) m1(x) - m2(x)^2 + 8 m2(x) m1(x)^2 - 4 m1(x)^4 const double var_on_var = moment_profiles[3]->bin(b).mean() - 4 * moment_profiles[2]->bin(b).mean() * moment_profiles[0]->bin(b).mean() - intpow(moment_profiles[1]->bin(b).mean(), 2) + 8 * moment_profiles[1]->bin(b).mean() * intpow(moment_profiles[0]->bin(b).mean(), 2) - 4 * intpow(moment_profiles[0]->bin(b).mean(), 4); const double stderr_on_var = sqrt(var_on_var/(numentries-2.0)); const double stderr_on_sd = stderr_on_var / (2.0*sd); target_dps->addPoint(x, sd, ex, stderr_on_sd); } } private: Profile1DPtr _hist_nch_transverse_500[4]; Profile1DPtr _hist_nch_toward_500; Profile1DPtr _hist_nch_away_500; Profile1DPtr _hist_ptsum_transverse_500[4]; Profile1DPtr _hist_ptsum_toward_500; Profile1DPtr _hist_ptsum_away_500; Scatter2DPtr _dps_sdnch_transverse_500; Scatter2DPtr _dps_sdptsum_transverse_500; Profile1DPtr _hist_ptavg_transverse_500; Profile1DPtr _hist_ptavg_toward_500; Profile1DPtr _hist_ptavg_away_500; Profile1DPtr _hist_dn_dpt_transverse_500; Profile1DPtr _hist_dn_dpt_toward_500; Profile1DPtr _hist_dn_dpt_away_500; Profile1DPtr _hist_N_vs_dPhi_1_500; Profile1DPtr _hist_N_vs_dPhi_2_500; Profile1DPtr _hist_N_vs_dPhi_3_500; Profile1DPtr _hist_N_vs_dPhi_5_500; Profile1DPtr _hist_pT_vs_dPhi_1_500; Profile1DPtr _hist_pT_vs_dPhi_2_500; Profile1DPtr _hist_pT_vs_dPhi_3_500; 
Profile1DPtr _hist_pT_vs_dPhi_5_500; Profile1DPtr _hist_nch_transverse_100; Profile1DPtr _hist_nch_toward_100; Profile1DPtr _hist_nch_away_100; Profile1DPtr _hist_ptsum_transverse_100; Profile1DPtr _hist_ptsum_toward_100; Profile1DPtr _hist_ptsum_away_100; Profile1DPtr _hist_nch_vs_eta_transverse_100; Profile1DPtr _hist_ptsum_vs_eta_transverse_100; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2010_S8894728); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I919017.cc b/analyses/pluginATLAS/ATLAS_2011_I919017.cc --- a/analyses/pluginATLAS/ATLAS_2011_I919017.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I919017.cc @@ -1,1220 +1,1220 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { namespace { inline double calcz(const Jet& j, const Particle& p) { const double num = j.p3().dot(p.p3()); const double den = j.p3().mod2(); return num/den; } inline double calcptrel(const Jet& j, const Particle& p) { const double num = j.p3().cross(p.p3()).mod(); const double den = j.p3().mod(); return num/den; } inline double calcr(const Jet& j, const Particle& p) { return deltaR(j.rapidity(), j.phi(), p.rapidity(), p.phi()); } // For annulus area kludge /// @todo Improve somehow... need normalisation *without* bin width factors! inline double calcrweight(const Jet& j, const Particle& p) { size_t nBins_r = 26; double bins_r[] = { 0.00, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.12, 0.14, 0.16, 0.18, 0.20, 0.22, 0.24, 0.26, 0.28, 0.30, 0.35, 0.40, 0.45, 0.50, 0.55, 0.60 }; double r = calcr(j,p); for (size_t bin = 0 ; bin < nBins_r ; bin++) { if (r < bins_r[bin+1]) { double up = bins_r[bin+1]; double down = bins_r[bin]; return ((up-down)/(M_PI*(up*up-down*down))); } } return 1.0; } } class ATLAS_2011_I919017 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor ATLAS_2011_I919017() : Analysis("ATLAS_2011_I919017") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - ChargedFinalState cfs(-2.5, 2.5, 0.3*GeV); + ChargedFinalState cfs((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 0.3*GeV)); FastJets trkjets04(cfs, FastJets::ANTIKT, 0.4); FastJets trkjets06(cfs, FastJets::ANTIKT, 0.6); declare(trkjets04, "Jets04"); declare(trkjets06, "Jets06"); // Book histograms book(_h_pt04_00_05 ,1, 1, 1); book(_h_pt06_00_05 ,2, 1, 1); book(_h_N04_00_05_04_06 ,1, 2, 1+5); book(_h_N06_00_05_04_06 ,2, 2, 1+5); book(_h_N04_00_05_06_10 ,1, 2, 2+5); book(_h_N06_00_05_06_10 ,2, 2, 2+5); book(_h_N04_00_05_10_15 ,1, 2, 3+5); book(_h_N06_00_05_10_15 ,2, 2, 3+5); book(_h_N04_00_05_15_24 ,1, 2, 4+5); book(_h_N06_00_05_15_24 ,2, 2, 4+5); book(_h_N04_00_05_24_40 ,1, 2, 5+5); book(_h_N06_00_05_24_40 ,2, 2, 5+5); book(_h_z04_00_05_04_06 ,1, 3, 1+5); book(_h_z06_00_05_04_06 ,2, 3, 1+5); book(_h_z04_00_05_06_10 ,1, 3, 2+5); book(_h_z06_00_05_06_10 ,2, 3, 2+5); book(_h_z04_00_05_10_15 ,1, 3, 3+5); book(_h_z06_00_05_10_15 ,2, 3, 3+5); book(_h_z04_00_05_15_24 ,1, 3, 4+5); book(_h_z06_00_05_15_24 ,2, 3, 4+5); book(_h_z04_00_05_24_40 ,1, 3, 5+5); book(_h_z06_00_05_24_40 ,2, 3, 5+5); book(_h_ptrel04_00_05_04_06 ,1, 4, 1+5); book(_h_ptrel06_00_05_04_06 ,2, 4, 1+5); book(_h_ptrel04_00_05_06_10 ,1, 4, 2+5); book(_h_ptrel06_00_05_06_10 ,2, 4, 2+5); book(_h_ptrel04_00_05_10_15 ,1, 4, 3+5); book(_h_ptrel06_00_05_10_15 ,2, 4, 3+5); book(_h_ptrel04_00_05_15_24 ,1, 4, 4+5); book(_h_ptrel06_00_05_15_24 ,2, 4, 4+5); book(_h_ptrel04_00_05_24_40 ,1, 4, 5+5); book(_h_ptrel06_00_05_24_40 ,2, 4, 5+5); book(_h_rdA04_00_05_04_06 ,1, 5, 1+5); book(_h_rdA06_00_05_04_06 ,2, 5, 1+5); book(_h_rdA04_00_05_06_10 ,1, 5, 2+5); book(_h_rdA06_00_05_06_10 ,2, 5, 2+5); book(_h_rdA04_00_05_10_15 ,1, 5, 3+5); book(_h_rdA06_00_05_10_15 ,2, 5, 3+5); book(_h_rdA04_00_05_15_24 ,1, 5, 4+5); 
book(_h_rdA06_00_05_15_24 ,2, 5, 4+5); book(_h_rdA04_00_05_24_40 ,1, 5, 5+5); book(_h_rdA06_00_05_24_40 ,2, 5, 5+5); book(_h_pt04_05_10 ,1, 1, 2); book(_h_pt06_05_10 ,2, 1, 2); book(_h_N04_05_10_04_06 ,1, 2, 1+10); book(_h_N06_05_10_04_06 ,2, 2, 1+10); book(_h_N04_05_10_06_10 ,1, 2, 2+10); book(_h_N06_05_10_06_10 ,2, 2, 2+10); book(_h_N04_05_10_10_15 ,1, 2, 3+10); book(_h_N06_05_10_10_15 ,2, 2, 3+10); book(_h_N04_05_10_15_24 ,1, 2, 4+10); book(_h_N06_05_10_15_24 ,2, 2, 4+10); book(_h_N04_05_10_24_40 ,1, 2, 5+10); book(_h_N06_05_10_24_40 ,2, 2, 5+10); book(_h_z04_05_10_04_06 ,1, 3, 1+10); book(_h_z06_05_10_04_06 ,2, 3, 1+10); book(_h_z04_05_10_06_10 ,1, 3, 2+10); book(_h_z06_05_10_06_10 ,2, 3, 2+10); book(_h_z04_05_10_10_15 ,1, 3, 3+10); book(_h_z06_05_10_10_15 ,2, 3, 3+10); book(_h_z04_05_10_15_24 ,1, 3, 4+10); book(_h_z06_05_10_15_24 ,2, 3, 4+10); book(_h_z04_05_10_24_40 ,1, 3, 5+10); book(_h_z06_05_10_24_40 ,2, 3, 5+10); book(_h_ptrel04_05_10_04_06 ,1, 4, 1+10); book(_h_ptrel06_05_10_04_06 ,2, 4, 1+10); book(_h_ptrel04_05_10_06_10 ,1, 4, 2+10); book(_h_ptrel06_05_10_06_10 ,2, 4, 2+10); book(_h_ptrel04_05_10_10_15 ,1, 4, 3+10); book(_h_ptrel06_05_10_10_15 ,2, 4, 3+10); book(_h_ptrel04_05_10_15_24 ,1, 4, 4+10); book(_h_ptrel06_05_10_15_24 ,2, 4, 4+10); book(_h_ptrel04_05_10_24_40 ,1, 4, 5+10); book(_h_ptrel06_05_10_24_40 ,2, 4, 5+10); book(_h_rdA04_05_10_04_06 ,1, 5, 1+10); book(_h_rdA06_05_10_04_06 ,2, 5, 1+10); book(_h_rdA04_05_10_06_10 ,1, 5, 2+10); book(_h_rdA06_05_10_06_10 ,2, 5, 2+10); book(_h_rdA04_05_10_10_15 ,1, 5, 3+10); book(_h_rdA06_05_10_10_15 ,2, 5, 3+10); book(_h_rdA04_05_10_15_24 ,1, 5, 4+10); book(_h_rdA06_05_10_15_24 ,2, 5, 4+10); book(_h_rdA04_05_10_24_40 ,1, 5, 5+10); book(_h_rdA06_05_10_24_40 ,2, 5, 5+10); book(_h_pt04_10_15 ,1, 1, 3); book(_h_pt06_10_15 ,2, 1, 3); book(_h_N04_10_15_04_06 ,1, 2, 1+15); book(_h_N06_10_15_04_06 ,2, 2, 1+15); book(_h_N04_10_15_06_10 ,1, 2, 2+15); book(_h_N06_10_15_06_10 ,2, 2, 2+15); book(_h_N04_10_15_10_15 ,1, 2, 
3+15); book(_h_N06_10_15_10_15 ,2, 2, 3+15); book(_h_N04_10_15_15_24 ,1, 2, 4+15); book(_h_N06_10_15_15_24 ,2, 2, 4+15); book(_h_N04_10_15_24_40 ,1, 2, 5+15); book(_h_N06_10_15_24_40 ,2, 2, 5+15); book(_h_z04_10_15_04_06 ,1, 3, 1+15); book(_h_z06_10_15_04_06 ,2, 3, 1+15); book(_h_z04_10_15_06_10 ,1, 3, 2+15); book(_h_z06_10_15_06_10 ,2, 3, 2+15); book(_h_z04_10_15_10_15 ,1, 3, 3+15); book(_h_z06_10_15_10_15 ,2, 3, 3+15); book(_h_z04_10_15_15_24 ,1, 3, 4+15); book(_h_z06_10_15_15_24 ,2, 3, 4+15); book(_h_z04_10_15_24_40 ,1, 3, 5+15); book(_h_z06_10_15_24_40 ,2, 3, 5+15); book(_h_ptrel04_10_15_04_06 ,1, 4, 1+15); book(_h_ptrel06_10_15_04_06 ,2, 4, 1+15); book(_h_ptrel04_10_15_06_10 ,1, 4, 2+15); book(_h_ptrel06_10_15_06_10 ,2, 4, 2+15); book(_h_ptrel04_10_15_10_15 ,1, 4, 3+15); book(_h_ptrel06_10_15_10_15 ,2, 4, 3+15); book(_h_ptrel04_10_15_15_24 ,1, 4, 4+15); book(_h_ptrel06_10_15_15_24 ,2, 4, 4+15); book(_h_ptrel04_10_15_24_40 ,1, 4, 5+15); book(_h_ptrel06_10_15_24_40 ,2, 4, 5+15); book(_h_rdA04_10_15_04_06 ,1, 5, 1+15); book(_h_rdA06_10_15_04_06 ,2, 5, 1+15); book(_h_rdA04_10_15_06_10 ,1, 5, 2+15); book(_h_rdA06_10_15_06_10 ,2, 5, 2+15); book(_h_rdA04_10_15_10_15 ,1, 5, 3+15); book(_h_rdA06_10_15_10_15 ,2, 5, 3+15); book(_h_rdA04_10_15_15_24 ,1, 5, 4+15); book(_h_rdA06_10_15_15_24 ,2, 5, 4+15); book(_h_rdA04_10_15_24_40 ,1, 5, 5+15); book(_h_rdA06_10_15_24_40 ,2, 5, 5+15); book(_h_pt04_15_19 ,1, 1, 4); book(_h_pt06_15_19 ,2, 1, 4); book(_h_N04_15_19_04_06 ,1, 2, 1+20); book(_h_N06_15_19_04_06 ,2, 2, 1+20); book(_h_N04_15_19_06_10 ,1, 2, 2+20); book(_h_N06_15_19_06_10 ,2, 2, 2+20); book(_h_N04_15_19_10_15 ,1, 2, 3+20); book(_h_N06_15_19_10_15 ,2, 2, 3+20); book(_h_N04_15_19_15_24 ,1, 2, 4+20); book(_h_N06_15_19_15_24 ,2, 2, 4+20); book(_h_N04_15_19_24_40 ,1, 2, 5+20); book(_h_N06_15_19_24_40 ,2, 2, 5+20); book(_h_z04_15_19_04_06 ,1, 3, 1+20); book(_h_z06_15_19_04_06 ,2, 3, 1+20); book(_h_z04_15_19_06_10 ,1, 3, 2+20); book(_h_z06_15_19_06_10 ,2, 3, 2+20); 
book(_h_z04_15_19_10_15 ,1, 3, 3+20); book(_h_z06_15_19_10_15 ,2, 3, 3+20); book(_h_z04_15_19_15_24 ,1, 3, 4+20); book(_h_z06_15_19_15_24 ,2, 3, 4+20); book(_h_z04_15_19_24_40 ,1, 3, 5+20); book(_h_z06_15_19_24_40 ,2, 3, 5+20); book(_h_ptrel04_15_19_04_06 ,1, 4, 1+20); book(_h_ptrel06_15_19_04_06 ,2, 4, 1+20); book(_h_ptrel04_15_19_06_10 ,1, 4, 2+20); book(_h_ptrel06_15_19_06_10 ,2, 4, 2+20); book(_h_ptrel04_15_19_10_15 ,1, 4, 3+20); book(_h_ptrel06_15_19_10_15 ,2, 4, 3+20); book(_h_ptrel04_15_19_15_24 ,1, 4, 4+20); book(_h_ptrel06_15_19_15_24 ,2, 4, 4+20); book(_h_ptrel04_15_19_24_40 ,1, 4, 5+20); book(_h_ptrel06_15_19_24_40 ,2, 4, 5+20); book(_h_rdA04_15_19_04_06 ,1, 5, 1+20); book(_h_rdA06_15_19_04_06 ,2, 5, 1+20); book(_h_rdA04_15_19_06_10 ,1, 5, 2+20); book(_h_rdA06_15_19_06_10 ,2, 5, 2+20); book(_h_rdA04_15_19_10_15 ,1, 5, 3+20); book(_h_rdA06_15_19_10_15 ,2, 5, 3+20); book(_h_rdA04_15_19_15_24 ,1, 5, 4+20); book(_h_rdA06_15_19_15_24 ,2, 5, 4+20); book(_h_rdA04_15_19_24_40 ,1, 5, 5+20); book(_h_rdA06_15_19_24_40 ,2, 5, 5+20); book(_h_N04_00_19_04_06 ,1, 2, 1+0); book(_h_N06_00_19_04_06 ,2, 2, 1+0); book(_h_N04_00_19_06_10 ,1, 2, 2+0); book(_h_N06_00_19_06_10 ,2, 2, 2+0); book(_h_N04_00_19_10_15 ,1, 2, 3+0); book(_h_N06_00_19_10_15 ,2, 2, 3+0); book(_h_N04_00_19_15_24 ,1, 2, 4+0); book(_h_N06_00_19_15_24 ,2, 2, 4+0); book(_h_N04_00_19_24_40 ,1, 2, 5+0); book(_h_N06_00_19_24_40 ,2, 2, 5+0); book(_h_z04_00_19_04_06 ,1, 3, 1+0); book(_h_z06_00_19_04_06 ,2, 3, 1+0); book(_h_z04_00_19_06_10 ,1, 3, 2+0); book(_h_z06_00_19_06_10 ,2, 3, 2+0); book(_h_z04_00_19_10_15 ,1, 3, 3+0); book(_h_z06_00_19_10_15 ,2, 3, 3+0); book(_h_z04_00_19_15_24 ,1, 3, 4+0); book(_h_z06_00_19_15_24 ,2, 3, 4+0); book(_h_z04_00_19_24_40 ,1, 3, 5+0); book(_h_z06_00_19_24_40 ,2, 3, 5+0); book(_h_ptrel04_00_19_04_06 ,1, 4, 1+0); book(_h_ptrel06_00_19_04_06 ,2, 4, 1+0); book(_h_ptrel04_00_19_06_10 ,1, 4, 2+0); book(_h_ptrel06_00_19_06_10 ,2, 4, 2+0); book(_h_ptrel04_00_19_10_15 ,1, 4, 3+0); 
book(_h_ptrel06_00_19_10_15 ,2, 4, 3+0); book(_h_ptrel04_00_19_15_24 ,1, 4, 4+0); book(_h_ptrel06_00_19_15_24 ,2, 4, 4+0); book(_h_ptrel04_00_19_24_40 ,1, 4, 5+0); book(_h_ptrel06_00_19_24_40 ,2, 4, 5+0); book(_h_rdA04_00_19_04_06 ,1, 5, 1+0); book(_h_rdA06_00_19_04_06 ,2, 5, 1+0); book(_h_rdA04_00_19_06_10 ,1, 5, 2+0); book(_h_rdA06_00_19_06_10 ,2, 5, 2+0); book(_h_rdA04_00_19_10_15 ,1, 5, 3+0); book(_h_rdA06_00_19_10_15 ,2, 5, 3+0); book(_h_rdA04_00_19_15_24 ,1, 5, 4+0); book(_h_rdA06_00_19_15_24 ,2, 5, 4+0); book(_h_rdA04_00_19_24_40 ,1, 5, 5+0); book(_h_rdA06_00_19_24_40 ,2, 5, 5+0); book(_sumofweights04, "_sumofweights04"); book(_sumofweights06, "_sumofweights06"); book(_numjets04_00_05_04_06, "_numjets04_00_05_04_06"); book(_numjets04_00_05_06_10, "_numjets04_00_05_06_10"); book(_numjets04_00_05_10_15, "_numjets04_00_05_10_15"); book(_numjets04_00_05_15_24, "_numjets04_00_05_15_24"); book(_numjets04_00_05_24_40, "_numjets04_00_05_24_40"); book(_numjets06_00_05_04_06, "_numjets06_00_05_04_06"); book(_numjets06_00_05_06_10, "_numjets06_00_05_06_10"); book(_numjets06_00_05_10_15, "_numjets06_00_05_10_15"); book(_numjets06_00_05_15_24, "_numjets06_00_05_15_24"); book(_numjets06_00_05_24_40, "_numjets06_00_05_24_40"); book(_numjets04_05_10_04_06, "_numjets04_05_10_04_06"); book(_numjets04_05_10_06_10, "_numjets04_05_10_06_10"); book(_numjets04_05_10_10_15, "_numjets04_05_10_10_15"); book(_numjets04_05_10_15_24, "_numjets04_05_10_15_24"); book(_numjets04_05_10_24_40, "_numjets04_05_10_24_40"); book(_numjets06_05_10_04_06, "_numjets06_05_10_04_06"); book(_numjets06_05_10_06_10, "_numjets06_05_10_06_10"); book(_numjets06_05_10_10_15, "_numjets06_05_10_10_15"); book(_numjets06_05_10_15_24, "_numjets06_05_10_15_24"); book(_numjets06_05_10_24_40, "_numjets06_05_10_24_40"); book(_numjets04_10_15_04_06, "_numjets04_10_15_04_06"); book(_numjets04_10_15_06_10, "_numjets04_10_15_06_10"); book(_numjets04_10_15_10_15, "_numjets04_10_15_10_15"); book(_numjets04_10_15_15_24, 
"_numjets04_10_15_15_24"); book(_numjets04_10_15_24_40, "_numjets04_10_15_24_40"); book(_numjets06_10_15_04_06, "_numjets06_10_15_04_06"); book(_numjets06_10_15_06_10, "_numjets06_10_15_06_10"); book(_numjets06_10_15_10_15, "_numjets06_10_15_10_15"); book(_numjets06_10_15_15_24, "_numjets06_10_15_15_24"); book(_numjets06_10_15_24_40, "_numjets06_10_15_24_40"); book(_numjets04_15_19_04_06, "_numjets04_15_19_04_06"); book(_numjets04_15_19_06_10, "_numjets04_15_19_06_10"); book(_numjets04_15_19_10_15, "_numjets04_15_19_10_15"); book(_numjets04_15_19_15_24, "_numjets04_15_19_15_24"); book(_numjets04_15_19_24_40, "_numjets04_15_19_24_40"); book(_numjets06_15_19_04_06, "_numjets06_15_19_04_06"); book(_numjets06_15_19_06_10, "_numjets06_15_19_06_10"); book(_numjets06_15_19_10_15, "_numjets06_15_19_10_15"); book(_numjets06_15_19_15_24, "_numjets06_15_19_15_24"); book(_numjets06_15_19_24_40, "_numjets06_15_19_24_40"); book(_numjets04_00_19_04_06, "_numjets04_00_19_04_06"); book(_numjets04_00_19_06_10, "_numjets04_00_19_06_10"); book(_numjets04_00_19_10_15, "_numjets04_00_19_10_15"); book(_numjets04_00_19_15_24, "_numjets04_00_19_15_24"); book(_numjets04_00_19_24_40, "_numjets04_00_19_24_40"); book(_numjets06_00_19_04_06, "_numjets06_00_19_04_06"); book(_numjets06_00_19_06_10, "_numjets06_00_19_06_10"); book(_numjets06_00_19_10_15, "_numjets06_00_19_10_15"); book(_numjets06_00_19_15_24, "_numjets06_00_19_15_24"); book(_numjets06_00_19_24_40, "_numjets06_00_19_24_40"); } /// Perform the per-event analysis void analyze(const Event& event) { const Jets& jets04 = apply(event, "Jets04").jets(); if (!jets04.empty()) { _sumofweights04->fill(); for (const Jet& j : jets04) { const double jetpt = j.pT(); if (j.absrap() < 0.5) { _h_pt04_00_05->fill(jetpt/GeV); if (inRange(jetpt/GeV, 4., 6.)) { _numjets04_00_05_04_06->fill(); _h_N04_00_05_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_05_04_06->fill(calcz(j,p)); 
_h_ptrel04_00_05_04_06->fill(calcptrel(j,p)); _h_rdA04_00_05_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets04_00_05_06_10->fill(); _h_N04_00_05_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_05_06_10->fill(calcz(j,p)); _h_ptrel04_00_05_06_10->fill(calcptrel(j,p)); _h_rdA04_00_05_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets04_00_05_10_15->fill(); _h_N04_00_05_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_05_10_15->fill(calcz(j,p)); _h_ptrel04_00_05_10_15->fill(calcptrel(j,p)); _h_rdA04_00_05_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets04_00_05_15_24->fill(); _h_N04_00_05_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_05_15_24->fill(calcz(j,p)); _h_ptrel04_00_05_15_24->fill(calcptrel(j,p)); _h_rdA04_00_05_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets04_00_05_24_40->fill(); _h_N04_00_05_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_05_24_40->fill(calcz(j,p)); _h_ptrel04_00_05_24_40->fill(calcptrel(j,p)); _h_rdA04_00_05_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } if (j.absrap() > 0.5 && j.absrap() < 1.0) { _h_pt04_05_10->fill(jetpt/GeV); if (inRange(jetpt/GeV, 4., 6.)) { _numjets04_05_10_04_06->fill(); _h_N04_05_10_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_05_10_04_06->fill(calcz(j,p)); _h_ptrel04_05_10_04_06->fill(calcptrel(j,p)); _h_rdA04_05_10_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets04_05_10_06_10->fill(); _h_N04_05_10_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_05_10_06_10->fill(calcz(j,p)); _h_ptrel04_05_10_06_10->fill(calcptrel(j,p)); _h_rdA04_05_10_06_10->fill(calcr(j,p),calcrweight(j,p)); } } 
if (inRange(jetpt/GeV, 10., 15.)) { _numjets04_05_10_10_15->fill(); _h_N04_05_10_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_05_10_10_15->fill(calcz(j,p)); _h_ptrel04_05_10_10_15->fill(calcptrel(j,p)); _h_rdA04_05_10_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets04_05_10_15_24->fill(); _h_N04_05_10_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_05_10_15_24->fill(calcz(j,p)); _h_ptrel04_05_10_15_24->fill(calcptrel(j,p)); _h_rdA04_05_10_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets04_05_10_24_40->fill(); _h_N04_05_10_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_05_10_24_40->fill(calcz(j,p)); _h_ptrel04_05_10_24_40->fill(calcptrel(j,p)); _h_rdA04_05_10_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } if (j.absrap() > 1.0 && j.absrap() < 1.5) { _h_pt04_10_15->fill(jetpt/GeV); if (inRange(jetpt/GeV, 4., 6.)) { _numjets04_10_15_04_06->fill(); _h_N04_10_15_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_10_15_04_06->fill(calcz(j,p)); _h_ptrel04_10_15_04_06->fill(calcptrel(j,p)); _h_rdA04_10_15_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets04_10_15_06_10->fill(); _h_N04_10_15_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_10_15_06_10->fill(calcz(j,p)); _h_ptrel04_10_15_06_10->fill(calcptrel(j,p)); _h_rdA04_10_15_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets04_10_15_10_15->fill(); _h_N04_10_15_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_10_15_10_15->fill(calcz(j,p)); _h_ptrel04_10_15_10_15->fill(calcptrel(j,p)); _h_rdA04_10_15_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets04_10_15_15_24->fill(); 
_h_N04_10_15_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_10_15_15_24->fill(calcz(j,p)); _h_ptrel04_10_15_15_24->fill(calcptrel(j,p)); _h_rdA04_10_15_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets04_10_15_24_40->fill(); _h_N04_10_15_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_10_15_24_40->fill(calcz(j,p)); _h_ptrel04_10_15_24_40->fill(calcptrel(j,p)); _h_rdA04_10_15_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } if (j.absrap() > 1.5 && j.absrap() < 1.9) { _h_pt04_15_19->fill(jetpt/GeV); if (inRange(jetpt/GeV, 4., 6.)) { _numjets04_15_19_04_06->fill(); _h_N04_15_19_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_15_19_04_06->fill(calcz(j,p)); _h_ptrel04_15_19_04_06->fill(calcptrel(j,p)); _h_rdA04_15_19_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets04_15_19_06_10->fill(); _h_N04_15_19_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_15_19_06_10->fill(calcz(j,p)); _h_ptrel04_15_19_06_10->fill(calcptrel(j,p)); _h_rdA04_15_19_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets04_15_19_10_15->fill(); _h_N04_15_19_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_15_19_10_15->fill(calcz(j,p)); _h_ptrel04_15_19_10_15->fill(calcptrel(j,p)); _h_rdA04_15_19_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets04_15_19_15_24->fill(); _h_N04_15_19_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_15_19_15_24->fill(calcz(j,p)); _h_ptrel04_15_19_15_24->fill(calcptrel(j,p)); _h_rdA04_15_19_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets04_15_19_24_40->fill(); _h_N04_15_19_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { 
_h_z04_15_19_24_40->fill(calcz(j,p)); _h_ptrel04_15_19_24_40->fill(calcptrel(j,p)); _h_rdA04_15_19_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } // 1.5 < rapidity < 1.9 if (j.absrap() < 1.9) { if (inRange(jetpt/GeV, 4., 6.)) { _numjets04_00_19_04_06->fill(); _h_N04_00_19_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_19_04_06->fill(calcz(j,p)); _h_ptrel04_00_19_04_06->fill(calcptrel(j,p)); _h_rdA04_00_19_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets04_00_19_06_10->fill(); _h_N04_00_19_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_19_06_10->fill(calcz(j,p)); _h_ptrel04_00_19_06_10->fill(calcptrel(j,p)); _h_rdA04_00_19_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets04_00_19_10_15->fill(); _h_N04_00_19_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_19_10_15->fill(calcz(j,p)); _h_ptrel04_00_19_10_15->fill(calcptrel(j,p)); _h_rdA04_00_19_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets04_00_19_15_24->fill(); _h_N04_00_19_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_19_15_24->fill(calcz(j,p)); _h_ptrel04_00_19_15_24->fill(calcptrel(j,p)); _h_rdA04_00_19_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets04_00_19_24_40->fill(); _h_N04_00_19_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z04_00_19_24_40->fill(calcz(j,p)); _h_ptrel04_00_19_24_40->fill(calcptrel(j,p)); _h_rdA04_00_19_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } // 0.0 < rapidity < 1.9 } // each jet } // jets04 not empty const Jets& jets06 = apply(event, "Jets06").jets(); if (!jets06.empty()) { _sumofweights06->fill(); for (const Jet& j : jets06) { const double jetpt = j.pT(); if (j.absrap() < 0.5) { _h_pt06_00_05->fill(jetpt/GeV); if 
(inRange(jetpt/GeV, 4., 6.)) { _numjets06_00_05_04_06->fill(); _h_N06_00_05_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_05_04_06->fill(calcz(j,p)); _h_ptrel06_00_05_04_06->fill(calcptrel(j,p)); _h_rdA06_00_05_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets06_00_05_06_10->fill(); _h_N06_00_05_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_05_06_10->fill(calcz(j,p)); _h_ptrel06_00_05_06_10->fill(calcptrel(j,p)); _h_rdA06_00_05_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets06_00_05_10_15->fill(); _h_N06_00_05_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_05_10_15->fill(calcz(j,p)); _h_ptrel06_00_05_10_15->fill(calcptrel(j,p)); _h_rdA06_00_05_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets06_00_05_15_24->fill(); _h_N06_00_05_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_05_15_24->fill(calcz(j,p)); _h_ptrel06_00_05_15_24->fill(calcptrel(j,p)); _h_rdA06_00_05_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets06_00_05_24_40->fill(); _h_N06_00_05_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_05_24_40->fill(calcz(j,p)); _h_ptrel06_00_05_24_40->fill(calcptrel(j,p)); _h_rdA06_00_05_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } if (j.absrap() > 0.5 && j.absrap() < 1.0) { _h_pt06_05_10->fill(jetpt/GeV); if (inRange(jetpt/GeV, 4., 6.)) { _numjets06_05_10_04_06->fill(); _h_N06_05_10_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_05_10_04_06->fill(calcz(j,p)); _h_ptrel06_05_10_04_06->fill(calcptrel(j,p)); _h_rdA06_05_10_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets06_05_10_06_10->fill(); 
_h_N06_05_10_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_05_10_06_10->fill(calcz(j,p)); _h_ptrel06_05_10_06_10->fill(calcptrel(j,p)); _h_rdA06_05_10_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets06_05_10_10_15->fill(); _h_N06_05_10_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_05_10_10_15->fill(calcz(j,p)); _h_ptrel06_05_10_10_15->fill(calcptrel(j,p)); _h_rdA06_05_10_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets06_05_10_15_24->fill(); _h_N06_05_10_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_05_10_15_24->fill(calcz(j,p)); _h_ptrel06_05_10_15_24->fill(calcptrel(j,p)); _h_rdA06_05_10_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets06_05_10_24_40->fill(); _h_N06_05_10_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_05_10_24_40->fill(calcz(j,p)); _h_ptrel06_05_10_24_40->fill(calcptrel(j,p)); _h_rdA06_05_10_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } if (j.absrap() > 1.0 && j.absrap() < 1.5) { _h_pt06_10_15->fill(jetpt/GeV); if (inRange(jetpt/GeV, 4., 6.)) { _numjets06_10_15_04_06->fill(); _h_N06_10_15_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_10_15_04_06->fill(calcz(j,p)); _h_ptrel06_10_15_04_06->fill(calcptrel(j,p)); _h_rdA06_10_15_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets06_10_15_06_10->fill(); _h_N06_10_15_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_10_15_06_10->fill(calcz(j,p)); _h_ptrel06_10_15_06_10->fill(calcptrel(j,p)); _h_rdA06_10_15_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets06_10_15_10_15->fill(); _h_N06_10_15_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { 
_h_z06_10_15_10_15->fill(calcz(j,p)); _h_ptrel06_10_15_10_15->fill(calcptrel(j,p)); _h_rdA06_10_15_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets06_10_15_15_24->fill(); _h_N06_10_15_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_10_15_15_24->fill(calcz(j,p)); _h_ptrel06_10_15_15_24->fill(calcptrel(j,p)); _h_rdA06_10_15_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets06_10_15_24_40->fill(); _h_N06_10_15_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_10_15_24_40->fill(calcz(j,p)); _h_ptrel06_10_15_24_40->fill(calcptrel(j,p)); _h_rdA06_10_15_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } if (j.absrap() > 1.5 && j.absrap() < 1.9) { _h_pt06_15_19->fill(jetpt/GeV); if (inRange(jetpt/GeV, 4., 6.)) { _numjets06_15_19_04_06->fill(); _h_N06_15_19_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_15_19_04_06->fill(calcz(j,p)); _h_ptrel06_15_19_04_06->fill(calcptrel(j,p)); _h_rdA06_15_19_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets06_15_19_06_10->fill(); _h_N06_15_19_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_15_19_06_10->fill(calcz(j,p)); _h_ptrel06_15_19_06_10->fill(calcptrel(j,p)); _h_rdA06_15_19_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets06_15_19_10_15->fill(); _h_N06_15_19_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_15_19_10_15->fill(calcz(j,p)); _h_ptrel06_15_19_10_15->fill(calcptrel(j,p)); _h_rdA06_15_19_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets06_15_19_15_24->fill(); _h_N06_15_19_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_15_19_15_24->fill(calcz(j,p)); _h_ptrel06_15_19_15_24->fill(calcptrel(j,p)); 
_h_rdA06_15_19_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets06_15_19_24_40->fill(); _h_N06_15_19_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_15_19_24_40->fill(calcz(j,p)); _h_ptrel06_15_19_24_40->fill(calcptrel(j,p)); _h_rdA06_15_19_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } // 1.5 < rapidity < 1.9 if (j.absrap() < 1.9) { if (inRange(jetpt/GeV, 4., 6.)) { _numjets06_00_19_04_06->fill(); _h_N06_00_19_04_06->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_19_04_06->fill(calcz(j,p)); _h_ptrel06_00_19_04_06->fill(calcptrel(j,p)); _h_rdA06_00_19_04_06->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 6., 10.)) { _numjets06_00_19_06_10->fill(); _h_N06_00_19_06_10->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_19_06_10->fill(calcz(j,p)); _h_ptrel06_00_19_06_10->fill(calcptrel(j,p)); _h_rdA06_00_19_06_10->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 10., 15.)) { _numjets06_00_19_10_15->fill(); _h_N06_00_19_10_15->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_19_10_15->fill(calcz(j,p)); _h_ptrel06_00_19_10_15->fill(calcptrel(j,p)); _h_rdA06_00_19_10_15->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 15., 24.)) { _numjets06_00_19_15_24->fill(); _h_N06_00_19_15_24->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_19_15_24->fill(calcz(j,p)); _h_ptrel06_00_19_15_24->fill(calcptrel(j,p)); _h_rdA06_00_19_15_24->fill(calcr(j,p),calcrweight(j,p)); } } if (inRange(jetpt/GeV, 24., 40.)) { _numjets06_00_19_24_40->fill(); _h_N06_00_19_24_40->fill(j.particles().size()); for (const Particle& p : j.particles()) { _h_z06_00_19_24_40->fill(calcz(j,p)); _h_ptrel06_00_19_24_40->fill(calcptrel(j,p)); _h_rdA06_00_19_24_40->fill(calcr(j,p),calcrweight(j,p)); } } } } // each jet } // jets06 not empty } // end of event /// Normalise 
histograms etc., after the run void finalize() { // pT histos: d2sigma_jet / deta dpT const double xsec = crossSection()/microbarn; safeinvscale(_h_pt04_00_05, _sumofweights04->val()*(2*0.5)/xsec); safeinvscale(_h_pt06_00_05, _sumofweights06->val()*(2*0.5)/xsec); safeinvscale(_h_pt04_05_10, _sumofweights04->val()*(2*0.5)/xsec); safeinvscale(_h_pt06_05_10, _sumofweights06->val()*(2*0.5)/xsec); safeinvscale(_h_pt04_10_15, _sumofweights04->val()*(2*0.5)/xsec); safeinvscale(_h_pt06_10_15, _sumofweights06->val()*(2*0.5)/xsec); safeinvscale(_h_pt04_15_19, _sumofweights04->val()*(2*0.4)/xsec); safeinvscale(_h_pt06_15_19, _sumofweights06->val()*(2*0.4)/xsec); // N histos: 1/N_jet dN_jet / dN^{ch}_jet safeinvscale(_h_N04_00_05_04_06, _numjets04_00_05_04_06->val()); safeinvscale(_h_N06_00_05_04_06, _numjets06_00_05_04_06->val()); safeinvscale(_h_N04_00_05_06_10, _numjets04_00_05_06_10->val()); safeinvscale(_h_N06_00_05_06_10, _numjets06_00_05_06_10->val()); safeinvscale(_h_N04_00_05_10_15, _numjets04_00_05_10_15->val()); safeinvscale(_h_N06_00_05_10_15, _numjets06_00_05_10_15->val()); safeinvscale(_h_N04_00_05_15_24, _numjets04_00_05_15_24->val()); safeinvscale(_h_N06_00_05_15_24, _numjets06_00_05_15_24->val()); safeinvscale(_h_N04_00_05_24_40, _numjets04_00_05_24_40->val()); safeinvscale(_h_N06_00_05_24_40, _numjets06_00_05_24_40->val()); safeinvscale(_h_N04_05_10_04_06, _numjets04_05_10_04_06->val()); safeinvscale(_h_N06_05_10_04_06, _numjets06_05_10_04_06->val()); safeinvscale(_h_N04_05_10_06_10, _numjets04_05_10_06_10->val()); safeinvscale(_h_N06_05_10_06_10, _numjets06_05_10_06_10->val()); safeinvscale(_h_N04_05_10_10_15, _numjets04_05_10_10_15->val()); safeinvscale(_h_N06_05_10_10_15, _numjets06_05_10_10_15->val()); safeinvscale(_h_N04_05_10_15_24, _numjets04_05_10_15_24->val()); safeinvscale(_h_N06_05_10_15_24, _numjets06_05_10_15_24->val()); safeinvscale(_h_N04_05_10_24_40, _numjets04_05_10_24_40->val()); safeinvscale(_h_N06_05_10_24_40, 
_numjets06_05_10_24_40->val()); safeinvscale(_h_N04_10_15_04_06, _numjets04_10_15_04_06->val()); safeinvscale(_h_N06_10_15_04_06, _numjets06_10_15_04_06->val()); safeinvscale(_h_N04_10_15_06_10, _numjets04_10_15_06_10->val()); safeinvscale(_h_N06_10_15_06_10, _numjets06_10_15_06_10->val()); safeinvscale(_h_N04_10_15_10_15, _numjets04_10_15_10_15->val()); safeinvscale(_h_N06_10_15_10_15, _numjets06_10_15_10_15->val()); safeinvscale(_h_N04_10_15_15_24, _numjets04_10_15_15_24->val()); safeinvscale(_h_N06_10_15_15_24, _numjets06_10_15_15_24->val()); safeinvscale(_h_N04_10_15_24_40, _numjets04_10_15_24_40->val()); safeinvscale(_h_N06_10_15_24_40, _numjets06_10_15_24_40->val()); safeinvscale(_h_N04_15_19_04_06, _numjets04_15_19_04_06->val()); safeinvscale(_h_N06_15_19_04_06, _numjets06_15_19_04_06->val()); safeinvscale(_h_N04_15_19_06_10, _numjets04_15_19_06_10->val()); safeinvscale(_h_N06_15_19_06_10, _numjets06_15_19_06_10->val()); safeinvscale(_h_N04_15_19_10_15, _numjets04_15_19_10_15->val()); safeinvscale(_h_N06_15_19_10_15, _numjets06_15_19_10_15->val()); safeinvscale(_h_N04_15_19_15_24, _numjets04_15_19_15_24->val()); safeinvscale(_h_N06_15_19_15_24, _numjets06_15_19_15_24->val()); safeinvscale(_h_N04_15_19_24_40, _numjets04_15_19_24_40->val()); safeinvscale(_h_N06_15_19_24_40, _numjets06_15_19_24_40->val()); safeinvscale(_h_N04_00_19_04_06, _numjets04_00_19_04_06->val()); safeinvscale(_h_N06_00_19_04_06, _numjets06_00_19_04_06->val()); safeinvscale(_h_N04_00_19_06_10, _numjets04_00_19_06_10->val()); safeinvscale(_h_N06_00_19_06_10, _numjets06_00_19_06_10->val()); safeinvscale(_h_N04_00_19_10_15, _numjets04_00_19_10_15->val()); safeinvscale(_h_N06_00_19_10_15, _numjets06_00_19_10_15->val()); safeinvscale(_h_N04_00_19_15_24, _numjets04_00_19_15_24->val()); safeinvscale(_h_N06_00_19_15_24, _numjets06_00_19_15_24->val()); safeinvscale(_h_N04_00_19_24_40, _numjets04_00_19_24_40->val()); safeinvscale(_h_N06_00_19_24_40, _numjets06_00_19_24_40->val()); // z histos: 
1/N_jet dN_track / dz_track->val() safeinvscale(_h_z04_00_05_04_06, _numjets04_00_05_04_06->val()); safeinvscale(_h_z06_00_05_04_06, _numjets06_00_05_04_06->val()); safeinvscale(_h_z04_00_05_06_10, _numjets04_00_05_06_10->val()); safeinvscale(_h_z06_00_05_06_10, _numjets06_00_05_06_10->val()); safeinvscale(_h_z04_00_05_10_15, _numjets04_00_05_10_15->val()); safeinvscale(_h_z06_00_05_10_15, _numjets06_00_05_10_15->val()); safeinvscale(_h_z04_00_05_15_24, _numjets04_00_05_15_24->val()); safeinvscale(_h_z06_00_05_15_24, _numjets06_00_05_15_24->val()); safeinvscale(_h_z04_00_05_24_40, _numjets04_00_05_24_40->val()); safeinvscale(_h_z06_00_05_24_40, _numjets06_00_05_24_40->val()); safeinvscale(_h_z04_05_10_04_06, _numjets04_05_10_04_06->val()); safeinvscale(_h_z06_05_10_04_06, _numjets06_05_10_04_06->val()); safeinvscale(_h_z04_05_10_06_10, _numjets04_05_10_06_10->val()); safeinvscale(_h_z06_05_10_06_10, _numjets06_05_10_06_10->val()); safeinvscale(_h_z04_05_10_10_15, _numjets04_05_10_10_15->val()); safeinvscale(_h_z06_05_10_10_15, _numjets06_05_10_10_15->val()); safeinvscale(_h_z04_05_10_15_24, _numjets04_05_10_15_24->val()); safeinvscale(_h_z06_05_10_15_24, _numjets06_05_10_15_24->val()); safeinvscale(_h_z04_05_10_24_40, _numjets04_05_10_24_40->val()); safeinvscale(_h_z06_05_10_24_40, _numjets06_05_10_24_40->val()); safeinvscale(_h_z04_10_15_04_06, _numjets04_10_15_04_06->val()); safeinvscale(_h_z06_10_15_04_06, _numjets06_10_15_04_06->val()); safeinvscale(_h_z04_10_15_06_10, _numjets04_10_15_06_10->val()); safeinvscale(_h_z06_10_15_06_10, _numjets06_10_15_06_10->val()); safeinvscale(_h_z04_10_15_10_15, _numjets04_10_15_10_15->val()); safeinvscale(_h_z06_10_15_10_15, _numjets06_10_15_10_15->val()); safeinvscale(_h_z04_10_15_15_24, _numjets04_10_15_15_24->val()); safeinvscale(_h_z06_10_15_15_24, _numjets06_10_15_15_24->val()); safeinvscale(_h_z04_10_15_24_40, _numjets04_10_15_24_40->val()); safeinvscale(_h_z06_10_15_24_40, _numjets06_10_15_24_40->val()); 
safeinvscale(_h_z04_15_19_04_06, _numjets04_15_19_04_06->val()); safeinvscale(_h_z06_15_19_04_06, _numjets06_15_19_04_06->val()); safeinvscale(_h_z04_15_19_06_10, _numjets04_15_19_06_10->val()); safeinvscale(_h_z06_15_19_06_10, _numjets06_15_19_06_10->val()); safeinvscale(_h_z04_15_19_10_15, _numjets04_15_19_10_15->val()); safeinvscale(_h_z06_15_19_10_15, _numjets06_15_19_10_15->val()); safeinvscale(_h_z04_15_19_15_24, _numjets04_15_19_15_24->val()); safeinvscale(_h_z06_15_19_15_24, _numjets06_15_19_15_24->val()); safeinvscale(_h_z04_15_19_24_40, _numjets04_15_19_24_40->val()); safeinvscale(_h_z06_15_19_24_40, _numjets06_15_19_24_40->val()); safeinvscale(_h_z04_00_19_04_06, _numjets04_00_19_04_06->val()); safeinvscale(_h_z06_00_19_04_06, _numjets06_00_19_04_06->val()); safeinvscale(_h_z04_00_19_06_10, _numjets04_00_19_06_10->val()); safeinvscale(_h_z06_00_19_06_10, _numjets06_00_19_06_10->val()); safeinvscale(_h_z04_00_19_10_15, _numjets04_00_19_10_15->val()); safeinvscale(_h_z06_00_19_10_15, _numjets06_00_19_10_15->val()); safeinvscale(_h_z04_00_19_15_24, _numjets04_00_19_15_24->val()); safeinvscale(_h_z06_00_19_15_24, _numjets06_00_19_15_24->val()); safeinvscale(_h_z04_00_19_24_40, _numjets04_00_19_24_40->val()); safeinvscale(_h_z06_00_19_24_40, _numjets06_00_19_24_40->val()); // ptrel histos: 1/N_jet dN_track / dptrel_track safeinvscale(_h_ptrel04_00_05_04_06, _numjets04_00_05_04_06->val()); safeinvscale(_h_ptrel06_00_05_04_06, _numjets06_00_05_04_06->val()); safeinvscale(_h_ptrel04_00_05_06_10, _numjets04_00_05_06_10->val()); safeinvscale(_h_ptrel06_00_05_06_10, _numjets06_00_05_06_10->val()); safeinvscale(_h_ptrel04_00_05_10_15, _numjets04_00_05_10_15->val()); safeinvscale(_h_ptrel06_00_05_10_15, _numjets06_00_05_10_15->val()); safeinvscale(_h_ptrel04_00_05_15_24, _numjets04_00_05_15_24->val()); safeinvscale(_h_ptrel06_00_05_15_24, _numjets06_00_05_15_24->val()); safeinvscale(_h_ptrel04_00_05_24_40, _numjets04_00_05_24_40->val()); 
safeinvscale(_h_ptrel06_00_05_24_40, _numjets06_00_05_24_40->val()); safeinvscale(_h_ptrel04_05_10_04_06, _numjets04_05_10_04_06->val()); safeinvscale(_h_ptrel06_05_10_04_06, _numjets06_05_10_04_06->val()); safeinvscale(_h_ptrel04_05_10_06_10, _numjets04_05_10_06_10->val()); safeinvscale(_h_ptrel06_05_10_06_10, _numjets06_05_10_06_10->val()); safeinvscale(_h_ptrel04_05_10_10_15, _numjets04_05_10_10_15->val()); safeinvscale(_h_ptrel06_05_10_10_15, _numjets06_05_10_10_15->val()); safeinvscale(_h_ptrel04_05_10_15_24, _numjets04_05_10_15_24->val()); safeinvscale(_h_ptrel06_05_10_15_24, _numjets06_05_10_15_24->val()); safeinvscale(_h_ptrel04_05_10_24_40, _numjets04_05_10_24_40->val()); safeinvscale(_h_ptrel06_05_10_24_40, _numjets06_05_10_24_40->val()); safeinvscale(_h_ptrel04_10_15_04_06, _numjets04_10_15_04_06->val()); safeinvscale(_h_ptrel06_10_15_04_06, _numjets06_10_15_04_06->val()); safeinvscale(_h_ptrel04_10_15_06_10, _numjets04_10_15_06_10->val()); safeinvscale(_h_ptrel06_10_15_06_10, _numjets06_10_15_06_10->val()); safeinvscale(_h_ptrel04_10_15_10_15, _numjets04_10_15_10_15->val()); safeinvscale(_h_ptrel06_10_15_10_15, _numjets06_10_15_10_15->val()); safeinvscale(_h_ptrel04_10_15_15_24, _numjets04_10_15_15_24->val()); safeinvscale(_h_ptrel06_10_15_15_24, _numjets06_10_15_15_24->val()); safeinvscale(_h_ptrel04_10_15_24_40, _numjets04_10_15_24_40->val()); safeinvscale(_h_ptrel06_10_15_24_40, _numjets06_10_15_24_40->val()); safeinvscale(_h_ptrel04_15_19_04_06, _numjets04_15_19_04_06->val()); safeinvscale(_h_ptrel06_15_19_04_06, _numjets06_15_19_04_06->val()); safeinvscale(_h_ptrel04_15_19_06_10, _numjets04_15_19_06_10->val()); safeinvscale(_h_ptrel06_15_19_06_10, _numjets06_15_19_06_10->val()); safeinvscale(_h_ptrel04_15_19_10_15, _numjets04_15_19_10_15->val()); safeinvscale(_h_ptrel06_15_19_10_15, _numjets06_15_19_10_15->val()); safeinvscale(_h_ptrel04_15_19_15_24, _numjets04_15_19_15_24->val()); safeinvscale(_h_ptrel06_15_19_15_24, 
_numjets06_15_19_15_24->val()); safeinvscale(_h_ptrel04_15_19_24_40, _numjets04_15_19_24_40->val()); safeinvscale(_h_ptrel06_15_19_24_40, _numjets06_15_19_24_40->val()); safeinvscale(_h_ptrel04_00_19_04_06, _numjets04_00_19_04_06->val()); safeinvscale(_h_ptrel06_00_19_04_06, _numjets06_00_19_04_06->val()); safeinvscale(_h_ptrel04_00_19_06_10, _numjets04_00_19_06_10->val()); safeinvscale(_h_ptrel06_00_19_06_10, _numjets06_00_19_06_10->val()); safeinvscale(_h_ptrel04_00_19_10_15, _numjets04_00_19_10_15->val()); safeinvscale(_h_ptrel06_00_19_10_15, _numjets06_00_19_10_15->val()); safeinvscale(_h_ptrel04_00_19_15_24, _numjets04_00_19_15_24->val()); safeinvscale(_h_ptrel06_00_19_15_24, _numjets06_00_19_15_24->val()); safeinvscale(_h_ptrel04_00_19_24_40, _numjets04_00_19_24_40->val()); safeinvscale(_h_ptrel06_00_19_24_40, _numjets06_00_19_24_40->val()); // r histos: 1/N_jet dN_track / dA safeinvscale(_h_rdA04_00_05_04_06, _numjets04_00_05_04_06->val()); safeinvscale(_h_rdA06_00_05_04_06, _numjets06_00_05_04_06->val()); safeinvscale(_h_rdA04_00_05_06_10, _numjets04_00_05_06_10->val()); safeinvscale(_h_rdA06_00_05_06_10, _numjets06_00_05_06_10->val()); safeinvscale(_h_rdA04_00_05_10_15, _numjets04_00_05_10_15->val()); safeinvscale(_h_rdA06_00_05_10_15, _numjets06_00_05_10_15->val()); safeinvscale(_h_rdA04_00_05_15_24, _numjets04_00_05_15_24->val()); safeinvscale(_h_rdA06_00_05_15_24, _numjets06_00_05_15_24->val()); safeinvscale(_h_rdA04_00_05_24_40, _numjets04_00_05_24_40->val()); safeinvscale(_h_rdA06_00_05_24_40, _numjets06_00_05_24_40->val()); safeinvscale(_h_rdA04_05_10_04_06, _numjets04_05_10_04_06->val()); safeinvscale(_h_rdA06_05_10_04_06, _numjets06_05_10_04_06->val()); safeinvscale(_h_rdA04_05_10_06_10, _numjets04_05_10_06_10->val()); safeinvscale(_h_rdA06_05_10_06_10, _numjets06_05_10_06_10->val()); safeinvscale(_h_rdA04_05_10_10_15, _numjets04_05_10_10_15->val()); safeinvscale(_h_rdA06_05_10_10_15, _numjets06_05_10_10_15->val()); 
safeinvscale(_h_rdA04_05_10_15_24, _numjets04_05_10_15_24->val()); safeinvscale(_h_rdA06_05_10_15_24, _numjets06_05_10_15_24->val()); safeinvscale(_h_rdA04_05_10_24_40, _numjets04_05_10_24_40->val()); safeinvscale(_h_rdA06_05_10_24_40, _numjets06_05_10_24_40->val()); safeinvscale(_h_rdA04_10_15_04_06, _numjets04_10_15_04_06->val()); safeinvscale(_h_rdA06_10_15_04_06, _numjets06_10_15_04_06->val()); safeinvscale(_h_rdA04_10_15_06_10, _numjets04_10_15_06_10->val()); safeinvscale(_h_rdA06_10_15_06_10, _numjets06_10_15_06_10->val()); safeinvscale(_h_rdA04_10_15_10_15, _numjets04_10_15_10_15->val()); safeinvscale(_h_rdA06_10_15_10_15, _numjets06_10_15_10_15->val()); safeinvscale(_h_rdA04_10_15_15_24, _numjets04_10_15_15_24->val()); safeinvscale(_h_rdA06_10_15_15_24, _numjets06_10_15_15_24->val()); safeinvscale(_h_rdA04_10_15_24_40, _numjets04_10_15_24_40->val()); safeinvscale(_h_rdA06_10_15_24_40, _numjets06_10_15_24_40->val()); safeinvscale(_h_rdA04_15_19_04_06, _numjets04_15_19_04_06->val()); safeinvscale(_h_rdA06_15_19_04_06, _numjets06_15_19_04_06->val()); safeinvscale(_h_rdA04_15_19_06_10, _numjets04_15_19_06_10->val()); safeinvscale(_h_rdA06_15_19_06_10, _numjets06_15_19_06_10->val()); safeinvscale(_h_rdA04_15_19_10_15, _numjets04_15_19_10_15->val()); safeinvscale(_h_rdA06_15_19_10_15, _numjets06_15_19_10_15->val()); safeinvscale(_h_rdA04_15_19_15_24, _numjets04_15_19_15_24->val()); safeinvscale(_h_rdA06_15_19_15_24, _numjets06_15_19_15_24->val()); safeinvscale(_h_rdA04_15_19_24_40, _numjets04_15_19_24_40->val()); safeinvscale(_h_rdA06_15_19_24_40, _numjets06_15_19_24_40->val()); safeinvscale(_h_rdA04_00_19_04_06, _numjets04_00_19_04_06->val()); safeinvscale(_h_rdA06_00_19_04_06, _numjets06_00_19_04_06->val()); safeinvscale(_h_rdA04_00_19_06_10, _numjets04_00_19_06_10->val()); safeinvscale(_h_rdA06_00_19_06_10, _numjets06_00_19_06_10->val()); safeinvscale(_h_rdA04_00_19_10_15, _numjets04_00_19_10_15->val()); safeinvscale(_h_rdA06_00_19_10_15, 
_numjets06_00_19_10_15->val()); safeinvscale(_h_rdA04_00_19_15_24, _numjets04_00_19_15_24->val()); safeinvscale(_h_rdA06_00_19_15_24, _numjets06_00_19_15_24->val()); safeinvscale(_h_rdA04_00_19_24_40, _numjets04_00_19_24_40->val()); safeinvscale(_h_rdA06_00_19_24_40, _numjets06_00_19_24_40->val()); } //@} private: void safeinvscale(Histo1DPtr h, double denom) { if (denom != 0) { scale(h, 1.0/denom); } else { normalize(h, 0); } } /// Event weights CounterPtr _sumofweights04, _sumofweights06; /// Jet counters CounterPtr _numjets04_00_05_04_06, _numjets04_00_05_06_10, _numjets04_00_05_10_15, _numjets04_00_05_15_24, _numjets04_00_05_24_40; CounterPtr _numjets06_00_05_04_06, _numjets06_00_05_06_10, _numjets06_00_05_10_15, _numjets06_00_05_15_24, _numjets06_00_05_24_40; CounterPtr _numjets04_05_10_04_06, _numjets04_05_10_06_10, _numjets04_05_10_10_15, _numjets04_05_10_15_24, _numjets04_05_10_24_40; CounterPtr _numjets06_05_10_04_06, _numjets06_05_10_06_10, _numjets06_05_10_10_15, _numjets06_05_10_15_24, _numjets06_05_10_24_40; CounterPtr _numjets04_10_15_04_06, _numjets04_10_15_06_10, _numjets04_10_15_10_15, _numjets04_10_15_15_24, _numjets04_10_15_24_40; CounterPtr _numjets06_10_15_04_06, _numjets06_10_15_06_10, _numjets06_10_15_10_15, _numjets06_10_15_15_24, _numjets06_10_15_24_40; CounterPtr _numjets04_15_19_04_06, _numjets04_15_19_06_10, _numjets04_15_19_10_15, _numjets04_15_19_15_24, _numjets04_15_19_24_40; CounterPtr _numjets06_15_19_04_06, _numjets06_15_19_06_10, _numjets06_15_19_10_15, _numjets06_15_19_15_24, _numjets06_15_19_24_40; CounterPtr _numjets04_00_19_04_06, _numjets04_00_19_06_10, _numjets04_00_19_10_15, _numjets04_00_19_15_24, _numjets04_00_19_24_40; CounterPtr _numjets06_00_19_04_06, _numjets06_00_19_06_10, _numjets06_00_19_10_15, _numjets06_00_19_15_24, _numjets06_00_19_24_40; private: /// @name Histograms //@{ Histo1DPtr _h_pt04_00_05, _h_pt06_00_05; Histo1DPtr _h_N04_00_05_04_06, _h_N06_00_05_04_06; Histo1DPtr _h_N04_00_05_06_10, 
_h_N06_00_05_06_10; Histo1DPtr _h_N04_00_05_10_15, _h_N06_00_05_10_15; Histo1DPtr _h_N04_00_05_15_24, _h_N06_00_05_15_24; Histo1DPtr _h_N04_00_05_24_40, _h_N06_00_05_24_40; Histo1DPtr _h_z04_00_05_04_06, _h_z06_00_05_04_06; Histo1DPtr _h_z04_00_05_06_10, _h_z06_00_05_06_10; Histo1DPtr _h_z04_00_05_10_15, _h_z06_00_05_10_15; Histo1DPtr _h_z04_00_05_15_24, _h_z06_00_05_15_24; Histo1DPtr _h_z04_00_05_24_40, _h_z06_00_05_24_40; Histo1DPtr _h_ptrel04_00_05_04_06, _h_ptrel06_00_05_04_06; Histo1DPtr _h_ptrel04_00_05_06_10, _h_ptrel06_00_05_06_10; Histo1DPtr _h_ptrel04_00_05_10_15, _h_ptrel06_00_05_10_15; Histo1DPtr _h_ptrel04_00_05_15_24, _h_ptrel06_00_05_15_24; Histo1DPtr _h_ptrel04_00_05_24_40, _h_ptrel06_00_05_24_40; Histo1DPtr _h_rdA04_00_05_04_06, _h_rdA06_00_05_04_06; Histo1DPtr _h_rdA04_00_05_06_10, _h_rdA06_00_05_06_10; Histo1DPtr _h_rdA04_00_05_10_15, _h_rdA06_00_05_10_15; Histo1DPtr _h_rdA04_00_05_15_24, _h_rdA06_00_05_15_24; Histo1DPtr _h_rdA04_00_05_24_40, _h_rdA06_00_05_24_40; Histo1DPtr _h_pt04_05_10, _h_pt06_05_10; Histo1DPtr _h_N04_05_10_04_06, _h_N06_05_10_04_06; Histo1DPtr _h_N04_05_10_06_10, _h_N06_05_10_06_10; Histo1DPtr _h_N04_05_10_10_15, _h_N06_05_10_10_15; Histo1DPtr _h_N04_05_10_15_24, _h_N06_05_10_15_24; Histo1DPtr _h_N04_05_10_24_40, _h_N06_05_10_24_40; Histo1DPtr _h_z04_05_10_04_06, _h_z06_05_10_04_06; Histo1DPtr _h_z04_05_10_06_10, _h_z06_05_10_06_10; Histo1DPtr _h_z04_05_10_10_15, _h_z06_05_10_10_15; Histo1DPtr _h_z04_05_10_15_24, _h_z06_05_10_15_24; Histo1DPtr _h_z04_05_10_24_40, _h_z06_05_10_24_40; Histo1DPtr _h_ptrel04_05_10_04_06, _h_ptrel06_05_10_04_06; Histo1DPtr _h_ptrel04_05_10_06_10, _h_ptrel06_05_10_06_10; Histo1DPtr _h_ptrel04_05_10_10_15, _h_ptrel06_05_10_10_15; Histo1DPtr _h_ptrel04_05_10_15_24, _h_ptrel06_05_10_15_24; Histo1DPtr _h_ptrel04_05_10_24_40, _h_ptrel06_05_10_24_40; Histo1DPtr _h_rdA04_05_10_04_06, _h_rdA06_05_10_04_06; Histo1DPtr _h_rdA04_05_10_06_10, _h_rdA06_05_10_06_10; Histo1DPtr _h_rdA04_05_10_10_15, 
_h_rdA06_05_10_10_15; Histo1DPtr _h_rdA04_05_10_15_24, _h_rdA06_05_10_15_24; Histo1DPtr _h_rdA04_05_10_24_40, _h_rdA06_05_10_24_40; Histo1DPtr _h_pt04_10_15, _h_pt06_10_15; Histo1DPtr _h_N04_10_15_04_06, _h_N06_10_15_04_06; Histo1DPtr _h_N04_10_15_06_10, _h_N06_10_15_06_10; Histo1DPtr _h_N04_10_15_10_15, _h_N06_10_15_10_15; Histo1DPtr _h_N04_10_15_15_24, _h_N06_10_15_15_24; Histo1DPtr _h_N04_10_15_24_40, _h_N06_10_15_24_40; Histo1DPtr _h_z04_10_15_04_06, _h_z06_10_15_04_06; Histo1DPtr _h_z04_10_15_06_10, _h_z06_10_15_06_10; Histo1DPtr _h_z04_10_15_10_15, _h_z06_10_15_10_15; Histo1DPtr _h_z04_10_15_15_24, _h_z06_10_15_15_24; Histo1DPtr _h_z04_10_15_24_40, _h_z06_10_15_24_40; Histo1DPtr _h_ptrel04_10_15_04_06, _h_ptrel06_10_15_04_06; Histo1DPtr _h_ptrel04_10_15_06_10, _h_ptrel06_10_15_06_10; Histo1DPtr _h_ptrel04_10_15_10_15, _h_ptrel06_10_15_10_15; Histo1DPtr _h_ptrel04_10_15_15_24, _h_ptrel06_10_15_15_24; Histo1DPtr _h_ptrel04_10_15_24_40, _h_ptrel06_10_15_24_40; Histo1DPtr _h_rdA04_10_15_04_06, _h_rdA06_10_15_04_06; Histo1DPtr _h_rdA04_10_15_06_10, _h_rdA06_10_15_06_10; Histo1DPtr _h_rdA04_10_15_10_15, _h_rdA06_10_15_10_15; Histo1DPtr _h_rdA04_10_15_15_24, _h_rdA06_10_15_15_24; Histo1DPtr _h_rdA04_10_15_24_40, _h_rdA06_10_15_24_40; Histo1DPtr _h_pt04_15_19, _h_pt06_15_19; Histo1DPtr _h_N04_15_19_04_06, _h_N06_15_19_04_06; Histo1DPtr _h_N04_15_19_06_10, _h_N06_15_19_06_10; Histo1DPtr _h_N04_15_19_10_15, _h_N06_15_19_10_15; Histo1DPtr _h_N04_15_19_15_24, _h_N06_15_19_15_24; Histo1DPtr _h_N04_15_19_24_40, _h_N06_15_19_24_40; Histo1DPtr _h_z04_15_19_04_06, _h_z06_15_19_04_06; Histo1DPtr _h_z04_15_19_06_10, _h_z06_15_19_06_10; Histo1DPtr _h_z04_15_19_10_15, _h_z06_15_19_10_15; Histo1DPtr _h_z04_15_19_15_24, _h_z06_15_19_15_24; Histo1DPtr _h_z04_15_19_24_40, _h_z06_15_19_24_40; Histo1DPtr _h_ptrel04_15_19_04_06, _h_ptrel06_15_19_04_06; Histo1DPtr _h_ptrel04_15_19_06_10, _h_ptrel06_15_19_06_10; Histo1DPtr _h_ptrel04_15_19_10_15, _h_ptrel06_15_19_10_15; Histo1DPtr 
_h_ptrel04_15_19_15_24, _h_ptrel06_15_19_15_24; Histo1DPtr _h_ptrel04_15_19_24_40, _h_ptrel06_15_19_24_40; Histo1DPtr _h_rdA04_15_19_04_06, _h_rdA06_15_19_04_06; Histo1DPtr _h_rdA04_15_19_06_10, _h_rdA06_15_19_06_10; Histo1DPtr _h_rdA04_15_19_10_15, _h_rdA06_15_19_10_15; Histo1DPtr _h_rdA04_15_19_15_24, _h_rdA06_15_19_15_24; Histo1DPtr _h_rdA04_15_19_24_40, _h_rdA06_15_19_24_40; Histo1DPtr _h_N04_00_19_04_06, _h_N06_00_19_04_06; Histo1DPtr _h_N04_00_19_06_10, _h_N06_00_19_06_10; Histo1DPtr _h_N04_00_19_10_15, _h_N06_00_19_10_15; Histo1DPtr _h_N04_00_19_15_24, _h_N06_00_19_15_24; Histo1DPtr _h_N04_00_19_24_40, _h_N06_00_19_24_40; Histo1DPtr _h_z04_00_19_04_06, _h_z06_00_19_04_06; Histo1DPtr _h_z04_00_19_06_10, _h_z06_00_19_06_10; Histo1DPtr _h_z04_00_19_10_15, _h_z06_00_19_10_15; Histo1DPtr _h_z04_00_19_15_24, _h_z06_00_19_15_24; Histo1DPtr _h_z04_00_19_24_40, _h_z06_00_19_24_40; Histo1DPtr _h_ptrel04_00_19_04_06, _h_ptrel06_00_19_04_06; Histo1DPtr _h_ptrel04_00_19_06_10, _h_ptrel06_00_19_06_10; Histo1DPtr _h_ptrel04_00_19_10_15, _h_ptrel06_00_19_10_15; Histo1DPtr _h_ptrel04_00_19_15_24, _h_ptrel06_00_19_15_24; Histo1DPtr _h_ptrel04_00_19_24_40, _h_ptrel06_00_19_24_40; Histo1DPtr _h_rdA04_00_19_04_06, _h_rdA06_00_19_04_06; Histo1DPtr _h_rdA04_00_19_06_10, _h_rdA06_00_19_06_10; Histo1DPtr _h_rdA04_00_19_10_15, _h_rdA06_00_19_10_15; Histo1DPtr _h_rdA04_00_19_15_24, _h_rdA06_00_19_15_24; Histo1DPtr _h_rdA04_00_19_24_40, _h_rdA06_00_19_24_40; //@} }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I919017); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I921594.cc b/analyses/pluginATLAS/ATLAS_2011_I921594.cc --- a/analyses/pluginATLAS/ATLAS_2011_I921594.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I921594.cc @@ -1,124 +1,124 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { 
/// Inclusive isolated prompt photon analysis with full 2010 LHC data class ATLAS_2011_I921594 : public Analysis { public: /// Constructor ATLAS_2011_I921594() : Analysis("ATLAS_2011_I921594") { } /// Book histograms and initialise projections before the run void init() { FinalState fs; declare(fs, "FS"); // Consider the final state jets for the energy density calculation FastJets fj(fs, FastJets::KT, 0.5); fj.useJetArea(new fastjet::AreaDefinition(fastjet::VoronoiAreaSpec())); declare(fj, "KtJetsD05"); // Consider the leading pt photon with |eta|<2.37 and pT>45 GeV - LeadingParticlesFinalState photonfs(FinalState(-2.37, 2.37, 45*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-2.37, 2.37) && Cuts::pT >= 45*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // Book the dsigma/dEt (in eta bins) histograms for (size_t i = 0; i < _eta_bins.size()-1; i++) { if (fuzzyEquals(_eta_bins[i], 1.37)) continue; // skip this bin book(_h_Et_photon[i] ,1, 1, i+1); } } /// Return eta bin for either dsigma/dET histogram (area_eta=false) or energy density correction (area_eta=true) size_t _getEtaBin(double eta, bool area_eta) const { const double aeta = fabs(eta); return (!area_eta) ? 
binIndex(aeta, _eta_bins) : binIndex(aeta, _eta_bins_areaoffset); } /// Perform the per-event analysis void analyze(const Event& event) { // Retrieve leading photon const Particles& photons = apply(event, "LeadingPhoton").particles(); if (photons.size() != 1) vetoEvent; const Particle& leadingPhoton = photons[0]; // Veto events with photon in ECAL crack if (inRange(leadingPhoton.abseta(), 1.37, 1.52)) vetoEvent; // Compute isolation energy in cone of radius .4 around photon (all particles) FourMomentum mom_in_EtCone; Particles fs = apply(event, "FS").particles(); for (const Particle& p : fs) { // Check if it's outside the cone of 0.4 if (deltaR(leadingPhoton, p) >= 0.4) continue; // Don't count particles in the 5x7 central core if (deltaEta(leadingPhoton, p) < .025*5.0*0.5 && deltaPhi(leadingPhoton, p) < (PI/128.)*7.0*0.5) continue; // Increment isolation energy mom_in_EtCone += p.momentum(); } // Get the area-filtered jet inputs for computing median energy density, etc. vector< vector > ptDensities(_eta_bins_areaoffset.size()-1); FastJets fast_jets = apply(event, "KtJetsD05"); const shared_ptr clust_seq_area = fast_jets.clusterSeqArea(); for (const Jet& jet : fast_jets.jets()) { const double area = clust_seq_area->area(jet); //< Implicit call to .pseudojet() if (area > 1e-4 && jet.abseta() < _eta_bins_areaoffset.back()) ptDensities.at( _getEtaBin(jet.abseta(), true) ).push_back(jet.pT()/area); } // Compute the median energy density, etc. vector ptDensity; for (size_t b = 0; b < _eta_bins_areaoffset.size()-1; b++) { ptDensity += ptDensities[b].empty() ? 
0 : median(ptDensities[b]); } // Compute the isolation energy correction (cone area*energy density) const double ETCONE_AREA = M_PI*sqr(0.4) - (7.0*.025)*(5.0*PI/128.); const double correction = ptDensity[_getEtaBin(leadingPhoton.abseta(), true)] * ETCONE_AREA; // Apply isolation cut on area-corrected value if (mom_in_EtCone.Et() - correction > 4*GeV) vetoEvent; // Fill histograms const size_t eta_bin = _getEtaBin(leadingPhoton.abseta(), false); _h_Et_photon[eta_bin]->fill(leadingPhoton.Et()); } /// Normalise histograms etc., after the run void finalize() { for (size_t i = 0; i < _eta_bins.size()-1; i++) { if (fuzzyEquals(_eta_bins[i], 1.37)) continue; scale(_h_Et_photon[i], crossSection()/picobarn/sumOfWeights()); } } private: Histo1DPtr _h_Et_photon[5]; const vector _eta_bins = {0.00, 0.60, 1.37, 1.52, 1.81, 2.37}; const vector _eta_bins_areaoffset = {0.0, 1.5, 3.0}; }; DECLARE_RIVET_PLUGIN(ATLAS_2011_I921594); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I930220.cc b/analyses/pluginATLAS/ATLAS_2011_I930220.cc --- a/analyses/pluginATLAS/ATLAS_2011_I930220.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I930220.cc @@ -1,142 +1,142 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" #include "Rivet/Tools/BinnedHistogram.hh" namespace Rivet { /// @brief ATLAS inclusive b-jet pT spectrum, di-jet mass and di-jet chi class ATLAS_2011_I930220: public Analysis { public: ATLAS_2011_I930220() : Analysis("ATLAS_2011_I930220") { } void init() { - FinalState fs(-3.5, 3.5); + FinalState fs((Cuts::etaIn(-3.5, 3.5))); declare(fs, "FinalState"); FastJets fj(fs, FastJets::ANTIKT, 0.4); fj.useInvisibles(); declare(fj, "Jets"); declare(HeavyHadrons(Cuts::abseta < 3.5 && Cuts::pT > 5*GeV), "BHadrons"); double ybins[] = { 0.0, 0.3, 0.8, 1.2, 2.1 }; for (size_t i = 0; i < 4; ++i) { Histo1DPtr tmp; _bjetpT_SV0.add(ybins[i], ybins[i+1], book(tmp, i+1, 1, 1)); } book(_bjetpT_SV0_All ,5, 1, 1); book(_bjetpT_pTRel 
,6, 1, 1); book(_dijet_mass ,7, 1, 1); book(_dijet_phi ,8, 1, 1); book(_dijet_chi_110_370 ,9, 1, 1); book(_dijet_chi_370_850 ,10, 1, 1); _chiCounter1 = 0.0; _chiCounter2 = 0.0; _phiCounter = 0.0; } void analyze(const Event& evt) { const double weight = 1.0; const Particles& bHadrons = apply(evt, "BHadrons").bHadrons(); const Jets& jets = apply(evt, "Jets").jetsByPt(15*GeV); FourMomentum leadingJet, subleadingJet; int leadJet = 0, subJet = 0; for (const Jet& j : jets) { bool hasB = false; for (const Particle& b : bHadrons) if (deltaR(j, b) < 0.3) { hasB = true; break; } // Identify and classify the leading and subleading jets if (j.absrap() < 2.1) { ///< Move this into the jets defn if (!leadJet) { leadingJet = j.momentum(); leadJet = (hasB && j.pT() > 40*GeV) ? 2 : 1; } else if (leadJet && !subJet) { subleadingJet = j.momentum(); subJet = (hasB && j.pT() > 40*GeV) ? 2 : 1; } if (hasB) { _bjetpT_SV0.fill(j.absrap(), j.pT()/GeV, weight); _bjetpT_SV0_All->fill(j.pT()/GeV, weight); _bjetpT_pTRel->fill(j.pT()/GeV, weight); } } } // Di-b-jet plots require both the leading and subleading jets to be b-tagged and have pT > 40 GeV if (leadJet == 2 && subJet == 2) { const double mass = FourMomentum( leadingJet + subleadingJet ).mass(); _dijet_mass->fill(mass/GeV, weight); // Plot dphi for high-mass di-b-jets if (mass > 110*GeV) { _phiCounter += weight; const double d_phi = deltaPhi( leadingJet.phi(), subleadingJet.phi() ); _dijet_phi->fill(fabs(d_phi), weight); } // Plot chi for low y_boost di-b-jets (in two high-mass bins) const double y_boost = 0.5 * (leadingJet.rapidity() + subleadingJet.rapidity()); const double chi = exp( fabs( leadingJet.rapidity() - subleadingJet.rapidity() ) ); if ( fabs(y_boost) < 1.1 ) { if (inRange(mass/GeV, 110, 370)) { _chiCounter1 += weight; _dijet_chi_110_370->fill(chi, weight); } else if (inRange(mass/GeV, 370, 850)) { _chiCounter2 += weight; _dijet_chi_370_850->fill(chi, weight); } } } } void finalize() { // Normalizing to cross-section and 
mass // Additional factors represent the division by rapidity const double xsec = crossSectionPerEvent()/(picobarn); const double chiScale1 = 1 / _chiCounter1 / 260.0; const double chiScale2 = 1 / _chiCounter2 / 480.0; const double phiScale = 1 / _phiCounter; _bjetpT_SV0.scale(xsec/2, this); scale(_bjetpT_SV0_All, xsec); scale(_bjetpT_pTRel, xsec); scale(_dijet_mass, xsec); scale(_dijet_phi, phiScale ); scale(_dijet_chi_110_370, chiScale1); scale(_dijet_chi_370_850, chiScale2); } private: BinnedHistogram _bjetpT_SV0; Histo1DPtr _bjetpT_SV0_All; Histo1DPtr _bjetpT_pTRel; Histo1DPtr _dijet_mass; Histo1DPtr _dijet_phi; Histo1DPtr _dijet_chi_110_370; Histo1DPtr _dijet_chi_370_850; double _chiCounter1; double _chiCounter2; double _phiCounter; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I930220); } diff --git a/analyses/pluginATLAS/ATLAS_2011_S8924791.cc b/analyses/pluginATLAS/ATLAS_2011_S8924791.cc --- a/analyses/pluginATLAS/ATLAS_2011_S8924791.cc +++ b/analyses/pluginATLAS/ATLAS_2011_S8924791.cc @@ -1,125 +1,125 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/JetShape.hh" namespace Rivet { /// @brief ATLAS jet shape analysis /// @author Andy Buckley, Judith Katzy, Francesc Vives class ATLAS_2011_S8924791 : public Analysis { public: /// Constructor ATLAS_2011_S8924791() : Analysis("ATLAS_2011_S8924791") { } /// @name Analysis methods //@{ void init() { // Set up projections - const FinalState fs(-5.0, 5.0); + const FinalState fs((Cuts::etaIn(-5.0, 5.0))); declare(fs, "FS"); FastJets fj(fs, FastJets::ANTIKT, 0.6); fj.useInvisibles(); declare(fj, "Jets"); // Specify pT bins _ptedges = {{ 30.0, 40.0, 60.0, 80.0, 110.0, 160.0, 210.0, 260.0, 310.0, 400.0, 500.0, 600.0 }}; _yedges = {{ 0.0, 0.3, 0.8, 1.2, 2.1, 2.8 }}; // Register a jet shape projection and histogram for each pT bin for 
(size_t i = 0; i < 11; ++i) { for (size_t j = 0; j < 6; ++j) { if (i == 8 && j == 4) continue; if (i == 9 && j == 4) continue; if (i == 10 && j != 5) continue; // Set up projections for each (pT,y) bin _jsnames_pT[i][j] = "JetShape" + to_str(i) + to_str(j); const double ylow = (j < 5) ? _yedges[j] : _yedges.front(); const double yhigh = (j < 5) ? _yedges[j+1] : _yedges.back(); const JetShape jsp(fj, 0.0, 0.7, 7, _ptedges[i], _ptedges[i+1], ylow, yhigh, RAPIDITY); declare(jsp, _jsnames_pT[i][j]); // Book profile histograms for each (pT,y) bin book(_profhistRho_pT[i][j] ,i+1, j+1, 1); book(_profhistPsi_pT[i][j] ,i+1, j+1, 2); } } } /// Do the analysis void analyze(const Event& evt) { // Get jets and require at least one to pass pT and y cuts const Jets jets = apply(evt, "Jets") .jetsByPt(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) && Cuts::absrap < 2.8); MSG_DEBUG("Jet multiplicity before cuts = " << jets.size()); if (jets.size() == 0) { MSG_DEBUG("No jets found in required pT and rapidity range"); vetoEvent; } // Calculate and histogram jet shapes for (size_t ipt = 0; ipt < 11; ++ipt) { for (size_t jy = 0; jy < 6; ++jy) { if (ipt == 8 && jy == 4) continue; if (ipt == 9 && jy == 4) continue; if (ipt == 10 && jy != 5) continue; const JetShape& jsipt = apply(evt, _jsnames_pT[ipt][jy]); for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) { const double r_rho = jsipt.rBinMid(rbin); _profhistRho_pT[ipt][jy]->fill(r_rho, (1./0.1)*jsipt.diffJetShape(ijet, rbin)); const double r_Psi = jsipt.rBinMid(rbin); _profhistPsi_pT[ipt][jy]->fill(r_Psi, jsipt.intJetShape(ijet, rbin)); } } } } } // Finalize void finalize() { } //@} private: /// @name Analysis data //@{ /// Jet \f$ p_\perp\f$ bins. vector _ptedges; // This can't be a raw array if we want to initialise it non-painfully vector _yedges; /// JetShape projection name for each \f$p_\perp\f$ bin. 
string _jsnames_pT[11][6]; //@} /// @name Histograms //@{ Profile1DPtr _profhistRho_pT[11][6]; Profile1DPtr _profhistPsi_pT[11][6]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_S8924791); } diff --git a/analyses/pluginATLAS/ATLAS_2011_S8994773.cc b/analyses/pluginATLAS/ATLAS_2011_S8994773.cc --- a/analyses/pluginATLAS/ATLAS_2011_S8994773.cc +++ b/analyses/pluginATLAS/ATLAS_2011_S8994773.cc @@ -1,136 +1,136 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @author Jinlong Zhang class ATLAS_2011_S8994773 : public Analysis { public: ATLAS_2011_S8994773() : Analysis("ATLAS_2011_S8994773") { } void init() { - const FinalState fs500(-2.5, 2.5, 500*MeV); + const FinalState fs500((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 500*MeV)); declare(fs500, "FS500"); - const FinalState fslead(-2.5, 2.5, 1.0*GeV); + const FinalState fslead((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 1.0*GeV)); declare(fslead, "FSlead"); // Get an index for the beam energy isqrts = -1; if (fuzzyEquals(sqrtS(), 900*GeV)) isqrts = 0; else if (fuzzyEquals(sqrtS(), 7*TeV)) isqrts = 1; assert(isqrts >= 0); // N profiles, 500 MeV pT cut book(_hist_N_transverse_500 ,1+isqrts, 1, 1); // pTsum profiles, 500 MeV pT cut book(_hist_ptsum_transverse_500 ,3+isqrts, 1, 1); // N vs. 
Delta(phi) profiles, 500 MeV pT cut book(_hist_N_vs_dPhi_1_500 ,13+isqrts, 1, 1); book(_hist_N_vs_dPhi_2_500 ,13+isqrts, 1, 2); book(_hist_N_vs_dPhi_3_500 ,13+isqrts, 1, 3); } void analyze(const Event& event) { // Require at least one cluster in the event with pT >= 1 GeV const FinalState& fslead = apply(event, "FSlead"); if (fslead.size() < 1) { vetoEvent; } // These are the particles with pT > 500 MeV const FinalState& chargedNeutral500 = apply(event, "FS500"); // Identify leading object and its phi and pT Particles particles500 = chargedNeutral500.particlesByPt(); Particle p_lead = particles500[0]; const double philead = p_lead.phi(); const double etalead = p_lead.eta(); const double pTlead = p_lead.pT(); MSG_DEBUG("Leading object: pT = " << pTlead << ", eta = " << etalead << ", phi = " << philead); // Iterate over all > 500 MeV particles and count particles and scalar pTsum in the three regions vector num500(3, 0), ptSum500(3, 0.0); // Temporary histos that bin N in dPhi. // NB. Only one of each needed since binnings are the same for the energies and pT cuts Histo1D hist_num_dphi_500(refData(13+isqrts,1,1)); for (const Particle& p : particles500) { const double pT = p.pT(); const double dPhi = deltaPhi(philead, p.phi()); const int ir = region_index(dPhi); num500[ir] += 1; ptSum500[ir] += pT; // Fill temp histos to bin N in dPhi if (p.genParticle() != p_lead.genParticle()) { // We don't want to fill all those zeros from the leading track... 
hist_num_dphi_500.fill(dPhi, 1); } } // Now fill underlying event histograms // The densities are calculated by dividing the UE properties by dEta*dPhi // -- each region has a dPhi of 2*PI/3 and dEta is two times 2.5 const double dEtadPhi = (2*2.5 * 2*PI/3.0); _hist_N_transverse_500->fill(pTlead/GeV, num500[1]/dEtadPhi); _hist_ptsum_transverse_500->fill(pTlead/GeV, ptSum500[1]/GeV/dEtadPhi); // Update the "proper" dphi profile histograms // Note that we fill dN/dEtadPhi: dEta = 2*2.5, dPhi = 2*PI/nBins // The values tabulated in the note are for an (undefined) signed Delta(phi) rather than // |Delta(phi)| and so differ by a factor of 2: we have to actually norm for angular range = 2pi const size_t nbins = refData(13+isqrts,1,1).numPoints(); for (size_t i = 0; i < nbins; ++i) { double mean = hist_num_dphi_500.bin(i).xMid(); double value = 0.; if (hist_num_dphi_500.bin(i).numEntries() > 0) { mean = hist_num_dphi_500.bin(i).xMean(); value = hist_num_dphi_500.bin(i).area()/hist_num_dphi_500.bin(i).xWidth()/10.0; } if (pTlead/GeV >= 1.0) _hist_N_vs_dPhi_1_500->fill(mean, value); if (pTlead/GeV >= 2.0) _hist_N_vs_dPhi_2_500->fill(mean, value); if (pTlead/GeV >= 3.0) _hist_N_vs_dPhi_3_500->fill(mean, value); } } void finalize() { } private: // Little helper function to identify Delta(phi) regions inline int region_index(double dphi) { assert(inRange(dphi, 0.0, PI, CLOSED, CLOSED)); if (dphi < PI/3.0) return 0; if (dphi < 2*PI/3.0) return 1; return 2; } private: int isqrts; Profile1DPtr _hist_N_transverse_500; Profile1DPtr _hist_ptsum_transverse_500; Profile1DPtr _hist_N_vs_dPhi_1_500; Profile1DPtr _hist_N_vs_dPhi_2_500; Profile1DPtr _hist_N_vs_dPhi_3_500; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_S8994773); } diff --git a/analyses/pluginATLAS/ATLAS_2011_S9108483.cc b/analyses/pluginATLAS/ATLAS_2011_S9108483.cc --- a/analyses/pluginATLAS/ATLAS_2011_S9108483.cc +++ b/analyses/pluginATLAS/ATLAS_2011_S9108483.cc @@ -1,201 +1,201 @@ // -*- C++ -*- 
#include "Rivet/Analysis.hh"
#include "Rivet/Math/Constants.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/NonHadronicFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"

namespace Rivet {

  /// Search for heavy long-lived charged particles via smeared muon-system
  /// time-of-flight (beta) and momentum measurements.
  class ATLAS_2011_S9108483 : public Analysis {
  public:

    /// @name Constructors etc.
    //@{

    /// Constructor
    ATLAS_2011_S9108483()
      : Analysis("ATLAS_2011_S9108483")
    { }

    //@}

  public:

    /// @name Analysis methods
    //@{

    /// Book histograms and initialise projections before the run
    void init() {
      // get the non-hadronic final-state particles
      double etaMax = 2.5;
      const NonHadronicFinalState nhfs((Cuts::etaIn(-etaMax,etaMax) && Cuts::pT >= 13.*GeV));
      declare(nhfs,"NHFS");
      // select the charged ones
      const ChargedFinalState cfs(nhfs);
      declare(cfs,"CFS");
      // and then veto electrons, and taus to be safe
      VetoedFinalState vfs(cfs);
      vfs.addVetoPairId(PID::ELECTRON);
      declare(vfs,"VFS");

      /// Book histograms
      book(_count_trigger ,"count_trigger" , 1, 0., 1.);
      book(_count_event   ,"count_selection", 1, 0., 1.);
      book(_count_quality ,"count_quality" , 1, 0., 1.);
      book(_count_beta    ,"count_beta"    , 1, 0., 1.);
      book(_count_90  ,"count_90" , 1, 0., 1.);
      book(_count_110 ,"count_110", 1, 0., 1.);
      book(_count_120 ,"count_120", 1, 0., 1.);
      book(_count_130 ,"count_130", 1, 0., 1.);

      book(_hist_beta ,"beta",1000, 0., 2.);
      book(_hist_time ,"time",1000, -50, 50.);
      book(_hist_mass ,"mass", 60, 5., 305.);
    }

    /// Box-Muller: draw a Gaussian-distributed random number with the given
    /// sigma and mean (used to emulate detector smearing).
    double rndGauss(double sigma, double mean) {
      double r = sqrt(-2.0*log(rand()/static_cast<double>(RAND_MAX)));
      double phi = rand()/static_cast<double>(RAND_MAX)*2.0*pi;
      return mean+sigma*r*sin(phi);
    }

    /// Perform the per-event analysis
    void analyze(const Event& event) {
      // smearing parameters
      // time measurement (in ns)
      // const double tsmear=5.*0.7;
      const double tsmear=0.7;
      // sagita error
      const double csag =1.1E-4;
      // multiple scattering
      const double cms =2.0E-2;
      // muon chamber radius (in metres)
      const double radius = 10.e3;
      // convert to ns
      const double tr = radius/c_light;

      // get the charged final-state particles
      Particles charged = apply<VetoedFinalState>(event,"VFS").particles();
      // need at least two candidates
      if(charged.size()<2) vetoEvent;
      // number passing trigger
      _count_trigger->fill(0.5);

      // Z mass veto
      for ( const Particle & mu1 : charged ) {
        for ( const Particle & mu2 : charged ) {
          double mass = (mu1.momentum()+mu2.momentum()).mass();
          double diff = abs(mass-91.18);
          if(diff<10.) vetoEvent;
        }
      }
      // number passing first event selection
      _count_event->fill(0.5);

      // now find the candidates
      // loop over the particles and find muons and heavy charged particles
      map<double,Particle> muonCandidates;
      for (const Particle& mu : charged) {
        // calculate the smeared momentum
        double pT = mu.pT2();
        double pmag = sqrt(pT+sqr(mu.pz()));
        double deltap = sqrt( sqr(csag*sqr(pmag)) + sqr(cms*mu.E()/GeV));
        double psmear = rndGauss(deltap,pmag);
        // keep particles with pT>40
        if(psmear/pmag*mu.perp()<40.*GeV|| psmear/pmag*mu.perp()>1000.*GeV) continue;
        muonCandidates.insert(make_pair(psmear,mu));
      }
      // require two candidates
      if(muonCandidates.size()<2) vetoEvent;
      // number passing "quality" cut
      _count_quality->fill(0.5);

      // now do the time of flight
      bool filled = false;
      for(map<double,Particle>::const_iterator it=muonCandidates.begin();
          it!=muonCandidates.end();++it) {
        // true magnitude and pT of momentum
        double pT = it->second.pT2();
        double pmag = sqrt(pT+sqr(it->second.pz()));
        pT = sqrt(pT);
        // true time difference in ns
        double deltaT =tr *(it->second.E()-pmag)/pT;
        // smear it
        deltaT = rndGauss(tsmear,deltaT);
        // beta
        double beta = 1./(1.+deltaT/tr*pT/pmag);
        _hist_beta->fill(beta);
        _hist_time->fill(deltaT);
        // beta cut
        if(beta<0.95) continue;
        // mass
        double mass = 2.*pT*it->first*deltaT/tr*(1.+0.5*deltaT/tr*pT/pmag);
        if(mass<0.) continue;
        mass = sqrt(mass);
        filled = true;
        _hist_mass->fill(mass);
        if(mass>90. ) {
          _count_90 ->fill(0.5);
          if(mass>110.) {
            _count_110->fill(0.5);
            if(mass>120.) {
              _count_120->fill(0.5);
              if(mass>130.) {
                _count_130->fill(0.5);
              }
            }
          }
        }
      }
      if(!filled) vetoEvent;
      // number passing beta cut
      _count_beta->fill(0.5);
    }
    //@}

    void finalize() {
      double fact = crossSection()/sumOfWeights()*37;
      MSG_WARNING("testing " << crossSection() << " " << sumOfWeights() << " " << fact);
      scale(_hist_beta,fact);
      scale(_hist_time,fact);
      scale(_hist_mass,fact);
      scale( _count_trigger, fact);
      scale( _count_event, fact);
      scale( _count_quality, fact);
      scale( _count_beta, fact);
      scale( _count_90, fact);
      scale( _count_110, fact);
      scale( _count_120, fact);
      scale( _count_130, fact);
    }

  private:

    /// @name Histograms
    //@{
    Histo1DPtr _hist_beta;
    Histo1DPtr _hist_time;
    Histo1DPtr _hist_mass;
    Histo1DPtr _count_trigger;
    Histo1DPtr _count_event;
    Histo1DPtr _count_quality;
    Histo1DPtr _count_beta;
    Histo1DPtr _count_90;
    Histo1DPtr _count_110;
    Histo1DPtr _count_120;
    Histo1DPtr _count_130;
    //@}

  };

  // The hook for the plugin system
  DECLARE_RIVET_PLUGIN(ATLAS_2011_S9108483);

}
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1084540.cc b/analyses/pluginATLAS/ATLAS_2012_I1084540.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1084540.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1084540.cc
@@ -1,230 +1,230 @@
 // -*- C++ -*-
 /**
  * @name ATLAS Diffractive Gaps Rivet Analysis
  * @author Tim Martin, tim.martin@cern.ch
  * @version 1.0
  * @date 16/01/2012
  * @see http://arxiv.org/abs/1201.2808
  * @note pp, sqrt(s) = 7 TeV
  * @note Rapidity gap finding algorithm designed to complement
  * the ATLAS detector acceptance. Forward rapidity gap sizes
  * are calculated for each event, considering all stable
  * particles above pT cut values 200, 400, 600 and 800 MeV in
  * turn. A forward rapidity gap is defined to be the largest
  * continuous region stretching inward from either edge of the
  * detector at eta = |4.9| which contains zero particles above
  * pT Cut. Soft diffractive topologies are isolated at large
  * gap sizes.
* */ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { class ATLAS_2012_I1084540 : public Analysis { public: ATLAS_2012_I1084540() : Analysis("ATLAS_2012_I1084540") {} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { //All final states. Rapidity range = ATLAS calorimetry. Lowest pT cut = 200 MeV. - const FinalState cnfs2(-_etaMax, _etaMax, 0.2 * GeV); - const FinalState cnfs4(-_etaMax, _etaMax, 0.4 * GeV); - const FinalState cnfs6(-_etaMax, _etaMax, 0.6 * GeV); - const FinalState cnfs8(-_etaMax, _etaMax, 0.8 * GeV); + const FinalState cnfs2((Cuts::etaIn(-_etaMax, _etaMax) && Cuts::pT >= 0.2 * GeV)); + const FinalState cnfs4((Cuts::etaIn(-_etaMax, _etaMax) && Cuts::pT >= 0.4 * GeV)); + const FinalState cnfs6((Cuts::etaIn(-_etaMax, _etaMax) && Cuts::pT >= 0.6 * GeV)); + const FinalState cnfs8((Cuts::etaIn(-_etaMax, _etaMax) && Cuts::pT >= 0.8 * GeV)); declare(cnfs2, "CNFS2"); declare(cnfs4, "CNFS4"); declare(cnfs6, "CNFS6"); declare(cnfs8, "CNFS8"); _etaBinSize = (2. 
* _etaMax)/(double)_etaBins; //Book histogram book(_h_DeltaEtaF_200 ,1, 1, 1); book(_h_DeltaEtaF_400 ,2, 1, 1); book(_h_DeltaEtaF_600 ,3, 1, 1); book(_h_DeltaEtaF_800 ,4, 1, 1); } private: void fillMap(const FinalState& fs, bool* energyMap, double pTcut) { // Fill true/false array by iterating over particles and compare their // pT with pTcut for (const Particle& p : fs.particles(cmpMomByEta)) { int checkBin = -1; double checkEta = -_etaMax; while (1) { checkEta += _etaBinSize; ++checkBin; if (p.eta() < checkEta) { energyMap[checkBin] = (p.pT() > pTcut * GeV); break; } } } } public: /// Perform the per-event analysis void analyze(const Event& event) { static unsigned int event_count = 0; ++event_count; const FinalState& fs2 = apply(event, "CNFS2"); const FinalState& fs4 = apply(event, "CNFS4"); const FinalState& fs6 = apply(event, "CNFS6"); const FinalState& fs8 = apply(event, "CNFS8"); // Set up Yes/No arrays for energy in each eta bin at each pT cut bool energyMap_200[_etaBins]; bool energyMap_400[_etaBins]; bool energyMap_600[_etaBins]; bool energyMap_800[_etaBins]; for (int i = 0; i < _etaBins; ++i) { energyMap_200[i] = false; energyMap_400[i] = false; energyMap_600[i] = false; energyMap_800[i] = false; } // Veto bins based on final state particles > Cut (Where Cut = 200 - 800 MeV pT) fillMap(fs2, energyMap_200, 0.2); fillMap(fs4, energyMap_400, 0.4); fillMap(fs6, energyMap_600, 0.6); fillMap(fs8, energyMap_800, 0.8); // Apply gap finding algorithm // Detector layout follows... // -Eta [Proton --- DetectorCSide --- DetectorBarrel --- DetectorASide --- Proton] +Eta bool gapDirectionAt200 = false; //False is gap on C size, True is gap on A side. 
double largestEdgeGap_200 = 0.; double largestEdgeGap_400 = 0.; double largestEdgeGap_600 = 0.; double largestEdgeGap_800 = 0.; for (int E = 200; E <= 800; E += 200) { double EdgeGapSizeA = -1, EdgeGapSizeC = -1; bool* energyMap = 0; switch (E) { case 200: energyMap = energyMap_200; break; case 400: energyMap = energyMap_400; break; case 600: energyMap = energyMap_600; break; case 800: energyMap = energyMap_800; break; } // Look left to right for (int a = 0; a < _etaBins; ++a) { if (energyMap[a] == true) { EdgeGapSizeA = (_etaBinSize * a); break; } } // And look right to left for (int c = _etaBins-1; c >= 0; --c) { if (energyMap[c] == true) { EdgeGapSizeC = (2 * _etaMax) - (_etaBinSize * (c+1)); if (fuzzyEquals(EdgeGapSizeC, 4.47035e-08)) EdgeGapSizeC = 0.0; break; } } // Put your hands on your hips // Find the largest gap double largestEdgeGap = 0.; if (E == 200) { // If the 200 MeV pass, take the biggest of the two gaps. Make note of which side for higher pT cuts. largestEdgeGap = std::max(EdgeGapSizeA,EdgeGapSizeC); gapDirectionAt200 = (EdgeGapSizeA > EdgeGapSizeC); } else { // Use the direction from 200 MeV pass, most accurate measure of which side gap is on. 
if (gapDirectionAt200) { largestEdgeGap = EdgeGapSizeA; } else largestEdgeGap = EdgeGapSizeC; } // Check case of empty detector if (largestEdgeGap < 0.0) largestEdgeGap = 2.0 * _etaMax; // Fill bin centre switch (E) { case 200: _h_DeltaEtaF_200->fill(largestEdgeGap + _etaBinSize/2.); break; case 400: _h_DeltaEtaF_400->fill(largestEdgeGap + _etaBinSize/2.); break; case 600: _h_DeltaEtaF_600->fill(largestEdgeGap + _etaBinSize/2.); break; case 800: _h_DeltaEtaF_800->fill(largestEdgeGap + _etaBinSize/2.); break; } if (E == 200) largestEdgeGap_200 = largestEdgeGap; if (E == 400) largestEdgeGap_400 = largestEdgeGap; if (E == 600) largestEdgeGap_600 = largestEdgeGap; if (E == 800) largestEdgeGap_800 = largestEdgeGap; } // Algorithm result every 1000 events if (event_count % 1000 == 0) { for (int E = 200; E <= 800; E += 200) { bool* energyMap = 0; double largestEdgeGap = 0; switch (E) { case 200: energyMap = energyMap_200; largestEdgeGap = largestEdgeGap_200; break; case 400: energyMap = energyMap_400; largestEdgeGap = largestEdgeGap_400; break; case 600: energyMap = energyMap_600; largestEdgeGap = largestEdgeGap_600; break; case 800: energyMap = energyMap_800; largestEdgeGap = largestEdgeGap_800; break; } MSG_DEBUG("Largest Forward Gap at pT Cut " << E << " MeV=" << largestEdgeGap << " eta, NFinalState pT > 200 in ATLAS acceptance:" << fs2.particles().size()); std::string hitPattern = "Detector HitPattern=-4.9["; for (int a = 0; a < _etaBins; ++a) { if (energyMap[a] == true) hitPattern += "X"; else hitPattern += "_"; } hitPattern += "]4.9"; MSG_DEBUG(hitPattern); std::string gapArrow = " "; if (!gapDirectionAt200) { int drawSpaces = (int)(_etaBins - (largestEdgeGap/_etaBinSize) + 0.5); for (int i = 0; i < drawSpaces; ++i) gapArrow += " "; } int drawArrows = (int)((largestEdgeGap/_etaBinSize) + 0.5); for (int i = 0; i < drawArrows; ++i) gapArrow += "^"; MSG_DEBUG(gapArrow); } } } /// Normalise histograms after the run, Scale to cross section void finalize() { 
MSG_DEBUG("Cross Section=" << crossSection() / millibarn << "mb, SumOfWeights=" << sumOfWeights()); scale(_h_DeltaEtaF_200, (crossSection() / millibarn)/sumOfWeights()); scale(_h_DeltaEtaF_400, (crossSection() / millibarn)/sumOfWeights()); scale(_h_DeltaEtaF_600, (crossSection() / millibarn)/sumOfWeights()); scale(_h_DeltaEtaF_800, (crossSection() / millibarn)/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_DeltaEtaF_200; Histo1DPtr _h_DeltaEtaF_400; Histo1DPtr _h_DeltaEtaF_600; Histo1DPtr _h_DeltaEtaF_800; //@} /// @name Private variables //@{ static constexpr int _etaBins = 49; static constexpr double _etaMax = 4.9; double _etaBinSize; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1084540); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1093738.cc b/analyses/pluginATLAS/ATLAS_2012_I1093738.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1093738.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1093738.cc @@ -1,199 +1,199 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Measurement of isolated gamma + jet + X differential cross-sections /// /// Inclusive isolated gamma + jet cross-sections, differential in pT(gamma), for /// various photon and jet rapidity configurations. 
/// /// @author Giovanni Marchiori class ATLAS_2012_I1093738 : public Analysis { public: // Constructor ATLAS_2012_I1093738() : Analysis("ATLAS_2012_I1093738") { } // Book histograms and initialise projections before the run void init() { // Final state FinalState fs; declare(fs, "FS"); // Voronoi eta-phi tessellation with KT jets, for ambient energy density calculation FastJets fj(fs, FastJets::KT, 0.5); fj.useJetArea(new fastjet::AreaDefinition(fastjet::VoronoiAreaSpec())); declare(fj, "KtJetsD05"); // Leading photon - LeadingParticlesFinalState photonfs(FinalState(-1.37, 1.37, 25.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-1.37, 1.37) && Cuts::pT >= 25.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "JetFS"); // Jets FastJets jetpro(vfs, FastJets::ANTIKT, 0.4); jetpro.useInvisibles(); declare(jetpro, "Jets"); book(_h_phbarrel_jetcentral_SS ,1, 1, 1); book(_h_phbarrel_jetmedium_SS ,2, 1, 1); book(_h_phbarrel_jetforward_SS ,3, 1, 1); book(_h_phbarrel_jetcentral_OS ,4, 1, 1); book(_h_phbarrel_jetmedium_OS ,5, 1, 1); book(_h_phbarrel_jetforward_OS ,6, 1, 1); } int getEtaBin(double eta, int what) const { const double aeta = fabs(eta); if (what == 0) return binIndex(aeta, _eta_bins_ph); if (what == 1) return binIndex(aeta, _eta_bins_jet); return binIndex(aeta, _eta_bins_areaoffset); } // Perform the per-event analysis void analyze(const Event& event) { // Get the photon const FinalState& photonfs = apply(event, "LeadingPhoton"); if (photonfs.particles().size() < 1) vetoEvent; const FourMomentum photon = photonfs.particles().front().momentum(); // Get the jet Jets jets = apply(event, "Jets").jetsByPt(20.0*GeV); if (jets.empty()) vetoEvent; FourMomentum leadingJet = jets[0].momentum(); // Require jet separated from photon if (deltaR(photon, leadingJet) < 1.0) vetoEvent; // Veto if leading jet is 
outside plotted rapidity regions if (leadingJet.absrap() > 4.4) vetoEvent; // Compute the jet pT densities vector< vector > ptDensities(_eta_bins_areaoffset.size()-1); FastJets fastjets = apply(event, "KtJetsD05"); const shared_ptr clust_seq_area = fastjets.clusterSeqArea(); for (const Jet& jet : fastjets.jets()) { const double area = clust_seq_area->area(jet); //< Implicit call to pseudojet() if (area > 1e-4 && jet.abseta() < _eta_bins_areaoffset.back()) { ptDensities.at(getEtaBin(jet.abseta(), 2)) += jet.pT()/area; } } // Compute the median event energy density /// @todo This looks equivalent to median(ptDensities[b]) -- isn't SKIPNHARDJETS meant to be used as an offset? const unsigned int SKIPNHARDJETS = 0; vector ptDensity; for (size_t b = 0; b < _eta_bins_areaoffset.size()-1; b++) { double median = 0.0; if (ptDensities[b].size() > SKIPNHARDJETS) { std::sort(ptDensities[b].begin(), ptDensities[b].end()); const int nDens = ptDensities[b].size() - SKIPNHARDJETS; if (nDens % 2 == 0) { median = (ptDensities[b][nDens/2]+ptDensities[b][(nDens-2)/2])/2; } else { median = ptDensities[b][(nDens-1)/2]; } } ptDensity.push_back(median); } // Compute photon isolation with a standard ET cone const Particles fs = apply(event, "JetFS").particles(); FourMomentum mom_in_EtCone; const double ISO_DR = 0.4; const double CLUSTER_ETA_WIDTH = 0.25*5.0; const double CLUSTER_PHI_WIDTH = (PI/128.)*7.0; for (const Particle& p : fs) { // Check if it's in the cone of .4 if (deltaR(photon, p) >= ISO_DR) continue; // Check if it's in the 5x7 central core if (fabs(deltaEta(photon, p)) < CLUSTER_ETA_WIDTH*0.5 && fabs(deltaPhi(photon, p)) < CLUSTER_PHI_WIDTH*0.5) continue; // Increment sum mom_in_EtCone += p.momentum(); } // Figure out the correction (area*density) const double ETCONE_AREA = PI*ISO_DR*ISO_DR - CLUSTER_ETA_WIDTH*CLUSTER_PHI_WIDTH; const double correction = ptDensity[getEtaBin(photon.abseta(),2)] * ETCONE_AREA; // Require photon to be isolated if (mom_in_EtCone.Et()-correction > 
4.0*GeV) vetoEvent; const int photon_jet_sign = sign( leadingJet.rapidity() * photon.rapidity() ); // Fill histos const double abs_jet_rapidity = fabs(leadingJet.rapidity()); const double photon_pt = photon.pT()/GeV; const double abs_photon_eta = fabs(photon.eta()); if (abs_photon_eta < 1.37) { if (abs_jet_rapidity < 1.2) { if (photon_jet_sign >= 1) { _h_phbarrel_jetcentral_SS->fill(photon_pt); } else { _h_phbarrel_jetcentral_OS->fill(photon_pt); } } else if (abs_jet_rapidity < 2.8) { if (photon_jet_sign >= 1) { _h_phbarrel_jetmedium_SS->fill(photon_pt); } else { _h_phbarrel_jetmedium_OS->fill(photon_pt); } } else if (abs_jet_rapidity < 4.4) { if (photon_jet_sign >= 1) { _h_phbarrel_jetforward_SS->fill(photon_pt); } else { _h_phbarrel_jetforward_OS->fill(photon_pt); } } } } /// Normalise histograms etc., after the run void finalize() { scale(_h_phbarrel_jetcentral_SS, crossSection()/sumOfWeights()); scale(_h_phbarrel_jetcentral_OS, crossSection()/sumOfWeights()); scale(_h_phbarrel_jetmedium_SS, crossSection()/sumOfWeights()); scale(_h_phbarrel_jetmedium_OS, crossSection()/sumOfWeights()); scale(_h_phbarrel_jetforward_SS, crossSection()/sumOfWeights()); scale(_h_phbarrel_jetforward_OS, crossSection()/sumOfWeights()); } private: Histo1DPtr _h_phbarrel_jetcentral_SS, _h_phbarrel_jetmedium_SS, _h_phbarrel_jetforward_SS; Histo1DPtr _h_phbarrel_jetcentral_OS, _h_phbarrel_jetmedium_OS, _h_phbarrel_jetforward_OS; const vector _eta_bins_ph = {0.0, 1.37, 1.52, 2.37}; const vector _eta_bins_jet = {0.0, 1.2, 2.8, 4.4}; const vector _eta_bins_areaoffset = {0.0, 1.5, 3.0}; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1093738); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1094564.cc b/analyses/pluginATLAS/ATLAS_2012_I1094564.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1094564.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1094564.cc @@ -1,342 +1,342 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include 
"Rivet/Projections/FastJets.hh" #include "Rivet/Tools/BinnedHistogram.hh" namespace Rivet { /// ATLAS jet substructure measurement class ATLAS_2012_I1094564 : public Analysis { public: ATLAS_2012_I1094564() : Analysis("ATLAS_2012_I1094564") {} // Returns constituents to make it easier to do the filtering PseudoJets splitjet(fastjet::PseudoJet jet, double& last_R, const FastJets& fj, bool& unclustered) const { // Build a new cluster sequence just using the constituents of this jet. fastjet::ClusterSequence cs(fj.clusterSeq()->constituents(jet), fastjet::JetDefinition(fastjet::cambridge_algorithm, M_PI/2.)); // Get the jet back again vector remadeJets = cs.inclusive_jets(0.); if ( remadeJets.size() != 1 ) return remadeJets; fastjet::PseudoJet remadeJet = remadeJets[0]; fastjet::PseudoJet parent1, parent2; unclustered = false; while ( cs.has_parents(remadeJet, parent1, parent2) ) { if (parent1.squared_distance(parent2) < 0.09) break; if (parent1.m2() < parent2.m2()) { fastjet::PseudoJet tmp; tmp = parent1; parent1 = parent2; parent2 = tmp; } double ktdist = parent1.kt_distance(parent2); double rtycut2 = 0.3*0.3; if (parent1.m() < 0.67*remadeJet.m() && ktdist > rtycut2*remadeJet.m2()) { unclustered = true; break; } else { remadeJet = parent1; } } last_R = 0.5 * sqrt(parent1.squared_distance(parent2)); return cs.constituents(remadeJet); } fastjet::PseudoJet filterjet(PseudoJets jets, double& stingy_R, const double def_R) const { if (stingy_R == 0.0) stingy_R = def_R; stingy_R = def_R < stingy_R ? def_R : stingy_R; fastjet::JetDefinition stingy_jet_def(fastjet::cambridge_algorithm, stingy_R); fastjet::ClusterSequence scs(jets, stingy_jet_def); vector stingy_jets = sorted_by_pt(scs.inclusive_jets(0)); fastjet::PseudoJet reconst_jet(0, 0, 0, 0); for (size_t isj = 0; isj < std::min((size_t) 3, stingy_jets.size()); ++isj) { reconst_jet += stingy_jets[isj]; } return reconst_jet; } // These are custom functions for n-subjettiness. 
PseudoJets jetGetAxes(int n_jets, const PseudoJets& inputJets, double subR) const { // Sanity check if (inputJets.size() < (size_t) n_jets) { MSG_ERROR("Not enough input particles."); return inputJets; } // Get subjets, return fastjet::ClusterSequence sub_clust_seq(inputJets, fastjet::JetDefinition(fastjet::kt_algorithm, subR, fastjet::E_scheme, fastjet::Best)); return sub_clust_seq.exclusive_jets(n_jets); } double jetTauValue(double beta, double jet_rad, const PseudoJets& particles, const PseudoJets& axes, double Rcut) const { double tauNum = 0.0; double tauDen = 0.0; if (particles.size() == 0) return 0.0; for (size_t i = 0; i < particles.size(); i++) { // find minimum distance (set R large to begin) double minR = 10000.0; for (size_t j = 0; j < axes.size(); j++) { double tempR = sqrt(particles[i].squared_distance(axes[j])); if (tempR < minR) minR = tempR; } if (minR > Rcut) minR = Rcut; // calculate nominator and denominator tauNum += particles[i].perp() * pow(minR,beta); tauDen += particles[i].perp() * pow(jet_rad,beta); } // return N-subjettiness (or 0 if denominator is 0) return safediv(tauNum, tauDen, 0); } void jetUpdateAxes(double beta, const PseudoJets& particles, PseudoJets& axes) const { vector belongsto; for (size_t i = 0; i < particles.size(); i++) { // find minimum distance axis int assign = 0; double minR = 10000.0; for (size_t j = 0; j < axes.size(); j++) { double tempR = sqrt(particles[i].squared_distance(axes[j])); if (tempR < minR) { minR = tempR; assign = j; } } belongsto.push_back(assign); } // iterative step double deltaR2, distphi; vector ynom, phinom, den; ynom.resize(axes.size()); phinom.resize(axes.size()); den.resize(axes.size()); for (size_t i = 0; i < particles.size(); i++) { distphi = particles[i].phi() - axes[belongsto[i]].phi(); deltaR2 = particles[i].squared_distance(axes[belongsto[i]]); if (deltaR2 == 0.) 
continue; if (abs(distphi) <= M_PI) phinom.at(belongsto[i]) += particles[i].perp() * particles[i].phi() * pow(deltaR2, (beta-2)/2); else if ( distphi > M_PI) phinom.at(belongsto[i]) += particles[i].perp() * (-2 * M_PI + particles[i].phi()) * pow(deltaR2, (beta-2)/2); else if ( distphi < M_PI) phinom.at(belongsto[i]) += particles[i].perp() * (+2 * M_PI + particles[i].phi()) * pow(deltaR2, (beta-2)/2); ynom.at(belongsto[i]) += particles[i].perp() * particles[i].rap() * pow(deltaR2, (beta-2)/2); den.at(belongsto[i]) += particles[i].perp() * pow(deltaR2, (beta-2)/2); } // reset to new axes for (size_t j = 0; j < axes.size(); j++) { if (den[j] == 0.) axes.at(j) = axes[j]; else { double phi_new = fmod( 2*M_PI + (phinom[j] / den[j]), 2*M_PI ); double pt_new = axes[j].perp(); double y_new = ynom[j] / den[j]; double px = pt_new * cos(phi_new); double py = pt_new * sin(phi_new); double pz = pt_new * sinh(y_new); axes.at(j).reset(px, py, pz, axes[j].perp()/2); } } } void init() { /// Projections: - FinalState fs(-4.5, 4.5, 0.*GeV); + FinalState fs((Cuts::etaIn(-4.5, 4.5) && Cuts::pT >= 0.*GeV)); declare(fs, "FS"); declare(FastJets(fs, FastJets::ANTIKT, 1.0), "AKT"); declare(FastJets(fs, FastJets::CAM, 1.2) , "CA" ); /// Histograms: {Histo1DPtr tmp; _h_camass.add(200, 300, book(tmp, 1, 1, 1));} {Histo1DPtr tmp; _h_camass.add(300, 400, book(tmp, 2, 1, 1));} {Histo1DPtr tmp; _h_camass.add(400, 500, book(tmp, 3, 1, 1));} {Histo1DPtr tmp; _h_camass.add(500, 600, book(tmp, 4, 1, 1));} {Histo1DPtr tmp; _h_filtmass.add(200, 300, book(tmp, 5, 1, 1));} {Histo1DPtr tmp; _h_filtmass.add(300, 400, book(tmp, 6, 1, 1));} {Histo1DPtr tmp; _h_filtmass.add(400, 500, book(tmp, 7, 1, 1));} {Histo1DPtr tmp; _h_filtmass.add(500, 600, book(tmp, 8, 1, 1));} {Histo1DPtr tmp; _h_ktmass.add(200, 300, book(tmp, 9, 1, 1));} {Histo1DPtr tmp; _h_ktmass.add(300, 400, book(tmp, 10, 1, 1));} {Histo1DPtr tmp; _h_ktmass.add(400, 500, book(tmp, 11, 1, 1));} {Histo1DPtr tmp; _h_ktmass.add(500, 600, book(tmp, 12, 
1, 1));} {Histo1DPtr tmp; _h_ktd12.add(200, 300, book(tmp, 13, 1, 1));} {Histo1DPtr tmp; _h_ktd12.add(300, 400, book(tmp, 14, 1, 1));} {Histo1DPtr tmp; _h_ktd12.add(400, 500, book(tmp, 15, 1, 1));} {Histo1DPtr tmp; _h_ktd12.add(500, 600, book(tmp, 16, 1, 1));} {Histo1DPtr tmp; _h_ktd23.add(200, 300, book(tmp, 17, 1 ,1));} {Histo1DPtr tmp; _h_ktd23.add(300, 400, book(tmp, 18, 1 ,1));} {Histo1DPtr tmp; _h_ktd23.add(400, 500, book(tmp, 19, 1 ,1));} {Histo1DPtr tmp; _h_ktd23.add(500, 600, book(tmp, 20, 1 ,1));} {Histo1DPtr tmp; _h_cat21.add(200, 300, book(tmp, 21, 1, 1));} {Histo1DPtr tmp; _h_cat21.add(300, 400, book(tmp, 22, 1, 1));} {Histo1DPtr tmp; _h_cat21.add(400, 500, book(tmp, 23, 1, 1));} {Histo1DPtr tmp; _h_cat21.add(500, 600, book(tmp, 24, 1, 1));} {Histo1DPtr tmp; _h_cat32.add(200, 300, book(tmp, 25, 1, 1));} {Histo1DPtr tmp; _h_cat32.add(300, 400, book(tmp, 26, 1, 1));} {Histo1DPtr tmp; _h_cat32.add(400, 500, book(tmp, 27, 1, 1));} {Histo1DPtr tmp; _h_cat32.add(500, 600, book(tmp, 28, 1, 1));} {Histo1DPtr tmp; _h_ktt21.add(200, 300, book(tmp, 29, 1, 1));} {Histo1DPtr tmp; _h_ktt21.add(300, 400, book(tmp, 30, 1, 1));} {Histo1DPtr tmp; _h_ktt21.add(400, 500, book(tmp, 31, 1, 1));} {Histo1DPtr tmp; _h_ktt21.add(500, 600, book(tmp, 32, 1, 1));} {Histo1DPtr tmp; _h_ktt32.add(200, 300, book(tmp, 33, 1, 1));} {Histo1DPtr tmp; _h_ktt32.add(300, 400, book(tmp, 34, 1, 1));} {Histo1DPtr tmp; _h_ktt32.add(400, 500, book(tmp, 35, 1, 1));} {Histo1DPtr tmp; _h_ktt32.add(500, 600, book(tmp, 36, 1, 1));} } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; using namespace fastjet; // Get anti-kt jets with p_T > 200 GeV, check abs(y) < 2, and fill mass histograms const FastJets& ktfj = apply(event, "AKT"); PseudoJets ktjets = ktfj.pseudoJetsByPt(200*GeV); for (const PseudoJet ajet : ktjets) { if (abs(ajet.rap()) < 2) { _h_ktmass.fill(ajet.perp(), ajet.m(), weight); } } // Same as above but C/A jets const FastJets& cafj = 
apply(event, "CA"); PseudoJets cajets = cafj.pseudoJetsByPt(200*GeV); for (const PseudoJet ajet : cajets) { if (abs(ajet.rap()) < 2) { _h_camass.fill(ajet.perp(), ajet.m(), weight); } } // Split and filter. // Only do this to C/A jets in this analysis. for (const PseudoJet pjet : cajets) { if ( pjet.perp() > 600 || abs(pjet.rap()) > 2) continue; double dR = 0; bool unclustered = false; PseudoJets split_jets = splitjet(pjet, dR, cafj, unclustered); if ( (dR < 0.15) || (unclustered == false) ) continue; PseudoJet filt_jet = filterjet(split_jets, dR, 0.3); _h_filtmass.fill(filt_jet.perp(), filt_jet.m(), weight); } // Use the two last stages of clustering to get sqrt(d_12) and sqrt(d_23). // Only use anti-kt jets in this analysis. for (const PseudoJet pjet : ktjets) { if (pjet.perp() > 600 || abs(pjet.rap()) > 2) continue; ClusterSequence subjet_cseq(ktfj.clusterSeq()->constituents(pjet), JetDefinition(kt_algorithm, M_PI/2.)); double d_12 = subjet_cseq.exclusive_dmerge(1) * M_PI*M_PI/4.; double d_23 = subjet_cseq.exclusive_dmerge(2) * M_PI*M_PI/4.; _h_ktd12.fill(pjet.perp(), sqrt(d_12), weight); _h_ktd23.fill(pjet.perp(), sqrt(d_23), weight); } // N-subjettiness, use beta = 1 (no rationale given). // Uses the functions defined above. // C/A jets first, anti-kt after. double beta = 1.; //Rcut is used for particles that are very far from the closest axis. 
At 10 //is has no impact on the outcome of the calculation double Rcut = 10.; for (const PseudoJet pjet : cajets) { if (pjet.perp() > 600*GeV || fabs(pjet.rap()) > 2) continue; const PseudoJets constituents = cafj.clusterSeq()->constituents(pjet); if (constituents.size() < 3) continue; const PseudoJets axis1 = jetGetAxes(1, constituents, M_PI/2.0); const PseudoJets axis2 = jetGetAxes(2, constituents, M_PI/2.0); const PseudoJets axis3 = jetGetAxes(3, constituents, M_PI/2.0); const double radius = 1.2; const double tau1 = jetTauValue(beta, radius, constituents, axis1, Rcut); const double tau2 = jetTauValue(beta, radius, constituents, axis2, Rcut); const double tau3 = jetTauValue(beta, radius, constituents, axis3, Rcut); if (tau1 == 0 || tau2 == 0) continue; _h_cat21.fill(pjet.perp(), tau2/tau1, weight); _h_cat32.fill(pjet.perp(), tau3/tau2, weight); } for (const PseudoJet pjet : ktjets) { if (pjet.perp() > 600*GeV || fabs(pjet.rap()) > 2) continue; const PseudoJets constituents = ktfj.clusterSeq()->constituents(pjet); if (constituents.size() < 3) continue; const PseudoJets axis1 = jetGetAxes(1, constituents, M_PI/2.0); const PseudoJets axis2 = jetGetAxes(2, constituents, M_PI/2.0); const PseudoJets axis3 = jetGetAxes(3, constituents, M_PI/2.0); const double radius = 1.0; const double tau1 = jetTauValue(beta, radius, constituents, axis1, Rcut); const double tau2 = jetTauValue(beta, radius, constituents, axis2, Rcut); const double tau3 = jetTauValue(beta, radius, constituents, axis3, Rcut); if (tau1 == 0 || tau2 == 0) continue; _h_ktt21.fill(pjet.perp(), tau2/tau1, weight); _h_ktt32.fill(pjet.perp(), tau3/tau2, weight); } } /// Normalise histograms etc., after the run void finalize() { for (Histo1DPtr h : _h_camass.histos()) normalize(h); for (Histo1DPtr h : _h_filtmass.histos()) normalize(h); for (Histo1DPtr h : _h_ktmass.histos()) normalize(h); for (Histo1DPtr h : _h_ktd12.histos()) normalize(h); for (Histo1DPtr h : _h_ktd23.histos()) normalize(h); for (Histo1DPtr h 
: _h_cat21.histos()) normalize(h); for (Histo1DPtr h : _h_cat32.histos()) normalize(h); for (Histo1DPtr h : _h_ktt21.histos()) normalize(h); for (Histo1DPtr h : _h_ktt32.histos()) normalize(h); } private: BinnedHistogram _h_camass; BinnedHistogram _h_filtmass; BinnedHistogram _h_ktmass; BinnedHistogram _h_ktd12; BinnedHistogram _h_ktd23; BinnedHistogram _h_cat21; BinnedHistogram _h_cat32; BinnedHistogram _h_ktt21; BinnedHistogram _h_ktt32; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1094564); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1095236.cc b/analyses/pluginATLAS/ATLAS_2012_I1095236.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1095236.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1095236.cc @@ -1,326 +1,326 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @author Peter Richardson class ATLAS_2012_I1095236 : public Analysis { public: /// Constructor ATLAS_2012_I1095236() : Analysis("ATLAS_2012_I1095236") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Projection to find the electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 20*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // Projection to find the muons IdentifiedFinalState muons(Cuts::abseta < 2.4 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); // Jet finder VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // All tracks (to do deltaR with leptons) declare(ChargedFinalState(Cuts::abseta < 3.0),"cfs"); // Used for pTmiss - declare(VisibleFinalState(-4.9,4.9),"vfs"); + 
declare(VisibleFinalState((Cuts::etaIn(-4.9,4.9))),"vfs"); // Book histograms book(_count_SR0_A1 ,"count_SR0_A1", 1, 0., 1.); book(_count_SR0_B1 ,"count_SR0_B1", 1, 0., 1.); book(_count_SR0_C1 ,"count_SR0_C1", 1, 0., 1.); book(_count_SR0_A2 ,"count_SR0_A2", 1, 0., 1.); book(_count_SR0_B2 ,"count_SR0_B2", 1, 0., 1.); book(_count_SR0_C2 ,"count_SR0_C2", 1, 0., 1.); book(_count_SR1_D ,"count_SR1_D" , 1, 0., 1.); book(_count_SR1_E ,"count_SR1_E" , 1, 0., 1.); book(_hist_meff_SR0_A1 ,"hist_m_eff_SR0_A1", 14, 400., 1800.); book(_hist_meff_SR0_A2 ,"hist_m_eff_SR0_A2", 14, 400., 1800.); book(_hist_meff_SR1_D_e ,"hist_meff_SR1_D_e" , 16, 600., 2200.); book(_hist_meff_SR1_D_mu ,"hist_meff_SR1_D_mu", 16, 600., 2200.); book(_hist_met_SR0_A1 ,"hist_met_SR0_A1", 14, 0., 700.); book(_hist_met_SR0_A2 ,"hist_met_SR0_A2", 14, 0., 700.); book(_hist_met_SR0_D_e ,"hist_met_SR1_D_e" , 15, 0., 600.); book(_hist_met_SR0_D_mu ,"hist_met_SR1_D_mu", 15, 0., 600.); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; Jets cand_jets; const Jets jets = apply(event, "AntiKtJets04").jetsByPt(20.0*GeV); for (const Jet& jet : jets) { if ( fabs( jet.eta() ) < 2.8 ) { cand_jets.push_back(jet); } } const Particles cand_e = apply(event, "elecs").particlesByPt(); const Particles cand_mu = apply(event, "muons").particlesByPt(); // Resolve jet-lepton overlap for jets with |eta| < 2.8 Jets recon_jets; for ( const Jet& jet : cand_jets ) { if ( fabs( jet.eta() ) >= 2.8 ) continue; bool away_from_e = true; for ( const Particle & e : cand_e ) { if ( deltaR(e.momentum(),jet.momentum()) <= 0.2 ) { away_from_e = false; break; } } if ( away_from_e ) recon_jets.push_back( jet ); } // get the loose leptons used to define the 0 lepton channel Particles loose_e, loose_mu; for ( const Particle & e : cand_e ) { bool away = true; for ( const Jet& jet : recon_jets ) { if ( deltaR(e.momentum(),jet.momentum()) < 0.4 ) { away = false; break; } } if ( away ) loose_e.push_back( e 
); } for ( const Particle & mu : cand_mu ) { bool away = true; for ( const Jet& jet : recon_jets ) { if ( deltaR(mu.momentum(),jet.momentum()) < 0.4 ) { away = false; break; } } if ( away ) loose_mu.push_back( mu ); } // tight leptons for the 1-lepton channel Particles tight_mu; Particles chg_tracks = apply(event, "cfs").particles(); for ( const Particle & mu : loose_mu) { if(mu.perp()<20.) continue; double pTinCone = -mu.pT(); for ( const Particle & track : chg_tracks ) { if ( deltaR(mu.momentum(),track.momentum()) <= 0.2 ) pTinCone += track.pT(); } if ( pTinCone < 1.8*GeV ) tight_mu.push_back(mu); } Particles tight_e; for ( const Particle & e : loose_e ) { if(e.perp()<25.) continue; double pTinCone = -e.perp(); for ( const Particle & track : chg_tracks ) { if ( deltaR(e.momentum(),track.momentum()) <= 0.2 ) pTinCone += track.pT(); } if (pTinCone/e.perp()<0.1) { tight_e.push_back(e); } } // pTmiss Particles vfs_particles = apply(event, "vfs").particles(); FourMomentum pTmiss; for ( const Particle & p : vfs_particles ) { pTmiss -= p.momentum(); } double eTmiss = pTmiss.pT(); // get the number of b-tagged jets unsigned int ntagged=0; for (const Jet & jet : recon_jets ) { if(jet.perp()>50. && abs(jet.eta())<2.5 && jet.bTagged() && rand()/static_cast(RAND_MAX)<=0.60) ++ntagged; } // ATLAS calo problem if(rand()/static_cast(RAND_MAX)<=0.42) { for ( const Jet & jet : recon_jets ) { double eta = jet.rapidity(); double phi = jet.azimuthalAngle(MINUSPI_PLUSPI); if(jet.perp()>50 && eta>-0.1&&eta<1.5&&phi>-0.9&&phi<-0.5) vetoEvent; } } // at least 1 b tag if(ntagged==0) vetoEvent; // minumum Et miss if(eTmiss<80.) vetoEvent; // at least 3 jets pT > 50 if(recon_jets.size()<3 || recon_jets[2].perp()<50.) 
vetoEvent; // m_eff double m_eff = eTmiss; for(unsigned int ix=0;ix<3;++ix) m_eff += recon_jets[ix].perp(); // delta Phi double min_dPhi = 999.999; double pTmiss_phi = pTmiss.phi(); for(unsigned int ix=0;ix<3;++ix) { min_dPhi = min( min_dPhi, deltaPhi( pTmiss_phi, recon_jets[ix].phi() ) ); } // 0-lepton channels if(loose_e.empty() && loose_mu.empty() && recon_jets[0].perp()>130. && eTmiss>130. && eTmiss/m_eff>0.25 && min_dPhi>0.4) { // jet charge cut bool jetCharge = true; for(unsigned int ix=0;ix<3;++ix) { if(fabs(recon_jets[ix].eta())>2.) continue; double trackpT=0; for(const Particle & p : recon_jets[ix].particles()) { if(PID::charge3(p.pid())==0) continue; trackpT += p.perp(); } if(trackpT/recon_jets[ix].perp()<0.05) jetCharge = false; } if(jetCharge) { // SR0-A region if(m_eff>500.) { _count_SR0_A1->fill(0.5,weight); _hist_meff_SR0_A1->fill(m_eff,weight); _hist_met_SR0_A1 ->fill(eTmiss,weight); if(ntagged>=2) { _count_SR0_A2->fill(0.5,weight); _hist_meff_SR0_A2->fill(m_eff,weight); _hist_met_SR0_A2 ->fill(eTmiss,weight); } } // SR0-B if(m_eff>700.) { _count_SR0_B1->fill(0.5,weight); if(ntagged>=2) _count_SR0_B2->fill(0.5,weight); } // SR0-C if(m_eff>900.) { _count_SR0_C1->fill(0.5,weight); if(ntagged>=2) _count_SR0_C2->fill(0.5,weight); } } } // 1-lepton channels if(tight_e.size() + tight_mu.size() == 1 && recon_jets.size()>=4 && recon_jets[3].perp()>50.&& recon_jets[0].perp()>60.) { Particle lepton = tight_e.empty() ? tight_mu[0] : tight_e[0]; m_eff += lepton.perp() + recon_jets[3].perp(); // transverse mass cut double mT = 2.*(lepton.perp()*eTmiss- lepton.px()*pTmiss.px()- lepton.py()*pTmiss.py()); mT = sqrt(mT); if(mT>100.&&m_eff>700.) { // D region _count_SR1_D->fill(0.5,weight); if(lepton.abspid()==PID::ELECTRON) { _hist_meff_SR1_D_e->fill(m_eff,weight); _hist_met_SR0_D_e->fill(eTmiss,weight); } else { _hist_meff_SR1_D_mu->fill(m_eff,weight); _hist_met_SR0_D_mu->fill(eTmiss,weight); } // E region if(eTmiss>200.) 
{ _count_SR1_E->fill(0.5,weight); } } } } void finalize() { double norm = crossSection()/femtobarn*2.05/sumOfWeights(); // these are number of events at 2.05fb^-1 per 100 GeV scale( _hist_meff_SR0_A1 , 100. * norm ); scale( _hist_meff_SR0_A2 , 100. * norm ); scale( _hist_meff_SR1_D_e , 100. * norm ); scale( _hist_meff_SR1_D_mu , 100. * norm ); // these are number of events at 2.05fb^-1 per 50 GeV scale( _hist_met_SR0_A1, 50. * norm ); scale( _hist_met_SR0_A2, 40. * norm ); // these are number of events at 2.05fb^-1 per 40 GeV scale( _hist_met_SR0_D_e , 40. * norm ); scale( _hist_met_SR0_D_mu, 40. * norm ); // these are number of events at 2.05fb^-1 scale(_count_SR0_A1,norm); scale(_count_SR0_B1,norm); scale(_count_SR0_C1,norm); scale(_count_SR0_A2,norm); scale(_count_SR0_B2,norm); scale(_count_SR0_C2,norm); scale(_count_SR1_D ,norm); scale(_count_SR1_E ,norm); } //@} private: Histo1DPtr _count_SR0_A1; Histo1DPtr _count_SR0_B1; Histo1DPtr _count_SR0_C1; Histo1DPtr _count_SR0_A2; Histo1DPtr _count_SR0_B2; Histo1DPtr _count_SR0_C2; Histo1DPtr _count_SR1_D; Histo1DPtr _count_SR1_E; Histo1DPtr _hist_meff_SR0_A1; Histo1DPtr _hist_meff_SR0_A2; Histo1DPtr _hist_meff_SR1_D_e; Histo1DPtr _hist_meff_SR1_D_mu; Histo1DPtr _hist_met_SR0_A1; Histo1DPtr _hist_met_SR0_A2; Histo1DPtr _hist_met_SR0_D_e; Histo1DPtr _hist_met_SR0_D_mu; }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1095236); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1125575.cc b/analyses/pluginATLAS/ATLAS_2012_I1125575.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1125575.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1125575.cc @@ -1,239 +1,239 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Tools/BinnedHistogram.hh" namespace Rivet { /// ATLAS charged particle jet underlying event and jet radius dependence class ATLAS_2012_I1125575 : public Analysis { public: /// @name 
Constructors etc. //@{ /// Constructor ATLAS_2012_I1125575() : Analysis("ATLAS_2012_I1125575") { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - const ChargedFinalState jet_input(-2.5, 2.5, 0.5*GeV); + const ChargedFinalState jet_input((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 0.5*GeV)); declare(jet_input, "JET_INPUT"); - const ChargedFinalState track_input(-1.5, 1.5, 0.5*GeV); + const ChargedFinalState track_input((Cuts::etaIn(-1.5, 1.5) && Cuts::pT >= 0.5*GeV)); declare(track_input, "TRACK_INPUT"); const FastJets jets02(jet_input, FastJets::ANTIKT, 0.2); declare(jets02, "JETS_02"); const FastJets jets04(jet_input, FastJets::ANTIKT, 0.4); declare(jets04, "JETS_04"); const FastJets jets06(jet_input, FastJets::ANTIKT, 0.6); declare(jets06, "JETS_06"); const FastJets jets08(jet_input, FastJets::ANTIKT, 0.8); declare(jets08, "JETS_08"); const FastJets jets10(jet_input, FastJets::ANTIKT, 1.0); declare(jets10, "JETS_10"); // Mean number of tracks initializeProfiles(_h_meanNch, 1); // Mean of the average track pT in each region initializeProfiles(_h_meanPtAvg, 2); // Mean of the scalar sum of track pT in each region initializeProfiles(_h_meanPtSum, 3); // Distribution of Nch, in bins of leading track-jet pT initializeHistograms(_h_Nch, 4); // Distribution of average track-jet pT, in bins of leading track-jet pT initializeHistograms(_h_PtAvg, 5); // Distribution of sum of track-jet pT, in bins of leading track-jet pT initializeHistograms(_h_PtSum, 6); for (int i = 0; i < 5; ++i) book(_nEvents[i], "nEvents_"+to_str(i)); } void initializeProfiles(Profile1DPtr plots[5][2], int distribution) { for (int i = 0; i < 5; ++i) { for (int j = 0; j < 2; ++j) { book(plots[i][j] ,distribution, i+1, j+1); } } } void initializeHistograms(BinnedHistogram plots[5][2], int distribution) { Scatter2D refscatter = refData(1, 1, 1); for (int i = 0; i < 5; ++i) { for (int y = 0; y < 2; ++y) { for (size_t j = 0; j < 
refscatter.numPoints(); ++j) { int histogram_number = ((j+1)*2)-((y+1)%2); double low_edge = refscatter.point(j).xMin(); double high_edge = refscatter.point(j).xMax(); Histo1DPtr tmp; plots[i][y].add(low_edge, high_edge, book(tmp, distribution, i+1, histogram_number)); } } } } /// Perform the per-event analysis void analyze(const Event& event) { vector all_jets; Jets jets_02 = apply(event, "JETS_02").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5); all_jets.push_back(&jets_02); Jets jets_04 = apply(event, "JETS_04").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5); all_jets.push_back(&jets_04); Jets jets_06 = apply(event, "JETS_06").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5); all_jets.push_back(&jets_06); Jets jets_08 = apply(event, "JETS_08").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5); all_jets.push_back(&jets_08); Jets jets_10 = apply(event, "JETS_10").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5); all_jets.push_back(&jets_10); // Count the number of tracks in the away and transverse regions, for each set of jets double n_ch[5][2] = { {0,0}, {0,0}, {0,0}, {0,0}, {0,0} }; // Also add up the sum pT double sumpt[5][2] = { {0,0}, {0,0}, {0,0}, {0,0}, {0,0} }; // ptmean = sumpt / n_ch double ptavg[5][2] = { {0,0}, {0,0}, {0,0}, {0,0}, {0,0} }; // lead jet pT defines which bin we want to fill double lead_jet_pts[5] = {0.0}; // Loop over each of the jet radii: for (int i = 0; i < 5; ++i) { if (all_jets[i]->size() < 1) continue; // Find the lead jet pT lead_jet_pts[i] = all_jets[i]->at(0).pT(); // Loop over each of the charged particles const Particles& tracks = apply(event, "TRACK_INPUT").particlesByPt(); for(const Particle& t : tracks) { // Get the delta-phi between the track and the leading jet double dphi = deltaPhi(all_jets[i]->at(0), t); // Find out which region this puts it in. // 0 = away region, 1 = transverse region, 2 = toward region int region = region_index(dphi); // If the track is in the toward region, ignore it. 
if (region == 2) continue; // Otherwise, increment the relevant counters ++n_ch[i][region]; sumpt[i][region] += t.pT(); } // Calculate the pT_avg for the away and transverse regions. // (And make sure we don't try to divide by zero.) ptavg[i][0] = (n_ch[i][0] == 0 ? 0.0 : sumpt[i][0] / n_ch[i][0]); ptavg[i][1] = (n_ch[i][1] == 0 ? 0.0 : sumpt[i][1] / n_ch[i][1]); _nEvents[i]->fill(); } fillProfiles(_h_meanNch, n_ch, lead_jet_pts, 1.0 / (2*PI)); fillProfiles(_h_meanPtAvg, ptavg, lead_jet_pts, 1.0); fillProfiles(_h_meanPtSum, sumpt, lead_jet_pts, 1.0 / (2*PI)); fillHistograms(_h_Nch, n_ch, lead_jet_pts); fillHistograms(_h_PtAvg, ptavg, lead_jet_pts); fillHistograms(_h_PtSum, sumpt, lead_jet_pts); } void fillProfiles(Profile1DPtr plots[5][2], double var[5][2], double lead_pt[5], double scale) { for (int i=0; i<5; ++i) { double pt = lead_pt[i]; for (int j=0; j<2; ++j) { double v = var[i][j]; plots[i][j]->fill(pt, v*scale); } } } void fillHistograms(BinnedHistogram plots[5][2], double var[5][2], double lead_pt[5]) { for (int i=0; i<5; ++i) { double pt = lead_pt[i]; for (int j=0; j<2; ++j) { double v = var[i][j]; plots[i][j].fill(pt, v); } } } int region_index(double dphi) { assert(inRange(dphi, 0.0, PI, CLOSED, CLOSED)); if (dphi < PI/3.0) return 2; if (dphi < 2*PI/3.0) return 1; return 0; } /// Normalise histograms etc., after the run void finalize() { finalizeHistograms(_h_Nch); finalizeHistograms(_h_PtAvg); finalizeHistograms(_h_PtSum); } void finalizeHistograms(BinnedHistogram plots[5][2]) { for (int i = 0; i < 5; ++i) { for (int j = 0; j < 2; ++j) { vector histos = plots[i][j].histos(); for(Histo1DPtr h : histos) { scale(h, 1.0/ *_nEvents[i]); } } } } //@} private: // Data members like post-cuts event weight counters go here CounterPtr _nEvents[5]; Profile1DPtr _h_meanNch[5][2]; Profile1DPtr _h_meanPtAvg[5][2]; Profile1DPtr _h_meanPtSum[5][2]; BinnedHistogram _h_Nch[5][2]; BinnedHistogram _h_PtAvg[5][2]; BinnedHistogram _h_PtSum[5][2]; }; // The hook for the plugin 
system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1125575); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1183818.cc b/analyses/pluginATLAS/ATLAS_2012_I1183818.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1183818.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1183818.cc @@ -1,239 +1,239 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" /// @author Peter Wijeratne /// @author Robindra Prabhu namespace Rivet { // A very basic analysis sensitive to ET flow in minbias and dijet events class ATLAS_2012_I1183818 : public Analysis { public: ATLAS_2012_I1183818() : Analysis("ATLAS_2012_I1183818") {} public: void init() { - const FinalState cnfs(-4.8, 4.8, 0*MeV); - const ChargedFinalState cfs(-2.5, 2.5, 250*MeV); + const FinalState cnfs((Cuts::etaIn(-4.8, 4.8))); + const ChargedFinalState cfs((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 250*MeV)); declare(cnfs, "FS"); declare(cfs, "CFS"); const FastJets jetsAntiKt4(cnfs, FastJets::ANTIKT, 0.4); declare(jetsAntiKt4, "AntiKt4Jets"); // ------- MINBIAS HISTOGRAMS -------- // // MB event counter book(m_chargedEvents, "m_chargedEvents"); book(_h_ETflowEta ,1, 1, 1); book(_h_SumETbin1 ,3, 1, 1); book(_h_SumETbin2 ,4, 1, 1); book(_h_SumETbin3 ,5, 1, 1); book(_h_SumETbin4 ,6, 1, 1); book(_h_SumETbin5 ,7, 1, 1); book(_h_SumETbin6 ,8, 1, 1); // ------- DIJET HISTOGRAMS -------- // // Dijet event counter book(m_events_dijets, "m_chargedEvents"); // sumET book(_h_transETflowEta , 2, 1, 1); book(_h_transSumETbin1 , 9, 1, 1); book(_h_transSumETbin2 ,10, 1, 1); book(_h_transSumETbin3 ,11, 1, 1); book(_h_transSumETbin4 ,12, 1, 1); book(_h_transSumETbin5 ,13, 1, 1); book(_h_transSumETbin6 ,14, 1, 1); } void analyze(const Event& event) { const FinalState& cfs = apply(event, "CFS"); bool isCharged = false; if (cfs.size() >= 2) { // event selection: > 2 charged particles with pT > 250.MeV and |eta| < 2.5 isCharged = true; 
m_chargedEvents->fill(); } const FinalState& cnfs = apply(event, "FS"); Particles particles; for( const Particle& p : cnfs.particles() ) { // enforce truth selection representing detected particle sensitivity double pp = p.p3().mod(); if (PID::charge3(p.pid()) != 0 && pp < 0.5*GeV) continue; if (PID::charge3(p.pid()) == 0 && pp < 0.2*GeV) continue; particles.push_back(p); } // get jets const FastJets& jetsAntiKt4 = apply(event, "AntiKt4Jets"); const Jets& jets = jetsAntiKt4.jetsByPt(20.0*GeV); // initialise sumET variables double sumETbin1 = 0; double sumETbin2 = 0; double sumETbin3 = 0; double sumETbin4 = 0; double sumETbin5 = 0; double sumETbin6 = 0; // if (passes event selection) if (isCharged) { for( const Particle& p : particles ) { ///calculate variables double ET = p.Et()/GeV; double eta = p.abseta(); // fill histograms _h_ETflowEta->fill(eta, ET); if (eta < 0.8) sumETbin1 += ET; else if (eta < 1.6) sumETbin2 += ET; else if (eta < 2.4) sumETbin3 += ET; else if (eta < 3.2) sumETbin4 += ET; else if (eta < 4.0) sumETbin5 += ET; else if (eta <= 4.8) sumETbin6 += ET; } // end of for _h_SumETbin1->fill(sumETbin1); _h_SumETbin2->fill(sumETbin2); _h_SumETbin3->fill(sumETbin3); _h_SumETbin4->fill(sumETbin4); _h_SumETbin5->fill(sumETbin5); _h_SumETbin6->fill(sumETbin6); } // --- do dijet analysis --- if ( jets.size() >= 2 && // require at least two jets jets[0].Et() >= 20.*GeV && // require two leading jets to pass ET cuts jets[1].Et() >= 20.*GeV && fabs(jets[0].eta()) < 2.5 && // require leading jets to be central fabs(jets[1].eta()) < 2.5 && deltaPhi(jets[0], jets[1]) > 2.5 && // require back-to-back topology jets[1].Et()/jets[0].Et() >= 0.5) { //require ET-balance // found an event that satisfies dijet selection, now fill histograms... 
// initialise dijet sumET variables double trans_sumET_bin1 = 0.; double trans_sumET_bin2 = 0.; double trans_sumET_bin3 = 0.; double trans_sumET_bin4 = 0.; double trans_sumET_bin5 = 0.; double trans_sumET_bin6 = 0.; m_events_dijets->fill(); // loop over all particles and check their relation to leading jet for( const Particle& particle : particles ) { // calculate variables double dPhi = deltaPhi( jets[0], particle.momentum() ); double ET = particle.Et()/GeV; double eta = fabs(particle.eta()); // Transverse region if ( dPhi > 1./3.*M_PI && dPhi < 2./3.*M_PI ) { _h_transETflowEta->fill( eta, ET ); if (eta < 0.8) { trans_sumET_bin1 += ET; } else if (eta < 1.6) { trans_sumET_bin2 += ET; } else if (eta < 2.4) { trans_sumET_bin3 += ET; } else if (eta < 3.2) { trans_sumET_bin4 += ET; } else if (eta < 4.0) { trans_sumET_bin5 += ET; } else if (eta <= 4.8) { trans_sumET_bin6 += ET; } } } // end loop over particles _h_transSumETbin1->fill( trans_sumET_bin1); _h_transSumETbin2->fill( trans_sumET_bin2); _h_transSumETbin3->fill( trans_sumET_bin3); _h_transSumETbin4->fill( trans_sumET_bin4); _h_transSumETbin5->fill( trans_sumET_bin5); _h_transSumETbin6->fill( trans_sumET_bin6); } // end of dijet selection cuts } void finalize() { /// several scale factors here: /// 1. nEvents (m_chargedEvents) /// 2. phase-space (2*M_PI) /// 3. double binning due to symmetrisation (2) scale( _h_ETflowEta, 1./m_chargedEvents->val()/(4.*M_PI) ); scale( _h_SumETbin1, 1./m_chargedEvents->val() ); scale( _h_SumETbin2, 1./m_chargedEvents->val() ); scale( _h_SumETbin3, 1./m_chargedEvents->val() ); scale( _h_SumETbin4, 1./m_chargedEvents->val() ); scale( _h_SumETbin5, 1./m_chargedEvents->val() ); scale( _h_SumETbin6, 1./m_chargedEvents->val() ); //Dijet analysis // Dijet scale factors: //1. number of events passing dijet selection //2. phase-space: 1. / 2/3*M_PI //3. 
double binning due to symmetrisation in |eta| plots : 1/2 scale( _h_transETflowEta, 1./m_events_dijets->val() * 1./(4./3.*M_PI) ); scale( _h_transSumETbin1, 1./m_events_dijets->val() ); scale( _h_transSumETbin2, 1./m_events_dijets->val() ); scale( _h_transSumETbin3, 1./m_events_dijets->val() ); scale( _h_transSumETbin4, 1./m_events_dijets->val() ); scale( _h_transSumETbin5, 1./m_events_dijets->val() ); scale( _h_transSumETbin6, 1./m_events_dijets->val() ); } private: // Event counts CounterPtr m_chargedEvents; CounterPtr m_events_dijets; // Minbias-analysis: variable + histograms Histo1DPtr _h_ETflowEta; Histo1DPtr _h_SumETbin1; Histo1DPtr _h_SumETbin2; Histo1DPtr _h_SumETbin3; Histo1DPtr _h_SumETbin4; Histo1DPtr _h_SumETbin5; Histo1DPtr _h_SumETbin6; // Transverse region Histo1DPtr _h_transETflowEta; Histo1DPtr _h_transSumETbin1; Histo1DPtr _h_transSumETbin2; Histo1DPtr _h_transSumETbin3; Histo1DPtr _h_transSumETbin4; Histo1DPtr _h_transSumETbin5; Histo1DPtr _h_transSumETbin6; }; DECLARE_RIVET_PLUGIN(ATLAS_2012_I1183818); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1190891.cc b/analyses/pluginATLAS/ATLAS_2012_I1190891.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1190891.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1190891.cc @@ -1,300 +1,300 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Tools/RivetMT2.hh" namespace Rivet { /// @author Peter Richardson class ATLAS_2012_I1190891 : public Analysis { public: /// Constructor ATLAS_2012_I1190891() : Analysis("ATLAS_2012_I1190891") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // projection to find the electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && 
Cuts::pT > 10*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // projection to find the muons IdentifiedFinalState muons(Cuts::abseta < 2.4 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); // for pTmiss declare(VisibleFinalState(Cuts::abseta < 4.9),"vfs"); VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); /// Jet finder declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // all tracks (to do deltaR with leptons) - declare(ChargedFinalState(-3.0,3.0),"cfs"); + declare(ChargedFinalState((Cuts::etaIn(-3.0,3.0))),"cfs"); // Book histograms book(_hist_etmiss ,"hist_etmiss",10,0.,500.); book(_hist_meff ,"hist_m_eff",7,0.,1050.); book(_count_SR1 ,"count_SR1", 1, 0., 1.); book(_count_SR2 ,"count_SR2", 1, 0., 1.); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // get the jet candidates Jets cand_jets; for (const Jet& jet : apply(event, "AntiKtJets04").jetsByPt(20.0*GeV) ) { if ( fabs( jet.eta() ) < 2.5 ) { cand_jets.push_back(jet); } } // candidate muons Particles cand_mu; Particles chg_tracks = apply(event, "cfs").particles(); for ( const Particle & mu : apply(event, "muons").particlesByPt() ) { double pTinCone = -mu.pT(); for ( const Particle & track : chg_tracks ) { if ( deltaR(mu.momentum(),track.momentum()) <= 0.2 ) pTinCone += track.pT(); } if ( pTinCone < 1.8*GeV ) cand_mu.push_back(mu); } // candidate electrons Particles cand_e; for ( const Particle & e : apply(event, "elecs").particlesByPt() ) { double pTinCone = -e.perp(); for ( const Particle & track : chg_tracks ) { if ( deltaR(e.momentum(),track.momentum()) <= 0.2 ) pTinCone += track.pT(); } if (pTinCone/e.perp()<0.1) { cand_e.push_back(e); } } // resolve jet/lepton ambiguity Jets recon_jets; for ( const Jet& jet : cand_jets ) { bool away_from_e = true; for ( const Particle & e : cand_e ) { if ( deltaR(e.momentum(),jet.momentum()) <= 0.2 ) { away_from_e = false; break; } } if ( away_from_e ) 
recon_jets.push_back( jet ); } // only keep electrons more than R=0.4 from jets Particles cand2_e; for(unsigned int ie=0;ie0) continue; double mtest = (cand2_e[ie].momentum()+cand2_e[ie2].momentum()).mass(); if(mtest<=20.) { pass = false; break; } } if(pass) recon_e.push_back(cand2_e[ie]); } // only keep muons more than R=0.4 from jets Particles cand2_mu; for(unsigned int imu=0;imu0) continue; double mtest = (cand2_mu[imu].momentum()+cand2_mu[imu2].momentum()).mass(); if(mtest<=20.) { pass = false; break; } } if(pass) recon_mu.push_back(cand2_mu[imu]); } // pTmiss Particles vfs_particles = apply(event, "vfs").particles(); FourMomentum pTmiss; for ( const Particle & p : vfs_particles ) { pTmiss -= p.momentum(); } double eTmiss = pTmiss.pT(); // now only use recon_jets, recon_mu, recon_e // reject events with less than 4 electrons and muons if ( recon_mu.size() + recon_e.size() < 4 ) { MSG_DEBUG("To few charged leptons left after selection"); vetoEvent; } // check if passes single lepton trigger bool passSingle = ( !recon_e .empty() && recon_e[0] .perp()>25. )|| ( !recon_mu.empty() && recon_mu[0].perp()>20.); // or two lepton trigger bool passDouble = ( recon_mu.size()>=2 && recon_mu[1].perp()>12.) || ( recon_e .size()>=2 && recon_e [1].perp()>17.) || ( !recon_e.empty() && !recon_mu.empty() && recon_e[0].perp()>15. && recon_mu[0].perp()>10.); // must pass a trigger if( !passSingle && !passDouble ) { MSG_DEBUG("Hardest lepton fails trigger"); vetoEvent; } // calculate meff double meff = eTmiss; for ( const Particle & e : recon_e ) meff += e.perp(); for ( const Particle & mu : recon_mu ) meff += mu.perp(); for ( const Jet & jet : recon_jets ) { double pT = jet.perp(); if(pT>40.) 
meff += pT; } // mass of SFOS pairs closest to the Z mass for(unsigned int ix=0;ix0) continue; double mtest = (recon_e[ix].momentum()+recon_e[iy].momentum()).mass(); if(mtest>81.2 && mtest<101.2) vetoEvent; } } for(unsigned int ix=0;ix0) continue; double mtest = (recon_mu[ix].momentum()+recon_mu[iy].momentum()).mass(); if(mtest>81.2 && mtest<101.2) vetoEvent; } } // make the control plots _hist_etmiss ->fill(eTmiss,weight); _hist_meff ->fill(meff ,weight); // finally the counts if(eTmiss>50.) _count_SR1->fill(0.5,weight); if(meff >300.) _count_SR2->fill(0.5,weight); } //@} void finalize() { double norm = crossSection()/femtobarn*4.7/sumOfWeights(); scale(_hist_etmiss,norm* 50.); scale(_hist_meff ,norm*150.); scale(_count_SR1,norm); scale(_count_SR2,norm); } private: /// @name Histograms //@{ Histo1DPtr _hist_etmiss; Histo1DPtr _hist_meff; Histo1DPtr _count_SR1; Histo1DPtr _count_SR2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1190891); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc @@ -1,371 +1,371 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/InvMassFinalState.hh" namespace Rivet { /// Generic Z candidate struct Zstate : public ParticlePair { Zstate() { } Zstate(ParticlePair _particlepair) : ParticlePair(_particlepair) { } FourMomentum mom() const { return first.momentum() + 
second.momentum(); } operator FourMomentum() const { return mom(); } static bool cmppT(const Zstate& lx, const Zstate& rx) { return lx.mom().pT() < rx.mom().pT(); } }; /// @name ZZ analysis class ATLAS_2012_I1203852 : public Analysis { public: /// Default constructor ATLAS_2012_I1203852() : Analysis("ATLAS_2012_I1203852") { } void init() { // NB Missing ET is not required to be neutrinos - FinalState fs(-5.0, 5.0, 0.0*GeV); + FinalState fs((Cuts::etaIn(-5.0, 5.0))); // Final states to form Z bosons vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON)); vids.push_back(make_pair(PID::MUON, PID::ANTIMUON)); IdentifiedFinalState Photon(fs); Photon.acceptIdPair(PID::PHOTON); IdentifiedFinalState bare_EL(fs); bare_EL.acceptIdPair(PID::ELECTRON); IdentifiedFinalState bare_MU(fs); bare_MU.acceptIdPair(PID::MUON); // Selection 1: ZZ-> llll selection Cut etaranges_lep = Cuts::abseta < 3.16 && Cuts::pT > 7*GeV; DressedLeptons electron_sel4l(Photon, bare_EL, 0.1, etaranges_lep); declare(electron_sel4l, "ELECTRON_sel4l"); DressedLeptons muon_sel4l(Photon, bare_MU, 0.1, etaranges_lep); declare(muon_sel4l, "MUON_sel4l"); // Selection 2: ZZ-> llnunu selection Cut etaranges_lep2 = Cuts::abseta < 2.5 && Cuts::pT > 10*GeV; DressedLeptons electron_sel2l2nu(Photon, bare_EL, 0.1, etaranges_lep2); declare(electron_sel2l2nu, "ELECTRON_sel2l2nu"); DressedLeptons muon_sel2l2nu(Photon, bare_MU, 0.1, etaranges_lep2); declare(muon_sel2l2nu, "MUON_sel2l2nu"); /// Get all neutrinos. These will not be used to form jets. 
// NOTE(review): this region is part of a unified diff; lines prefixed with '-'/'+' below are patch
// markers, not C++. Template arguments were stripped by text mangling (e.g. `vector&`,
// `apply(e, ...)`) — reconstruct against the pristine file before compiling.
// --- tail of init(): neutrino/jet projections and histogram booking ---
IdentifiedFinalState neutrino_fs(Cuts::abseta < 4.5);
neutrino_fs.acceptNeutrinos();
declare(neutrino_fs, "NEUTRINO_FS");
// Calculate missing ET from the visible final state, not by requiring neutrinos
declare(MissingMomentum(Cuts::abseta < 4.5), "MISSING");
// Jet input excludes bare muons and neutrinos (they are invisible/handled separately)
VetoedFinalState jetinput;
jetinput.addVetoOnThisFinalState(bare_MU);
jetinput.addVetoOnThisFinalState(neutrino_fs);
FastJets jetpro(fs, FastJets::ANTIKT, 0.4);
declare(jetpro, "jet");
// Both ZZ on-shell histos
book(_h_ZZ_xsect ,1, 1, 1);
book(_h_ZZ_ZpT ,3, 1, 1);
book(_h_ZZ_phill ,5, 1, 1);
book(_h_ZZ_mZZ ,7, 1, 1);
// One Z off-shell (ZZstar) histos
book(_h_ZZs_xsect ,1, 1, 2);
// ZZ -> llnunu histos
book(_h_ZZnunu_xsect ,1, 1, 3);
book(_h_ZZnunu_ZpT ,4, 1, 1);
book(_h_ZZnunu_phill ,6, 1, 1);
book(_h_ZZnunu_mZZ ,8, 1, 1);
}

/// Do the analysis: select ZZ->4l and ZZ->llnunu candidates and fill histograms
void analyze(const Event& e) {
  ////////////////////////////////////////////////////////////////////
  // preselection of leptons for ZZ-> llll final state
  ////////////////////////////////////////////////////////////////////
  Particles leptons_sel4l;
  const vector& mu_sel4l = apply(e, "MUON_sel4l").dressedLeptons();
  const vector& el_sel4l = apply(e, "ELECTRON_sel4l").dressedLeptons();
  // Merge dressed muons and electrons into one candidate list
  vector leptonsFS_sel4l;
  leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), mu_sel4l.begin(), mu_sel4l.end() );
  leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), el_sel4l.begin(), el_sel4l.end() );
  ////////////////////////////////////////////////////////////////////
  // OVERLAP removal: keep only leptons with dR(l,l) > 0.2 to any other lepton
  ////////////////////////////////////////////////////////////////////
  for ( const DressedLepton& l1 : leptonsFS_sel4l) {
    bool isolated = true;
    for (DressedLepton& l2 : leptonsFS_sel4l) {
      const double dR = deltaR(l1, l2);
      if (dR < 0.2 && l1 != l2) { isolated = false; break; }
    }
    if (isolated) leptons_sel4l.push_back(l1);
  }
  //////////////////////////////////////////////////////////////////
  // Exactly two opposite charged leptons
  //////////////////////////////////////////////////////////////////
  // calculate total 'flavour' charge (sum of PDG ids; 0 means balanced l+/l- content)
  double totalcharge = 0;
  for (Particle& l : leptons_sel4l) totalcharge += l.pid();
  // Analyze 4 lepton events
  if (leptons_sel4l.size() == 4 && totalcharge == 0 ) {
    Zstate Z1, Z2;
    // Identifies Z states from 4 lepton pairs
    identifyZstates(Z1, Z2,leptons_sel4l);
    ////////////////////////////////////////////////////////////////////////////
    // Z MASS WINDOW
    // -ZZ: for both Z: 6620 GeV  (comment garbled in source; presumably 66 < mZ < 116 GeV — see cuts below)
    ///////////////////////////////////////////////////////////////////////////
    Zstate leadPtZ = std::max(Z1, Z2, Zstate::cmppT);
    double mZ1   = Z1.mom().mass();
    double mZ2   = Z2.mom().mass();
    double ZpT   = leadPtZ.mom().pT();
    // Opening angle of the leading-pT Z's two leptons, folded into [0, pi]
    double phill = fabs(deltaPhi(leadPtZ.first, leadPtZ.second));
    if (phill > M_PI) phill = 2*M_PI-phill;
    double mZZ   = (Z1.mom() + Z2.mom()).mass();
    if (mZ1 > 20*GeV && mZ2 > 20*GeV) {
      // ZZ* selection: at least one Z inside the on-shell mass window
      if (inRange(mZ1, 66*GeV, 116*GeV) || inRange(mZ2, 66*GeV, 116*GeV)) {
        // NOTE(review): `sqrtS()*GeV` looks suspicious — the llnunu branch fills with
        // `sqrtS()/GeV`; likely a units bug. TODO confirm against the published analysis.
        _h_ZZs_xsect -> fill(sqrtS()*GeV);
      }
      // ZZ selection: both Zs on-shell
      if (inRange(mZ1, 66*GeV, 116*GeV) && inRange(mZ2, 66*GeV, 116*GeV)) {
        _h_ZZ_xsect -> fill(sqrtS()*GeV);
        _h_ZZ_ZpT   -> fill(ZpT);
        _h_ZZ_phill -> fill(phill);
        _h_ZZ_mZZ   -> fill(mZZ);
      }
    }
  }
  ////////////////////////////////////////////////////////////////////
  /// preselection of leptons for ZZ-> llnunu final state
  ////////////////////////////////////////////////////////////////////
  Particles leptons_sel2l2nu; // output
  const vector& mu_sel2l2nu = apply(e, "MUON_sel2l2nu").dressedLeptons();
  const vector& el_sel2l2nu = apply(e, "ELECTRON_sel2l2nu").dressedLeptons();
  vector leptonsFS_sel2l2nu;
  leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), mu_sel2l2nu.begin(), mu_sel2l2nu.end() );
  leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), el_sel2l2nu.begin(), el_sel2l2nu.end() );
  // Lepton preselection for ZZ-> llnunu
  if ((mu_sel2l2nu.empty() || el_sel2l2nu.empty()) // cannot have opposite flavour
      && (leptonsFS_sel2l2nu.size() == 2) // exactly two leptons
      && (leptonsFS_sel2l2nu[0].charge() * leptonsFS_sel2l2nu[1].charge() < 1 ) // opposite charge
      && (deltaR(leptonsFS_sel2l2nu[0], leptonsFS_sel2l2nu[1]) > 0.3) // overlap removal
      && (leptonsFS_sel2l2nu[0].pT() > 20*GeV && leptonsFS_sel2l2nu[1].pT() > 20*GeV)) { // trigger requirement
    leptons_sel2l2nu.insert(leptons_sel2l2nu.end(), leptonsFS_sel2l2nu.begin(), leptonsFS_sel2l2nu.end());
  }
  if (leptons_sel2l2nu.empty()) vetoEvent; // no further analysis, fine to veto
  // Bare (undressed) leptons used later for the jet-lepton overlap veto
  Particles leptons_sel2l2nu_jetveto;
  for (const DressedLepton& l : mu_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton());
  for (const DressedLepton& l : el_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton());
  double ptll = (leptons_sel2l2nu[0].momentum() + leptons_sel2l2nu[1].momentum()).pT();
  // Find Z1-> ll
- FinalState fs2(-3.2, 3.2);
+ FinalState fs2((Cuts::etaIn(-3.2, 3.2)));
  InvMassFinalState imfs(fs2, vids, 20*GeV, sqrtS());
  imfs.calc(leptons_sel2l2nu);
  // Require exactly one valid same-flavour opposite-charge pair
  if (imfs.particlePairs().size() != 1) vetoEvent;
  const ParticlePair& Z1constituents = imfs.particlePairs()[0];
  FourMomentum Z1 = Z1constituents.first.momentum() + Z1constituents.second.momentum();
  // Z to neutrinos candidate from missing ET
  const MissingMomentum & missmom = applyProjection(e, "MISSING");
  const FourMomentum Z2 = missmom.missingMomentum(ZMASS);
  double met_Znunu = missmom.missingEt(); //Z2.pT();
  // mTZZ: transverse mass of the ZZ system built from the dilepton pT and the MET
  const double mT2_1st_term = add_quad(ZMASS, ptll) + add_quad(ZMASS, met_Znunu);
  const double mT2_2nd_term = Z1.px() + Z2.px();
  const double mT2_3rd_term = Z1.py() + Z2.py();
  const double mTZZ = sqrt(sqr(mT2_1st_term) - sqr(mT2_2nd_term) - sqr(mT2_3rd_term));
  if (!inRange(Z2.mass(), 66*GeV, 116*GeV)) vetoEvent;
  if (!inRange(Z1.mass(), 76*GeV, 106*GeV)) vetoEvent;
  /////////////////////////////////////////////////////////////
  // AXIAL MET < 75 GeV  (events below this threshold are vetoed)
  ////////////////////////////////////////////////////////////
  double dPhiZ1Z2 = fabs(deltaPhi(Z1, Z2));
  if (dPhiZ1Z2 > M_PI) dPhiZ1Z2 = 2*M_PI - dPhiZ1Z2;
  const double axialEtmiss = -Z2.pT()*cos(dPhiZ1Z2);
  if (axialEtmiss < 75*GeV) vetoEvent;
  const double
ZpT   = Z1.pT();
  // Dilepton opening angle folded into [0, pi]
  double phill = fabs(deltaPhi(Z1constituents.first, Z1constituents.second));
  if (phill > M_PI) phill = 2*M_PI - phill;
  ////////////////////////////////////////////////////////////////////////////
  // JETS
  //    -"j": found by "jetpro" projection && pT() > 25 GeV && |eta| < 4.5
  //    -"goodjets": "j" && dR(electron/muon,jet) > 0.3
  //
  // JETVETO: veto all events with at least one good jet
  ///////////////////////////////////////////////////////////////////////////
  vector good_jets;
  for (const Jet& j : apply(e, "jet").jetsByPt(25)) {
    if (j.abseta() > 4.5) continue;
    // NOTE(review): '0' assigned to a bool; 'false' would be clearer (behavior identical)
    bool isLepton = 0;
    for (const Particle& l : leptons_sel2l2nu_jetveto) {
      const double dR = deltaR(l.momentum(), j.momentum());
      if (dR < 0.3) { isLepton = true; break; }
    }
    if (!isLepton) good_jets.push_back(j);
  }
  size_t n_sel_jets = good_jets.size();
  if (n_sel_jets != 0) vetoEvent;
  /////////////////////////////////////////////////////////////
  // Fractional MET and lepton pair difference: "RatioMet"< 0.4
  ////////////////////////////////////////////////////////////
  double ratioMet = fabs(Z2.pT() - Z1.pT()) / Z1.pT();
  if (ratioMet > 0.4 ) vetoEvent;
  // End of ZZllnunu selection: now fill histograms
  _h_ZZnunu_xsect->fill(sqrtS()/GeV);
  _h_ZZnunu_ZpT  ->fill(ZpT);
  _h_ZZnunu_phill->fill(phill);
  _h_ZZnunu_mZZ  ->fill(mTZZ);
}

/// Finalize: scale cross-section histos to the measured xsec, shape-normalize the rest
void finalize() {
  const double norm = crossSection()/sumOfWeights()/femtobarn;
  scale(_h_ZZ_xsect, norm);
  normalize(_h_ZZ_ZpT);
  normalize(_h_ZZ_phill);
  normalize(_h_ZZ_mZZ);
  scale(_h_ZZs_xsect, norm);
  scale(_h_ZZnunu_xsect, norm);
  normalize(_h_ZZnunu_ZpT);
  normalize(_h_ZZnunu_phill);
  normalize(_h_ZZnunu_mZZ);
}

private:

void identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l);

/// @name Histograms
Histo1DPtr _h_ZZ_xsect, _h_ZZ_ZpT, _h_ZZ_phill, _h_ZZ_mZZ;
Histo1DPtr _h_ZZs_xsect;
Histo1DPtr _h_ZZnunu_xsect, _h_ZZnunu_ZpT, _h_ZZnunu_phill, _h_ZZnunu_mZZ;
// Allowed same-flavour opposite-charge id pairs for InvMassFinalState
vector< pair > vids;
const double ZMASS = 91.1876; // GeV

};

/// 4l to ZZ assignment -- algorithm
void ATLAS_2012_I1203852::identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l) {
  /////////////////////////////////////////////////////////////////////////////
  /// ZZ->4l pairing
  /// - Exactly two same flavour opposite charged leptons
  /// - Ambiguities in pairing are resolved by choosing the combination
  ///   that results in the smaller value of the sum |mll - mZ| for the two pairs
  /////////////////////////////////////////////////////////////////////////////
  // Split the four leptons by flavour and charge sign
  Particles part_pos_el, part_neg_el, part_pos_mu, part_neg_mu;
  for (const Particle& l : leptons_sel4l) {
    if (l.abspid() == PID::ELECTRON) {
      if (l.pid() < 0) part_neg_el.push_back(l);
      if (l.pid() > 0) part_pos_el.push_back(l);
    } else if (l.abspid() == PID::MUON) {
      if (l.pid() < 0) part_neg_mu.push_back(l);
      if (l.pid() > 0) part_pos_mu.push_back(l);
    }
  }
  // ee/mm channel: two same-flavour pairs -> two possible pairings
  if ( part_neg_el.size() == 2 || part_neg_mu.size() == 2) {
    Zstate Zcand_1, Zcand_2, Zcand_3, Zcand_4;
    if (part_neg_el.size() == 2) { // ee
      Zcand_1 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[0] ) );
      Zcand_2 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[1] ) );
      Zcand_3 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[0] ) );
      Zcand_4 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[1] ) );
    } else { // mumu
      Zcand_1 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[0] ) );
      Zcand_2 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[1] ) );
      Zcand_3 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[0] ) );
      Zcand_4 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[1] ) );
    }
    // We can have the following pairs: (Z1 + Z4) || (Z2 + Z3)
    // Pick the combination minimizing sum of |mll - mZ|
    double minValue_1, minValue_2;
    minValue_1 = fabs( Zcand_1.mom().mass() - ZMASS ) + fabs( Zcand_4.mom().mass() - ZMASS);
    minValue_2 = fabs( Zcand_2.mom().mass() - ZMASS ) + fabs( Zcand_3.mom().mass() - ZMASS);
    if (minValue_1 < minValue_2 ) {
      Z1 = Zcand_1;
      Z2 = Zcand_4;
    } else {
      Z1 = Zcand_2;
      Z2 = Zcand_3;
    }
  // emu channel: pairing is unambiguous
  } else if (part_neg_mu.size() == 1 && part_neg_el.size() == 1) {
    Z1 = Zstate ( ParticlePair (part_neg_mu[0], part_pos_mu[0] ) );
    Z2 = Zstate ( ParticlePair (part_neg_el[0], part_pos_el[0] ) );
  }
}

// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1203852);

}
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1204447.cc b/analyses/pluginATLAS/ATLAS_2012_I1204447.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1204447.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1204447.cc
@@ -1,1056 +1,1056 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/VisibleFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Projections/FastJets.hh"

namespace Rivet {

// ATLAS multi-lepton SUSY search; counts events per signal region and compares to upper limits
class ATLAS_2012_I1204447 : public Analysis {
public:

  /// Constructor
  ATLAS_2012_I1204447()
    : Analysis("ATLAS_2012_I1204447") { }

  /// Book histograms and initialise projections before the run
  void init() {
    // To calculate the acceptance without having the fiducial lepton efficiencies included, this part can be turned off
    _use_fiducial_lepton_efficiency = true;
    // Random numbers for simulation of ATLAS detector reconstruction efficiency
    srand(160385);
    // Read in all signal regions
    _signal_regions = getSignalRegions();
    // Set number of events per signal region to 0
    for (size_t i = 0; i < _signal_regions.size(); i++)
      book(_eventCountsPerSR[_signal_regions[i]], "_eventCountsPerSR_" + _signal_regions[i]);
    // Final state including all charged and neutral particles
-   const FinalState fs(-5.0, 5.0, 1*GeV);
+   const FinalState fs((Cuts::etaIn(-5.0, 5.0) && Cuts::pT >= 1*GeV));
    declare(fs, "FS");
    // Final state including all charged particles
    declare(ChargedFinalState(Cuts::abseta < 2.5 && Cuts::pT > 1*GeV), "CFS");
    // Final state including all visible particles (to calculate MET, Jets etc.)
declare(VisibleFinalState(Cuts::abseta < 5.0), "VFS");
    // Final state including all AntiKt 04 Jets
    VetoedFinalState vfs;
    vfs.addVetoPairId(PID::MUON);
    declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04");
    // Final state including all unstable particles (including taus)
    declare(UnstableFinalState(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV), "UFS");
    // Final state including all electrons
    IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 10*GeV);
    elecs.acceptIdPair(PID::ELECTRON);
    declare(elecs, "elecs");
    // Final state including all muons
    IdentifiedFinalState muons(Cuts::abseta < 2.5 && Cuts::pT > 10*GeV);
    muons.acceptIdPair(PID::MUON);
    declare(muons, "muons");
    // Book histograms
    book(_h_HTlep_all  ,"HTlep_all" , 30, 0, 1500);
    book(_h_HTjets_all ,"HTjets_all", 30, 0, 1500);
    book(_h_MET_all    ,"MET_all"   , 20, 0, 1000);
    book(_h_Meff_all   ,"Meff_all"  , 30, 0, 3000);
    book(_h_e_n   ,"e_n"  , 10, -0.5, 9.5);
    book(_h_mu_n  ,"mu_n" , 10, -0.5, 9.5);
    book(_h_tau_n ,"tau_n", 10, -0.5, 9.5);
    book(_h_pt_1_3l ,"pt_1_3l", 100, 0, 2000);
    book(_h_pt_2_3l ,"pt_2_3l", 100, 0, 2000);
    book(_h_pt_3_3l ,"pt_3_3l", 100, 0, 2000);
    book(_h_pt_1_2ltau ,"pt_1_2ltau", 100, 0, 2000);
    book(_h_pt_2_2ltau ,"pt_2_2ltau", 100, 0, 2000);
    book(_h_pt_3_2ltau ,"pt_3_2ltau", 100, 0, 2000);
    book(_h_excluded ,"excluded", 2, -0.5, 1.5);
  }

  /// Perform the per-event analysis
  void analyze(const Event& event) {
    // Muons: isolate, then keep with a simulated reconstruction efficiency
    Particles muon_candidates;
    const Particles charged_tracks    = apply(event, "CFS").particles();
    const Particles visible_particles = apply(event, "VFS").particles();
    for (const Particle& mu : apply(event, "muons").particlesByPt()) {
      // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of muon itself)
      double pTinCone = -mu.pT();
      for (const Particle& track : charged_tracks) {
        if (deltaR(mu.momentum(), track.momentum()) < 0.3)
          pTinCone += track.pT();
      }
      // Calculate eTCone30 variable (pT of all visible particles within dR<0.3)
      double eTinCone = 0.;
      for (const Particle& visible_particle : visible_particles) {
        if (visible_particle.abspid() != PID::MUON && inRange(deltaR(mu.momentum(), visible_particle.momentum()), 0.1, 0.3))
          eTinCone += visible_particle.pT();
      }
      // Apply reconstruction efficiency and simulate reco
      // id 14 = muon from tau decay (different efficiency table than prompt muon, id 13)
      int muon_id = 13;
      if ( mu.hasAncestor(15) || mu.hasAncestor(-15)) muon_id = 14;
      const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(muon_id, mu) : 1.0;
      const bool keep_muon = rand()/static_cast(RAND_MAX) <= eff;
      // Keep muon if pTCone30/pT < 0.15 and eTCone30/pT < 0.2 and reconstructed
      if (keep_muon && pTinCone/mu.pT() <= 0.15 && eTinCone/mu.pT() < 0.2)
        muon_candidates.push_back(mu);
    }
    // Electrons: same isolation + efficiency treatment as muons
    Particles electron_candidates;
    for (const Particle& e : apply(event, "elecs").particlesByPt()) {
      // Neglect electrons in crack regions
      if (inRange(e.abseta(), 1.37, 1.52)) continue;
      // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of electron itself)
      double pTinCone = -e.pT();
      for (const Particle& track : charged_tracks) {
        if (deltaR(e.momentum(), track.momentum()) < 0.3)
          pTinCone += track.pT();
      }
      // Calculate eTCone30 variable (pT of all visible particles (except muons) within dR<0.3)
      double eTinCone = 0.;
      for (const Particle& visible_particle : visible_particles) {
        if (visible_particle.abspid() != PID::MUON && inRange(deltaR(e.momentum(), visible_particle.momentum()), 0.1, 0.3))
          eTinCone += visible_particle.pT();
      }
      // Apply reconstruction efficiency and simulate reco
      // id 12 = electron from tau decay (different efficiency table than prompt electron, id 11)
      int elec_id = 11;
      if (e.hasAncestor(15) || e.hasAncestor(-15)) elec_id = 12;
      const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(elec_id, e) : 1.0;
      const bool keep_elec = rand()/static_cast(RAND_MAX) <= eff;
      // Keep electron if pTCone30/pT < 0.13 and eTCone30/pT < 0.2 and reconstructed
      if (keep_elec && pTinCone/e.pT() <= 0.13 && eTinCone/e.pT() < 0.2)
        electron_candidates.push_back(e);
    }
    // Taus
    /// @todo This could benefit from a tau finder projection
    Particles tau_candidates;
    for (const Particle& tau : apply(event, "UFS").particlesByPt()) {
      // Only pick taus out of all unstable particles
      if (tau.abspid() != PID::TAU) continue;
      // Check that tau has decayed into daughter particles
      /// @todo Huh? Unstable taus with no decay vtx? Can use Particle.isStable()? But why in this situation?
      if (tau.genParticle()->end_vertex() == 0) continue;
      // Calculate visible tau pT from pT of tau neutrino in tau decay for pT and |eta| cuts
      FourMomentum daughter_tau_neutrino_momentum = get_tau_neutrino_mom(tau);
      Particle tau_vis = tau;
      tau_vis.setMomentum(tau.momentum()-daughter_tau_neutrino_momentum);
      // keep only taus in certain eta region and above 15 GeV of visible tau pT
      if ( tau_vis.pT() <= 15.0*GeV || tau_vis.abseta() > 2.5) continue;
      // Get prong number (number of tracks) in tau decay and check if tau decays leptonically
      unsigned int nprong = 0;
      bool lep_decaying_tau = false;
      get_prong_number(tau.genParticle(), nprong, lep_decaying_tau);
      // Apply reconstruction efficiency
      int tau_id = 15;
      if (nprong == 1) tau_id = 15;
      else if (nprong == 3) tau_id = 16;
      // Get fiducial lepton efficiency simulate reco efficiency
      const double eff = (_use_fiducial_lepton_efficiency) ?
apply_reco_eff(tau_id, tau_vis) : 1.0;
      const bool keep_tau = rand()/static_cast(RAND_MAX) <= eff;
      // Keep tau if nprong = 1, it decays hadronically, and it's reconstructed by the detector
      if ( !lep_decaying_tau && nprong == 1 && keep_tau) tau_candidates.push_back(tau_vis);
    }
    // Jets (all anti-kt R=0.4 jets with pT > 25 GeV and eta < 4.9)
    Jets jet_candidates;
    for (const Jet& jet : apply(event, "AntiKtJets04").jetsByPt(25*GeV)) {
      if (jet.abseta() < 4.9) jet_candidates.push_back(jet);
    }
    // ETmiss: negative vector sum of all visible-particle momenta
    Particles vfs_particles = apply(event, "VFS").particles();
    FourMomentum pTmiss;
    for (const Particle& p : vfs_particles) pTmiss -= p.momentum();
    double eTmiss = pTmiss.pT()/GeV;
    //------------------
    // Overlap removal
    // electron - electron
    Particles electron_candidates_2;
    for (size_t ie = 0; ie < electron_candidates.size(); ++ie) {
      const Particle & e = electron_candidates[ie];
      bool away = true;
      // If electron pair within dR < 0.1: remove electron with lower pT
      // (candidates are pT-ordered, so the kept list always holds the harder one)
      for (size_t ie2=0; ie2 < electron_candidates_2.size(); ++ie2) {
        if ( deltaR( e.momentum(), electron_candidates_2[ie2].momentum()) < 0.1 ) {
          away = false;
          break;
        }
      }
      // If isolated keep it
      if ( away )
        electron_candidates_2.push_back( e );
    }
    // jet - electron
    Jets recon_jets;
    for (const Jet& jet : jet_candidates) {
      bool away = true;
      // if jet within dR < 0.2 of electron: remove jet
      for (const Particle& e : electron_candidates_2) {
        if (deltaR(e.momentum(), jet.momentum()) < 0.2) {
          away = false;
          break;
        }
      }
      // jet - tau
      if (away) {
        // If jet within dR < 0.2 of tau: remove jet
        for (const Particle& tau : tau_candidates) {
          if (deltaR(tau.momentum(), jet.momentum()) < 0.2) {
            away = false;
            break;
          }
        }
      }
      // If isolated keep it
      if ( away )
        recon_jets.push_back( jet );
    }
    // electron - jet
    Particles recon_leptons, recon_e;
    for (size_t ie = 0; ie < electron_candidates_2.size(); ++ie) {
      const Particle& e = electron_candidates_2[ie];
      // If electron within 0.2 < dR < 0.4 from any jets: remove electron
      bool away = true;
      for (const Jet& jet : recon_jets) {
        if (deltaR(e.momentum(), jet.momentum()) < 0.4) {
          away = false;
          break;
        }
      }
      // electron - muon
      // if electron within dR < 0.1 of a muon: remove electron
      if (away) {
        for (const Particle& mu : muon_candidates) {
          if (deltaR(mu.momentum(), e.momentum()) < 0.1) {
            away = false;
            break;
          }
        }
      }
      // If isolated keep it
      if (away) {
        recon_e += e;
        recon_leptons += e;
      }
    }
    // tau - electron
    Particles recon_tau;
    for ( const Particle& tau : tau_candidates ) {
      bool away = true;
      // If tau within dR < 0.2 of an electron: remove tau
      for ( const Particle& e : recon_e ) {
        if (deltaR( tau.momentum(), e.momentum()) < 0.2) {
          away = false;
          break;
        }
      }
      // tau - muon
      // If tau within dR < 0.2 of a muon: remove tau
      if (away) {
        for (const Particle& mu : muon_candidates) {
          if (deltaR(tau.momentum(), mu.momentum()) < 0.2) {
            away = false;
            break;
          }
        }
      }
      // If isolated keep it
      if (away) recon_tau.push_back( tau );
    }
    // Muon - jet isolation
    Particles recon_mu, trigger_mu;
    // If muon within dR < 0.4 of a jet, remove muon
    for (const Particle& mu : muon_candidates) {
      bool away = true;
      for (const Jet& jet : recon_jets) {
        if ( deltaR( mu.momentum(), jet.momentum()) < 0.4 ) {
          away = false;
          break;
        }
      }
      if (away) {
        recon_mu.push_back( mu );
        recon_leptons.push_back( mu );
        // Only central muons (|eta| < 2.4) can fire the trigger
        if (mu.abseta() < 2.4) trigger_mu.push_back( mu );
      }
    }
    // End overlap removal
    //------------------
    // Jet cleaning: emulate the LAr-hole dead region for a random 42% of events
    if (rand()/static_cast(RAND_MAX) <= 0.42) {
      for (const Jet& jet : recon_jets) {
        const double eta = jet.rapidity();
        const double phi = jet.azimuthalAngle(MINUSPI_PLUSPI);
        if (jet.pT() > 25*GeV && inRange(eta, -0.1, 1.5) && inRange(phi, -0.9, -0.5))
          vetoEvent;
      }
    }
    // Post-isolation event cuts
    // Require at least 3 charged tracks in event
    if (charged_tracks.size() < 3) vetoEvent;
    // And at least one e/mu passing trigger
    if (!( !recon_e  .empty() && recon_e[0]  .pT() > 25*GeV)  &&
        !( !trigger_mu.empty() && trigger_mu[0].pT() > 25*GeV) ) {
      MSG_DEBUG("Hardest lepton fails trigger");
      vetoEvent;
    }
    // And only accept events with at least 2 electrons and muons and at least 3 leptons in total
    if (recon_mu.size() + recon_e.size() + recon_tau.size() < 3 || recon_leptons.size() < 2) vetoEvent;
    // Sort leptons by decreasing pT
    sortByPt(recon_leptons);
    sortByPt(recon_tau);
    // Calculate HTlep, fill lepton pT histograms & store chosen combination of 3 leptons
    double HTlep = 0.;
    Particles chosen_leptons;
    if ( recon_leptons.size() > 2 ) {
      // 3-light-lepton channel
      _h_pt_1_3l->fill(recon_leptons[0].perp()/GeV);
      _h_pt_2_3l->fill(recon_leptons[1].perp()/GeV);
      _h_pt_3_3l->fill(recon_leptons[2].perp()/GeV);
      HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_leptons[2].pT())/GeV;
      chosen_leptons.push_back( recon_leptons[0] );
      chosen_leptons.push_back( recon_leptons[1] );
      chosen_leptons.push_back( recon_leptons[2] );
    } else {
      // 2-light-lepton + tau channel
      _h_pt_1_2ltau->fill(recon_leptons[0].perp()/GeV);
      _h_pt_2_2ltau->fill(recon_leptons[1].perp()/GeV);
      _h_pt_3_2ltau->fill(recon_tau[0].perp()/GeV);
      HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_tau[0].pT())/GeV ;
      chosen_leptons.push_back( recon_leptons[0] );
      chosen_leptons.push_back( recon_leptons[1] );
      chosen_leptons.push_back( recon_tau[0] );
    }
    // Number of prompt e/mu and had taus
    _h_e_n  ->fill(recon_e.size());
    _h_mu_n ->fill(recon_mu.size());
    _h_tau_n->fill(recon_tau.size());
    // Calculate HTjets
    double HTjets = 0.;
    for ( const Jet & jet : recon_jets )
      HTjets += jet.perp()/GeV;
    // Calculate meff
    double meff = eTmiss + HTjets;
    Particles all_leptons;
    for ( const Particle & e  : recon_e  )  {
      meff += e.perp()/GeV;
      all_leptons.push_back( e );
    }
    for ( const Particle & mu : recon_mu )  {
      meff += mu.perp()/GeV;
      all_leptons.push_back( mu );
    }
    for ( const Particle & tau : recon_tau ) {
      meff += tau.perp()/GeV;
      all_leptons.push_back( tau );
    }
    // Fill histogram of kinematic variables
    _h_HTlep_all ->fill(HTlep);
    _h_HTjets_all->fill(HTjets);
    _h_MET_all   ->fill(eTmiss);
    _h_Meff_all  ->fill(meff);
    // Determine signal region (3l/2ltau, onZ/offZ)
    string basic_signal_region;
    if ( recon_mu.size() +
recon_e.size() > 2 )
      basic_signal_region += "3l_";
    else if ( (recon_mu.size() + recon_e.size() == 2) && (recon_tau.size() > 0))
      basic_signal_region += "2ltau_";
    // Is there an OSSF pair or a three lepton combination with an invariant mass close to the Z mass
    int onZ = isonZ(chosen_leptons);
    if      (onZ == 1)   basic_signal_region += "onZ";
    else if (onZ == 0)   basic_signal_region += "offZ";
    // Check in which signal regions this event falls and adjust event counters
    fillEventCountsPerSR(basic_signal_region, onZ, HTlep, eTmiss, HTjets, meff);
  }

  /// Normalise histograms etc., after the run
  void finalize() {
    // Normalize to an integrated luminosity of 1 fb-1
    double norm = crossSection()/femtobarn/sumOfWeights();
    string best_signal_region = "";
    double ratio_best_SR = 0.;
    // Loop over all signal regions and find signal region with best sensitivity (ratio signal events/visible cross-section)
    for (size_t i = 0; i < _signal_regions.size(); i++) {
      double signal_events = _eventCountsPerSR[_signal_regions[i]]->val() * norm;
      // Use expected upper limits to find best signal region
      double UL95  = getUpperLimit(_signal_regions[i], false);
      double ratio = signal_events / UL95;
      if (ratio > ratio_best_SR) {
        best_signal_region = _signal_regions[i];
        ratio_best_SR = ratio;
      }
    }
    double signal_events_best_SR = _eventCountsPerSR[best_signal_region]->val() * norm;
    double exp_UL_best_SR = getUpperLimit(best_signal_region, false);
    double obs_UL_best_SR = getUpperLimit(best_signal_region, true);
    // Print out result
    cout << "----------------------------------------------------------------------------------------" << '\n';
    cout << "Best signal region: " << best_signal_region << '\n';
    cout << "Normalized number of signal events in this best signal region (per fb-1): " << signal_events_best_SR << '\n';
    cout << "Efficiency*Acceptance: " <<  _eventCountsPerSR[best_signal_region]->val()/sumOfWeights() << '\n';
    cout << "Cross-section [fb]: " << crossSection()/femtobarn << '\n';
    cout << "Expected visible cross-section (per fb-1): " << exp_UL_best_SR << '\n';
    cout << "Ratio (signal events / expected visible cross-section): " << ratio_best_SR << '\n';
    cout << "Observed visible cross-section (per fb-1): " << obs_UL_best_SR << '\n';
    cout << "Ratio (signal events / observed visible cross-section): " << signal_events_best_SR/obs_UL_best_SR << '\n';
    cout << "----------------------------------------------------------------------------------------" << '\n';
    // Exclusion verdict against the expected limit
    cout << "Using the EXPECTED limits (visible cross-section) of the analysis: " << '\n';
    if (signal_events_best_SR > exp_UL_best_SR) {
      cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% CL." << '\n';
      _h_excluded->fill(1);
    }
    else {
      cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << '\n';
      _h_excluded->fill(0);
    }
    cout << "----------------------------------------------------------------------------------------" << '\n';
    // Exclusion verdict against the observed limit
    cout << "Using the OBSERVED limits (visible cross-section) of the analysis: " << '\n';
    if (signal_events_best_SR > obs_UL_best_SR) {
      cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% CL." << '\n';
      _h_excluded->fill(1);
    }
    else {
      cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << '\n';
      _h_excluded->fill(0);
    }
    cout << "----------------------------------------------------------------------------------------" << '\n';
    // Normalize to cross section
    if (norm != 0) {
      scale(_h_HTlep_all,  norm);
      scale(_h_HTjets_all, norm);
      scale(_h_MET_all,    norm);
      scale(_h_Meff_all,   norm);
      scale(_h_pt_1_3l,    norm);
      scale(_h_pt_2_3l,    norm);
      scale(_h_pt_3_3l,    norm);
      scale(_h_pt_1_2ltau, norm);
      scale(_h_pt_2_2ltau, norm);
      scale(_h_pt_3_2ltau, norm);
      scale(_h_e_n,        norm);
      scale(_h_mu_n,       norm);
      scale(_h_tau_n,      norm);
      scale(_h_excluded,   signal_events_best_SR);
    }
  }

  /// Helper functions
  //@{
  /// Function giving a list of all signal regions
  /// (cartesian product of kinematic variable x basic region x cut index)
  vector getSignalRegions() {
    // List of basic signal regions
    vector basic_signal_regions;
    basic_signal_regions.push_back("3l_offZ");
    basic_signal_regions.push_back("3l_onZ");
    basic_signal_regions.push_back("2ltau_offZ");
    basic_signal_regions.push_back("2ltau_onZ");
    // List of kinematic variables
    vector kinematic_variables;
    kinematic_variables.push_back("HTlep");
    kinematic_variables.push_back("METStrong");
    kinematic_variables.push_back("METWeak");
    kinematic_variables.push_back("Meff");
    kinematic_variables.push_back("MeffStrong");
    vector signal_regions;
    // Loop over all kinematic variables and basic signal regions
    for (size_t i0 = 0; i0 < kinematic_variables.size(); i0++) {
      for (size_t i1 = 0; i1 < basic_signal_regions.size(); i1++) {
        // Is signal region onZ?
        int onZ = (basic_signal_regions[i1].find("onZ") != string::npos) ?
1 : 0;
        // Get cut values for this kinematic variable
        vector cut_values = getCutsPerSignalRegion(kinematic_variables[i0], onZ);
        // Loop over all cut values
        for (size_t i2 = 0; i2 < cut_values.size(); i2++) {
          // push signal region into vector
          signal_regions.push_back( (kinematic_variables[i0] + "_" + basic_signal_regions[i1] + "_cut_" + toString(i2)) );
        }
      }
    }
    return signal_regions;
  }

  /// Function giving all cut vales per kinematic variable (taking onZ for MET into account)
  vector getCutsPerSignalRegion(const string& signal_region, int onZ=0) {
    vector cutValues;
    // Cut values for HTlep
    if (signal_region.compare("HTlep") == 0) {
      cutValues.push_back(0);
      cutValues.push_back(100);
      cutValues.push_back(150);
      cutValues.push_back(200);
      cutValues.push_back(300);
    }
    // Cut values for METStrong (HTjets > 100 GeV) and METWeak (HTjets < 100 GeV)
    else if (signal_region.compare("METStrong") == 0 || signal_region.compare("METWeak") == 0) {
      // onZ regions start at 20 GeV rather than 0 to suppress Z background
      if      (onZ == 0) cutValues.push_back(0);
      else if (onZ == 1) cutValues.push_back(20);
      cutValues.push_back(50);
      cutValues.push_back(75);
    }
    // Cut values for Meff and MeffStrong (MET > 75 GeV)
    if (signal_region.compare("Meff") == 0 || signal_region.compare("MeffStrong") == 0) {
      cutValues.push_back(0);
      cutValues.push_back(150);
      cutValues.push_back(300);
      cutValues.push_back(500);
    }
    return cutValues;
  }

  /// function fills map EventCountsPerSR by looping over all signal regions
  /// and looking if the event falls into this signal region
  void fillEventCountsPerSR(const string& basic_signal_region, int onZ, double HTlep, double eTmiss, double HTjets, double meff) {
    // Get cut values for HTlep, loop over them and add event if cut is passed
    vector cut_values = getCutsPerSignalRegion("HTlep", onZ);
    for (size_t i = 0; i < cut_values.size(); i++) {
      if (HTlep > cut_values[i])
        _eventCountsPerSR[("HTlep_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill();
    }
    // Get cut values for METStrong, loop over them and add event if cut is passed
    cut_values = getCutsPerSignalRegion("METStrong", onZ);
    for (size_t i = 0; i < cut_values.size(); i++) {
      if (eTmiss > cut_values[i] && HTjets > 100.)
        _eventCountsPerSR[("METStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill();
    }
    // Get cut values for METWeak, loop over them and add event if cut is passed
    cut_values = getCutsPerSignalRegion("METWeak", onZ);
    for (size_t i = 0; i < cut_values.size(); i++) {
      if (eTmiss > cut_values[i] && HTjets <= 100.)
        _eventCountsPerSR[("METWeak_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill();
    }
    // Get cut values for Meff, loop over them and add event if cut is passed
    cut_values = getCutsPerSignalRegion("Meff", onZ);
    for (size_t i = 0; i < cut_values.size(); i++) {
      if (meff > cut_values[i])
        _eventCountsPerSR[("Meff_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill();
    }
    // Get cut values for MeffStrong, loop over them and add event if cut is passed
    cut_values = getCutsPerSignalRegion("MeffStrong", onZ);
    for (size_t i = 0; i < cut_values.size(); i++) {
      if (meff > cut_values[i] && eTmiss > 75.)
_eventCountsPerSR[("MeffStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill();
    }
  }

  /// Function returning 4-vector of daughter-particle if it is a tau neutrino
  /// (zero four-momentum if the tau decay has no nu_tau among its direct daughters)
  /// @todo Move to TauFinder and make less HepMC-ish
  FourMomentum get_tau_neutrino_mom(const Particle& p) {
    assert(p.abspid() == PID::TAU);
    const GenVertex* dv = p.genParticle()->end_vertex();
    assert(dv != NULL);
    for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
      if (abs((*pp)->pdg_id()) == PID::NU_TAU) return FourMomentum((*pp)->momentum());
    }
    return FourMomentum();
  }

  /// Function calculating the prong number of taus
  /// (recurses through intermediate status-2 daughters; also flags leptonic tau decays)
  /// @todo Move to TauFinder and make less HepMC-ish
  void get_prong_number(const GenParticle* p, unsigned int& nprong, bool& lep_decaying_tau) {
    assert(p != NULL);
    //const int tau_barcode = p->barcode();
    const GenVertex* dv = p->end_vertex();
    assert(dv != NULL);
    for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
      // If they have status 1 and are charged they will produce a track and the prong number is +1
      if ((*pp)->status() == 1 ) {
        const int id = (*pp)->pdg_id();
        if (Rivet::PID::charge(id) != 0 ) ++nprong;
        // Check if tau decays leptonically
        // @todo Can a tau decay include a tau in its decay daughters?!
        if ((abs(id) == PID::ELECTRON || abs(id) == PID::MUON || abs(id) == PID::TAU) && abs(p->pdg_id()) == PID::TAU) lep_decaying_tau = true;
      }
      // If the status of the daughter particle is 2 it is unstable and the further decays are checked
      else if ((*pp)->status() == 2 ) {
        get_prong_number(*pp, nprong, lep_decaying_tau);
      }
    }
  }

  /// Function giving fiducial lepton efficiency
  /// (flavor codes: 11/12 prompt/tau electron, 13/14 prompt/tau muon, 15/16 1-/3-prong tau)
  double apply_reco_eff(int flavor, const Particle& p) {
    float pt = p.pT()/GeV;
    float eta = p.eta();
    double eff = 0.;
    //double err = 0.;
    if (flavor == 11) { // weight prompt electron -- now including data/MC ID SF in eff.
//float rho = 0.820; float p0 = 7.34; float p1 = 0.8977; //float ep0= 0.5 ; float ep1= 0.0087; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); double avgrate = 0.6867; float wz_ele_eta[] = {0.588717,0.603674,0.666135,0.747493,0.762202,0.675051,0.751606,0.745569,0.665333,0.610432,0.592693,}; //float ewz_ele_eta[] ={0.00292902,0.002476,0.00241209,0.00182319,0.00194339,0.00299785,0.00197339,0.00182004,0.00241793,0.00245997,0.00290394,}; int ibin = 3; if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_ele_eta[ibin]; //double err_eta = ewz_ele_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 12) { // weight electron from tau //float rho = 0.884; float p0 = 6.799; float p1 = 0.842; //float ep0= 0.664; float ep1= 0.016; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); double avgrate = 0.5319; float wz_elet_eta[] = {0.468945,0.465953,0.489545,0.58709,0.59669,0.515829,0.59284,0.575828,0.498181,0.463536,0.481738,}; //float ewz_elet_eta[] ={0.00933795,0.00780868,0.00792679,0.00642083,0.00692652,0.0101568,0.00698452,0.00643524,0.0080002,0.00776238,0.0094699,}; int ibin = 3; if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 
1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_elet_eta[ibin]; //double err_eta = ewz_elet_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 13) {// weight prompt muon //if eta>0.1 float p0 = -18.21; float p1 = 14.83; float p2 = 0.9312; //float ep0= 5.06; float ep1= 1.9; float ep2=0.00069; if ( fabs(eta) < 0.1) { p0 = 7.459; p1 = 2.615; p2 = 0.5138; //ep0 = 10.4; ep1 = 4.934; ep2 = 0.0034; } double arg = ( pt-p0 )/( 2.*p1 ) ; eff = 0.5 * p2 * (1.+erf(arg)); //err = 0.1*eff; } if (flavor == 14) {// weight muon from tau if (fabs(eta) < 0.1) { float p0 = -1.756; float p1 = 12.38; float p2 = 0.4441; //float ep0= 10.39; float ep1= 7.9; float ep2=0.022; double arg = ( pt-p0 )/( 2.*p1 ) ; eff = 0.5 * p2 * (1.+erf(arg)); //err = 0.1*eff; } else { float p0 = 2.102; float p1 = 0.8293; //float ep0= 0.271; float ep1= 0.0083; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); } } if (flavor == 15) {// weight hadronic tau 1p float wz_tau1p[] = {0.0249278,0.146978,0.225049,0.229212,0.21519,0.206152,0.201559,0.197917,0.209249,0.228336,0.193548,}; //float ewz_tau1p[] ={0.00178577,0.00425252,0.00535052,0.00592126,0.00484684,0.00612941,0.00792099,0.0083006,0.0138307,0.015568,0.0501751,}; int ibin = 0; if (pt > 15) ibin = 1; if (pt > 20) ibin = 2; if (pt > 25) ibin = 3; if (pt > 30) ibin = 4; if (pt > 40) ibin = 5; if (pt > 50) ibin = 6; if (pt > 60) ibin = 7; if (pt > 80) ibin = 8; if (pt > 100) ibin = 9; if (pt > 200) ibin = 10; eff = wz_tau1p[ibin]; //err = ewz_tau1p[ibin]; double avgrate = 0.1718; float wz_tau1p_eta[] = {0.162132,0.176393,0.139619,0.178813,0.185144,0.210027,0.203937,0.178688,0.137034,0.164216,0.163713,}; //float ewz_tau1p_eta[] ={0.00706705,0.00617989,0.00506798,0.00525172,0.00581865,0.00865675,0.00599245,0.00529877,0.00506368,0.00617025,0.00726219,}; ibin = 3; 
if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_tau1p_eta[ibin]; //double err_eta = ewz_tau1p_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 16) { //weight hadronic tau 3p float wz_tau3p[] = {0.000587199,0.00247181,0.0013031,0.00280112,}; //float ewz_tau3p[] ={0.000415091,0.000617187,0.000582385,0.00197792,}; int ibin = 0; if (pt > 15) ibin = 1; if (pt > 20) ibin = 2; if (pt > 40) ibin = 3; if (pt > 80) ibin = 4; eff = wz_tau3p[ibin]; //err = ewz_tau3p[ibin]; } return eff; } /// Function giving observed upper limit (visible cross-section) double getUpperLimit(const string& signal_region, bool observed) { map upperLimitsObserved; upperLimitsObserved["HTlep_3l_offZ_cut_0"] = 11.; upperLimitsObserved["HTlep_3l_offZ_cut_100"] = 8.7; upperLimitsObserved["HTlep_3l_offZ_cut_150"] = 4.0; upperLimitsObserved["HTlep_3l_offZ_cut_200"] = 4.4; upperLimitsObserved["HTlep_3l_offZ_cut_300"] = 1.6; upperLimitsObserved["HTlep_2ltau_offZ_cut_0"] = 25.; upperLimitsObserved["HTlep_2ltau_offZ_cut_100"] = 14.; upperLimitsObserved["HTlep_2ltau_offZ_cut_150"] = 6.1; upperLimitsObserved["HTlep_2ltau_offZ_cut_200"] = 3.3; upperLimitsObserved["HTlep_2ltau_offZ_cut_300"] = 1.2; upperLimitsObserved["HTlep_3l_onZ_cut_0"] = 48.; upperLimitsObserved["HTlep_3l_onZ_cut_100"] = 38.; upperLimitsObserved["HTlep_3l_onZ_cut_150"] = 14.; upperLimitsObserved["HTlep_3l_onZ_cut_200"] = 7.2; upperLimitsObserved["HTlep_3l_onZ_cut_300"] = 4.5; upperLimitsObserved["HTlep_2ltau_onZ_cut_0"] = 85.; upperLimitsObserved["HTlep_2ltau_onZ_cut_100"] = 53.; upperLimitsObserved["HTlep_2ltau_onZ_cut_150"] = 
11.0; upperLimitsObserved["HTlep_2ltau_onZ_cut_200"] = 5.2; upperLimitsObserved["HTlep_2ltau_onZ_cut_300"] = 3.0; upperLimitsObserved["METStrong_3l_offZ_cut_0"] = 2.6; upperLimitsObserved["METStrong_3l_offZ_cut_50"] = 2.1; upperLimitsObserved["METStrong_3l_offZ_cut_75"] = 2.1; upperLimitsObserved["METStrong_2ltau_offZ_cut_0"] = 4.2; upperLimitsObserved["METStrong_2ltau_offZ_cut_50"] = 3.1; upperLimitsObserved["METStrong_2ltau_offZ_cut_75"] = 2.6; upperLimitsObserved["METStrong_3l_onZ_cut_20"] = 11.0; upperLimitsObserved["METStrong_3l_onZ_cut_50"] = 6.4; upperLimitsObserved["METStrong_3l_onZ_cut_75"] = 5.1; upperLimitsObserved["METStrong_2ltau_onZ_cut_20"] = 5.9; upperLimitsObserved["METStrong_2ltau_onZ_cut_50"] = 3.4; upperLimitsObserved["METStrong_2ltau_onZ_cut_75"] = 1.2; upperLimitsObserved["METWeak_3l_offZ_cut_0"] = 11.; upperLimitsObserved["METWeak_3l_offZ_cut_50"] = 5.3; upperLimitsObserved["METWeak_3l_offZ_cut_75"] = 3.1; upperLimitsObserved["METWeak_2ltau_offZ_cut_0"] = 23.; upperLimitsObserved["METWeak_2ltau_offZ_cut_50"] = 4.3; upperLimitsObserved["METWeak_2ltau_offZ_cut_75"] = 3.1; upperLimitsObserved["METWeak_3l_onZ_cut_20"] = 41.; upperLimitsObserved["METWeak_3l_onZ_cut_50"] = 16.; upperLimitsObserved["METWeak_3l_onZ_cut_75"] = 8.0; upperLimitsObserved["METWeak_2ltau_onZ_cut_20"] = 80.; upperLimitsObserved["METWeak_2ltau_onZ_cut_50"] = 4.4; upperLimitsObserved["METWeak_2ltau_onZ_cut_75"] = 1.8; upperLimitsObserved["Meff_3l_offZ_cut_0"] = 11.; upperLimitsObserved["Meff_3l_offZ_cut_150"] = 8.1; upperLimitsObserved["Meff_3l_offZ_cut_300"] = 3.1; upperLimitsObserved["Meff_3l_offZ_cut_500"] = 2.1; upperLimitsObserved["Meff_2ltau_offZ_cut_0"] = 25.; upperLimitsObserved["Meff_2ltau_offZ_cut_150"] = 12.; upperLimitsObserved["Meff_2ltau_offZ_cut_300"] = 3.9; upperLimitsObserved["Meff_2ltau_offZ_cut_500"] = 2.2; upperLimitsObserved["Meff_3l_onZ_cut_0"] = 48.; upperLimitsObserved["Meff_3l_onZ_cut_150"] = 37.; upperLimitsObserved["Meff_3l_onZ_cut_300"] = 11.; 
upperLimitsObserved["Meff_3l_onZ_cut_500"] = 4.8; upperLimitsObserved["Meff_2ltau_onZ_cut_0"] = 85.; upperLimitsObserved["Meff_2ltau_onZ_cut_150"] = 28.; upperLimitsObserved["Meff_2ltau_onZ_cut_300"] = 5.9; upperLimitsObserved["Meff_2ltau_onZ_cut_500"] = 1.9; upperLimitsObserved["MeffStrong_3l_offZ_cut_0"] = 3.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_150"] = 3.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_300"] = 2.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_500"] = 2.1; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_0"] = 3.9; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_150"] = 4.0; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_300"] = 2.9; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_500"] = 1.5; upperLimitsObserved["MeffStrong_3l_onZ_cut_0"] = 10.0; upperLimitsObserved["MeffStrong_3l_onZ_cut_150"] = 10.0; upperLimitsObserved["MeffStrong_3l_onZ_cut_300"] = 6.8; upperLimitsObserved["MeffStrong_3l_onZ_cut_500"] = 3.9; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_0"] = 1.6; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_150"] = 1.4; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_300"] = 1.5; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_500"] = 0.9; // Expected upper limits are also given but not used in this analysis map upperLimitsExpected; upperLimitsExpected["HTlep_3l_offZ_cut_0"] = 11.; upperLimitsExpected["HTlep_3l_offZ_cut_100"] = 8.5; upperLimitsExpected["HTlep_3l_offZ_cut_150"] = 4.6; upperLimitsExpected["HTlep_3l_offZ_cut_200"] = 3.6; upperLimitsExpected["HTlep_3l_offZ_cut_300"] = 1.9; upperLimitsExpected["HTlep_2ltau_offZ_cut_0"] = 23.; upperLimitsExpected["HTlep_2ltau_offZ_cut_100"] = 14.; upperLimitsExpected["HTlep_2ltau_offZ_cut_150"] = 6.4; upperLimitsExpected["HTlep_2ltau_offZ_cut_200"] = 3.6; upperLimitsExpected["HTlep_2ltau_offZ_cut_300"] = 1.5; upperLimitsExpected["HTlep_3l_onZ_cut_0"] = 33.; upperLimitsExpected["HTlep_3l_onZ_cut_100"] = 25.; upperLimitsExpected["HTlep_3l_onZ_cut_150"] = 12.; 
upperLimitsExpected["HTlep_3l_onZ_cut_200"] = 6.5; upperLimitsExpected["HTlep_3l_onZ_cut_300"] = 3.1; upperLimitsExpected["HTlep_2ltau_onZ_cut_0"] = 94.; upperLimitsExpected["HTlep_2ltau_onZ_cut_100"] = 61.; upperLimitsExpected["HTlep_2ltau_onZ_cut_150"] = 9.9; upperLimitsExpected["HTlep_2ltau_onZ_cut_200"] = 4.5; upperLimitsExpected["HTlep_2ltau_onZ_cut_300"] = 1.9; upperLimitsExpected["METStrong_3l_offZ_cut_0"] = 3.1; upperLimitsExpected["METStrong_3l_offZ_cut_50"] = 2.4; upperLimitsExpected["METStrong_3l_offZ_cut_75"] = 2.3; upperLimitsExpected["METStrong_2ltau_offZ_cut_0"] = 4.8; upperLimitsExpected["METStrong_2ltau_offZ_cut_50"] = 3.3; upperLimitsExpected["METStrong_2ltau_offZ_cut_75"] = 2.1; upperLimitsExpected["METStrong_3l_onZ_cut_20"] = 8.7; upperLimitsExpected["METStrong_3l_onZ_cut_50"] = 4.9; upperLimitsExpected["METStrong_3l_onZ_cut_75"] = 3.8; upperLimitsExpected["METStrong_2ltau_onZ_cut_20"] = 7.3; upperLimitsExpected["METStrong_2ltau_onZ_cut_50"] = 2.8; upperLimitsExpected["METStrong_2ltau_onZ_cut_75"] = 1.5; upperLimitsExpected["METWeak_3l_offZ_cut_0"] = 10.; upperLimitsExpected["METWeak_3l_offZ_cut_50"] = 4.7; upperLimitsExpected["METWeak_3l_offZ_cut_75"] = 3.0; upperLimitsExpected["METWeak_2ltau_offZ_cut_0"] = 21.; upperLimitsExpected["METWeak_2ltau_offZ_cut_50"] = 4.0; upperLimitsExpected["METWeak_2ltau_offZ_cut_75"] = 2.6; upperLimitsExpected["METWeak_3l_onZ_cut_20"] = 30.; upperLimitsExpected["METWeak_3l_onZ_cut_50"] = 10.; upperLimitsExpected["METWeak_3l_onZ_cut_75"] = 5.4; upperLimitsExpected["METWeak_2ltau_onZ_cut_20"] = 88.; upperLimitsExpected["METWeak_2ltau_onZ_cut_50"] = 5.5; upperLimitsExpected["METWeak_2ltau_onZ_cut_75"] = 2.2; upperLimitsExpected["Meff_3l_offZ_cut_0"] = 11.; upperLimitsExpected["Meff_3l_offZ_cut_150"] = 8.8; upperLimitsExpected["Meff_3l_offZ_cut_300"] = 3.7; upperLimitsExpected["Meff_3l_offZ_cut_500"] = 2.1; upperLimitsExpected["Meff_2ltau_offZ_cut_0"] = 23.; upperLimitsExpected["Meff_2ltau_offZ_cut_150"] = 13.; 
upperLimitsExpected["Meff_2ltau_offZ_cut_300"] = 4.9; upperLimitsExpected["Meff_2ltau_offZ_cut_500"] = 2.4; upperLimitsExpected["Meff_3l_onZ_cut_0"] = 33.; upperLimitsExpected["Meff_3l_onZ_cut_150"] = 25.; upperLimitsExpected["Meff_3l_onZ_cut_300"] = 9.; upperLimitsExpected["Meff_3l_onZ_cut_500"] = 3.9; upperLimitsExpected["Meff_2ltau_onZ_cut_0"] = 94.; upperLimitsExpected["Meff_2ltau_onZ_cut_150"] = 35.; upperLimitsExpected["Meff_2ltau_onZ_cut_300"] = 6.8; upperLimitsExpected["Meff_2ltau_onZ_cut_500"] = 2.5; upperLimitsExpected["MeffStrong_3l_offZ_cut_0"] = 3.9; upperLimitsExpected["MeffStrong_3l_offZ_cut_150"] = 3.9; upperLimitsExpected["MeffStrong_3l_offZ_cut_300"] = 3.0; upperLimitsExpected["MeffStrong_3l_offZ_cut_500"] = 2.0; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_0"] = 3.8; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_150"] = 3.9; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_300"] = 3.1; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_500"] = 1.6; upperLimitsExpected["MeffStrong_3l_onZ_cut_0"] = 6.9; upperLimitsExpected["MeffStrong_3l_onZ_cut_150"] = 7.1; upperLimitsExpected["MeffStrong_3l_onZ_cut_300"] = 4.9; upperLimitsExpected["MeffStrong_3l_onZ_cut_500"] = 3.0; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_0"] = 2.4; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_150"] = 2.5; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_300"] = 2.0; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_500"] = 1.1; if (observed) return upperLimitsObserved[signal_region]; else return upperLimitsExpected[signal_region]; } /// Function checking if there is an OSSF lepton pair or a combination of 3 leptons with an invariant mass close to the Z mass /// @todo Should the reference Z mass be 91.2? 
int isonZ (const Particles& particles) { int onZ = 0; double best_mass_2 = 999.; double best_mass_3 = 999.; // Loop over all 2 particle combinations to find invariant mass of OSSF pair closest to Z mass for ( const Particle& p1 : particles ) { for ( const Particle& p2 : particles ) { double mass_difference_2_old = fabs(91.0 - best_mass_2); double mass_difference_2_new = fabs(91.0 - (p1.momentum() + p2.momentum()).mass()/GeV); // If particle combination is OSSF pair calculate mass difference to Z mass if ( (p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169) ) { // Get invariant mass closest to Z mass if (mass_difference_2_new < mass_difference_2_old) best_mass_2 = (p1.momentum() + p2.momentum()).mass()/GeV; // In case there is an OSSF pair take also 3rd lepton into account (e.g. from FSR and photon to electron conversion) for ( const Particle & p3 : particles ) { double mass_difference_3_old = fabs(91.0 - best_mass_3); double mass_difference_3_new = fabs(91.0 - (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV); if (mass_difference_3_new < mass_difference_3_old) best_mass_3 = (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV; } } } } // Pick the minimum invariant mass of the best OSSF pair combination and the best 3 lepton combination // If this mass is in a 20 GeV window around the Z mass, the event is classified as onZ double best_mass = min(best_mass_2, best_mass_3); if (fabs(91.0 - best_mass) < 20) onZ = 1; return onZ; } //@} private: /// Histograms //@{ Histo1DPtr _h_HTlep_all, _h_HTjets_all, _h_MET_all, _h_Meff_all; Histo1DPtr _h_pt_1_3l, _h_pt_2_3l, _h_pt_3_3l, _h_pt_1_2ltau, _h_pt_2_2ltau, _h_pt_3_2ltau; Histo1DPtr _h_e_n, _h_mu_n, _h_tau_n; Histo1DPtr _h_excluded; //@} /// Fiducial efficiencies to model the effects of the ATLAS detector bool _use_fiducial_lepton_efficiency; /// List of signal regions and event counts per signal region vector _signal_regions; map _eventCountsPerSR; }; DECLARE_RIVET_PLUGIN(ATLAS_2012_I1204447); } diff 
--git a/analyses/pluginATLAS/ATLAS_2013_I1243871.cc b/analyses/pluginATLAS/ATLAS_2013_I1243871.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1243871.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1243871.cc @@ -1,278 +1,278 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Tools/ParticleIdUtils.hh" #include "Rivet/Particle.hh" namespace Rivet { class ATLAS_2013_I1243871 : public Analysis { public: /// Constructor ATLAS_2013_I1243871() : Analysis("ATLAS_2013_I1243871") { } /// Book histograms and initialise projections before the run void init() { // Set up projections - const FinalState fs(-4.5, 4.5); + const FinalState fs((Cuts::etaIn(-4.5, 4.5))); declare(fs, "ALL_FS"); /// Get electrons from truth record IdentifiedFinalState elec_fs(Cuts::abseta < 2.47 && Cuts::pT > 25*GeV); elec_fs.acceptIdPair(PID::ELECTRON); declare(elec_fs, "ELEC_FS"); /// Get muons which pass the initial kinematic cuts: IdentifiedFinalState muon_fs(Cuts::abseta < 2.5 && Cuts::pT > 20*GeV); muon_fs.acceptIdPair(PID::MUON); declare(muon_fs, "MUON_FS"); // Final state used as input for jet-finding. 
// We include everything except the muons and neutrinos VetoedFinalState jet_input(fs); jet_input.vetoNeutrinos(); jet_input.addVetoPairId(PID::MUON); declare(jet_input, "JET_INPUT"); // Get the jets FastJets jets(jet_input, FastJets::ANTIKT, 0.4); declare(jets, "JETS"); // Book histograms for (size_t d = 0; d < 5; ++d) { book(_p_b_rho[d] ,d+1, 1, 1); book(_p_l_rho[d] ,d+1, 2, 1); book(_p_b_Psi[d] ,d+1, 1, 2); book(_p_l_Psi[d] ,d+1, 2, 2); } } /// Perform the per-event analysis void analyze(const Event& event) { /// Get the various sets of final state particles const Particles& elecFS = apply(event, "ELEC_FS").particlesByPt(); const Particles& muonFS = apply(event, "MUON_FS").particlesByPt(); // Get all jets with pT > 7 GeV (ATLAS standard jet collection) /// @todo Why rewrite the jets collection as a vector of pointers? const Jets& jets = apply(event, "JETS").jetsByPt(7*GeV); vector allJets; for(const Jet& j : jets) allJets.push_back(&j); // Keep any jets that pass the pt cut vector pt_jets; for (const Jet* j : allJets) { /// @todo Use direct kinematics access const double pt = j->momentum().pT(); const double eta = j->momentum().eta(); if (pt > 25*GeV && fabs(eta) < 2.5) pt_jets.push_back(j); } // Remove jets too close to an electron vector good_jets; for (const Jet* j : pt_jets) { bool isElectron = 0; for (const Particle& e : elecFS) { const double elec_jet_dR = deltaR(e.momentum(), j->momentum()); if (elec_jet_dR < 0.2) { isElectron = true; break; } } if (!isElectron) good_jets.push_back(j); } // Classify the event type const size_t nElec = elecFS.size(); const size_t nMuon = muonFS.size(); bool isSemilepton = false, isDilepton = false; if (nElec == 1 && nMuon == 0) { isSemilepton = true; } else if (nElec == 0 && nMuon == 1) { isSemilepton = true; } else if (nElec == 2 && nMuon == 0) { if (charge(elecFS[0]) != charge(elecFS[1])) isDilepton = true; } else if (nElec == 1 && nMuon == 1) { if (charge(elecFS[0]) != charge(muonFS[0])) isDilepton = true; } else if 
(nElec == 0 && nMuon == 2) { if (charge(muonFS[0]) != charge(muonFS[1])) isDilepton = true; } const bool isGoodEvent = (isSemilepton && good_jets.size() >= 4) || (isDilepton && good_jets.size() >= 2); if (!isGoodEvent) vetoEvent; // Select b-hadrons /// @todo Use built-in identification on Particle, avoid HepMC vector b_hadrons; vector allParticles = particles(event.genEvent()); for (size_t i = 0; i < allParticles.size(); i++) { const GenParticle* p = allParticles.at(i); if ( !(PID::isHadron( p->pdg_id() ) && PID::hasBottom( p->pdg_id() )) ) continue; if (p->momentum().perp() < 5*GeV) continue; b_hadrons.push_back(p); } // Select b-jets as those containing a b-hadron /// @todo Use built-in dR < 0.3 Jet tagging, avoid HepMC vector b_jets; for (const Jet* j : good_jets) { bool isbJet = false; for (const GenParticle* b : b_hadrons) { /// @todo Use direct momentum accessor / delta functions const FourMomentum hadron = b->momentum(); const double hadron_jet_dR = deltaR(j->momentum(), hadron); if (hadron_jet_dR < 0.3) { isbJet = true; break; } } // Check if it is overlapped to any other jet bool isOverlapped = false; for (const Jet* k : allJets) { if (j == k) continue; double dRjj = deltaR(j->momentum(), k->momentum()); if (dRjj < 0.8) { isOverlapped = true; break; } } if (isbJet && !isOverlapped) b_jets.push_back(j); } MSG_DEBUG(b_jets.size() << " b-jets selected"); // Select light-jets as the pair of non-b-jets with invariant mass closest to the W mass /// @todo Use built-in b-tagging (dR < 0.3 defn), avoid HepMC const double nominalW = 80.4*GeV; double deltaM = 500*GeV; const Jet* light1 = NULL; const Jet* light2 = NULL; // NB: const Jets, not const pointers! 
for (const Jet* i : good_jets) { bool isbJet1 = false; for (const GenParticle* b : b_hadrons) { /// @todo Use direct momentum accessor / delta functions const FourMomentum hadron = b->momentum(); const double hadron_jet_dR = deltaR(i->momentum(), hadron); if (hadron_jet_dR < 0.3) { isbJet1 = true; break; } } if (isbJet1) continue; for (const Jet* j : good_jets) { bool isbJet2 = false; for (const GenParticle* b : b_hadrons) { FourMomentum hadron = b->momentum(); double hadron_jet_dR = deltaR(j->momentum(), hadron); if (hadron_jet_dR < 0.3) { isbJet2 = true; break; } } if (isbJet2) continue; double invMass = (i->momentum()+j->momentum()).mass(); if (fabs(invMass-nominalW) < deltaM){ deltaM = fabs(invMass - nominalW); light1 = i; light2 = j; } } } // Check that both jets are not overlapped, and populate the light jets list vector light_jets; const bool hasGoodLight = light1 != NULL && light2 != NULL && light1 != light2; if (hasGoodLight) { bool isOverlap1 = false, isOverlap2 = false; for (const Jet* j : allJets) { if (light1 == j) continue; const double dR1j = deltaR(light1->momentum(), j->momentum()); if (dR1j < 0.8) { isOverlap1 = true; break; } } for (const Jet* j : allJets) { if (light2 == j) continue; const double dR2j = deltaR(light2->momentum(), j->momentum()); if (dR2j < 0.8) { isOverlap2 = true; break; } } if (!isOverlap1 && !isOverlap2) { light_jets.push_back(light1); light_jets.push_back(light2); } } MSG_DEBUG(light_jets.size() << " light jets selected"); // Calculate the jet shapes /// @todo Use C++11 vector/array initialization const double binWidth = 0.04; // -> 10 bins from 0.0-0.4 vector ptEdges; ptEdges += {{ 30, 40, 50, 70, 100, 150 }}; // b-jet shapes MSG_DEBUG("Filling b-jet shapes"); for (const Jet* bJet : b_jets) { // Work out jet pT bin and skip this jet if out of range const double jetPt = bJet->momentum().pT(); MSG_DEBUG("Jet pT = " << jetPt/GeV << " GeV"); if (!inRange(jetPt/GeV, 30., 150.)) continue; /// @todo Use YODA bin index lookup tools 
size_t ipt; for (ipt = 0; ipt < 5; ++ipt) if (inRange(jetPt/GeV, ptEdges[ipt], ptEdges[ipt+1])) break; MSG_DEBUG("Jet pT index = " << ipt); // Calculate jet shape vector rings(10, 0); for (const Particle& p : bJet->particles()) { const double dR = deltaR(bJet->momentum(), p.momentum()); const size_t idR = (size_t) floor(dR/binWidth); for (size_t i = idR; i < 10; ++i) rings[i] += p.pT(); } // Fill each dR bin of the histos for this jet pT for (int iBin = 0; iBin < 10; ++iBin) { const double rcenter = 0.02 + iBin*binWidth; const double rhoval = (iBin != 0 ? (rings[iBin]-rings[iBin-1]) : rings[iBin]) / binWidth / rings[9]; const double psival = rings[iBin] / rings[9]; MSG_DEBUG(rcenter << ", " << rhoval << ", " << psival); _p_b_rho[ipt]->fill(rcenter, rhoval); _p_b_Psi[ipt]->fill(rcenter, psival); } } // Light jet shapes MSG_DEBUG("Filling light jet shapes"); for (const Jet* lJet : light_jets) { // Work out jet pT bin and skip this jet if out of range const double jetPt = lJet->momentum().pT(); MSG_DEBUG("Jet pT = " << jetPt/GeV << " GeV"); if (!inRange(jetPt/GeV, 30., 150.)) continue; /// @todo Use YODA bin index lookup tools size_t ipt; for (ipt = 0; ipt < 5; ++ipt) if (inRange(jetPt/GeV, ptEdges[ipt], ptEdges[ipt+1])) break; MSG_DEBUG("Jet pT index = " << ipt); // Calculate jet shape vector rings(10, 0); for (const Particle& p : lJet->particles()) { const double dR = deltaR(lJet->momentum(), p.momentum()); const size_t idR = (size_t) floor(dR/binWidth); for (size_t i = idR; i < 10; ++i) rings[i] += p.pT(); } // Fill each dR bin of the histos for this jet pT for (int iBin = 0; iBin < 10; ++iBin) { const double rcenter = 0.02 + iBin*binWidth; const double rhoval = (iBin != 0 ? 
(rings[iBin]-rings[iBin-1]) : rings[iBin]) / binWidth / rings[9]; const double psival = rings[iBin] / rings[9]; _p_l_rho[ipt]->fill(rcenter, rhoval); _p_l_Psi[ipt]->fill(rcenter, psival); } } } private: Profile1DPtr _p_b_rho[5]; Profile1DPtr _p_l_rho[5]; Profile1DPtr _p_b_Psi[5]; Profile1DPtr _p_l_Psi[5]; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1243871); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1244522.cc b/analyses/pluginATLAS/ATLAS_2013_I1244522.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1244522.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1244522.cc @@ -1,163 +1,163 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Measurement of isolated gamma + jet + X differential cross-sections class ATLAS_2013_I1244522 : public Analysis { public: // Constructor ATLAS_2013_I1244522() : Analysis("ATLAS_2013_I1244522") { } // Book histograms and initialise projections before the run void init() { FinalState fs; // Voronoi eta-phi tassellation with KT jets, for ambient energy density calculation FastJets fj(fs, FastJets::KT, 0.5); fj.useJetArea(new fastjet::AreaDefinition(fastjet::VoronoiAreaSpec())); declare(fj, "KtJetsD05"); // Leading photon - LeadingParticlesFinalState photonfs(PromptFinalState(FinalState(-2.37, 2.37, 45.0*GeV))); + LeadingParticlesFinalState photonfs(PromptFinalState(FinalState((Cuts::etaIn(-2.37, 2.37) && Cuts::pT >= 45.0*GeV)))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "JetFS"); // Jets FastJets jetpro(vfs, FastJets::ANTIKT, 0.6); jetpro.useInvisibles(); declare(jetpro, "Jets"); // Histograms book(_h_ph_pt ,1, 1, 1); 
book(_h_jet_pt ,2, 1, 1); book(_h_jet_rap ,3, 1, 1); book(_h_dphi_phjet ,4, 1, 1); book(_h_costheta_biased_phjet ,5, 1, 1); book(_h_mass_phjet ,6, 1, 1); book(_h_costheta_phjet ,7, 1, 1); } size_t getEtaBin(double eta) const { const double aeta = fabs(eta); return binIndex(aeta, _eta_bins_areaoffset); } // Perform the per-event analysis void analyze(const Event& event) { // Get the photon Particles photons = apply(event, "LeadingPhoton").particles(); if (photons.size() != 1 ) vetoEvent; const Particle& photon = photons[0]; if (inRange(photon.abseta(), 1.37, 1.52)) vetoEvent; //Compute isolation energy in cone of radius .4 around photon (all particles) FourMomentum mom_in_EtCone; const Particles& fs = apply(event, "JetFS").particles(); for (const Particle& p : fs) { // Check if it's outside the cone of 0.4 if (deltaR(photon, p) >= 0.4) continue; // Increment isolation energy mom_in_EtCone += p.momentum(); } // Get the jets Jets alljets = apply(event, "Jets").jetsByPt(40.0*GeV); Jets jets; for (const Jet& jet : alljets) if (deltaR(photon, jet) > 1.0) jets += jet; if (jets.empty()) vetoEvent; Jet leadingJet = jets[0]; if (leadingJet.absrap() > 2.37) vetoEvent; // Get the area-filtered jet inputs for computing median energy density, etc. vector ptDensity; vector< vector > ptDensities(_eta_bins_areaoffset.size()-1); FastJets fast_jets = apply(event, "KtJetsD05"); const auto clust_seq_area = fast_jets.clusterSeqArea(); for (const Jet& jet : fast_jets.jets()) { const double area = clust_seq_area->area(jet); if (area > 1e-4 && jet.abseta() < _eta_bins_areaoffset.back()) ptDensities.at( getEtaBin(jet.abseta()) ).push_back(jet.pT()/area); } // Compute the median energy density, etc. for (size_t b = 0; b < _eta_bins_areaoffset.size() - 1; ++b) { const int njets = ptDensities[b].size(); ptDensity += (njets > 0) ? 
median(ptDensities[b]) : 0; } // Compute the isolation energy correction (cone area*energy density) const double etCone_area = PI*sqr(0.4) - (5.0*.025)*(7.0*PI/128.); const double correction = ptDensity[getEtaBin(photon.abseta())] * etCone_area; // Apply isolation cut on area-corrected value if (mom_in_EtCone.Et() - correction >= 4*GeV) vetoEvent; // Fill histos const double dy = deltaRap(photon, leadingJet); const double costheta_yj = tanh(dy/2); _h_ph_pt->fill(photon.pT()/GeV); _h_jet_pt->fill(leadingJet.pT()/GeV); _h_jet_rap->fill(leadingJet.absrap()); _h_dphi_phjet->fill(deltaPhi(photon, leadingJet)); _h_costheta_biased_phjet->fill(costheta_yj); if (costheta_yj < 0.829022) { const FourMomentum yj = photon.momentum() + leadingJet.momentum(); if (yj.mass() > 160.939*GeV) { if (fabs(photon.eta() + leadingJet.rap()) < 2.37) { _h_mass_phjet->fill(yj.mass()/GeV); _h_costheta_phjet->fill(costheta_yj); } } } } /// Normalise histograms etc., after the run void finalize() { const double sf = crossSection() / picobarn / sumOfWeights(); scale(_h_ph_pt, sf); scale(_h_jet_pt, sf); scale(_h_jet_rap, sf); scale(_h_dphi_phjet, sf); scale(_h_costheta_biased_phjet, sf); scale(_h_mass_phjet, sf); scale(_h_costheta_phjet, sf); } private: Histo1DPtr _h_ph_pt, _h_jet_pt, _h_jet_rap, _h_dphi_phjet, _h_costheta_biased_phjet, _h_mass_phjet, _h_costheta_phjet; const vector _eta_bins_areaoffset = {0.0, 1.5, 3.0}; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1244522); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1298811.cc b/analyses/pluginATLAS/ATLAS_2014_I1298811.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1298811.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1298811.cc @@ -1,196 +1,196 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2014_I1298811 : public Analysis { public: ATLAS_2014_I1298811() : Analysis("ATLAS_2014_I1298811") { } void init() { // 
Configure projections - const FinalState fs(-4.8, 4.8, 0*MeV); + const FinalState fs((Cuts::etaIn(-4.8, 4.8))); declare(fs, "FS"); const FastJets jets(fs, FastJets::ANTIKT, 0.4); declare(jets, "Jets"); // Book histograms for (size_t itopo = 0; itopo < 2; ++itopo) { // Profiles for (size_t iregion = 0; iregion < 3; ++iregion) { book(_p_ptsumch_vs_ptlead[itopo][iregion] ,1+iregion, 1, itopo+1); book(_p_nch_vs_ptlead[itopo][iregion] ,4+iregion, 1, itopo+1); } book(_p_etsum25_vs_ptlead_trans[itopo] ,7, 1, itopo+1); book(_p_etsum48_vs_ptlead_trans[itopo] ,8, 1, itopo+1); book(_p_chratio_vs_ptlead_trans[itopo] ,9, 1, itopo+1); book(_p_ptmeanch_vs_ptlead_trans[itopo] ,10, 1, itopo+1); // 1D histos for (size_t iregion = 0; iregion < 3; ++iregion) { for (size_t ipt = 0; ipt < 4; ++ipt) { book(_h_ptsumch[ipt][itopo][iregion] ,13+3*ipt+iregion, 1, itopo+1); book(_h_nch[ipt][itopo][iregion] ,25+3*ipt+iregion, 1, itopo+1); } } } book(_p_ptmeanch_vs_nch_trans[0], 11, 1, 1); book(_p_ptmeanch_vs_nch_trans[1], 12, 1, 1); } void analyze(const Event& event) { // Find the jets with pT > 20 GeV and *rapidity* within 2.8 /// @todo Use Cuts instead rather than an eta cut in the proj and a y cut after const Jets alljets = apply(event, "Jets").jetsByPt(20*GeV); Jets jets; for (const Jet& j : alljets) if (j.absrap() < 2.8) jets.push_back(j); // Require at least one jet in the event if (jets.empty()) vetoEvent; // Identify the leading jet and its phi and pT const FourMomentum plead = jets[0].momentum(); const double philead = plead.phi(); const double etalead = plead.eta(); const double ptlead = plead.pT(); MSG_DEBUG("Leading object: pT = " << ptlead << ", eta = " << etalead << ", phi = " << philead); // Sum particle properties in the transverse regions int tmpnch[2] = {0,0}; double tmpptsum[2] = {0,0}; double tmpetsum48[2] = {0,0}; double tmpetsum25[2] = {0,0}; const Particles particles = apply(event, "FS").particles(); for (const Particle& p : particles) { // Only consider the transverse 
region(s), not toward or away if (!inRange(deltaPhi(p.phi(), philead), PI/3.0, TWOPI/3.0)) continue; // Work out which transverse side this particle is on const size_t iside = (mapAngleMPiToPi(p.phi() - philead) > 0) ? 0 : 1; MSG_TRACE(p.phi() << " vs. " << philead << ": " << iside); // Charged or neutral particle? const bool charged = PID::charge3(p.pid()) != 0; // Track observables if (charged && fabs(p.eta()) < 2.5 && p.pT() > 500*MeV) { tmpnch[iside] += 1; tmpptsum[iside] += p.pT(); } // Cluster observables if ((charged && p.p3().mod() > 200*MeV) || (!charged && p.p3().mod() > 500*MeV)) { tmpetsum48[iside] += p.pT(); if (fabs(p.eta()) < 2.5) tmpetsum25[iside] += p.pT(); } } // Construct tot/max/min counts (for trans/max/min, indexed by iregion) const int nch[3] = { tmpnch[0] + tmpnch[1], std::max(tmpnch[0], tmpnch[1]), std::min(tmpnch[0], tmpnch[1]) }; const double ptsum[3] = { tmpptsum[0] + tmpptsum[1], std::max(tmpptsum[0], tmpptsum[1]), std::min(tmpptsum[0], tmpptsum[1]) }; const double etsum48[3] = { tmpetsum48[0] + tmpetsum48[1], std::max(tmpetsum48[0], tmpetsum48[1]), std::min(tmpetsum48[0], tmpetsum48[1]) }; const double etsum25[3] = { tmpetsum25[0] + tmpetsum25[1], std::max(tmpetsum25[0], tmpetsum25[1]), std::min(tmpetsum25[0], tmpetsum25[1]) }; ////////////////////////////////////////////////////////// // Now fill the histograms with the computed quantities // phi sizes of each trans/max/min region (for indexing by iregion) const double dphi[3] = { 2*PI/3.0, PI/3.0, PI/3.0 }; // Loop over inclusive jet and exclusive dijet configurations for (size_t itopo = 0; itopo < 2; ++itopo) { // Exit early if in the exclusive dijet iteration and the exclusive dijet cuts are not met if (itopo == 1) { if (jets.size() != 2) continue; const FourMomentum psublead = jets[1].momentum(); // Delta(phi) cut const double phisublead = psublead.phi(); if (deltaPhi(philead, phisublead) < 2.5) continue; // pT fraction cut const double ptsublead = psublead.pT(); if (ptsublead < 
0.5*ptlead) continue; MSG_DEBUG("Exclusive dijet event"); } // Plot profiles and distributions which have no max/min region definition _p_etsum25_vs_ptlead_trans[itopo]->fill(ptlead/GeV, etsum25[0]/5.0/dphi[0]/GeV); _p_etsum48_vs_ptlead_trans[itopo]->fill(ptlead/GeV, etsum48[0]/9.6/dphi[0]/GeV); if (etsum25[0] > 0) { _p_chratio_vs_ptlead_trans[itopo]->fill(ptlead/GeV, ptsum[0]/etsum25[0]); } const double ptmean = safediv(ptsum[0], nch[0], -1); ///< Return -1 if div by zero if (ptmean >= 0) { _p_ptmeanch_vs_ptlead_trans[itopo]->fill(ptlead/GeV, ptmean/GeV); _p_ptmeanch_vs_nch_trans[itopo]->fill(nch[0], ptmean/GeV); } // Plot remaining profile and 1D observables, which are defined in all 3 tot/max/min regions for (size_t iregion = 0; iregion < 3; ++iregion) { _p_ptsumch_vs_ptlead[itopo][iregion]->fill(ptlead/GeV, ptsum[iregion]/5.0/dphi[iregion]/GeV); _p_nch_vs_ptlead[itopo][iregion]->fill(ptlead/GeV, nch[iregion]/5.0/dphi[iregion]); for (size_t ipt = 0; ipt < 4; ++ipt) { if (ipt == 1 && !inRange(ptlead/GeV, 20, 60)) continue; if (ipt == 2 && !inRange(ptlead/GeV, 60, 210)) continue; if (ipt == 3 && ptlead/GeV < 210) continue; _h_ptsumch[ipt][itopo][iregion]->fill(ptsum[iregion]/5.0/dphi[iregion]/GeV); _h_nch[ipt][itopo][iregion]->fill(nch[iregion]/5.0/dphi[iregion]); } } } } void finalize() { for (size_t iregion = 0; iregion < 3; ++iregion) { for (size_t itopo = 0; itopo < 2; ++itopo) { for (size_t ipt = 0; ipt < 4; ++ipt) { normalize(_h_ptsumch[ipt][itopo][iregion], 1.0); normalize(_h_nch[ipt][itopo][iregion], 1.0); } } } } private: /// @name Histogram arrays //@{ Profile1DPtr _p_ptsumch_vs_ptlead[2][3]; Profile1DPtr _p_nch_vs_ptlead[2][3]; Profile1DPtr _p_ptmeanch_vs_ptlead_trans[2]; Profile1DPtr _p_etsum25_vs_ptlead_trans[2]; Profile1DPtr _p_etsum48_vs_ptlead_trans[2]; Profile1DPtr _p_chratio_vs_ptlead_trans[2]; Profile1DPtr _p_ptmeanch_vs_nch_trans[2]; Histo1DPtr _h_ptsumch[4][2][3]; Histo1DPtr _h_nch[4][2][3]; //@} }; // The hook for the plugin system 
DECLARE_RIVET_PLUGIN(ATLAS_2014_I1298811); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1327229.cc b/analyses/pluginATLAS/ATLAS_2014_I1327229.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1327229.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1327229.cc @@ -1,1326 +1,1326 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2014_I1327229 : public Analysis { public: /// Constructor ATLAS_2014_I1327229() : Analysis("ATLAS_2014_I1327229") { } /// Book histograms and initialise projections before the run void init() { // To calculate the acceptance without having the fiducial lepton efficiencies included, this part can be turned off _use_fiducial_lepton_efficiency = true; // Random numbers for simulation of ATLAS detector reconstruction efficiency /// @todo Replace with SmearedParticles etc. srand(160385); // Read in all signal regions _signal_regions = getSignalRegions(); // Set number of events per signal region to 0 for (size_t i = 0; i < _signal_regions.size(); i++) book(_eventCountsPerSR[_signal_regions[i]], "_eventCountsPerSR_" + _signal_regions[i]); // Final state including all charged and neutral particles - const FinalState fs(-5.0, 5.0, 1*GeV); + const FinalState fs((Cuts::etaIn(-5.0, 5.0) && Cuts::pT >= 1*GeV)); declare(fs, "FS"); // Final state including all charged particles - declare(ChargedFinalState(-2.5, 2.5, 1*GeV), "CFS"); + declare(ChargedFinalState((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 1*GeV)), "CFS"); // Final state including all visible particles (to calculate MET, Jets etc.) 
- declare(VisibleFinalState(-5.0,5.0),"VFS"); + declare(VisibleFinalState((Cuts::etaIn(-5.0,5.0))),"VFS"); // Final state including all AntiKt 04 Jets VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // Final state including all unstable particles (including taus) declare(UnstableFinalState(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV),"UFS"); // Final state including all electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 10*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // Final state including all muons IdentifiedFinalState muons(Cuts::abseta < 2.5 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); /// Book histograms: book(_h_HTlep_all ,"HTlep_all", 30,0,3000); book(_h_HTjets_all ,"HTjets_all", 30,0,3000); book(_h_MET_all ,"MET_all", 30,0,1500); book(_h_Meff_all ,"Meff_all", 50,0,5000); book(_h_min_pT_all ,"min_pT_all", 50, 0, 2000); book(_h_mT_all ,"mT_all", 50, 0, 2000); book(_h_e_n ,"e_n", 10, -0.5, 9.5); book(_h_mu_n ,"mu_n", 10, -0.5, 9.5); book(_h_tau_n ,"tau_n", 10, -0.5, 9.5); book(_h_pt_1_3l ,"pt_1_3l", 100, 0, 2000); book(_h_pt_2_3l ,"pt_2_3l", 100, 0, 2000); book(_h_pt_3_3l ,"pt_3_3l", 100, 0, 2000); book(_h_pt_1_2ltau ,"pt_1_2ltau", 100, 0, 2000); book(_h_pt_2_2ltau ,"pt_2_2ltau", 100, 0, 2000); book(_h_pt_3_2ltau ,"pt_3_2ltau", 100, 0, 2000); book(_h_excluded ,"excluded", 2, -0.5, 1.5); } /// Perform the per-event analysis void analyze(const Event& event) { // Muons Particles muon_candidates; const Particles charged_tracks = apply(event, "CFS").particles(); const Particles visible_particles = apply(event, "VFS").particles(); for (const Particle& mu : apply(event, "muons").particlesByPt() ) { // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of muon itself) double pTinCone = -mu.pT(); for (const Particle& track : charged_tracks ) { if (deltaR(mu.momentum(),track.momentum()) < 0.3 ) pTinCone += track.pT(); } 
// Calculate eTCone30 variable (pT of all visible particles within dR<0.3) double eTinCone = 0.; for (const Particle& visible_particle : visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(mu.momentum(),visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reconstruction int muon_id = 13; if (mu.hasAncestor(PID::TAU) || mu.hasAncestor(-PID::TAU)) muon_id = 14; const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(muon_id,mu) : 1.0; const bool keep_muon = rand()/static_cast(RAND_MAX)<=eff; // Keep muon if pTCone30/pT < 0.15 and eTCone30/pT < 0.2 and reconstructed if (keep_muon && pTinCone/mu.pT() <= 0.1 && eTinCone/mu.pT() < 0.1) muon_candidates.push_back(mu); } // Electrons Particles electron_candidates; for (const Particle& e : apply(event, "elecs").particlesByPt() ) { // Neglect electrons in crack regions if (inRange(e.abseta(), 1.37, 1.52)) continue; // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of electron itself) double pTinCone = -e.pT(); for (const Particle& track : charged_tracks) { if (deltaR(e.momentum(), track.momentum()) < 0.3 ) pTinCone += track.pT(); } // Calculate eTCone30 variable (pT of all visible particles (except muons) within dR<0.3) double eTinCone = 0.; for (const Particle& visible_particle : visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(e.momentum(),visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reconstruction int elec_id = 11; if (e.hasAncestor(15) || e.hasAncestor(-15)) elec_id = 12; const double eff = (_use_fiducial_lepton_efficiency) ? 
apply_reco_eff(elec_id,e) : 1.0; const bool keep_elec = rand()/static_cast(RAND_MAX)<=eff; // Keep electron if pTCone30/pT < 0.13 and eTCone30/pT < 0.2 and reconstructed if (keep_elec && pTinCone/e.pT() <= 0.1 && eTinCone/e.pT() < 0.1) electron_candidates.push_back(e); } // Taus Particles tau_candidates; for (const Particle& tau : apply(event, "UFS").particles() ) { // Only pick taus out of all unstable particles if ( tau.abspid() != PID::TAU) continue; // Check that tau has decayed into daughter particles if (tau.genParticle()->end_vertex() == 0) continue; // Calculate visible tau momentum using the tau neutrino momentum in the tau decay FourMomentum daughter_tau_neutrino_momentum = get_tau_neutrino_momentum(tau); Particle tau_vis = tau; tau_vis.setMomentum(tau.momentum()-daughter_tau_neutrino_momentum); // keep only taus in certain eta region and above 15 GeV of visible tau pT if ( tau_vis.pT()/GeV <= 15.0 || tau_vis.abseta() > 2.5) continue; // Get prong number (number of tracks) in tau decay and check if tau decays leptonically unsigned int nprong = 0; bool lep_decaying_tau = false; get_prong_number(tau.genParticle(),nprong,lep_decaying_tau); // Apply reconstruction efficiency and simulate reconstruction int tau_id = 15; if (nprong == 1) tau_id = 15; else if (nprong == 3) tau_id = 16; const double eff = (_use_fiducial_lepton_efficiency) ? 
apply_reco_eff(tau_id,tau_vis) : 1.0; const bool keep_tau = rand()/static_cast(RAND_MAX)<=eff; // Keep tau if nprong = 1, it decays hadronically and it is reconstructed if ( !lep_decaying_tau && nprong == 1 && keep_tau) tau_candidates.push_back(tau_vis); } // Jets (all anti-kt R=0.4 jets with pT > 30 GeV and eta < 4.9 Jets jet_candidates; for (const Jet& jet : apply(event, "AntiKtJets04").jetsByPt(30.0*GeV) ) { if (jet.abseta() < 4.9 ) jet_candidates.push_back(jet); } // ETmiss Particles vfs_particles = apply(event, "VFS").particles(); FourMomentum pTmiss; for (const Particle& p : vfs_particles) pTmiss -= p.momentum(); double eTmiss = pTmiss.pT()/GeV; // ------------------------- // Overlap removal // electron - electron Particles electron_candidates_2; for(size_t ie = 0; ie < electron_candidates.size(); ++ie) { const Particle& e = electron_candidates[ie]; bool away = true; // If electron pair within dR < 0.1: remove electron with lower pT for(size_t ie2 = 0; ie2 < electron_candidates_2.size(); ++ie2) { if (deltaR(e.momentum(),electron_candidates_2[ie2].momentum()) < 0.1 ) { away = false; break; } } // If isolated keep it if ( away ) electron_candidates_2.push_back( e ); } // jet - electron Jets recon_jets; for (const Jet& jet : jet_candidates) { bool away = true; // If jet within dR < 0.2 of electron: remove jet for (const Particle& e : electron_candidates_2) { if (deltaR(e.momentum(), jet.momentum()) < 0.2 ) { away = false; break; } } // jet - tau if ( away ) { // If jet within dR < 0.2 of tau: remove jet for (const Particle& tau : tau_candidates) { if (deltaR(tau.momentum(), jet.momentum()) < 0.2 ) { away = false; break; } } } // If isolated keep it if ( away ) recon_jets.push_back( jet ); } // electron - jet Particles recon_leptons, recon_e; for (size_t ie = 0; ie < electron_candidates_2.size(); ++ie) { const Particle& e = electron_candidates_2[ie]; // If electron within 0.2 < dR < 0.4 from any jets: remove electron bool away = true; for (const Jet& jet : 
recon_jets) { if (deltaR(e.momentum(), jet.momentum()) < 0.4 ) { away = false; break; } } // electron - muon // If electron within dR < 0.1 of a muon: remove electron if (away) { for (const Particle& mu : muon_candidates) { if (deltaR(mu.momentum(),e.momentum()) < 0.1) { away = false; break; } } } // If isolated keep it if ( away ) { recon_e.push_back( e ); recon_leptons.push_back( e ); } } // tau - electron Particles recon_tau; for (const Particle& tau : tau_candidates) { bool away = true; // If tau within dR < 0.2 of an electron: remove tau for (const Particle & e : recon_e) { if (deltaR(tau.momentum(),e.momentum()) < 0.2 ) { away = false; break; } } // tau - muon // If tau within dR < 0.2 of a muon: remove tau if (away) { for (const Particle& mu : muon_candidates) { if (deltaR(tau.momentum(), mu.momentum()) < 0.2 ) { away = false; break; } } } // If isolated keep it if (away) recon_tau.push_back( tau ); } // muon - jet Particles recon_mu, trigger_mu; // If muon within dR < 0.4 of a jet: remove muon for (const Particle& mu : muon_candidates ) { bool away = true; for (const Jet& jet : recon_jets) { if (deltaR(mu.momentum(), jet.momentum()) < 0.4 ) { away = false; break; } } if (away) { recon_mu.push_back( mu ); recon_leptons.push_back( mu ); if (mu.abseta() < 2.4) trigger_mu.push_back( mu ); } } // End overlap removal // --------------------- // Jet cleaning if (rand()/static_cast(RAND_MAX) <= 0.42) { for (const Jet& jet : recon_jets ) { const double eta = jet.rapidity(); const double phi = jet.azimuthalAngle(MINUSPI_PLUSPI); if(jet.pT() > 25*GeV && inRange(eta,-0.1,1.5) && inRange(phi,-0.9,-0.5)) vetoEvent; } } // Event selection // Require at least 3 charged tracks in event if (charged_tracks.size() < 3) vetoEvent; // And at least one e/mu passing trigger if( !( !recon_e.empty() && recon_e[0].pT()>26.*GeV) && !( !trigger_mu.empty() && trigger_mu[0].pT()>26.*GeV) ) { MSG_DEBUG("Hardest lepton fails trigger"); vetoEvent; } // And only accept events with at least 2 
      // electrons and muons and at least 3 leptons in total
      if (recon_mu.size() + recon_e.size() + recon_tau.size() < 3 || recon_leptons.size() < 2) vetoEvent;

      // Sort leptons by decreasing pT
      sortByPt(recon_leptons);
      sortByPt(recon_tau);

      // Calculate HTlep, fill lepton pT histograms & store chosen combination of 3 leptons
      double HTlep = 0.;
      Particles chosen_leptons;
      if (recon_leptons.size() > 2) {
        // 3-light-lepton channel
        _h_pt_1_3l->fill(recon_leptons[0].pT()/GeV);
        _h_pt_2_3l->fill(recon_leptons[1].pT()/GeV);
        _h_pt_3_3l->fill(recon_leptons[2].pT()/GeV);
        HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_leptons[2].pT())/GeV;
        chosen_leptons.push_back( recon_leptons[0] );
        chosen_leptons.push_back( recon_leptons[1] );
        chosen_leptons.push_back( recon_leptons[2] );
      }
      else {
        // 2-light-lepton + tau channel
        _h_pt_1_2ltau->fill(recon_leptons[0].pT()/GeV);
        _h_pt_2_2ltau->fill(recon_leptons[1].pT()/GeV);
        _h_pt_3_2ltau->fill(recon_tau[0].pT()/GeV);
        HTlep = recon_leptons[0].pT()/GeV + recon_leptons[1].pT()/GeV + recon_tau[0].pT()/GeV;
        chosen_leptons.push_back( recon_leptons[0] );
        chosen_leptons.push_back( recon_leptons[1] );
        chosen_leptons.push_back( recon_tau[0] );
      }

      // Calculate mT and mTW variable
      // (mT leptons: leptons not in an OSSF pair; mTW additionally requires the pair near the Z mass)
      Particles mT_leptons;
      Particles mTW_leptons;
      for (size_t i1 = 0; i1 < 3; i1 ++) {
        for (size_t i2 = i1+1; i2 < 3; i2 ++) {
          double OSSF_inv_mass = isOSSF_mass(chosen_leptons[i1],chosen_leptons[i2]);
          if (OSSF_inv_mass != 0.) {
            for (size_t i3 = 0; i3 < 3 ; i3 ++) {
              if (i3 != i2 && i3 != i1) {
                mT_leptons.push_back(chosen_leptons[i3]);
                if ( fabs(91.0 - OSSF_inv_mass) < 20. )
                  mTW_leptons.push_back(chosen_leptons[i3]);
              }
            }
          }
          else {
            mT_leptons.push_back(chosen_leptons[0]);
            mTW_leptons.push_back(chosen_leptons[0]);
          }
        }
      }
      sortByPt(mT_leptons);
      sortByPt(mTW_leptons);

      // Transverse masses built from MET and the hardest selected lepton
      double mT = sqrt(2*pTmiss.pT()/GeV*mT_leptons[0].pT()/GeV*(1-cos(pTmiss.phi()-mT_leptons[0].phi())));
      double mTW = sqrt(2*pTmiss.pT()/GeV*mTW_leptons[0].pT()/GeV*(1-cos(pTmiss.phi()-mTW_leptons[0].phi())));

      // Calculate Min pT variable
      double min_pT = chosen_leptons[2].pT()/GeV;

      // Number of prompt e/mu and had taus
      _h_e_n->fill(recon_e.size());
      _h_mu_n->fill(recon_mu.size());
      _h_tau_n->fill(recon_tau.size());

      // Calculate HTjets variable
      double HTjets = 0.;
      for (const Jet& jet : recon_jets) HTjets += jet.pT()/GeV;

      // Calculate meff variable
      double meff = eTmiss + HTjets;
      Particles all_leptons;
      for (const Particle& e : recon_e ) { meff += e.pT()/GeV; all_leptons.push_back( e ); }
      for (const Particle& mu : recon_mu) { meff += mu.pT()/GeV; all_leptons.push_back( mu ); }
      for (const Particle& tau : recon_tau) { meff += tau.pT()/GeV; all_leptons.push_back( tau ); }

      // Fill histograms of kinematic variables
      _h_HTlep_all->fill(HTlep);
      _h_HTjets_all->fill(HTjets);
      _h_MET_all->fill(eTmiss);
      _h_Meff_all->fill(meff);
      _h_min_pT_all->fill(min_pT);
      _h_mT_all->fill(mT);

      // Determine signal region (3l / 2ltau , onZ / offZ OSSF / offZ no-OSSF)
      // 3l vs. 2ltau
      string basic_signal_region;
      if (recon_mu.size() + recon_e.size() > 2) basic_signal_region += "3l_";
      else if ( (recon_mu.size() + recon_e.size() == 2) && (recon_tau.size() > 0)) basic_signal_region += "2ltau_";
      // Is there an OSSF pair or a three lepton combination with an invariant mass close to the Z mass
      int onZ = isonZ(chosen_leptons);
      if (onZ == 1) basic_signal_region += "onZ";
      else if (onZ == 0) {
        bool OSSF = isOSSF(chosen_leptons);
        if (OSSF) basic_signal_region += "offZ_OSSF";
        else basic_signal_region += "offZ_noOSSF";
      }

      // Check in which signal regions this event falls and adjust event counters
      // INFO: The b-jet signal regions of the paper are not included in this Rivet implementation
      fillEventCountsPerSR(basic_signal_region,onZ,HTlep,eTmiss,HTjets,meff,min_pT,mTW);
    }


    /// Normalise histograms etc., after the run
    void finalize() {
      // Normalize to an integrated luminosity of 1 fb-1
      double norm = crossSection()/femtobarn/sumOfWeights();
      string best_signal_region = "";
      double ratio_best_SR = 0.;

      // Loop over all signal regions and find signal region with best sensitivity (ratio signal events/visible cross-section)
      for (size_t i = 0; i < _signal_regions.size(); i++) {
        double signal_events = _eventCountsPerSR[_signal_regions[i]]->val() * norm;
        // Use expected upper limits to find best signal region:
        double UL95 = getUpperLimit(_signal_regions[i],false);
        double ratio = signal_events / UL95;
        if (ratio > ratio_best_SR) {
          best_signal_region = _signal_regions.at(i);
          ratio_best_SR = ratio;
        }
      }

      double signal_events_best_SR = _eventCountsPerSR[best_signal_region]->val() * norm;
      double exp_UL_best_SR = getUpperLimit(best_signal_region, false);
      double obs_UL_best_SR = getUpperLimit(best_signal_region, true);

      // Print out result
      cout << "----------------------------------------------------------------------------------------" << '\n';
      cout << "Number of total events: " << sumOfWeights() << '\n';
      cout << "Best signal region: " << best_signal_region << '\n';
      cout <<
"Normalized number of signal events in this best signal region (per fb-1): " << signal_events_best_SR << '\n'; cout << "Efficiency*Acceptance: " << _eventCountsPerSR[best_signal_region]->val()/sumOfWeights() << '\n'; cout << "Cross-section [fb]: " << crossSection()/femtobarn << '\n'; cout << "Expected visible cross-section (per fb-1): " << exp_UL_best_SR << '\n'; cout << "Ratio (signal events / expected visible cross-section): " << ratio_best_SR << '\n'; cout << "Observed visible cross-section (per fb-1): " << obs_UL_best_SR << '\n'; cout << "Ratio (signal events / observed visible cross-section): " << signal_events_best_SR/obs_UL_best_SR << '\n'; cout << "----------------------------------------------------------------------------------------" << '\n'; cout << "Using the EXPECTED limits (visible cross-section) of the analysis: " << '\n'; if (signal_events_best_SR > exp_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% C.L." << '\n'; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << '\n'; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << '\n'; cout << "Using the OBSERVED limits (visible cross-section) of the analysis: " << '\n'; if (signal_events_best_SR > obs_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% C.L." << '\n'; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << '\n'; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << '\n'; cout << "INFO: The b-jet signal regions of the paper are not included in this Rivet implementation." 
<< '\n'; cout << "----------------------------------------------------------------------------------------" << '\n'; /// Normalize to cross section if (norm != 0) { scale(_h_HTlep_all, norm); scale(_h_HTjets_all, norm); scale(_h_MET_all, norm); scale(_h_Meff_all, norm); scale(_h_min_pT_all, norm); scale(_h_mT_all, norm); scale(_h_pt_1_3l, norm); scale(_h_pt_2_3l, norm); scale(_h_pt_3_3l, norm); scale(_h_pt_1_2ltau, norm); scale(_h_pt_2_2ltau, norm); scale(_h_pt_3_2ltau, norm); scale(_h_e_n, norm); scale(_h_mu_n, norm); scale(_h_tau_n, norm); scale(_h_excluded, norm); } } /// Helper functions //@{ /// Function giving a list of all signal regions vector getSignalRegions() { // List of basic signal regions vector basic_signal_regions; basic_signal_regions.push_back("3l_offZ_OSSF"); basic_signal_regions.push_back("3l_offZ_noOSSF"); basic_signal_regions.push_back("3l_onZ"); basic_signal_regions.push_back("2ltau_offZ_OSSF"); basic_signal_regions.push_back("2ltau_offZ_noOSSF"); basic_signal_regions.push_back("2ltau_onZ"); // List of kinematic variables vector kinematic_variables; kinematic_variables.push_back("HTlep"); kinematic_variables.push_back("METStrong"); kinematic_variables.push_back("METWeak"); kinematic_variables.push_back("Meff"); kinematic_variables.push_back("MeffStrong"); kinematic_variables.push_back("MeffMt"); kinematic_variables.push_back("MinPt"); vector signal_regions; // Loop over all kinematic variables and basic signal regions for (size_t i0 = 0; i0 < kinematic_variables.size(); i0++) { for (size_t i1 = 0; i1 < basic_signal_regions.size(); i1++) { // Is signal region onZ? int onZ = (basic_signal_regions[i1].find("onZ") != string::npos) ? 
1 : 0; // Get cut values for this kinematic variable vector cut_values = getCutsPerSignalRegion(kinematic_variables[i0], onZ); // Loop over all cut values for (size_t i2 = 0; i2 < cut_values.size(); i2++) { // Push signal region into vector signal_regions.push_back( kinematic_variables[i0] + "_" + basic_signal_regions[i1] + "_cut_" + toString(cut_values[i2]) ); } } } return signal_regions; } /// Function giving all cut values per kinematic variable vector getCutsPerSignalRegion(const string& signal_region, int onZ = 0) { vector cutValues; // Cut values for HTlep if (signal_region.compare("HTlep") == 0) { cutValues.push_back(0); cutValues.push_back(200); cutValues.push_back(500); cutValues.push_back(800); } // Cut values for MinPt else if (signal_region.compare("MinPt") == 0) { cutValues.push_back(0); cutValues.push_back(50); cutValues.push_back(100); cutValues.push_back(150); } // Cut values for METStrong (HTjets > 150 GeV) and METWeak (HTjets < 150 GeV) else if (signal_region.compare("METStrong") == 0 || signal_region.compare("METWeak") == 0) { cutValues.push_back(0); cutValues.push_back(100); cutValues.push_back(200); cutValues.push_back(300); } // Cut values for Meff if (signal_region.compare("Meff") == 0) { cutValues.push_back(0); cutValues.push_back(600); cutValues.push_back(1000); cutValues.push_back(1500); } // Cut values for MeffStrong (MET > 100 GeV) if ((signal_region.compare("MeffStrong") == 0 || signal_region.compare("MeffMt") == 0) && onZ ==1) { cutValues.push_back(0); cutValues.push_back(600); cutValues.push_back(1200); } return cutValues; } /// function fills map _eventCountsPerSR by looping over all signal regions /// and looking if the event falls into this signal region void fillEventCountsPerSR(const string& basic_signal_region, int onZ, double HTlep, double eTmiss, double HTjets, double meff, double min_pT, double mTW) { // Get cut values for HTlep, loop over them and add event if cut is passed vector cut_values = getCutsPerSignalRegion("HTlep", 
onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (HTlep > cut_values[i]) _eventCountsPerSR[("HTlep_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for MinPt, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MinPt", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (min_pT > cut_values[i]) _eventCountsPerSR[("MinPt_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for METStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METStrong", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (eTmiss > cut_values[i] && HTjets > 150.) _eventCountsPerSR[("METStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for METWeak, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METWeak", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (eTmiss > cut_values[i] && HTjets <= 150.) _eventCountsPerSR[("METWeak_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for Meff, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("Meff", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i]) _eventCountsPerSR[("Meff_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for MeffStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MeffStrong", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i] && eTmiss > 100.) _eventCountsPerSR[("MeffStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for MeffMt, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MeffMt", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i] && mTW > 100. 
&& onZ == 1) _eventCountsPerSR[("MeffMt_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } } /// Function returning 4-momentum of daughter-particle if it is a tau neutrino /* Walks the tau's decay vertex and returns the 4-momentum of the first direct daughter that is a tau neutrino; returns a default-constructed (null) FourMomentum when none is found. */ FourMomentum get_tau_neutrino_momentum(const Particle& p) { assert(p.abspid() == PID::TAU); const GenVertex* dv = p.genParticle()->end_vertex(); assert(dv != NULL); // Loop over all daughter particles for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { if (abs((*pp)->pdg_id()) == PID::NU_TAU) return FourMomentum((*pp)->momentum()); } return FourMomentum(); } /// Function calculating the prong number of taus /* Recursively descends the decay chain: each charged status-1 daughter increments nprong (in-out parameter); lep_decaying_tau is set when a direct e/mu/tau daughter of a tau is found, i.e. the tau decayed leptonically. Status-2 daughters are recursed into. */ void get_prong_number(const GenParticle* p, unsigned int& nprong, bool& lep_decaying_tau) { assert(p != NULL); const GenVertex* dv = p->end_vertex(); assert(dv != NULL); for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { // If they have status 1 and are charged they will produce a track and the prong number is +1 if ((*pp)->status() == 1 ) { const int id = (*pp)->pdg_id(); if (Rivet::PID::charge(id) != 0 ) ++nprong; // Check if tau decays leptonically if (( abs(id) == PID::ELECTRON || abs(id) == PID::MUON || abs(id) == PID::TAU ) && abs(p->pdg_id()) == PID::TAU) lep_decaying_tau = true; } // If the status of the daughter particle is 2 it is unstable and the further decays are checked else if ((*pp)->status() == 2 ) { get_prong_number((*pp),nprong,lep_decaying_tau); } } } /// Function giving fiducial lepton efficiency /* Binned pT x eta lookup of the fiducial reconstruction efficiency used to smear truth leptons. flavor codes used by the callers: 11 = prompt electron, 12 = electron from tau, 13 = prompt muon, 14 = muon from tau, 15 = 1-prong hadronic tau. Tables are hard-coded from the ATLAS measurement; the commented-out ewz_* arrays are the corresponding uncertainties, kept for reference. NOTE(review): bin selection uses strict < / > comparisons, so values exactly on a bin edge fall into the preceding ibin — presumably intentional; confirm against upstream. */ double apply_reco_eff(int flavor, const Particle& p) { double pt = p.pT()/GeV; double eta = p.eta(); double eff = 0.; if (flavor == 11) { // weight prompt electron -- now including data/MC ID SF in eff.
double avgrate = 0.685; const static double wz_ele[] = {0.0256,0.522,0.607,0.654,0.708,0.737,0.761,0.784,0.815,0.835,0.851,0.841,0.898}; // double ewz_ele[] = {0.000257,0.00492,0.00524,0.00519,0.00396,0.00449,0.00538,0.00513,0.00773,0.00753,0.0209,0.0964,0.259}; int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100 && pt < 200) ibin = 9; if(pt > 200 && pt < 400) ibin = 10; if(pt > 400 && pt < 600) ibin = 11; if(pt > 600) ibin = 12; double eff_pt = 0.; eff_pt = wz_ele[ibin]; eta = fabs(eta); const static double wz_ele_eta[] = {0.65,0.714,0.722,0.689,0.635,0.615}; // double ewz_ele_eta[] = {0.00642,0.00355,0.00335,0.004,0.00368,0.00422}; ibin = 0; if(eta > 0 && eta < 0.1) ibin = 0; if(eta > 0.1 && eta < 0.5) ibin = 1; if(eta > 0.5 && eta < 1.0) ibin = 2; if(eta > 1.0 && eta < 1.5) ibin = 3; if(eta > 1.5 && eta < 2.0) ibin = 4; if(eta > 2.0 && eta < 2.5) ibin = 5; double eff_eta = 0.; eff_eta = wz_ele_eta[ibin]; eff = (eff_pt * eff_eta) / avgrate; } if (flavor == 12) { // weight electron from tau double avgrate = 0.476; const static double wz_ele[] = {0.00855,0.409,0.442,0.55,0.632,0.616,0.615,0.642,0.72,0.617}; // double ewz_ele[] = {0.000573,0.0291,0.0366,0.0352,0.0363,0.0474,0.0628,0.0709,0.125,0.109}; int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100) ibin = 9; double eff_pt = 0.; eff_pt = wz_ele[ibin]; eta = fabs(eta); const static double wz_ele_eta[] = {0.546,0.5,0.513,0.421,0.47,0.433}; //double ewz_ele_eta[] =
{0.0566,0.0257,0.0263,0.0263,0.0303,0.0321}; ibin = 0; if(eta > 0 && eta < 0.1) ibin = 0; if(eta > 0.1 && eta < 0.5) ibin = 1; if(eta > 0.5 && eta < 1.0) ibin = 2; if(eta > 1.0 && eta < 1.5) ibin = 3; if(eta > 1.5 && eta < 2.0) ibin = 4; if(eta > 2.0 && eta < 2.5) ibin = 5; double eff_eta = 0.; eff_eta = wz_ele_eta[ibin]; eff = (eff_pt * eff_eta) / avgrate; } if (flavor == 13) { // weight prompt muon int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100 && pt < 200) ibin = 9; if(pt > 200 && pt < 400) ibin = 10; if(pt > 400) ibin = 11; if(fabs(eta) < 0.1) { const static double wz_mu[] = {0.00705,0.402,0.478,0.49,0.492,0.499,0.527,0.512,0.53,0.528,0.465,0.465}; //double ewz_mu[] = {0.000298,0.0154,0.017,0.0158,0.0114,0.0123,0.0155,0.0133,0.0196,0.0182,0.0414,0.0414}; double eff_pt = 0.; eff_pt = wz_mu[ibin]; eff = eff_pt; } if(fabs(eta) > 0.1) { const static double wz_mu[] = {0.0224,0.839,0.887,0.91,0.919,0.923,0.925,0.925,0.922,0.918,0.884,0.834}; //double ewz_mu[] = {0.000213,0.00753,0.0074,0.007,0.00496,0.00534,0.00632,0.00583,0.00849,0.00804,0.0224,0.0963}; double eff_pt = 0.; eff_pt = wz_mu[ibin]; eff = eff_pt; } } if (flavor == 14) { // weight muon from tau int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100) ibin = 9; if(fabs(eta) < 0.1) { const static double wz_mu[] = {0.0,0.664,0.124,0.133,0.527,0.283,0.495,0.25,0.5,0.331}; //double ewz_mu[] = {0.0,0.192,0.0437,0.0343,0.128,0.107,0.202,0.125,0.25,0.191}; double eff_pt = 0.;
eff_pt = wz_mu[ibin]; eff = eff_pt; } if(fabs(eta) > 0.1) { const static double wz_mu[] = {0.0,0.617,0.655,0.676,0.705,0.738,0.712,0.783,0.646,0.745}; //double ewz_mu[] = {0.0,0.043,0.0564,0.0448,0.0405,0.0576,0.065,0.0825,0.102,0.132}; double eff_pt = 0.; eff_pt = wz_mu[ibin]; eff = eff_pt; } } if (flavor == 15) { // weight hadronic tau 1p double avgrate = 0.16; const static double wz_tau1p[] = {0.0,0.0311,0.148,0.229,0.217,0.292,0.245,0.307,0.227,0.277}; //double ewz_tau1p[] = {0.0,0.00211,0.0117,0.0179,0.0134,0.0248,0.0264,0.0322,0.0331,0.0427}; int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100) ibin = 9; double eff_pt = 0.; eff_pt = wz_tau1p[ibin]; const static double wz_tau1p_eta[] = {0.166,0.15,0.188,0.175,0.142,0.109}; //double ewz_tau1p_eta[] ={0.0166,0.00853,0.0097,0.00985,0.00949,0.00842}; ibin = 0; if(eta > 0.0 && eta < 0.1) ibin = 0; if(eta > 0.1 && eta < 0.5) ibin = 1; if(eta > 0.5 && eta < 1.0) ibin = 2; if(eta > 1.0 && eta < 1.5) ibin = 3; if(eta > 1.5 && eta < 2.0) ibin = 4; if(eta > 2.0 && eta < 2.5) ibin = 5; double eff_eta = 0.; eff_eta = wz_tau1p_eta[ibin]; eff = (eff_pt * eff_eta) / avgrate; } return eff; } /// Function giving observed and expected upper limits (on the visible cross-section) /* Pure lookup of the published 95% CL visible-cross-section upper limits keyed by signal-region string ("<variable>_<region>_cut_<threshold>"); observed==true selects the observed limits, otherwise the expected ones. NOTE(review): the two map declarations below have lost their template arguments in extraction — from usage (string keys, double values) they were presumably map<string,double>; restore from the upstream file before compiling. */ double getUpperLimit(const string& signal_region, bool observed) { map upperLimitsObserved; map upperLimitsExpected; upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_200"] = 0.704; upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_500"] = 0.182; upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_800"] = 0.147; upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_200"] = 1.677;
/* Hard-coded limit tables continue: observed limits first (per HTlep / METStrong / METWeak / Meff / MeffStrong / MeffMt / MinPt / nbtag selection and signal region), then the corresponding expected limits. Values are visible cross-sections in fb, transcribed from the published ATLAS tables — do not edit by hand without cross-checking the paper. */ upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_500"] = 0.141; upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_800"] = 0.155; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_200"] = 0.341; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_500"] = 0.221; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_800"] = 0.140; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_200"] = 0.413; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_500"] = 0.138; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_800"] = 0.150; upperLimitsObserved["HTlep_3l_onZ_cut_0"] = 29.804; upperLimitsObserved["HTlep_3l_onZ_cut_200"] = 3.579; upperLimitsObserved["HTlep_3l_onZ_cut_500"] = 0.466; upperLimitsObserved["HTlep_3l_onZ_cut_800"] = 0.298; upperLimitsObserved["HTlep_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["HTlep_2ltau_onZ_cut_200"] = 3.141; upperLimitsObserved["HTlep_2ltau_onZ_cut_500"] = 0.290; upperLimitsObserved["HTlep_2ltau_onZ_cut_800"] = 0.157; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_0"] = 1.111; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_100"] = 0.354; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_200"] = 0.236; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_300"] = 0.150; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_0"] = 1.881; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_100"] = 0.406; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_200"] = 0.194; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_300"] = 0.134; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_0"] = 0.770; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_100"] = 0.295; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_200"] = 0.149; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_300"] = 0.140; upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_0"] = 2.003; upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_100"] = 0.806;
upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_200"] = 0.227; upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_300"] = 0.138; upperLimitsObserved["METStrong_3l_onZ_cut_0"] = 6.383; upperLimitsObserved["METStrong_3l_onZ_cut_100"] = 0.959; upperLimitsObserved["METStrong_3l_onZ_cut_200"] = 0.549; upperLimitsObserved["METStrong_3l_onZ_cut_300"] = 0.182; upperLimitsObserved["METStrong_2ltau_onZ_cut_0"] = 10.658; upperLimitsObserved["METStrong_2ltau_onZ_cut_100"] = 0.637; upperLimitsObserved["METStrong_2ltau_onZ_cut_200"] = 0.291; upperLimitsObserved["METStrong_2ltau_onZ_cut_300"] = 0.227; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_0"] = 1.802; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_100"] = 0.344; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_200"] = 0.189; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_300"] = 0.148; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_0"] = 12.321; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_100"] = 0.430; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_200"] = 0.137; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_300"] = 0.134; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_0"] = 0.562; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_100"] = 0.153; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_200"] = 0.154; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_300"] = 0.141; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_0"] = 2.475; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_100"] = 0.244; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_200"] = 0.141; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_300"] = 0.142; upperLimitsObserved["METWeak_3l_onZ_cut_0"] = 24.769; upperLimitsObserved["METWeak_3l_onZ_cut_100"] = 0.690; upperLimitsObserved["METWeak_3l_onZ_cut_200"] = 0.198; upperLimitsObserved["METWeak_3l_onZ_cut_300"] = 0.138; upperLimitsObserved["METWeak_2ltau_onZ_cut_0"] = 194.360; upperLimitsObserved["METWeak_2ltau_onZ_cut_100"] = 0.287; upperLimitsObserved["METWeak_2ltau_onZ_cut_200"] =
0.144; upperLimitsObserved["METWeak_2ltau_onZ_cut_300"] = 0.130; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_600"] = 0.487; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_1000"] = 0.156; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_1500"] = 0.140; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_600"] = 0.687; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_1000"] = 0.224; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_1500"] = 0.155; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_600"] = 0.249; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_1000"] = 0.194; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_1500"] = 0.145; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_600"] = 0.772; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_1000"] = 0.218; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_1500"] = 0.204; upperLimitsObserved["Meff_3l_onZ_cut_0"] = 29.804; upperLimitsObserved["Meff_3l_onZ_cut_600"] = 2.933; upperLimitsObserved["Meff_3l_onZ_cut_1000"] = 0.912; upperLimitsObserved["Meff_3l_onZ_cut_1500"] = 0.225; upperLimitsObserved["Meff_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["Meff_2ltau_onZ_cut_600"] = 1.486; upperLimitsObserved["Meff_2ltau_onZ_cut_1000"] = 0.641; upperLimitsObserved["Meff_2ltau_onZ_cut_1500"] = 0.204; upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_0"] = 0.479; upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_600"] = 0.353; upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_1200"] = 0.187; upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_0"] = 0.617; upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_600"] = 0.320; upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_1200"] = 0.281; upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_0"] = 0.408; upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_600"] = 0.240;
upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_1200"] = 0.150; upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_0"] = 0.774; upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_600"] = 0.417; upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_1200"] = 0.266; upperLimitsObserved["MeffStrong_3l_onZ_cut_0"] = 1.208; upperLimitsObserved["MeffStrong_3l_onZ_cut_600"] = 0.837; upperLimitsObserved["MeffStrong_3l_onZ_cut_1200"] = 0.269; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_0"] = 0.605; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_600"] = 0.420; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_1200"] = 0.141; upperLimitsObserved["MeffMt_3l_onZ_cut_0"] = 1.832; upperLimitsObserved["MeffMt_3l_onZ_cut_600"] = 0.862; upperLimitsObserved["MeffMt_3l_onZ_cut_1200"] = 0.222; upperLimitsObserved["MeffMt_2ltau_onZ_cut_0"] = 1.309; upperLimitsObserved["MeffMt_2ltau_onZ_cut_600"] = 0.481; upperLimitsObserved["MeffMt_2ltau_onZ_cut_1200"] = 0.146; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_50"] = 0.500; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_100"] = 0.203; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_150"] = 0.128; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_50"] = 0.859; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_100"] = 0.158; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_150"] = 0.155; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_50"] = 0.295; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_100"] = 0.148; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_150"] = 0.137; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_50"] = 0.314; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_100"] = 0.134; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_150"] = 0.140; upperLimitsObserved["MinPt_3l_onZ_cut_0"] = 29.804;
upperLimitsObserved["MinPt_3l_onZ_cut_50"] = 1.767; upperLimitsObserved["MinPt_3l_onZ_cut_100"] = 0.690; upperLimitsObserved["MinPt_3l_onZ_cut_150"] = 0.301; upperLimitsObserved["MinPt_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["MinPt_2ltau_onZ_cut_50"] = 1.050; upperLimitsObserved["MinPt_2ltau_onZ_cut_100"] = 0.155; upperLimitsObserved["MinPt_2ltau_onZ_cut_150"] = 0.146; upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_1"] = 0.865; upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_2"] = 0.474; upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_1"] = 1.566; upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_2"] = 0.426; upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_1"] = 0.643; upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_2"] = 0.321; upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_1"] = 2.435; upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_2"] = 1.073; upperLimitsObserved["nbtag_3l_onZ_cut_0"] = 29.804; upperLimitsObserved["nbtag_3l_onZ_cut_1"] = 3.908; upperLimitsObserved["nbtag_3l_onZ_cut_2"] = 0.704; upperLimitsObserved["nbtag_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["nbtag_2ltau_onZ_cut_1"] = 9.377; upperLimitsObserved["nbtag_2ltau_onZ_cut_2"] = 0.657; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_200"] = 1.175; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_500"] = 0.265; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_800"] = 0.155; upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_200"] = 1.803; upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_500"] = 0.159; upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_800"] = 0.155; upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_0"] = 0.836;
upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_200"] = 0.340; upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_500"] = 0.218; upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_800"] = 0.140; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_200"] = 0.599; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_500"] = 0.146; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_800"] = 0.148; upperLimitsExpected["HTlep_3l_onZ_cut_0"] = 32.181; upperLimitsExpected["HTlep_3l_onZ_cut_200"] = 4.879; upperLimitsExpected["HTlep_3l_onZ_cut_500"] = 0.473; upperLimitsExpected["HTlep_3l_onZ_cut_800"] = 0.266; upperLimitsExpected["HTlep_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["HTlep_2ltau_onZ_cut_200"] = 3.676; upperLimitsExpected["HTlep_2ltau_onZ_cut_500"] = 0.235; upperLimitsExpected["HTlep_2ltau_onZ_cut_800"] = 0.150; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_0"] = 1.196; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_100"] = 0.423; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_200"] = 0.208; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_300"] = 0.158; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_0"] = 2.158; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_100"] = 0.461; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_200"] = 0.186; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_300"] = 0.138; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_0"] = 0.495; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_100"] = 0.284; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_200"] = 0.150; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_300"] = 0.146; upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_0"] = 1.967; upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_100"] = 0.732; upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_200"] = 0.225; upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_300"] = 0.147; upperLimitsExpected["METStrong_3l_onZ_cut_0"] = 7.157;
upperLimitsExpected["METStrong_3l_onZ_cut_100"] = 1.342; upperLimitsExpected["METStrong_3l_onZ_cut_200"] = 0.508; upperLimitsExpected["METStrong_3l_onZ_cut_300"] = 0.228; upperLimitsExpected["METStrong_2ltau_onZ_cut_0"] = 12.441; upperLimitsExpected["METStrong_2ltau_onZ_cut_100"] = 0.534; upperLimitsExpected["METStrong_2ltau_onZ_cut_200"] = 0.243; upperLimitsExpected["METStrong_2ltau_onZ_cut_300"] = 0.218; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_0"] = 2.199; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_100"] = 0.391; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_200"] = 0.177; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_300"] = 0.144; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_0"] = 12.431; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_100"] = 0.358; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_200"] = 0.150; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_300"] = 0.135; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_0"] = 0.577; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_100"] = 0.214; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_200"] = 0.155; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_300"] = 0.140; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_0"] = 2.474; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_100"] = 0.382; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_200"] = 0.144; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_300"] = 0.146; upperLimitsExpected["METWeak_3l_onZ_cut_0"] = 26.305; upperLimitsExpected["METWeak_3l_onZ_cut_100"] = 1.227; upperLimitsExpected["METWeak_3l_onZ_cut_200"] = 0.311; upperLimitsExpected["METWeak_3l_onZ_cut_300"] = 0.188; upperLimitsExpected["METWeak_2ltau_onZ_cut_0"] = 205.198; upperLimitsExpected["METWeak_2ltau_onZ_cut_100"] = 0.399; upperLimitsExpected["METWeak_2ltau_onZ_cut_200"] = 0.166; upperLimitsExpected["METWeak_2ltau_onZ_cut_300"] = 0.140; upperLimitsExpected["Meff_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["Meff_3l_offZ_OSSF_cut_600"] = 0.649;
upperLimitsExpected["Meff_3l_offZ_OSSF_cut_1000"] = 0.252; upperLimitsExpected["Meff_3l_offZ_OSSF_cut_1500"] = 0.150; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_600"] = 0.657; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_1000"] = 0.226; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_1500"] = 0.154; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_0"] = 0.836; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_600"] = 0.265; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_1000"] = 0.176; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_1500"] = 0.146; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_600"] = 0.678; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_1000"] = 0.243; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_1500"] = 0.184; upperLimitsExpected["Meff_3l_onZ_cut_0"] = 32.181; upperLimitsExpected["Meff_3l_onZ_cut_600"] = 3.219; upperLimitsExpected["Meff_3l_onZ_cut_1000"] = 0.905; upperLimitsExpected["Meff_3l_onZ_cut_1500"] = 0.261; upperLimitsExpected["Meff_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["Meff_2ltau_onZ_cut_600"] = 1.680; upperLimitsExpected["Meff_2ltau_onZ_cut_1000"] = 0.375; upperLimitsExpected["Meff_2ltau_onZ_cut_1500"] = 0.178; upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_0"] = 0.571; upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_600"] = 0.386; upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_1200"] = 0.177; upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_0"] = 0.605; upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_600"] = 0.335; upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_1200"] = 0.249; upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_0"] = 0.373; upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_600"] = 0.223; upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_1200"] = 0.150; upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_0"] = 0.873;
upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_600"] = 0.428; upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_1200"] = 0.210; upperLimitsExpected["MeffStrong_3l_onZ_cut_0"] = 2.034; upperLimitsExpected["MeffStrong_3l_onZ_cut_600"] = 1.093; upperLimitsExpected["MeffStrong_3l_onZ_cut_1200"] = 0.293; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_0"] = 0.690; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_600"] = 0.392; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_1200"] = 0.156; upperLimitsExpected["MeffMt_3l_onZ_cut_0"] = 2.483; upperLimitsExpected["MeffMt_3l_onZ_cut_600"] = 0.845; upperLimitsExpected["MeffMt_3l_onZ_cut_1200"] = 0.255; upperLimitsExpected["MeffMt_2ltau_onZ_cut_0"] = 1.448; upperLimitsExpected["MeffMt_2ltau_onZ_cut_600"] = 0.391; upperLimitsExpected["MeffMt_2ltau_onZ_cut_1200"] = 0.146; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_50"] = 0.703; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_100"] = 0.207; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_150"] = 0.143; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_50"] = 0.705; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_100"] = 0.149; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_150"] = 0.155; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_0"] = 0.836; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_50"] = 0.249; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_100"] = 0.135; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_150"] = 0.136; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_50"] = 0.339; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_100"] = 0.149; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_150"] = 0.145; upperLimitsExpected["MinPt_3l_onZ_cut_0"] = 32.181; upperLimitsExpected["MinPt_3l_onZ_cut_50"] = 2.260; upperLimitsExpected["MinPt_3l_onZ_cut_100"] = 0.438; upperLimitsExpected["MinPt_3l_onZ_cut_150"]
= 0.305; upperLimitsExpected["MinPt_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["MinPt_2ltau_onZ_cut_50"] = 1.335; upperLimitsExpected["MinPt_2ltau_onZ_cut_100"] = 0.162; upperLimitsExpected["MinPt_2ltau_onZ_cut_150"] = 0.149; upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_1"] = 0.923; upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_2"] = 0.452; upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_1"] = 1.774; upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_2"] = 0.549; upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_0"] = 0.836; upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_1"] = 0.594; upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_2"] = 0.298; upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_1"] = 2.358; upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_2"] = 0.958; upperLimitsExpected["nbtag_3l_onZ_cut_0"] = 32.181; upperLimitsExpected["nbtag_3l_onZ_cut_1"] = 3.868; upperLimitsExpected["nbtag_3l_onZ_cut_2"] = 0.887; upperLimitsExpected["nbtag_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["nbtag_2ltau_onZ_cut_1"] = 9.397; upperLimitsExpected["nbtag_2ltau_onZ_cut_2"] = 0.787; /* Missing keys would silently yield a value-initialized 0.0 from operator[]; callers are expected to pass only known signal-region strings. */ if (observed) return upperLimitsObserved[signal_region]; else return upperLimitsExpected[signal_region]; } /// Function checking if there is an OSSF lepton pair or a combination of 3 leptons with an invariant mass close to the Z mass /* Returns 1 when the best OSSF di-lepton (or OSSF pair + 3rd lepton, to catch FSR/conversions) invariant mass lies within 20 GeV of 91 GeV, else 0. Pair selection keys on pid products -121 (e+e-) and -169 (mu+mu-). NOTE(review): the double loop visits each (p1,p2) pair twice and also p1==p2; harmless for the mass comparison but worth confirming against upstream. */ int isonZ (const Particles& particles) { int onZ = 0; double best_mass_2 = 999.; double best_mass_3 = 999.; // Loop over all 2 particle combinations to find invariant mass of OSSF pair closest to Z mass for (const Particle& p1 : particles) { for (const Particle& p2 : particles) { double mass_difference_2_old = fabs(91.0 - best_mass_2); double mass_difference_2_new = fabs(91.0 - (p1.momentum() + p2.momentum()).mass()/GeV); // If particle combination is OSSF pair calculate mass difference to Z
mass if ((p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169)) { // Get invariant mass closest to Z mass if (mass_difference_2_new < mass_difference_2_old) best_mass_2 = (p1.momentum() + p2.momentum()).mass()/GeV; // In case there is an OSSF pair take also 3rd lepton into account (e.g. from FSR and photon to electron conversion) for (const Particle& p3 : particles ) { double mass_difference_3_old = fabs(91.0 - best_mass_3); double mass_difference_3_new = fabs(91.0 - (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV); if (mass_difference_3_new < mass_difference_3_old) best_mass_3 = (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV; } } } } // Pick the minimum invariant mass of the best OSSF pair combination and the best 3 lepton combination double best_mass = min(best_mass_2,best_mass_3); // if this mass is in a 20 GeV window around the Z mass, the event is classified as onZ if ( fabs(91.0 - best_mass) < 20. ) onZ = 1; return onZ; } /// function checking if two leptons are an OSSF lepton pair and giving out the invariant mass (0 if no OSSF pair) /* Returns the di-lepton invariant mass in GeV for an e+e- or mu+mu- pair, 0.0 otherwise. */ double isOSSF_mass (const Particle& p1, const Particle& p2) { double inv_mass = 0.; // Is particle combination OSSF pair?
if ((p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169)) { // Get invariant mass inv_mass = (p1.momentum() + p2.momentum()).mass()/GeV; } return inv_mass; } /// Function checking if there is an OSSF lepton pair /* Scans only the first three entries (hard-coded bound of 3) — assumes the caller passes exactly the three selected leptons; TODO confirm. */ bool isOSSF (const Particles& particles) { for (size_t i1=0 ; i1 < 3 ; i1 ++) { for (size_t i2 = i1+1 ; i2 < 3 ; i2 ++) { if ((particles[i1].pid()*particles[i2].pid() == -121 || particles[i1].pid()*particles[i2].pid() == -169)) { return true; } } } return false; } //@} private: /// Histograms //@{ Histo1DPtr _h_HTlep_all, _h_HTjets_all, _h_MET_all, _h_Meff_all, _h_min_pT_all, _h_mT_all; Histo1DPtr _h_pt_1_3l, _h_pt_2_3l, _h_pt_3_3l, _h_pt_1_2ltau, _h_pt_2_2ltau, _h_pt_3_2ltau; Histo1DPtr _h_e_n, _h_mu_n, _h_tau_n; Histo1DPtr _h_excluded; //@} /// Fiducial efficiencies to model the effects of the ATLAS detector bool _use_fiducial_lepton_efficiency; /// List of signal regions and event counts per signal region vector _signal_regions; map _eventCountsPerSR; }; DECLARE_RIVET_PLUGIN(ATLAS_2014_I1327229); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1419652.cc b/analyses/pluginATLAS/ATLAS_2016_I1419652.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1419652.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1419652.cc @@ -1,173 +1,173 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class ATLAS_2016_I1419652 : public Analysis { public: /// Particle types included enum PartTypes { k_NoStrange, k_AllCharged, kNPartTypes }; /// Phase space regions enum RegionID { k_pt500_nch1_eta25, k_pt500_nch1_eta08, kNregions }; /// Nch cut for each region const static int nchCut[kNregions]; /// Default constructor ATLAS_2016_I1419652() : Analysis("ATLAS_2016_I1419652") {} /// Initialization, called once before running /* Books per-region charged-final-state projections (the diff migrates the deprecated eta/pT constructor to the Cuts-based one) and the per-type/per-region counters and histograms. */ void init() { // Projections - const ChargedFinalState cfs500_25(-2.5, 2.5, 500.0*MeV); + const ChargedFinalState cfs500_25((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 500.0*MeV)); declare(cfs500_25,
"CFS500_25"); - const ChargedFinalState cfs500_08(-0.8, 0.8, 500.0*MeV); + const ChargedFinalState cfs500_08((Cuts::etaIn(-0.8, 0.8) && Cuts::pT >= 500.0*MeV)); declare(cfs500_08, "CFS500_08"); for (int iT = 0; iT < kNPartTypes; ++iT) { for (int iR = 0; iR < kNregions; ++iR) { book(_sumW[iT][iR], "_sumW" + to_str(iT) + to_str(iR)); book(_hist_nch [iT][iR] , 1, iR + 1, iT + 1); book(_hist_pt [iT][iR] , 2, iR + 1, iT + 1); book(_hist_eta [iT][iR] , 3, iR + 1, iT + 1); book(_hist_ptnch[iT][iR] , 4, iR + 1, iT + 1); } } } /* Per-event: select the CFS projection matching each phase-space region and delegate all filling to fillPtEtaNch. */ void analyze(const Event& event) { string fsName; for (int iR = 0; iR < kNregions; ++iR) { switch (iR) { case k_pt500_nch1_eta25: fsName = "CFS500_25"; break; case k_pt500_nch1_eta08: fsName = "CFS500_08"; break; } const ChargedFinalState& cfs = apply(event, fsName); /// What's the benefit in separating this code which is only called from one place?! fillPtEtaNch(cfs, iR); } } /* End of run: normalize by the per-type/per-region sum of weights; pT and eta spectra are additionally divided by 2*pi and the eta range width (5.0 for |eta|<2.5, 1.6 for |eta|<0.8). Regions with zero weight are only warned about, not scaled. */ void finalize() { // Standard histograms for (int iT = 0; iT < kNPartTypes; ++iT) { for (int iR = 0; iR < kNregions; ++iR) { double etaRangeSize = -999.0; //intentionally crazy switch (iR) { case k_pt500_nch1_eta25 : etaRangeSize = 5.0 ; break; case k_pt500_nch1_eta08 : etaRangeSize = 1.6 ; break; default: etaRangeSize = -999.0; break; //intentionally crazy } if (_sumW[iT][iR]->val() > 0) { scale(_hist_nch[iT][iR], 1.0/ *_sumW[iT][iR]); scale(_hist_pt [iT][iR], 1.0/ dbl(*_sumW[iT][iR])/TWOPI/etaRangeSize); scale(_hist_eta[iT][iR], 1.0/ *_sumW[iT][iR]); } else { MSG_WARNING("Sum of weights is zero (!) in type/region: " << iT << " " << iR); } } } } /// Helper for collectively filling Nch, pT, eta, and pT vs.
Nch histograms /* Counts charged particles twice: all charged, and excluding the weakly-decaying strange baryons Sigma+-, Xi-, Omega- (PDG ids 3112/3222/3312/3334). Fills sumW, Nch, pT (weighted 1/pT), eta and <pT> vs Nch for each particle-type variant that passes the region's Nch cut. */ void fillPtEtaNch(const ChargedFinalState& cfs, int iRegion) { // Get number of particles int nch[kNPartTypes]; int nch_noStrange = 0; for (const Particle& p : cfs.particles()) { PdgId pdg = p.abspid (); if ( pdg == 3112 || // Sigma- pdg == 3222 || // Sigma+ pdg == 3312 || // Xi- pdg == 3334 ) // Omega- continue; nch_noStrange++; } nch[k_AllCharged] = cfs.size(); nch[k_NoStrange ] = nch_noStrange; // Skip if event fails cut for all charged (noStrange will always be less) if (nch[k_AllCharged] < nchCut[iRegion]) return; // Fill event weight info _sumW[k_AllCharged][iRegion]->fill(); if (nch[k_NoStrange ] >= nchCut[iRegion]) { _sumW[k_NoStrange][iRegion]->fill(); } // Fill nch _hist_nch[k_AllCharged][iRegion]->fill(nch[k_AllCharged]); if (nch[k_NoStrange ] >= nchCut[iRegion]) { _hist_nch [k_NoStrange][iRegion]->fill(nch[k_NoStrange ]); } // Loop over particles, fill pT, eta and ptnch for (const Particle& p : cfs.particles()) { const double pt = p.pT()/GeV; const double eta = p.eta(); _hist_pt [k_AllCharged][iRegion]->fill(pt , 1.0/pt); _hist_eta [k_AllCharged][iRegion]->fill(eta); _hist_ptnch [k_AllCharged][iRegion]->fill(nch[k_AllCharged], pt); // Make sure nch cut is passed for nonStrange particles!
if (nch[k_NoStrange ] >= nchCut[iRegion]) { PdgId pdg = p.abspid (); if ( pdg == 3112 || // Sigma- pdg == 3222 || // Sigma+ pdg == 3312 || // Xi- pdg == 3334 ) // Omega- continue; // Here we don't have strange particles anymore _hist_pt [k_NoStrange][iRegion]->fill(pt , 1.0/pt); _hist_eta [k_NoStrange][iRegion]->fill(eta); _hist_ptnch[k_NoStrange][iRegion]->fill(nch[k_NoStrange], pt); } } } private: CounterPtr _sumW[kNPartTypes][kNregions]; Histo1DPtr _hist_nch [kNPartTypes][kNregions]; Histo1DPtr _hist_pt [kNPartTypes][kNregions]; Histo1DPtr _hist_eta [kNPartTypes][kNregions]; Profile1DPtr _hist_ptnch[kNPartTypes][kNregions]; }; // Constants: pT & eta regions const int ATLAS_2016_I1419652::nchCut[] = {1, 1}; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1419652); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1624693.cc b/analyses/pluginATLAS/ATLAS_2017_I1624693.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1624693.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1624693.cc @@ -1,435 +1,435 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" /// @todo Include more projections as required, e.g. ChargedFinalState, FastJets, ZFinder...
namespace Rivet { class ATLAS_2017_I1624693 : public Analysis { public: /// Constructor /// @brief Study of ordered hadron chains at 7 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1624693); /// @name Analysis methods //@{ struct usedX { int locMin; int locMax; std::vector > chains; // Constructor usedX(int min, int max, int ic, float mass) { locMin=min; locMax=max; chains.clear(); chains.push_back(std::pair(ic,mass)); } // Constructor usedX(int min, int max) { locMin=min; locMax=max; chains.clear(); } void add(int jc, float mass) { if (chains.size()) { std::vector >::iterator it=chains.begin(); while ( it!=chains.end() && mass>(*it).second ) ++it; chains.insert(it,std::pair(jc,mass)); } else { chains.push_back(std::pair(jc,mass)); } } }; /// Book histograms and initialise projections before the run void init() { /// @todo Initialise and register projections here - ChargedFinalState cfs(-2.5, 2.5, 0.1*GeV); + ChargedFinalState cfs((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 0.1*GeV)); declare(cfs,"CFS"); // pion mass; pim = 0.1396; /// @todo Book histograms here, e.g.: book(_DeltaQ , 1, 1, 1); book(_Delta3h, 2, 1, 1); book(_dalitz , 3, 1, 1); // auxiliary book(_h_nch, "_nch", 200, -0.5, 199.5); } /// Perform the per-event analysis void analyze(const Event& event) { //const double weight = event.weight(); bool match =false; /// @todo Do the event by event analysis here const ChargedFinalState& had = applyProjection(event, "CFS"); Particles hs=had.particles(); int nch = hs.size(); if (nch < 3) return; _h_nch->fill(1.*nch,1.); for (unsigned int i=0; i < hs.size() - 1; ++i) { for (unsigned int j=i+1; j < hs.size(); ++j) { double q12 = qq(hs[i],hs[j],match); if (match) _DeltaQ->fill(q12,-1.); else _DeltaQ->fill(q12,1.); } } // chain selection std::vector wchain; std::vector< std::vector > rchains; std::vector< std::vector > mchains; wchain.clear(); rchains.clear(); mchains.clear(); for (unsigned int ip1 = 0; ip1< hs.size(); ++ip1 ) { wchain.push_back(1.); std::vector 
cc(1,ip1); std::vector mc; double qlmin=10000.; int ilmin=-1; for (unsigned ip2 = 0; ip2 < hs.size(); ++ip2) { if (ip2==ip1) continue; double ql = qq(hs[ip1],hs[ip2],match); if (!match) continue; // looking for closest like-sign match if (ql ilmin && rchains[ilmin][1]==ip1) { // std::cout <<"exclusive match:"<< std::endl; wchain.back()=0.5; wchain[ilmin]=0.5; } double m3min=10000.; int ixmin=-1; for (unsigned ip2 = 0; ip2< hs.size(); ++ip2) { if (ip2==ip1 || int(ip2)==ilmin ) continue; double qx = qq(hs[ip1],hs[ip2],match); if (match) continue; double qxl = qq(hs[ip2],hs[ilmin],match); double m3 = sqrt(9*pim*pim+qxl*qxl+qlmin*qlmin+qx*qx); if (m3 assoc(hs.size(),0.); // cache for association rate std::vector accept(rchains.size(), false); // loop over chains and accept lowest masses while watching the association rate int inext = 0; while ( inext>-1 ) { inext = -1; float cMin = 100000.; // find non-accepted chain with lowest Q_ls; dissolve chains if association count over 2 for (unsigned int ic=0; ic < rchains.size(); ++ic) { if (rchains[ic].size() < 2) continue; if (accept[ic]) continue; if (mchains[ic][0] < cMin) { cMin = mchains[ic][0]; inext=ic; } } if (inext>-1 ) { unsigned int cloc0 = rchains[inext][0]; unsigned int cloc1 = rchains[inext][1]; if ( (assoc[cloc0] + 1. <= 2.) && (assoc[cloc1] + 1. <= 2.) ) { // chain can be accepted accept[inext]=true; assoc[cloc0]+=1.; assoc[cloc1]+=1.; if (wchain[inext]==0.5) { // accept the identical chain, too for (unsigned int ic=0; ic1 ) { // association count filled up, discard chain accept[inext]=true; wchain[inext]=0.; } else { // dissolve chain and find new association unsigned int i1 = rchains[inext][0]; float mMn = 1000000.; int ipn = -1; for (unsigned int i2=0; i2 1.) continue; if (m > 0. && m = 0) { rchains[inext][1]=ipn; mchains[inext][0]=mMn; // resolve chain weight : by default, it is 1. wchain[inext]=1.; // check exclusivity of pairing for (unsigned int ico=0; ico0.) 
continue; float q12 = qq(hs[ipn],hs[ij],match); double m3 = sqrt(9*pim*pim+q02*q02+mMn*mMn+q12*q12); if (m3>0. && m3 =0) { rchains[inext].push_back(ipnn); rchains[inext][2]=ipnn; mchains[inext][1]=mMnn; } else {accept[inext]=true; wchain[inext]=0.;} } else { // chain not recovered wchain[inext]=0.; accept[inext]=true; } } } } // end loop over chains // cleanup: association rate for unlike-sign pairs // third member verification std::vector accept3(rchains.size(),false); // watch unlike-sign combinations used std::vector used; // loop over chains and accept lowest masses while watching the association rate inext = 0; while ( inext>-1 ) { inext = -1; float cMin = 100000.; // find non-accepted chain with lowest mass; dissolve chains if association count over 3 for (unsigned int ic=0; ic < rchains.size(); ++ic) { if (rchains[ic].size() < 3 || !wchain[ic] || !accept[ic]) continue; if (accept3[ic]) continue; if (mchains[ic][1]-1 ) { unsigned int cloc0 = rchains[inext][0]; unsigned int cloc1 = rchains[inext][1]; unsigned int cloc2 = rchains[inext][2]; // map use of unlike sign pairs int iu0 = -1; float w0=0.; for (unsigned int iu=0; iu 0) for (unsigned int iw=0; iw0) for (unsigned int iw=0; iw 0.) continue; if (assoc[i3] > 3-wchain[inext]) continue; // check pair association w0=0.; w1=0.; for (unsigned int iu=0; iu 0) for (unsigned int iw=0; iw0) for (unsigned int iw=0; iw2. || w1+wchain[inext]>2.) continue; float q12 = qq(hs[i2],hs[i3],match); float q01 = qq(hs[i1],hs[i2],match); float m = sqrt(9*pim*pim+q02*q02+q01*q01+q12*q12); if (m>0. 
&& m =0) { rchains[inext].push_back(ipn); rchains[inext][2]=iploc; mchains[inext][1]=mMn; } else { // chain not recovered wchain[inext]=0.; } } } } // end loop over chains // end 3rd member optimization for (unsigned int ip=0; ip < wchain.size(); ++ip) { if (!wchain[ip]) continue; if (rchains[ip].size() < 3) continue; float m3min = mchains[ip][1]; if (m3min > 0.59) continue; // dalitz plot std::pair dd = dalitz3(hs[rchains[ip][0]], hs[rchains[ip][1]], hs[rchains[ip][2]]); _dalitz->fill(dd.first,dd.second,1.*wchain[ip]); // Delta(Q) spectra float qlmin = mchains[ip][0]; float qxmin = qq(hs[rchains[ip][0]], hs[rchains[ip][2]], match); float xlmin = qq(hs[rchains[ip][1]], hs[rchains[ip][2]], match); _Delta3h->fill(qxmin, 0.5*wchain[ip]); _Delta3h->fill(xlmin, 0.5*wchain[ip]); _Delta3h->fill(qlmin, -1.*wchain[ip]); } } /// Normalise histograms etc., after the run void finalize() { // normalize by the number of charged particles // counter automatic division by bin size double norm = 0.01 / (_h_nch->xMean()*_h_nch->numEntries()); _dalitz->scaleW(norm); _DeltaQ->scaleW(norm); _Delta3h->scaleW(norm); } //@} double qq(const Particle& gp1, const Particle& gp2, bool& match) { match = gp1.charge() * gp2.charge() > 0; FourMomentum p1, p2; p1.setPM(gp1.px(), gp1.py(), gp1.pz(), pim); p2.setPM(gp2.px(), gp2.py(), gp2.pz(), pim); return sqrt(fmax(0., (p1 + p2).mass2() - 4*pim*pim)); } std::pair dalitz3(const Particle& gp1, const Particle& gp2, const Particle& gp3) const { float p1= gp1.pt(); float p2= gp2.pt(); float p3= gp3.pt(); float th1 = gp1.theta(); float th2 = gp2.theta(); float th3 = gp3.theta(); float ph1 = gp1.phi(); float ph2 = gp2.phi(); float ph3 = gp3.phi(); float e1 = sqrt(p1*p1+pim*pim); float e2 = sqrt(p2*p2+pim*pim); float e3 = sqrt(p3*p3+pim*pim); float p1x = p1*cos(ph1)*sin(th1); float p1y = p1*sin(ph1)*sin(th1); float p1z = p1*cos(th1); float p2x = p2*cos(ph2)*sin(th2); float p2y = p2*sin(ph2)*sin(th2); float p2z = p2*cos(th2); float p3x = 
p3*cos(ph3)*sin(th3); float p3y = p3*sin(ph3)*sin(th3); float p3z = p3*cos(th3); float px = p1x+p2x+p3x; float py = p1y+p2y+p3y; float pz = p1z+p2z+p3z; float ap = sqrt(px*px+py*py+pz*pz); float e=e1+e2+e3; float beta = ap/e; float gamma = 1./sqrt(1-beta*beta); float p1l = (p1x*px+p1y*py+p1z*pz)/ap; float p2l = (p2x*px+p2y*py+p2z*pz)/ap; float p3l = (p3x*px+p3y*py+p3z*pz)/ap; float e1_boost = gamma*e1-gamma*beta*p1l; float e2_boost = gamma*e2-gamma*beta*p2l; float e3_boost = gamma*e3-gamma*beta*p3l; float Q = sqrt(e*e-ap*ap)-3*pim; return std::pair(sqrt(3.)*(e1_boost-e2_boost)/Q , 3*(e3_boost-pim)/Q-1.); } private: // Data members like post-cuts event weight counters go here float pim; private: /// @name Histograms Histo1DPtr _DeltaQ; Histo1DPtr _Delta3h; Histo1DPtr _h_nch; Histo2DPtr _dalitz; //@} }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1624693); } diff --git a/analyses/pluginCDF/CDF_1988_S1865951.cc b/analyses/pluginCDF/CDF_1988_S1865951.cc --- a/analyses/pluginCDF/CDF_1988_S1865951.cc +++ b/analyses/pluginCDF/CDF_1988_S1865951.cc @@ -1,85 +1,85 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerCDFRun0Run1.hh" namespace Rivet { /// @brief CDF track \f$ p_\perp \f$ distributions at 630 and 1800 GeV class CDF_1988_S1865951 : public Analysis { public: /// Constructor CDF_1988_S1865951() : Analysis("CDF_1988_S1865951") {} /// @name Analysis methods //@{ /// Book histograms and set up projections void init() { // Set up projections declare(TriggerCDFRun0Run1(), "Trigger"); - const ChargedFinalState cfs(-1.0, 1.0, 0.4*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.4*GeV)); declare(cfs, "CFS"); // Book histo if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) { book(_hist_pt ,1, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) { book(_hist_pt ,2, 1, 1); } book(_sumWTrig, "sumWTrig"); } /// Do the analysis void 
analyze(const Event& event) { // Trigger const bool trigger = apply(event, "Trigger").minBiasDecision(); if (!trigger) vetoEvent; _sumWTrig->fill(); const FinalState& trackfs = apply(event, "CFS"); for (Particle p : trackfs.particles()) { const double pt = p.pT()/GeV; // Effective weight for d3sig/dp3 = weight / ( Delta eta * 2pi * pt ), with Delta(eta) = 2 const double eff_weight = 1.0/(2*2*TWOPI*pt); _hist_pt->fill(pt, eff_weight); } } /// Scale histos void finalize() { scale(_hist_pt, crossSectionPerEvent()/millibarn); } //@} private: /// @name Counters //@{ CounterPtr _sumWTrig; //@} /// @name Histos //@{ Histo1DPtr _hist_pt; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1988_S1865951); } diff --git a/analyses/pluginCDF/CDF_1990_S2089246.cc b/analyses/pluginCDF/CDF_1990_S2089246.cc --- a/analyses/pluginCDF/CDF_1990_S2089246.cc +++ b/analyses/pluginCDF/CDF_1990_S2089246.cc @@ -1,84 +1,84 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerCDFRun0Run1.hh" namespace Rivet { /// @brief CDF pseudorapidity analysis at 630 and 1800 GeV /// @author Andy Buckley class CDF_1990_S2089246 : public Analysis { public: /// Constructor CDF_1990_S2089246() : Analysis("CDF_1990_S2089246") { } /// @name Analysis methods //@{ void init() { // Setup projections declare(TriggerCDFRun0Run1(), "Trigger"); - declare(ChargedFinalState(-3.5, 3.5), "CFS"); + declare(ChargedFinalState((Cuts::etaIn(-3.5, 3.5))), "CFS"); // Book histo if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) { book(_hist_eta ,3, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) { book(_hist_eta ,4, 1, 1); } book(_sumWTrig, "sumWTrig"); } /// Do the analysis void analyze(const Event& event) { // Trigger const bool trigger = apply(event, "Trigger").minBiasDecision(); if (!trigger) vetoEvent; _sumWTrig->fill(); // Loop over final state charged particles to fill eta histos const FinalState& fs = apply(event, "CFS"); for 
(const Particle& p : fs.particles()) { const double eta = p.eta(); _hist_eta->fill(fabs(eta)); } } /// Finalize void finalize() { // Divide through by num events to get d/d(eta) in bins // Factor of 1/2 for |eta| -> eta scale(_hist_eta, 0.5/ *_sumWTrig); } //@} private: /// @name Weight counter //@{ CounterPtr _sumWTrig; //@} /// @name Histogram collections //@{ Histo1DPtr _hist_eta; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1990_S2089246); } diff --git a/analyses/pluginCDF/CDF_1993_S2742446.cc b/analyses/pluginCDF/CDF_1993_S2742446.cc --- a/analyses/pluginCDF/CDF_1993_S2742446.cc +++ b/analyses/pluginCDF/CDF_1993_S2742446.cc @@ -1,108 +1,108 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief CDF class CDF_1993_S2742446 : public Analysis { public: CDF_1993_S2742446() : Analysis("CDF_1993_S2742446") { } public: void init() { // The photon selection has been corrected to pTmin=22 GeV (vs. 
23 in the trigger) - LeadingParticlesFinalState photonfs(FinalState(-0.9, 0.9, 22.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-0.9, 0.9) && Cuts::pT >= 22.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS excluding the leading photon - VetoedFinalState vfs(FinalState(-4.2, 4.2)); + VetoedFinalState vfs(FinalState((Cuts::etaIn(-4.2, 4.2)))); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "VFS"); // Jets declare(FastJets(vfs, FastJets::CDFJETCLU, 0.7), "Jets"); book(_h_costheta ,1, 1, 1); } void analyze(const Event& event) { Particles photons = apply(event, "LeadingPhoton").particles(); if (photons.size()!=1 || photons[0].pT()>45.0*GeV) { vetoEvent; } FourMomentum leadingPhoton = photons[0].momentum(); double eta_P = leadingPhoton.eta(); double phi_P = leadingPhoton.phi(); // photon isolation: less than 2 GeV EM E_T double Etsum=0.0; for (const Particle& p : apply(event, "VFS").particles()) { if (p.charge() != 0 && deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.7) Etsum += p.Et(); } if (Etsum > 2*GeV) vetoEvent; FourMomentum jetsum; Jets jets = apply(event, "Jets").jets(Cuts::pT > 10*GeV, cmpMomByPt); // Require at least one jet with pT>10 GeV if (jets.size()==0) vetoEvent; // Require the leading jet to be in the opposite (phi) hemisphere w.r.t. the photon if (jets[0].phi() - phi_P <= M_PI) vetoEvent; // sum all jets in the opposite hemisphere in phi from the photon for (const Jet& jet : jets) { if (fabs(jet.phi()-phi_P) > M_PI) jetsum+=jet.momentum(); } // c.m. 
cuts, see Table 1 double etaboost = (jetsum.eta()+eta_P)/2.; if (!inRange(etaboost, -1.2, 1.2)) vetoEvent; double etastar = (jetsum.eta()-eta_P)/2.; if (!inRange(etastar, -1.1, 1.1)) vetoEvent; double pstar = photons[0].pT()*cosh(etastar); if (!inRange(pstar, 27.8, 47.0)) vetoEvent; const double costheta = fabs(tanh((eta_P-jetsum.eta())/2.0)); if (!inRange(costheta, 0., 0.8)) vetoEvent; // Fill histo _h_costheta->fill(costheta); } void finalize() { /// @todo Take fixed norm direct from ref histo normalize(_h_costheta, 1.4271); // fixed norm ok } private: Histo1DPtr _h_costheta; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1993_S2742446); } diff --git a/analyses/pluginCDF/CDF_1994_S2952106.cc b/analyses/pluginCDF/CDF_1994_S2952106.cc --- a/analyses/pluginCDF/CDF_1994_S2952106.cc +++ b/analyses/pluginCDF/CDF_1994_S2952106.cc @@ -1,208 +1,208 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// @brief CDF Run I color coherence analysis /// @author Andy Buckley /// @author Lars Sonnenschein class CDF_1994_S2952106 : public Analysis { public: /// Constructor CDF_1994_S2952106() : Analysis("CDF_1994_S2952106") { } /// @name Analysis methods //@{ void init() { - const FinalState fs(-4.2, 4.2); + const FinalState fs((Cuts::etaIn(-4.2, 4.2))); declare(fs, "FS"); declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets"); // Zero passed-cuts event weight counters book(_sumw, "sumW"); // Output histograms book(_histJet1Et ,1,1,1); book(_histJet2Et ,2,1,1); book(_histJet3eta, 3,1,1); book(_histR23 , 4,1,1); book(_histAlpha , 5,1,1); // Temporary histos: these are the ones we actually fill for the plots which require correction book(_tmphistJet3eta, "TMP/Jet3eta", refData(3,1,1)); book(_tmphistR23, "TMP/R23", refData(4,1,1)); book(_tmphistAlpha, "TMP/Alpha", 
refData(5,1,1)); } // Do the analysis void analyze(const Event & event) { const Jets jets = apply(event, "Jets").jets(cmpMomByEt); MSG_DEBUG("Jet multiplicity before any cuts = " << jets.size()); // ETs only from jets: double et_sinphi_sum = 0; double et_cosphi_sum = 0; double et_sum = 0; for (size_t i = 0; i< jets.size(); ++i) { et_sinphi_sum += jets[i].Et() * sin(jets[i].phi()); et_cosphi_sum += jets[i].Et() * cos(jets[i].phi()); et_sum += jets[i].Et(); } // ET requirement if (sqrt(sqr(et_sinphi_sum) + sqr(et_cosphi_sum))/et_sum > 6.0) vetoEvent; // Check jet requirements if (jets.size() < 3) vetoEvent; if (jets[0].pT() < 110*GeV) vetoEvent; if (jets[2].pT() < 10*GeV) vetoEvent; // More jet 1,2,3 checks FourMomentum pj1(jets[0].momentum()), pj2(jets[1].momentum()), pj3(jets[2].momentum()); if (fabs(pj1.eta()) > 0.7 || fabs(pj2.eta()) > 0.7) vetoEvent; MSG_DEBUG("Jet 1 & 2 eta, pT requirements fulfilled"); // Require that jets are back-to-back within 20 degrees in phi if ((PI - deltaPhi(pj1.phi(), pj2.phi())) > (20/180.0)*PI) vetoEvent; MSG_DEBUG("Jet 1 & 2 phi requirement fulfilled"); _sumw->fill(); // Fill histos _histJet1Et->fill(pj1.pT()); _histJet2Et->fill(pj2.pT()); _tmphistJet3eta->fill(pj3.eta()); _tmphistR23->fill(deltaR(pj2, pj3)); // Calc and plot alpha const double dPhi = deltaPhi(pj3.phi(), pj2.phi()); const double dH = sign(pj2.eta()) * (pj3.eta() - pj2.eta()); const double alpha = atan(dH/dPhi); _tmphistAlpha->fill(alpha*180./PI); } /// Apply bin-wise detector correction factors void finalize() { // Normal scalings normalize(_histJet1Et, 12.3); normalize(_histJet2Et, 12.3); // eta3 correction const double eta3_CDF_sim[] = { 0.0013, 0.0037, 0.0047, 0.0071, 0.0093, 0.0117, 0.0151, 0.0149, 0.0197, 0.0257, 0.0344, 0.0409, 0.0481, 0.0454, 0.0394, 0.0409, 0.0387, 0.0387, 0.0322, 0.0313, 0.0290, 0.0309, 0.0412, 0.0417, 0.0412, 0.0397, 0.0417, 0.0414, 0.0376, 0.0316, 0.0270, 0.0186, 0.0186, 0.0132, 0.0127, 0.0106, 0.0071, 0.0040, 0.0020, 0.0013 }; const 
double eta3_CDF_sim_err[] = { 0.0009, 0.0009, 0.0007, 0.0007, 0.0007, 0.0010, 0.0012, 0.0012, 0.0013, 0.0016, 0.0017, 0.0020, 0.0020, 0.0022, 0.0020, 0.0020, 0.0018, 0.0018, 0.0016, 0.0017, 0.0017, 0.0019, 0.0020, 0.0021, 0.0020, 0.0020, 0.0019, 0.0020, 0.0018, 0.0017, 0.0017, 0.0014, 0.0014, 0.0009, 0.0010, 0.0009, 0.0009, 0.0008, 0.0008, 0.0009 }; const double eta3_Ideal_sim[] = { 0.0017, 0.0030, 0.0033, 0.0062, 0.0062, 0.0112, 0.0177, 0.0164, 0.0196, 0.0274, 0.0351, 0.0413, 0.0520, 0.0497, 0.0448, 0.0446, 0.0375, 0.0329, 0.0291, 0.0272, 0.0233, 0.0288, 0.0384, 0.0396, 0.0468, 0.0419, 0.0459, 0.0399, 0.0355, 0.0329, 0.0274, 0.0230, 0.0201, 0.0120, 0.0100, 0.0080, 0.0051, 0.0051, 0.0010, 0.0010 }; for (size_t i = 0; i < 40; ++i) { const double yval = _tmphistJet3eta->bin(i).area() * (eta3_CDF_sim[i]/eta3_Ideal_sim[i]); const double yerr = _tmphistJet3eta->bin(i).areaErr() * (eta3_CDF_sim_err[i]/eta3_Ideal_sim[i]); _histJet3eta->addPoint(_tmphistJet3eta->bin(i).xMid(), yval/dbl(*_sumw), _tmphistJet3eta->bin(i).xWidth()/2.0, yerr/dbl(*_sumw)); } // R23 correction const double R23_CDF_sim[] = { 0.0005, 0.0161, 0.0570, 0.0762, 0.0723, 0.0705, 0.0598, 0.0563, 0.0557, 0.0579, 0.0538, 0.0522, 0.0486, 0.0449, 0.0418, 0.0361, 0.0326, 0.0304, 0.0252, 0.0212, 0.0173, 0.0176, 0.0145, 0.0127, 0.0103, 0.0065, 0.0049, 0.0045, 0.0035, 0.0029, 0.0024, 0.0014, 0.0011, 0.0010, 0.0009 }; const double R23_CDF_sim_err[] = { 0.0013, 0.0009, 0.0022, 0.0029, 0.0026, 0.0024, 0.0022, 0.0025, 0.0023, 0.0024, 0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0019, 0.0019, 0.0016, 0.0017, 0.0014, 0.0010, 0.0014, 0.0012, 0.0013, 0.0010, 0.0011, 0.0010, 0.0010, 0.0010, 0.0011, 0.0011, 0.0009, 0.0008, 0.0008, 0.0009 }; const double R23_Ideal_sim[] = { 0.0005, 0.0176, 0.0585, 0.0862, 0.0843, 0.0756, 0.0673, 0.0635, 0.0586, 0.0619, 0.0565, 0.0515, 0.0466, 0.0472, 0.0349, 0.0349, 0.0266, 0.0254, 0.0204, 0.0179, 0.0142, 0.0134, 0.0101, 0.0090, 0.0080, 0.0034, 0.0030, 0.0033, 0.0027, 0.0021, 0.0012, 0.0006, 
0.0004, 0.0005, 0.0003 }; for (size_t i = 0; i < 35; ++i) { const double yval = _tmphistR23->bin(i).area() * (R23_CDF_sim[i]/R23_Ideal_sim[i]); const double yerr = _tmphistR23->bin(i).areaErr() * (R23_CDF_sim_err[i]/R23_Ideal_sim[i]); _histR23->addPoint(_tmphistR23->bin(i).xMid(), yval/dbl(*_sumw), _tmphistR23->bin(i).xWidth()/2.0, yerr/dbl(*_sumw)); } // alpha correction const double alpha_CDF_sim[] = { 0.0517, 0.0461, 0.0490, 0.0452, 0.0451, 0.0435, 0.0317, 0.0287, 0.0294, 0.0261, 0.0231, 0.0220, 0.0233, 0.0192, 0.0213, 0.0166, 0.0176, 0.0146, 0.0136, 0.0156, 0.0142, 0.0152, 0.0151, 0.0147, 0.0164, 0.0186, 0.0180, 0.0210, 0.0198, 0.0189, 0.0197, 0.0211, 0.0270, 0.0236, 0.0243, 0.0269, 0.0257, 0.0276, 0.0246, 0.0286 }; const double alpha_CDF_sim_err[] = { 0.0024, 0.0025, 0.0024, 0.0024, 0.0024, 0.0022, 0.0019, 0.0018, 0.0019, 0.0016, 0.0017, 0.0017, 0.0019, 0.0013, 0.0017, 0.0014, 0.0016, 0.0013, 0.0012, 0.0009, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0015, 0.0014, 0.0016, 0.0016, 0.0015, 0.0016, 0.0016, 0.0019, 0.0017, 0.0019, 0.0018, 0.0018, 0.0018, 0.0018, 0.0019 }; const double alpha_Ideal_sim[] = { 0.0552, 0.0558, 0.0583, 0.0550, 0.0495, 0.0433, 0.0393, 0.0346, 0.0331, 0.0296, 0.0258, 0.0196, 0.0171, 0.0179, 0.0174, 0.0141, 0.0114, 0.0096, 0.0076, 0.0087, 0.0099, 0.0079, 0.0102, 0.0114, 0.0124, 0.0130, 0.0165, 0.0160, 0.0177, 0.0190, 0.0232, 0.0243, 0.0238, 0.0248, 0.0235, 0.0298, 0.0292, 0.0291, 0.0268, 0.0316 }; for (size_t i = 0; i < 40; ++i) { const double yval = _tmphistAlpha->bin(i).area() * (alpha_CDF_sim[i]/alpha_Ideal_sim[i]); const double yerr = _tmphistAlpha->bin(i).areaErr() * (alpha_CDF_sim_err[i]/alpha_Ideal_sim[i]); _histAlpha->addPoint(_tmphistAlpha->bin(i).xMid(), yval/dbl(*_sumw), _tmphistAlpha->bin(i).xWidth()/2.0, yerr/dbl(*_sumw)); } } //@} private: /// @name Event weight counters //@{ CounterPtr _sumw; //@} /// @name Histograms //@{ /// Straightforward output histos Histo1DPtr _histJet1Et, _histJet2Et; /// Output histos which need to 
have correction factors applied Scatter2DPtr _histR23, _histJet3eta, _histAlpha; /// Temporary histos, to be converted to DPSes Histo1DPtr _tmphistR23, _tmphistJet3eta, _tmphistAlpha; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1994_S2952106); } diff --git a/analyses/pluginCDF/CDF_1996_S3108457.cc b/analyses/pluginCDF/CDF_1996_S3108457.cc --- a/analyses/pluginCDF/CDF_1996_S3108457.cc +++ b/analyses/pluginCDF/CDF_1996_S3108457.cc @@ -1,121 +1,121 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/SmearedJets.hh" namespace Rivet { /// @brief CDF properties of high-mass multi-jet events class CDF_1996_S3108457 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor CDF_1996_S3108457() : Analysis("CDF_1996_S3108457") { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections here - const FinalState fs(-4.2, 4.2); + const FinalState fs((Cuts::etaIn(-4.2, 4.2))); FastJets fj(fs, FastJets::CDFJETCLU, 0.7); declare(fj, "Jets"); // Smear Energy and mass with the 10% uncertainty quoted in the paper SmearedJets sj_E(fj, [](const Jet& jet){ return P4_SMEAR_MASS_GAUSS(P4_SMEAR_E_GAUSS(jet, 0.1*jet.E()), 0.1*jet.mass()); }); declare(sj_E, "SmearedJets_E"); /// Book histograms here, e.g.: for (size_t i=0; i<5; ++i) { book(_h_m[i] ,1+i, 1, 1); book(_h_costheta[i] ,10+i, 1, 1); book(_h_pT[i] ,15+i, 1, 1); } /// @todo Ratios of mass histograms left out: Binning doesn't work out } /// Perform the per-event analysis void analyze(const Event& event) { // Get the smeared jets Jets SJets = apply(event, "SmearedJets_E").jets(Cuts::Et > 20.0*GeV, cmpMomByEt); if (SJets.size() < 2 || SJets.size() > 6) vetoEvent; // Calculate Et, total jet 4 Momentum double sumEt(0), sumE(0); FourMomentum JS(0,0,0,0); for(const Jet& jet : SJets) { sumEt += 
jet.Et()*GeV; sumE += jet.E()*GeV; JS+=jet.momentum(); } if (sumEt < 420*GeV || sumE > 2000*GeV) vetoEvent; double mass = JS.mass(); LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(JS.betaVec()); FourMomentum jet0boosted(cms_boost.transform(SJets[0].momentum())); double costheta0 = fabs(cos(jet0boosted.theta())); if (costheta0 < 2.0/3.0) { _h_m[SJets.size()-2]->fill(mass); } if (mass > 600.0*GeV) _h_costheta[JS.size()-2]->fill(costheta0); if (costheta0 < 2.0/3.0 && mass > 600.0*GeV) { for (const Jet& jet : SJets) { _h_pT[SJets.size()-2]->fill(jet.pT()); } } } /// Normalise histograms etc., after the run void finalize() { /// Normalise, scale and otherwise manipulate histograms here for (size_t i=0; i<5; ++i) { normalize(_h_m[i], 40.0); normalize(_h_costheta[i], 2.0); normalize(_h_pT[i], 20.0); } } //@} private: /// @name Histograms //@{ Histo1DPtr _h_m[5]; Histo1DPtr _h_costheta[5]; Histo1DPtr _h_pT[5]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1996_S3108457); } diff --git a/analyses/pluginCDF/CDF_1996_S3349578.cc b/analyses/pluginCDF/CDF_1996_S3349578.cc --- a/analyses/pluginCDF/CDF_1996_S3349578.cc +++ b/analyses/pluginCDF/CDF_1996_S3349578.cc @@ -1,455 +1,455 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/SmearedJets.hh" namespace Rivet { /// @brief CDF properties of high-mass multi-jet events class CDF_1996_S3349578 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_1996_S3349578() : Analysis("CDF_1996_S3349578") { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections here - const FinalState fs(-4.2, 4.2); + const FinalState fs((Cuts::etaIn(-4.2, 4.2))); FastJets fj(fs, FastJets::CDFJETCLU, 0.7); declare(fj, "Jets"); // Smear Energy and mass with the 10% uncertainty quoted in the paper SmearedJets sj_E(fj, [](const Jet& jet){ return P4_SMEAR_MASS_GAUSS(P4_SMEAR_E_GAUSS(jet, 0.1*jet.E()), 0.1*jet.mass()); }); declare(sj_E, "SmearedJets"); /// Book histograms here, e.g.: book(_h_3_mNJ ,1, 1, 1); book(_h_3_X3 ,2, 1, 1); book(_h_3_X4 ,3, 1, 1); book(_h_3_costheta3 ,8, 1, 1); book(_h_3_psi3 ,9, 1, 1); book(_h_3_f3 ,14, 1, 1); book(_h_3_f4 ,14, 1, 2); book(_h_3_f5 ,14, 1, 3); book(_h_4_mNJ ,1, 1, 2); book(_h_4_X3 ,4, 1, 1); book(_h_4_X4 ,5, 1, 1); book(_h_4_costheta3 ,10, 1, 1); book(_h_4_psi3 ,11, 1, 1); book(_h_4_f3 ,15, 1, 1); book(_h_4_f4 ,15, 1, 2); book(_h_4_f5 ,15, 1, 3); book(_h_4_XA ,17, 1, 1); book(_h_4_psiAB ,19, 1, 1); book(_h_4_fA ,21, 1, 1); book(_h_4_fB ,21, 1, 2); book(_h_5_mNJ ,1, 1, 3); book(_h_5_X3 ,6, 1, 1); book(_h_5_X4 ,7, 1, 1); book(_h_5_costheta3 ,12, 1, 1); book(_h_5_psi3 ,13, 1, 1); book(_h_5_f3 ,16, 1, 1); book(_h_5_f4 ,16, 1, 2); book(_h_5_f5 ,16, 1, 3); book(_h_5_XA ,18, 1, 1); book(_h_5_XC ,18, 1, 2); book(_h_5_psiAB ,20, 1, 1); book(_h_5_psiCD ,20, 1, 2); book(_h_5_fA ,22, 1, 1); book(_h_5_fB ,23, 1, 1); book(_h_5_fC ,24, 1, 1); book(_h_5_fD ,25, 1, 1); } void analyze(const Event& event) { Jets jets; FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0); for (const Jet& jet : apply(event, "SmearedJets").jets(Cuts::Et > 20.0*GeV, cmpMomByEt)) { bool separated = true; for (const Jet& ref : jets) { if (deltaR(jet, ref) < 0.9) { separated = false; break; } } if (!separated) continue; jets.push_back(jet); jetsystem += jet.momentum(); if (jets.size() >= 5) break; } if (jets.size() > 4) { 
_fiveJetAnalysis(jets); jets.resize(4); } if (jets.size() > 3) { _fourJetAnalysis(jets); jets.resize(3); } if (jets.size() > 2) { _threeJetAnalysis(jets); } } void _threeJetAnalysis(const Jets& jets) { MSG_DEBUG("3 jet analysis"); double sumEt = 0.0; FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0); for (const Jet& jet : jets) { sumEt += jet.Et(); jetsystem += jet.momentum(); } if (sumEt < 420.0*GeV) return; const double m3J = _safeMass(jetsystem); if (m3J < 600*GeV) return; const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec()); vector jets3; for (Jet jet : jets) { jets3.push_back(cms_boost.transform(jet.momentum())); } std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending()); FourMomentum p3(jets3[0]), p4(jets3[1]), p5(jets3[2]); FourMomentum pAV = cms_boost.transform(_avg_beam_in_lab(m3J, jetsystem.rapidity())); double costheta3 = pAV.p3().unit().dot(p3.p3().unit()); if (fabs(costheta3) > 0.6) return; double X3 = 2.0*p3.E()/m3J; if (X3 > 0.9) return; const double X4 = 2.0*p4.E()/m3J; const double psi3 = _psi(p3, pAV, p4, p5); const double f3 = _safeMass(p3)/m3J; const double f4 = _safeMass(p4)/m3J; const double f5 = _safeMass(p5)/m3J; _h_3_mNJ->fill(m3J); _h_3_X3->fill(X3); _h_3_X4->fill(X4); _h_3_costheta3->fill(costheta3); _h_3_psi3->fill(psi3); _h_3_f3->fill(f3); _h_3_f4->fill(f4); _h_3_f5->fill(f5); } void _fourJetAnalysis(const Jets& jets) { MSG_DEBUG("4 jet analysis"); double sumEt=0.0; FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0); for (const Jet& jet : jets) { sumEt+=jet.Et(); jetsystem+=jet.momentum(); } if (sumEt < 420.0*GeV) return; const double m4J = _safeMass(jetsystem); if (m4J < 650*GeV) return; const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec()); vector jets4; for (Jet jet : jets) { jets4.push_back(cms_boost.transform(jet.momentum())); } std::sort(jets4.begin(), jets4.end(), FourMomentum::byEDescending()); FourMomentum pA, pB; vector jets3(_reduce(jets4, 
pA, pB)); std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending()); FourMomentum p3(jets3[0]); FourMomentum p4(jets3[1]); FourMomentum p5(jets3[2]); FourMomentum pAV = cms_boost.transform(_avg_beam_in_lab(m4J, jetsystem.rapidity())); double costheta3=pAV.p3().unit().dot(p3.p3().unit()); if (fabs(costheta3)>0.8) { return; } const double X3 = 2.0*p3.E()/m4J; if (X3>0.9) { return; } // fill histograms const double X4 = 2.0*p4.E()/m4J; const double psi3 = _psi(p3, pAV, p4, p5); const double f3 = _safeMass(p3)/m4J; const double f4 = _safeMass(p4)/m4J; const double f5 = _safeMass(p5)/m4J; const double fA = _safeMass(pA)/m4J; const double fB = _safeMass(pB)/m4J; const double XA = pA.E()/(pA.E()+pB.E()); const double psiAB = _psi(pA, pB, pA+pB, pAV); _h_4_mNJ->fill(m4J); _h_4_X3->fill(X3); _h_4_X4->fill(X4); _h_4_costheta3->fill(costheta3); _h_4_psi3->fill(psi3); _h_4_f3->fill(f3); _h_4_f4->fill(f4); _h_4_f5->fill(f5); _h_4_XA->fill(XA); _h_4_psiAB->fill(psiAB); _h_4_fA->fill(fA); _h_4_fB->fill(fB); } void _fiveJetAnalysis(const Jets& jets) { MSG_DEBUG("5 jet analysis"); double sumEt=0.0; FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0); for (const Jet& jet : jets) { sumEt+=jet.Et(); jetsystem+=jet.momentum(); } if (sumEt < 420.0*GeV) return; const double m5J = _safeMass(jetsystem); if (m5J < 750*GeV) return; const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec()); vector jets5; for (Jet jet : jets) { jets5.push_back(cms_boost.transform(jet.momentum())); } std::sort(jets5.begin(), jets5.end(), FourMomentum::byEDescending()); FourMomentum pC, pD; vector jets4(_reduce(jets5, pC, pD)); std::sort(jets4.begin(), jets4.end(), FourMomentum::byEDescending()); FourMomentum pA, pB; vector jets3(_reduce(jets4, pA, pB)); std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending()); FourMomentum p3(jets3[0]); FourMomentum p4(jets3[1]); FourMomentum p5(jets3[2]); // fill histograms FourMomentum pAV = 
cms_boost.transform(_avg_beam_in_lab(m5J, jetsystem.rapidity())); const double costheta3 = pAV.p3().unit().dot(p3.p3().unit()); const double X3 = 2.0*p3.E()/m5J; const double X4 = 2.0*p4.E()/m5J; const double psi3 = _psi(p3, pAV, p4, p5); const double f3 = _safeMass(p3)/m5J; const double f4 = _safeMass(p4)/m5J; const double f5 = _safeMass(p5)/m5J; const double fA = _safeMass(pA)/m5J; const double fB = _safeMass(pB)/m5J; const double XA = pA.E()/(pA.E()+pB.E()); const double psiAB = _psi(pA, pB, pA+pB, pAV); const double fC = _safeMass(pC)/m5J; const double fD = _safeMass(pD)/m5J; const double XC = pC.E()/(pC.E()+pD.E()); const double psiCD = _psi(pC, pD, pC+pD, pAV); _h_5_mNJ->fill(m5J); _h_5_X3->fill(X3); _h_5_X4->fill(X4); _h_5_costheta3->fill(costheta3); _h_5_psi3->fill(psi3); _h_5_f3->fill(f3); _h_5_f4->fill(f4); _h_5_f5->fill(f5); _h_5_XA->fill(XA); _h_5_psiAB->fill(psiAB); _h_5_fA->fill(fA); _h_5_fB->fill(fB); _h_5_XC->fill(XC); _h_5_psiCD->fill(psiCD); _h_5_fC->fill(fC); _h_5_fD->fill(fD); } /// Normalise histograms etc., after the run void finalize() { /// Normalise, scale and otherwise manipulate histograms here normalize(_h_3_mNJ, 1.0); normalize(_h_3_X3, 1.0); normalize(_h_3_X4, 1.0); normalize(_h_3_costheta3, 1.0); normalize(_h_3_psi3, 1.0); normalize(_h_3_f3, 1.0); normalize(_h_3_f4, 1.0); normalize(_h_3_f5, 1.0); normalize(_h_4_mNJ, 1.0); normalize(_h_4_X3, 1.0); normalize(_h_4_X4, 1.0); normalize(_h_4_costheta3, 1.0); normalize(_h_4_psi3, 1.0); normalize(_h_4_f3, 1.0); normalize(_h_4_f4, 1.0); normalize(_h_4_f5, 1.0); normalize(_h_4_XA, 1.0); normalize(_h_4_psiAB, 1.0); normalize(_h_4_fA, 1.0); normalize(_h_4_fB, 1.0); normalize(_h_5_mNJ, 1.0); normalize(_h_5_X3, 1.0); normalize(_h_5_X4, 1.0); normalize(_h_5_costheta3, 1.0); normalize(_h_5_psi3, 1.0); normalize(_h_5_f3, 1.0); normalize(_h_5_f4, 1.0); normalize(_h_5_f5, 1.0); normalize(_h_5_XA, 1.0); normalize(_h_5_XC, 1.0); normalize(_h_5_psiAB, 1.0); normalize(_h_5_psiCD, 1.0); normalize(_h_5_fA, 
1.0); normalize(_h_5_fB, 1.0); normalize(_h_5_fC, 1.0); normalize(_h_5_fD, 1.0); } //@} private: vector _reduce(const vector& jets, FourMomentum& combined1, FourMomentum& combined2) { double minMass2 = 1e9; size_t idx1(jets.size()), idx2(jets.size()); for (size_t i=0; i newjets; for (size_t i=0; i1e-3) { FourMomentum boostvec(cosh(y), 0.0, 0.0, sinh(y)); const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(boostvec.betaVec()).inverse(); beam1 = cms_boost.transform(beam1); beam2 = cms_boost.transform(beam2); } return (beam1.E() > beam2.E()) ? beam1-beam2 : beam2-beam1; } double _psi(const FourMomentum& p1, const FourMomentum& p2, const FourMomentum& p3, const FourMomentum& p4) { Vector3 p1xp2 = p1.p3().cross(p2.p3()); Vector3 p3xp4 = p3.p3().cross(p4.p3()); return mapAngle0ToPi(acos(p1xp2.unit().dot(p3xp4.unit()))); } double _safeMass(const FourMomentum& p) { double mass2=p.mass2(); if (mass2>0.0) return sqrt(mass2); else if (mass2<-1.0e-5) { MSG_WARNING("m2 = " << mass2 /* fixed: was undeclared 'm2'; the local is 'mass2' */ << ". 
Assuming m2=0."); return 0.0; } else return 0.0; } private: /// @name Histograms //@{ Histo1DPtr _h_3_mNJ; Histo1DPtr _h_3_X3; Histo1DPtr _h_3_X4; Histo1DPtr _h_3_costheta3; Histo1DPtr _h_3_psi3; Histo1DPtr _h_3_f3; Histo1DPtr _h_3_f4; Histo1DPtr _h_3_f5; Histo1DPtr _h_4_mNJ; Histo1DPtr _h_4_X3; Histo1DPtr _h_4_X4; Histo1DPtr _h_4_costheta3; Histo1DPtr _h_4_psi3; Histo1DPtr _h_4_f3; Histo1DPtr _h_4_f4; Histo1DPtr _h_4_f5; Histo1DPtr _h_4_XA; Histo1DPtr _h_4_psiAB; Histo1DPtr _h_4_fA; Histo1DPtr _h_4_fB; Histo1DPtr _h_5_mNJ; Histo1DPtr _h_5_X3; Histo1DPtr _h_5_X4; Histo1DPtr _h_5_costheta3; Histo1DPtr _h_5_psi3; Histo1DPtr _h_5_f3; Histo1DPtr _h_5_f4; Histo1DPtr _h_5_f5; Histo1DPtr _h_5_XA; Histo1DPtr _h_5_XC; Histo1DPtr _h_5_psiAB; Histo1DPtr _h_5_psiCD; Histo1DPtr _h_5_fA; Histo1DPtr _h_5_fB; Histo1DPtr _h_5_fC; Histo1DPtr _h_5_fD; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1996_S3349578); } diff --git a/analyses/pluginCDF/CDF_1996_S3418421.cc b/analyses/pluginCDF/CDF_1996_S3418421.cc --- a/analyses/pluginCDF/CDF_1996_S3418421.cc +++ b/analyses/pluginCDF/CDF_1996_S3418421.cc @@ -1,95 +1,95 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief CDF dijet angular distributions class CDF_1996_S3418421 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_1996_S3418421() : Analysis("CDF_1996_S3418421") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FinalState fs(-4.2, 4.2); + FinalState fs((Cuts::etaIn(-4.2, 4.2))); declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets"); {Histo1DPtr tmp; _h_chi.add(241.0, 300.0, book(tmp, 1, 1, 1));} {Histo1DPtr tmp; _h_chi.add(300.0, 400.0, book(tmp, 1, 1, 2));} {Histo1DPtr tmp; _h_chi.add(400.0, 517.0, book(tmp, 1, 1, 3));} {Histo1DPtr tmp; _h_chi.add(517.0, 625.0, book(tmp, 1, 1, 4));} {Histo1DPtr tmp; _h_chi.add(625.0,1800.0, book(tmp, 1, 1, 5));} book(_h_ratio, 2, 1, 1); book(_htmp_chi_above_25 ,"TMP/chiabove25", refData(2, 1, 1)); book(_htmp_chi_below_25 ,"TMP/chibelow25", refData(2, 1, 1)); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; Jets jets = apply(event, "Jets").jetsByPt(50.0*GeV); if (jets.size() < 2) vetoEvent; const FourMomentum jet1 = jets[0].momentum(); const FourMomentum jet2 = jets[1].momentum(); const double eta1 = jet1.eta(); const double eta2 = jet2.eta(); const double chi = exp(fabs(eta1 - eta2)); if (fabs(eta2) > 2.0 || fabs(eta1) > 2.0 || chi > 5.0) vetoEvent; double m = FourMomentum(jet1 + jet2).mass(); _h_chi.fill(m, chi, weight); // Fill ratio numerator or denominator depending on chi value ((chi > 2.5) ? 
_htmp_chi_above_25 : _htmp_chi_below_25)->fill(m/GeV, weight); } /// Normalise histograms etc., after the run void finalize() { for (Histo1DPtr hist : _h_chi.histos()) { normalize(hist); } divide(_htmp_chi_below_25, _htmp_chi_above_25, _h_ratio); } //@} private: /// @name Histograms //@{ BinnedHistogram _h_chi; Histo1DPtr _htmp_chi_above_25, _htmp_chi_below_25; Scatter2DPtr _h_ratio; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1996_S3418421); } diff --git a/analyses/pluginCDF/CDF_1997_S3541940.cc b/analyses/pluginCDF/CDF_1997_S3541940.cc --- a/analyses/pluginCDF/CDF_1997_S3541940.cc +++ b/analyses/pluginCDF/CDF_1997_S3541940.cc @@ -1,249 +1,249 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/SmearedJets.hh" namespace Rivet { /// @brief CDF properties of 6-jet events with large 6-jet mass class CDF_1997_S3541940 : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(CDF_1997_S3541940); void init() { // Find true jets - const FinalState fs(-4.2, 4.2); + const FinalState fs((Cuts::etaIn(-4.2, 4.2))); FastJets fj(fs, FastJets::CDFJETCLU, 0.7); // declare(fj, "Jets"); // Smear jet energy and mass with the 10% uncertainty quoted in the paper SmearedJets sj_E(fj, [](const Jet& jet){ return P4_SMEAR_MASS_GAUSS(P4_SMEAR_E_GAUSS(jet, 0.1*jet.E()), 0.1*jet.mass()); }); declare(sj_E, "Jets"); book(_h_m6J ,1, 1, 1); book(_h_X3ppp ,2, 1, 1); book(_h_X4ppp ,3, 1, 1); book(_h_costheta3ppp ,4, 1, 1); book(_h_psi3ppp ,5, 1, 1); book(_h_f3ppp ,6, 1, 1); book(_h_f4ppp ,6, 1, 2); book(_h_f5ppp ,6, 1, 3); book(_h_XApp ,7, 1, 1); book(_h_XCp ,8, 1, 1); book(_h_XE ,9, 1, 1); book(_h_psiAppBpp ,10, 1, 1); book(_h_psiCpDp ,11, 1, 1); book(_h_psiEF ,12, 1, 1); book(_h_fApp ,13, 1, 1); book(_h_fBpp ,14, 1, 1); book(_h_fCp ,15, 1, 1); book(_h_fDp ,16, 1, 1); book(_h_fE ,17, 1, 1); book(_h_fF ,18, 1, 1); } void analyze(const Event& event) { const Jets alljets = 
apply(event, "Jets").jets(Cuts::Et > 20*GeV && Cuts::abseta < 3, cmpMomByEt); Jets jets; double sumEt = 0.0; FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0); for (const Jet& jet : alljets) { double Et = jet.Et(); bool separated = true; for (const Jet& ref : jets) { if (deltaR(jet, ref) < 0.9) { separated = false; break; } } if (!separated) continue; jets.push_back(jet); sumEt += Et; jetsystem += jet.momentum(); if (jets.size() >= 6) break; } if (jets.size() < 6) vetoEvent; if (sumEt < 320.0*GeV) vetoEvent; double m6J = _safeMass(jetsystem); if (m6J < 520.0*GeV) vetoEvent; if (getLog().isActive(Log::DEBUG)) { stringstream ss; ss << "Jets:\n"; for (const Jet& j : jets) ss << j << "\n"; MSG_DEBUG(ss.str()); } const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec()); vector jets6; for (Jet jet : jets) { jets6.push_back(cms_boost.transform(jet.momentum())); } std::sort(jets6.begin(), jets6.end(), FourMomentum::byEDescending()); FourMomentum pE, pF; vector jets5(_reduce(jets6, pE, pF)); std::sort(jets5.begin(), jets5.end(), FourMomentum::byEDescending()); FourMomentum pCp, pDp; vector jets4(_reduce(jets5, pCp, pDp)); std::sort(jets4.begin(), jets4.end(), FourMomentum::byEDescending()); FourMomentum pApp, pBpp; vector jets3(_reduce(jets4, pApp, pBpp)); std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending()); FourMomentum p3ppp(jets3[0]); FourMomentum p4ppp(jets3[1]); FourMomentum p5ppp(jets3[2]); double X3ppp = 2.0*p3ppp.E()/m6J; if (X3ppp > 0.9) vetoEvent; FourMomentum pAV = cms_boost.transform(_avg_beam_in_lab(m6J, jetsystem.rapidity())); double costheta3ppp = pAV.p3().unit().dot(p3ppp.p3().unit()); if (fabs(costheta3ppp) > 0.9) vetoEvent; // 3-jet-system variables _h_m6J->fill(m6J); _h_X3ppp->fill(X3ppp); _h_X4ppp->fill(2.0*p4ppp.E()/m6J); _h_costheta3ppp->fill(costheta3ppp); double psi3ppp = _psi(p3ppp, pAV, p4ppp, p5ppp); _h_psi3ppp->fill(psi3ppp); _h_f3ppp->fill(_safeMass(p3ppp)/m6J); 
_h_f4ppp->fill(_safeMass(p4ppp)/m6J); _h_f5ppp->fill(_safeMass(p5ppp)/m6J); // 4 -> 3 jet variables _h_fApp->fill(_safeMass(pApp)/m6J); _h_fBpp->fill(_safeMass(pBpp)/m6J); /* fixed: was pApp — copy-paste filled the pApp mass into the fBpp histogram */ _h_XApp->fill(pApp.E()/(pApp.E()+pBpp.E())); double psiAppBpp = _psi(pApp, pBpp, pApp+pBpp, pAV); _h_psiAppBpp->fill(psiAppBpp); // 5 -> 4 jet variables _h_fCp->fill(_safeMass(pCp)/m6J); _h_fDp->fill(_safeMass(pDp)/m6J); _h_XCp->fill(pCp.E()/(pCp.E()+pDp.E())); double psiCpDp = _psi(pCp, pDp, pCp+pDp, pAV); _h_psiCpDp->fill(psiCpDp); // 6 -> 5 jet variables _h_fE->fill(_safeMass(pE)/m6J); _h_fF->fill(_safeMass(pF)/m6J); _h_XE->fill(pE.E()/(pE.E()+pF.E())); double psiEF = _psi(pE, pF, pE+pF, pAV); _h_psiEF->fill(psiEF); } void finalize() { normalize(_h_m6J); normalize(_h_X3ppp); normalize(_h_X4ppp); normalize(_h_costheta3ppp); normalize(_h_psi3ppp); normalize(_h_f3ppp); normalize(_h_f4ppp); normalize(_h_f5ppp); normalize(_h_XApp); normalize(_h_XCp); normalize(_h_XE); normalize(_h_psiAppBpp); normalize(_h_psiCpDp); normalize(_h_psiEF); normalize(_h_fApp); normalize(_h_fBpp); normalize(_h_fCp); normalize(_h_fDp); normalize(_h_fE); normalize(_h_fF); } private: vector _reduce(const vector& jets, FourMomentum& combined1, FourMomentum& combined2) { double minMass2 = 1e9; size_t idx1(jets.size()), idx2(jets.size()); for (size_t i = 0; i < jets.size(); ++i) { for (size_t j = i+1; j < jets.size(); ++j) { double mass2 = FourMomentum(jets[i] + jets[j]).mass2(); if (mass2 < minMass2) { minMass2 = mass2; /* fixed: the running minimum was never updated, so the selected pair was just the last (i,j) */ idx1 = i; idx2 = j; } } } vector newjets; for (size_t i = 0; i < jets.size(); ++i) { if (i != idx1 && i != idx2) newjets.push_back(jets[i]); } newjets.push_back(jets[idx1] + jets[idx2]); combined1 = jets[idx1]; combined2 = jets[idx2]; return newjets; } FourMomentum _avg_beam_in_lab(const double& m, const double& y) { const double mt = m/2.0; FourMomentum beam1(mt, 0, 0, mt); FourMomentum beam2(mt, 0, 0, -mt); if (fabs(y) > 1e-3) { FourMomentum boostvec(cosh(y), 0.0, 0.0, sinh(y)); const LorentzTransform cms_boost = 
LorentzTransform::mkFrameTransformFromBeta(boostvec.betaVec()).inverse(); beam1 = cms_boost.transform(beam1); beam2 = cms_boost.transform(beam2); } return (beam1.E() > beam2.E()) ? beam1 - beam2 : beam2 - beam1; } double _psi(const FourMomentum& p1, const FourMomentum& p2, const FourMomentum& p3, const FourMomentum& p4) { Vector3 p1xp2 = p1.p3().cross(p2.p3()); Vector3 p3xp4 = p3.p3().cross(p4.p3()); return mapAngle0ToPi(acos(p1xp2.unit().dot(p3xp4.unit()))); } double _safeMass(const FourMomentum& p) { double mass2 = p.mass2(); if (mass2 > 0.0) return sqrt(mass2); if (mass2 < -1e-5) MSG_WARNING("m2 = " << mass2 /* fixed: was undeclared 'm2'; the local is 'mass2' */ << ". Assuming m2=0."); return 0.0; } private: Histo1DPtr _h_m6J; Histo1DPtr _h_X3ppp, _h_X4ppp; Histo1DPtr _h_costheta3ppp; Histo1DPtr _h_psi3ppp; Histo1DPtr _h_f3ppp, _h_f4ppp, _h_f5ppp; Histo1DPtr _h_XApp, _h_XCp, _h_XE; Histo1DPtr _h_psiAppBpp, _h_psiCpDp, _h_psiEF; Histo1DPtr _h_fApp, _h_fBpp, _h_fCp, _h_fDp, _h_fE, _h_fF; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1997_S3541940); } diff --git a/analyses/pluginCDF/CDF_1998_S3618439.cc b/analyses/pluginCDF/CDF_1998_S3618439.cc --- a/analyses/pluginCDF/CDF_1998_S3618439.cc +++ b/analyses/pluginCDF/CDF_1998_S3618439.cc @@ -1,76 +1,76 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief CDF diff cross-section in events with large missing energy class CDF_1998_S3618439 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_1998_S3618439() : Analysis("CDF_1998_S3618439") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FinalState fs(-4.2, 4.2); + FinalState fs((Cuts::etaIn(-4.2, 4.2))); declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets"); book(_h_sumET_20 ,1, 1, 1); book(_h_sumET_100 ,1, 1, 2); } /// Perform the per-event analysis void analyze(const Event& event) { Jets jets = apply(event, "Jets").jets(Cuts::Et > 20*GeV, cmpMomByEt); double sumET_20(0.0), sumET_100(0.0); for (const Jet& jet : jets) { double ET = jet.Et()/GeV; sumET_20 += ET; if (ET > 100.0) sumET_100 += ET; } if (sumET_20 > 320.0) _h_sumET_20->fill(sumET_20); if (sumET_100 > 320.0) _h_sumET_100->fill(sumET_100); } /// Normalise histograms etc., after the run void finalize() { scale(_h_sumET_20, crossSection()/picobarn/sumOfWeights()); scale(_h_sumET_100, crossSection()/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_sumET_20, _h_sumET_100; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_1998_S3618439); } diff --git a/analyses/pluginCDF/CDF_2000_S4266730.cc b/analyses/pluginCDF/CDF_2000_S4266730.cc --- a/analyses/pluginCDF/CDF_2000_S4266730.cc +++ b/analyses/pluginCDF/CDF_2000_S4266730.cc @@ -1,74 +1,74 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief CDF dijet cross-section, differential in dijet mass class CDF_2000_S4266730 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_2000_S4266730() : Analysis("CDF_2000_S4266730") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FinalState fs(-4.2, 4.2); + FinalState fs((Cuts::etaIn(-4.2, 4.2))); declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets"); book(_h_mjj ,1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { Jets jets = apply(event, "Jets").jets(cmpMomByEt); if (jets.size() < 2) vetoEvent; FourMomentum jet1 = jets[0].momentum(); FourMomentum jet2 = jets[1].momentum(); double eta1 = jet1.eta(); double eta2 = jet2.eta(); if (fabs(eta1) > 2.0 || fabs(eta2) > 2.0) vetoEvent; if (fabs(tanh((eta1-eta2)/2)) > 2.0/3.0) vetoEvent; double mjj = FourMomentum(jet1+jet2).mass()/GeV; if (mjj < 180) vetoEvent; _h_mjj->fill(mjj); } /// Normalise histograms etc., after the run void finalize() { scale(_h_mjj, crossSection()/picobarn/sumOfWeights()); } //@} private: /// Histogram Histo1DPtr _h_mjj; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2000_S4266730); } diff --git a/analyses/pluginCDF/CDF_2001_S4517016.cc b/analyses/pluginCDF/CDF_2001_S4517016.cc --- a/analyses/pluginCDF/CDF_2001_S4517016.cc +++ b/analyses/pluginCDF/CDF_2001_S4517016.cc @@ -1,83 +1,83 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief CDF two-jet triply-differential cross-section class CDF_2001_S4517016 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_2001_S4517016() : Analysis("CDF_2001_S4517016") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FinalState fs(-4.2, 4.2); + FinalState fs((Cuts::etaIn(-4.2, 4.2))); declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets"); {Histo1DPtr tmp; _h_ET.add(0.1, 0.7, book(tmp, 1, 1, 1));} {Histo1DPtr tmp; _h_ET.add(0.7, 1.4, book(tmp, 2, 1, 1));} {Histo1DPtr tmp; _h_ET.add(1.4, 2.1, book(tmp, 3, 1, 1));} {Histo1DPtr tmp; _h_ET.add(2.1, 3.0, book(tmp, 4, 1, 1));} } /// Perform the per-event analysis void analyze(const Event& event) { Jets jets = apply(event, "Jets").jets(Cuts::Et > 10*GeV, cmpMomByEt); if (jets.size() < 2) vetoEvent; FourMomentum jet1 = jets[0].momentum(); FourMomentum jet2 = jets[1].momentum(); double eta1 = jet1.abseta(); double eta2 = jet2.abseta(); double ET1 = jet1.Et(); double ET2 = jet2.Et(); if (!inRange(eta1, 0.1, 0.7) || ET1 < 40.0*GeV) vetoEvent; if (!inRange(eta2, 0.1, 3.0)) vetoEvent; _h_ET.fill(eta2, ET1); if (eta2<0.7 && ET2>40.0*GeV) _h_ET.fill(eta1, ET2); } /// Normalise histograms etc., after the run void finalize() { const double deta1 = 1.2; _h_ET.scale(crossSection()/nanobarn/sumOfWeights()/deta1 / 2.0, this); } //@} private: /// @name Histograms //@{ BinnedHistogram _h_ET; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2001_S4517016); } diff --git a/analyses/pluginCDF/CDF_2001_S4563131.cc b/analyses/pluginCDF/CDF_2001_S4563131.cc --- a/analyses/pluginCDF/CDF_2001_S4563131.cc +++ b/analyses/pluginCDF/CDF_2001_S4563131.cc @@ -1,70 +1,70 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief CDF Run I inclusive jet cross-section class CDF_2001_S4563131 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_2001_S4563131() : Analysis("CDF_2001_S4563131") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FinalState fs(-4.2, 4.2); + FinalState fs((Cuts::etaIn(-4.2, 4.2))); declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets"); book(_h_ET ,1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { Jets jets = apply(event, "Jets").jets(Cuts::Et > 40*GeV && Cuts::abseta >= 0.1 && Cuts::abseta <= 0.7, cmpMomByEt); for (const Jet& jet : jets) { //if (inRange(jet.abseta(), 0.1, 0.7)) _h_ET->fill(jet.Et()); } } /// Normalise histograms etc., after the run void finalize() { const double deta = 1.2; scale(_h_ET, crossSection()/sumOfWeights()/deta/nanobarn); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_ET; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2001_S4563131); } diff --git a/analyses/pluginCDF/CDF_2001_S4751469.cc b/analyses/pluginCDF/CDF_2001_S4751469.cc --- a/analyses/pluginCDF/CDF_2001_S4751469.cc +++ b/analyses/pluginCDF/CDF_2001_S4751469.cc @@ -1,264 +1,264 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/TriggerCDFRun0Run1.hh" #include "Rivet/Projections/ConstLossyFinalState.hh" //#include "Rivet/Projections/SmearedParticles.hh" namespace Rivet { /// @brief Field-Stuart CDF Run I track-jet underlying event analysis /// /// @author Andy Buckley /// /// The "original" underlying event analysis, using a non-standard track-jet algorithm. /// /// @par Run conditions /// /// @arg \f$ \sqrt{s} = \f$ 1800 GeV /// @arg Run with generic QCD events. 
/// @arg Several \f$ p_\perp^\text{min} \f$ cutoffs are probably required to fill the profile histograms: /// @arg \f$ p_\perp^\text{min} = \f$ 0 (min bias), 10, 20 GeV class CDF_2001_S4751469 : public Analysis { public: /// Constructor: cuts on final state are \f$ -1 < \eta < 1 \f$ /// and \f$ p_T > 0.5 \f$ GeV. CDF_2001_S4751469() : Analysis("CDF_2001_S4751469") { } /// @name Analysis methods //@{ // Book histograms void init() { declare(TriggerCDFRun0Run1(), "Trigger"); // Randomly discard 8% of charged particles as a kind of hacky detector correction. - const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.5*GeV)); /// @todo Replace ConstLossyFinalState with SmearedParticles const ConstLossyFinalState lossyfs(cfs, 0.08); //const SmearedParticles lossyfs(cfs, [](const Particle&){ return 0.92; }); declare(lossyfs, "FS"); declare(FastJets(lossyfs, FastJets::TRACKJET, 0.7), "TrackJet"); book(_numvsDeltaPhi2 ,1, 1, 1); book(_numvsDeltaPhi5 ,1, 1, 2); book(_numvsDeltaPhi30 ,1, 1, 3); book(_pTvsDeltaPhi2 ,2, 1, 1); book(_pTvsDeltaPhi5 ,2, 1, 2); book(_pTvsDeltaPhi30 ,2, 1, 3); book(_numTowardMB ,3, 1, 1); book(_numTransMB ,3, 1, 2); book(_numAwayMB ,3, 1, 3); book(_numTowardJ20 ,4, 1, 1); book(_numTransJ20 ,4, 1, 2); book(_numAwayJ20 ,4, 1, 3); book(_ptsumTowardMB ,5, 1, 1); book(_ptsumTransMB ,5, 1, 2); book(_ptsumAwayMB ,5, 1, 3); book(_ptsumTowardJ20 ,6, 1, 1); book(_ptsumTransJ20 ,6, 1, 2); book(_ptsumAwayJ20 ,6, 1, 3); book(_ptTrans2 ,7, 1, 1); book(_ptTrans5 ,7, 1, 2); book(_ptTrans30 ,7, 1, 3); book(_totalNumTrans2, "totalNumTrans2"); book(_totalNumTrans5, "totalNumTrans5"); book(_totalNumTrans30, "totalNumTrans30"); book(_sumWeightsPtLead2, "sumWeightsPtLead2"); book(_sumWeightsPtLead5, "sumWeightsPtLead5"); book(_sumWeightsPtLead30, "sumWeightsPtLead30"); } /// Do the analysis void analyze(const Event& event) { // Trigger const bool trigger = apply(event, "Trigger").minBiasDecision(); if 
(!trigger) vetoEvent; // Get jets, sorted by pT const Jets jets = apply(event, "TrackJet").jetsByPt(); if (jets.empty()) vetoEvent; const Jet jet1 = jets.front(); const double ptLead = jet1.pT(); // Cut on highest pT jet: combined 0.5 GeV < pT(lead) < 50 GeV if (ptLead/GeV < 0.5) vetoEvent; if (ptLead/GeV > 50.0) vetoEvent; // Count sum of all event weights in three pT_lead regions if (ptLead/GeV > 2.0) _sumWeightsPtLead2->fill(); if (ptLead/GeV > 5.0) _sumWeightsPtLead5->fill(); if (ptLead/GeV > 30.0) _sumWeightsPtLead30->fill(); // Run over tracks double ptSumToward(0.0), ptSumAway(0.0), ptSumTrans(0.0); size_t numToward(0), numTrans(0), numAway(0); // Temporary histos that bin N and pT in dphi Profile1D htmp_num_dphi_2(refData(1, 1, 1)), htmp_num_dphi_5(refData(1, 1, 2)), htmp_num_dphi_30(refData(1, 1, 3)); Profile1D htmp_pt_dphi_2(refData(2, 1, 1)), htmp_pt_dphi_5(refData(2, 1, 2)), htmp_pt_dphi_30(refData(2, 1, 3)); // Final state charged particles /// @todo Non-trackjet track efficiencies are corrected? 
const Particles& tracks = apply(event, "FS").particles(); for (const Particle& p : tracks) { const double dPhi = deltaPhi(p, jet1); const double pT = p.pT(); if (dPhi < PI/3.0) { ptSumToward += pT; ++numToward; } else if (dPhi < 2*PI/3.0) { ptSumTrans += pT; ++numTrans; // Fill transverse pT distributions if (ptLead/GeV > 2.0) { _ptTrans2->fill(pT/GeV); _totalNumTrans2->fill(); } if (ptLead/GeV > 5.0) { _ptTrans5->fill(pT/GeV); _totalNumTrans5->fill(); } if (ptLead/GeV > 30.0) { _ptTrans30->fill(pT/GeV); _totalNumTrans30->fill(); } } else { ptSumAway += pT; ++numAway; } // Fill tmp histos to bin event's track Nch & pT in dphi const double dPhideg = 180*dPhi/M_PI; if (ptLead/GeV > 2.0) { htmp_num_dphi_2.fill(dPhideg, 1); htmp_pt_dphi_2.fill (dPhideg, pT/GeV); } if (ptLead/GeV > 5.0) { htmp_num_dphi_5.fill(dPhideg, 1); htmp_pt_dphi_5.fill (dPhideg, pT/GeV); } if (ptLead/GeV > 30.0) { htmp_num_dphi_30.fill(dPhideg, 1); htmp_pt_dphi_30.fill (dPhideg, pT/GeV); } } // Update the "proper" dphi profile histograms for (int i = 0; i < 50; i++) { ///< @todo Should really explicitly iterate over nbins for each temp histo if (ptLead/GeV > 2.0) { const double x2 = htmp_pt_dphi_2.bin(i).xMid(); const double num2 = (htmp_num_dphi_2.bin(i).numEntries() > 0) ? htmp_num_dphi_2.bin(i).mean() : 0.0; const double pt2 = (htmp_num_dphi_2.bin(i).numEntries() > 0) ? htmp_pt_dphi_2.bin(i).mean() : 0.0; _numvsDeltaPhi2->fill(x2, num2); _pTvsDeltaPhi2->fill(x2, pt2); } if (ptLead/GeV > 5.0) { const double x5 = htmp_pt_dphi_5.bin(i).xMid(); const double num5 = (htmp_num_dphi_5.bin(i).numEntries() > 0) ? htmp_num_dphi_5.bin(i).mean() : 0.0; const double pt5 = (htmp_num_dphi_5.bin(i).numEntries() > 0) ? htmp_pt_dphi_5.bin(i).mean() : 0.0; _numvsDeltaPhi5->fill(x5, num5); _pTvsDeltaPhi5->fill(x5, pt5); } if (ptLead/GeV > 30.0) { const double x30 = htmp_pt_dphi_30.bin(i).xMid(); const double num30 = (htmp_num_dphi_30.bin(i).numEntries() > 0) ? 
htmp_num_dphi_30.bin(i).mean() : 0.0; const double pt30 = (htmp_num_dphi_30.bin(i).numEntries() > 0) ? htmp_pt_dphi_30.bin(i).mean() : 0.0; _numvsDeltaPhi30->fill(x30, num30); _pTvsDeltaPhi30->fill(x30, pt30); } } // Log some event details about pT MSG_DEBUG("pT [lead; twd, away, trans] = [" << ptLead << "; " << ptSumToward << ", " << ptSumAway << ", " << ptSumTrans << "]"); // Update the pT profile histograms _ptsumTowardMB->fill(ptLead/GeV, ptSumToward/GeV); _ptsumTowardJ20->fill(ptLead/GeV, ptSumToward/GeV); _ptsumTransMB->fill(ptLead/GeV, ptSumTrans/GeV); _ptsumTransJ20->fill(ptLead/GeV, ptSumTrans/GeV); _ptsumAwayMB->fill(ptLead/GeV, ptSumAway/GeV); _ptsumAwayJ20->fill(ptLead/GeV, ptSumAway/GeV); // Log some event details about Nch MSG_DEBUG("N [twd, away, trans] = [" << ptLead << "; " << numToward << ", " << numTrans << ", " << numAway << "]"); // Update the N_track profile histograms _numTowardMB->fill(ptLead/GeV, numToward); _numTowardJ20->fill(ptLead/GeV, numToward); _numTransMB->fill(ptLead/GeV, numTrans); _numTransJ20->fill(ptLead/GeV, numTrans); _numAwayMB->fill(ptLead/GeV, numAway); _numAwayJ20->fill(ptLead/GeV, numAway); } /// Normalize histos void finalize() { normalize(_ptTrans2, *_totalNumTrans2 / *_sumWeightsPtLead2); normalize(_ptTrans5, *_totalNumTrans5 / *_sumWeightsPtLead5); normalize(_ptTrans30, *_totalNumTrans30 / *_sumWeightsPtLead30); } //@} private: /// Sum total number of charged particles in the trans region, in 3 \f$ p_\perp^\text{lead} \f$ bins. CounterPtr _totalNumTrans2, _totalNumTrans5, _totalNumTrans30; /// Sum the total number of events in 3 \f$ p_\perp^\text{lead} \f$ bins. CounterPtr _sumWeightsPtLead2,_sumWeightsPtLead5, _sumWeightsPtLead30; /// @name Histogram collections //@{ // The sumpt vs. dphi and Nch vs. 
dphi histos Profile1DPtr _numvsDeltaPhi2, _numvsDeltaPhi5, _numvsDeltaPhi30; Profile1DPtr _pTvsDeltaPhi2, _pTvsDeltaPhi5, _pTvsDeltaPhi30; /// Profile histograms, binned in the \f$ p_T \f$ of the leading jet, for /// the \f$ p_T \f$ sum in the toward, transverse and away regions. Profile1DPtr _ptsumTowardMB, _ptsumTransMB, _ptsumAwayMB; Profile1DPtr _ptsumTowardJ20, _ptsumTransJ20, _ptsumAwayJ20; /// Profile histograms, binned in the \f$ p_T \f$ of the leading jet, for /// the number of charged particles per jet in the toward, transverse and /// away regions. Profile1DPtr _numTowardMB, _numTransMB, _numAwayMB; Profile1DPtr _numTowardJ20, _numTransJ20, _numAwayJ20; /// Histogram of \f$ p_T \f$ distribution for 3 different \f$ p_{T1} \f$ IR cutoffs. Histo1DPtr _ptTrans2, _ptTrans5, _ptTrans30; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2001_S4751469); } diff --git a/analyses/pluginCDF/CDF_2002_S4796047.cc b/analyses/pluginCDF/CDF_2002_S4796047.cc --- a/analyses/pluginCDF/CDF_2002_S4796047.cc +++ b/analyses/pluginCDF/CDF_2002_S4796047.cc @@ -1,122 +1,122 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerCDFRun0Run1.hh" namespace Rivet { /// @brief CDF Run I charged multiplicity measurement /// @author Hendrik Hoeth /// /// This analysis measures the charged multiplicity distribution /// in minimum bias events at two different center-of-mass energies: /// \f$ \sqrt{s} = \f$ 630 and 1800 GeV. /// /// Particles with c*tau > 10 mm are considered stable, i.e. they /// are reconstructed and their decay products removed. Selection /// cuts are |eta|<1 and pT>0.4 GeV. /// /// @par Run conditions /// /// @arg Two different beam energies: \f$ \sqrt{s} = \$f 630 & 1800 GeV /// @arg Run with generic QCD events. 
/// @arg Set particles with c*tau > 10 mm stable class CDF_2002_S4796047 : public Analysis { public: /// Constructor CDF_2002_S4796047() : Analysis("CDF_2002_S4796047") { } /// @name Analysis methods //@{ /// Book projections and histograms void init() { declare(TriggerCDFRun0Run1(), "Trigger"); - const ChargedFinalState cfs(-1.0, 1.0, 0.4*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.4*GeV)); declare(cfs, "FS"); // Histos if (fuzzyEquals(sqrtS()/GeV, 630)) { book(_hist_multiplicity ,1, 1, 1); book(_hist_pt_vs_multiplicity ,3, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 1800)) { book(_hist_multiplicity ,2, 1, 1); book(_hist_pt_vs_multiplicity ,4, 1, 1); } book(_sumWTrig, "sumWTrig"); } /// Do the analysis void analyze(const Event& evt) { // Trigger const bool trigger = apply(evt, "Trigger").minBiasDecision(); if (!trigger) vetoEvent; _sumWTrig->fill(); // Get beam energy and tracks const ChargedFinalState& fs = apply(evt, "FS"); const size_t numParticles = fs.particles().size(); // Fill histos of charged multiplicity distributions _hist_multiplicity->fill(numParticles); // Fill histos for vs. charged multiplicity for (const Particle& p : fs.particles()) { const double pT = p.pT(); _hist_pt_vs_multiplicity->fill(numParticles, pT/GeV); } } void finalize() { // This normalisation is NOT a cross-section. // In the paper the x-axes (!) of the histograms are // scaled such that they can put both energies in the // same plot. Of course this affects the area, too. // Since we want to plot the actual multiplicity, we // scale the x-axes back and have to adjust the areas // accordingly. The scale factors are given in the // legend of the plot in the paper. Have a look at // figure 1 and everything immediately becomes clear. // DON'T TRY TO REPAIR THIS, YOU WILL BREAK IT. 
if (fuzzyEquals(sqrtS()/GeV, 630)) { normalize(_hist_multiplicity, 3.21167); // fixed norm OK } else if (fuzzyEquals(sqrtS()/GeV, 1800)) { normalize(_hist_multiplicity, 4.19121); // fixed norm OK } } //@} private: /// @name Counter //@{ CounterPtr _sumWTrig; //@} /// @name Histos //@{ Histo1DPtr _hist_multiplicity; Profile1DPtr _hist_pt_vs_multiplicity; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2002_S4796047); } diff --git a/analyses/pluginCDF/CDF_2004_S5839831.cc b/analyses/pluginCDF/CDF_2004_S5839831.cc --- a/analyses/pluginCDF/CDF_2004_S5839831.cc +++ b/analyses/pluginCDF/CDF_2004_S5839831.cc @@ -1,384 +1,384 @@ // -*- C++ -*- // "Acosta" underlying event analysis at CDF, inc. "Swiss Cheese" #include "Rivet/Analysis.hh" #include "Rivet/Jet.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/TriggerCDFRun0Run1.hh" namespace Rivet { /// @brief CDF calo jet underlying event analysis at 630 and 1800 GeV /// /// CDF measurement of underlying event using calorimeter jet scales and /// alignment, particle flow activity in transverse cones, and the Swiss /// Cheese analysis method, where cones are excluded around the 2 and 3 /// hardest jets. /// /// @author Andy Buckley class CDF_2004_S5839831 : public Analysis { public: /// Constructor: cuts on charged final state are \f$ -1 < \eta < 1 \f$ /// and \f$ p_T > 0.4 \f$ GeV. 
CDF_2004_S5839831() : Analysis("CDF_2004_S5839831") { } private: /// @cond CONEUE_DETAIL struct ConesInfo { ConesInfo() : numMax(0), numMin(0), ptMax(0), ptMin(0), ptDiff(0) {} unsigned int numMax, numMin; double ptMax, ptMin, ptDiff; }; /// @endcond ConesInfo _calcTransCones(const double etaLead, const double phiLead, const Particles& tracks) { const double phiTransPlus = mapAngle0To2Pi(phiLead + PI/2.0); const double phiTransMinus = mapAngle0To2Pi(phiLead - PI/2.0); MSG_DEBUG("phi_lead = " << phiLead << " -> trans = (" << phiTransPlus << ", " << phiTransMinus << ")"); unsigned int numPlus(0), numMinus(0); double ptPlus(0), ptMinus(0); // Run over all charged tracks for (const Particle& t : tracks) { FourMomentum trackMom = t.momentum(); const double pt = trackMom.pT(); // Find if track mom is in either transverse cone if (deltaR(trackMom, etaLead, phiTransPlus) < 0.7) { ptPlus += pt; numPlus += 1; } else if (deltaR(trackMom, etaLead, phiTransMinus) < 0.7) { ptMinus += pt; numMinus += 1; } } ConesInfo rtn; // Assign N_{min,max} from N_{plus,minus} rtn.numMax = (ptPlus >= ptMinus) ? numPlus : numMinus; rtn.numMin = (ptPlus >= ptMinus) ? numMinus : numPlus; // Assign pT_{min,max} from pT_{plus,minus} rtn.ptMax = (ptPlus >= ptMinus) ? ptPlus : ptMinus; rtn.ptMin = (ptPlus >= ptMinus) ? 
ptMinus : ptPlus; rtn.ptDiff = fabs(rtn.ptMax - rtn.ptMin); MSG_DEBUG("Min cone has " << rtn.numMin << " tracks -> " << "pT_min = " << rtn.ptMin/GeV << " GeV"); MSG_DEBUG("Max cone has " << rtn.numMax << " tracks -> " << "pT_max = " << rtn.ptMax/GeV << " GeV"); return rtn; } ConesInfo _calcTransCones(const FourMomentum& leadvec, const Particles& tracks) { const double etaLead = leadvec.eta(); const double phiLead = leadvec.phi(); return _calcTransCones(etaLead, phiLead, tracks); } /// @name Analysis methods //@{ void init() { // Set up projections declare(TriggerCDFRun0Run1(), "Trigger"); declare(Beam(), "Beam"); - const FinalState calofs(-1.2, 1.2); + const FinalState calofs((Cuts::etaIn(-1.2, 1.2))); declare(calofs, "CaloFS"); declare(FastJets(calofs, FastJets::CDFJETCLU, 0.7), "Jets"); - const ChargedFinalState trackfs(-1.2, 1.2, 0.4*GeV); + const ChargedFinalState trackfs((Cuts::etaIn(-1.2, 1.2) && Cuts::pT >= 0.4*GeV)); declare(trackfs, "TrackFS"); // Restrict tracks to |eta| < 0.7 for the min bias part. - const ChargedFinalState mbfs(-0.7, 0.7, 0.4*GeV); + const ChargedFinalState mbfs((Cuts::etaIn(-0.7, 0.7) && Cuts::pT >= 0.4*GeV)); declare(mbfs, "MBFS"); // Restrict tracks to |eta| < 1 for the Swiss-Cheese part. 
- const ChargedFinalState cheesefs(-1.0, 1.0, 0.4*GeV); + const ChargedFinalState cheesefs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.4*GeV)); declare(cheesefs, "CheeseFS"); declare(FastJets(cheesefs, FastJets::CDFJETCLU, 0.7), "CheeseJets"); // Book histograms if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) { book(_pt90MaxAvg1800 ,1, 1, 1); book(_pt90MinAvg1800 ,1, 1, 2); book(_pt90Max1800 ,2, 1, 1); book(_pt90Min1800 ,2, 1, 2); book(_pt90Diff1800 ,2, 1, 3); book(_num90Max1800 ,4, 1, 1); book(_num90Min1800 ,4, 1, 2); book(_pTSum1800_2Jet ,7, 1, 1); book(_pTSum1800_3Jet ,7, 1, 2); book(_pt90Dbn1800Et40 ,3, 1, 1); book(_pt90Dbn1800Et80 ,3, 1, 2); book(_pt90Dbn1800Et120 ,3, 1, 3); book(_pt90Dbn1800Et160 ,3, 1, 4); book(_pt90Dbn1800Et200 ,3, 1, 5); book(_numTracksDbn1800MB ,5, 1, 1); book(_ptDbn1800MB ,6, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) { book(_pt90Max630 ,8, 1, 1); book(_pt90Min630 ,8, 1, 2); book(_pt90Diff630 ,8, 1, 3); book(_pTSum630_2Jet ,9, 1, 1); book(_pTSum630_3Jet ,9, 1, 2); book(_numTracksDbn630MB ,10, 1, 1); book(_ptDbn630MB ,11, 1, 1); } } /// Do the analysis void analyze(const Event& event) { // Trigger const bool trigger = apply(event, "Trigger").minBiasDecision(); if (!trigger) vetoEvent; const double sqrtS = apply(event, "Beam").sqrtS(); { MSG_DEBUG("Running max/min analysis"); Jets jets = apply(event, "Jets").jets(cmpMomByE); if (!jets.empty()) { // Leading jet must be in central |eta| < 0.5 region const Jet leadingjet = jets.front(); const double etaLead = leadingjet.eta(); // Get Et of the leading jet: used to bin histograms const double ETlead = leadingjet.Et(); MSG_DEBUG("Leading Et = " << ETlead/GeV << " GeV"); if (fabs(etaLead) > 0.5 && ETlead < 15*GeV) { MSG_DEBUG("Leading jet eta = " << etaLead << " not in |eta| < 0.5 & pT > 15 GeV"); } else { // Multiplicity & pT distributions for sqrt(s) = 630 GeV, 1800 GeV const Particles tracks = apply(event, "TrackFS").particles(); const ConesInfo cones = 
_calcTransCones(leadingjet.momentum(), tracks); if (fuzzyEquals(sqrtS/GeV, 630)) { _pt90Max630->fill(ETlead/GeV, cones.ptMax/GeV); _pt90Min630->fill(ETlead/GeV, cones.ptMin/GeV); _pt90Diff630->fill(ETlead/GeV, cones.ptDiff/GeV); } else if (fuzzyEquals(sqrtS/GeV, 1800)) { _num90Max1800->fill(ETlead/GeV, cones.numMax); _num90Min1800->fill(ETlead/GeV, cones.numMin); _pt90Max1800->fill(ETlead/GeV, cones.ptMax/GeV); _pt90Min1800->fill(ETlead/GeV, cones.ptMin/GeV); _pt90Diff1800->fill(ETlead/GeV, cones.ptDiff/GeV); _pt90MaxAvg1800->fill(ETlead/GeV, cones.ptMax/GeV); // /numMax _pt90MinAvg1800->fill(ETlead/GeV, cones.ptMin/GeV); // /numMin // const double ptTransTotal = cones.ptMax + cones.ptMin; if (inRange(ETlead/GeV, 40., 80.)) { _pt90Dbn1800Et40->fill(ptTransTotal/GeV); } else if (inRange(ETlead/GeV, 80., 120.)) { _pt90Dbn1800Et80->fill(ptTransTotal/GeV); } else if (inRange(ETlead/GeV, 120., 160.)) { _pt90Dbn1800Et120->fill(ptTransTotal/GeV); } else if (inRange(ETlead/GeV, 160., 200.)) { _pt90Dbn1800Et160->fill(ptTransTotal/GeV); } else if (inRange(ETlead/GeV, 200., 270.)) { _pt90Dbn1800Et200->fill(ptTransTotal/GeV); } } } } } // Fill min bias total track multiplicity histos { MSG_DEBUG("Running min bias multiplicity analysis"); const Particles mbtracks = apply(event, "MBFS").particles(); if (fuzzyEquals(sqrtS/GeV, 1800)) { _numTracksDbn1800MB->fill(mbtracks.size()); } else if (fuzzyEquals(sqrtS/GeV, 630)) { _numTracksDbn630MB->fill(mbtracks.size()); } // Run over all charged tracks for (const Particle& t : mbtracks) { FourMomentum trackMom = t.momentum(); const double pt = trackMom.pT(); // Plot total pT distribution for min bias if (fuzzyEquals(sqrtS/GeV, 1800)) { _ptDbn1800MB->fill(pt/GeV); } else if (fuzzyEquals(sqrtS/GeV, 630)) { _ptDbn630MB->fill(pt/GeV); } } } // Construct "Swiss Cheese" pT distributions, with pT contributions from // tracks within R = 0.7 of the 1st, 2nd (and 3rd) jets being ignored. 
A // different set of charged tracks, with |eta| < 1.0, is used here, and all // the removed jets must have Et > 5 GeV. { MSG_DEBUG("Running Swiss Cheese analysis"); const Particles cheesetracks = apply(event, "CheeseFS").particles(); Jets cheesejets = apply(event, "Jets").jets(cmpMomByE); if (cheesejets.empty()) { MSG_DEBUG("No 'cheese' jets found in event"); return; } if (cheesejets.size() > 1 && fabs(cheesejets[0].eta()) <= 0.5 && cheesejets[0].Et()/GeV > 5.0 && cheesejets[1].Et()/GeV > 5.0) { const double cheeseETlead = cheesejets[0].Et(); const double eta1 = cheesejets[0].eta(); const double phi1 = cheesejets[0].phi(); const double eta2 = cheesejets[1].eta(); const double phi2 = cheesejets[1].phi(); double ptSumSub2(0), ptSumSub3(0); for (const Particle& t : cheesetracks) { FourMomentum trackMom = t.momentum(); const double pt = trackMom.pT(); // Subtracting 2 leading jets const double deltaR1 = deltaR(trackMom, eta1, phi1); const double deltaR2 = deltaR(trackMom, eta2, phi2); MSG_TRACE("Track vs jet(1): " << "|(" << trackMom.eta() << ", " << trackMom.phi() << ") - " << "|(" << eta1 << ", " << phi1 << ")| = " << deltaR1); MSG_TRACE("Track vs jet(2): " << "|(" << trackMom.eta() << ", " << trackMom.phi() << ") - " << "|(" << eta2 << ", " << phi2 << ")| = " << deltaR2); if (deltaR1 > 0.7 && deltaR2 > 0.7) { ptSumSub2 += pt; // Subtracting 3rd leading jet if (cheesejets.size() > 2 && cheesejets[2].Et()/GeV > 5.0) { const double eta3 = cheesejets[2].eta(); const double phi3 = cheesejets[2].phi(); const double deltaR3 = deltaR(trackMom, eta3, phi3); MSG_TRACE("Track vs jet(3): " << "|(" << trackMom.eta() << ", " << trackMom.phi() << ") - " << "|(" << eta3 << ", " << phi3 << ")| = " << deltaR3); if (deltaR3 > 0.7) { ptSumSub3 += pt; } } } } // Swiss Cheese sub 2,3 jets distributions for sqrt(s) = 630 GeV, 1800 GeV if (fuzzyEquals(sqrtS/GeV, 630)) { if (!isZero(ptSumSub2)) _pTSum630_2Jet->fill(cheeseETlead/GeV, ptSumSub2/GeV); if 
(!isZero(ptSumSub3))_pTSum630_3Jet->fill(cheeseETlead/GeV, ptSumSub3/GeV); } else if (fuzzyEquals(sqrtS/GeV, 1800)) { if (!isZero(ptSumSub2))_pTSum1800_2Jet->fill(cheeseETlead/GeV, ptSumSub2/GeV); if (!isZero(ptSumSub3))_pTSum1800_3Jet->fill(cheeseETlead/GeV, ptSumSub3/GeV); } } } } void finalize() { /// @todo Take these normalisations from the data histo (it can't come from just the MC) if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) { // Normalize to actual number of entries in pT dbn histos... normalize(_pt90Dbn1800Et40, 1656.75); // norm OK normalize(_pt90Dbn1800Et80, 4657.5); // norm OK normalize(_pt90Dbn1800Et120, 5395.5); // norm OK normalize(_pt90Dbn1800Et160, 7248.75); // norm OK normalize(_pt90Dbn1800Et200, 2442.0); // norm OK } // ...and for min bias distributions: if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) { normalize(_numTracksDbn1800MB, 309718.25); // norm OK normalize(_ptDbn1800MB, 33600.0); // norm OK } else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) { normalize(_numTracksDbn630MB, 1101024.0); // norm OK normalize(_ptDbn630MB, 105088.0); // norm OK } } //@} private: /// @name Histogram collections //@{ /// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for /// the average \f$ p_T \f$ in the toward, transverse and away regions at /// \f$ \sqrt{s} = 1800 \text{GeV} \f$. /// Corresponds to Table 1, and HepData table 1. Profile1DPtr _pt90MaxAvg1800, _pt90MinAvg1800; /// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for /// the \f$ p_T \f$ sum in the toward, transverse and away regions at /// \f$ \sqrt{s} = 1800 \text{GeV} \f$. /// Corresponds to figure 2/3, and HepData table 2. Profile1DPtr _pt90Max1800, _pt90Min1800, _pt90Diff1800; /// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for /// the \f$ p_T \f$ sum in the toward, transverse and away regions at /// at \f$ \sqrt{s} = 630 \text{GeV} \f$. /// Corresponds to figure 8, and HepData table 8. 
Profile1DPtr _pt90Max630, _pt90Min630, _pt90Diff630; /// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for /// the cone track multiplicity at \f$ \sqrt{s} = 1800 \text{GeV} \f$. /// Corresponds to figure 5, and HepData table 4. Profile1DPtr _num90Max1800, _num90Min1800; /// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for /// the \f$ p_T \f$ sum at \f$ \sqrt{s} = 1800 \text{GeV} \f$. /// Corresponds to figure 7, and HepData table 7. Profile1DPtr _pTSum1800_2Jet, _pTSum1800_3Jet; /// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for /// the \f$ p_T \f$ sum at \f$ \sqrt{s} = 630 \text{GeV} \f$. /// Corresponds to figure 9, and HepData table 9. Profile1DPtr _pTSum630_2Jet, _pTSum630_3Jet; /// Histogram of \f$ p_{T\text{sum}} \f$ distribution for 5 different /// \f$ E_{T1} \f$ bins. /// Corresponds to figure 4, and HepData table 3. Histo1DPtr _pt90Dbn1800Et40, _pt90Dbn1800Et80, _pt90Dbn1800Et120, _pt90Dbn1800Et160, _pt90Dbn1800Et200; /// Histograms of track multiplicity and \f$ p_T \f$ distributions for /// minimum bias events. /// Figure 6, and HepData tables 5 & 6. /// Figure 10, and HepData tables 10 & 11. 
Histo1DPtr _numTracksDbn1800MB, _ptDbn1800MB; Histo1DPtr _numTracksDbn630MB, _ptDbn630MB; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2004_S5839831); } diff --git a/analyses/pluginCDF/CDF_2005_S6217184.cc b/analyses/pluginCDF/CDF_2005_S6217184.cc --- a/analyses/pluginCDF/CDF_2005_S6217184.cc +++ b/analyses/pluginCDF/CDF_2005_S6217184.cc @@ -1,131 +1,131 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/JetShape.hh" namespace Rivet { /// @brief CDF Run II jet shape analysis /// @author Andy Buckley class CDF_2005_S6217184 : public Analysis { public: /// Constructor CDF_2005_S6217184() : Analysis("CDF_2005_S6217184") { } /// @name Analysis methods //@{ void init() { // Set up projections - const FinalState fs(-2.0, 2.0); + const FinalState fs((Cuts::etaIn(-2.0, 2.0))); declare(fs, "FS"); FastJets fj(fs, FastJets::CDFMIDPOINT, 0.7); fj.useInvisibles(); declare(fj, "Jets"); // Specify pT bins _ptedges = {{ 37.0, 45.0, 55.0, 63.0, 73.0, 84.0, 97.0, 112.0, 128.0, 148.0, 166.0, 186.0, 208.0, 229.0, 250.0, 277.0, 304.0, 340.0, 380.0 }}; // Register a jet shape projection and histogram for each pT bin for (size_t i = 0; i < 6; ++i) { for (size_t j = 0; j < 3; ++j) { const size_t k = i*3 + j; stringstream ss; ss << "JetShape" << k; const string pname = ss.str(); _jsnames_pT[k] = pname; const JetShape jsp(fj, 0.0, 0.7, 7, _ptedges[k], _ptedges[k+1], 0.1, 0.7, RAPIDITY); declare(jsp, pname); book(_profhistRho_pT[k] ,i+1, 1, j+1); book(_profhistPsi_pT[k] ,6+i+1, 1, j+1); } } // Final histo book(_profhistPsi_vs_pT, 13, 1, 1, true); } /// Do the analysis void analyze(const Event& evt) { // Get jets and require at least one to pass pT and y cuts const Jets jets = apply(evt, "Jets") .jetsByPt(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) && Cuts::absrap < 0.7); MSG_DEBUG("Jet multiplicity 
before cuts = " << jets.size()); if (jets.size() == 0) { MSG_DEBUG("No jets found in required pT and rapidity range"); vetoEvent; } // Calculate and histogram jet shapes for (size_t ipt = 0; ipt < 18; ++ipt) { const JetShape& jsipt = apply(evt, _jsnames_pT[ipt]); for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) { const double r_rho = jsipt.rBinMid(rbin); MSG_DEBUG(ipt << " " << rbin << " (" << r_rho << ") " << jsipt.diffJetShape(ijet, rbin)); /// @note Bin width Jacobian factor of 0.7/0.1 = 7 in the differential shapes plot _profhistRho_pT[ipt]->fill(r_rho/0.7, (0.7/0.1)*jsipt.diffJetShape(ijet, rbin)); const double r_Psi = jsipt.rBinMax(rbin); _profhistPsi_pT[ipt]->fill(r_Psi/0.7, jsipt.intJetShape(ijet, rbin)); } } } } // Finalize void finalize() { // Construct final 1-Psi(0.3/0.7) profile from Psi profiles for (size_t i = 0; i < _ptedges.size()-1; ++i) { // Get entry for rad_Psi = 0.2 bin /// @note Not a great handling of empty bins! Profile1DPtr ph_i = _profhistPsi_pT[i]; const double y = (ph_i->bin(2).effNumEntries() > 0) ? ph_i->bin(2).mean() : 0; const double ey = (ph_i->bin(2).effNumEntries() > 1) ? ph_i->bin(2).stdErr() : 0; _profhistPsi_vs_pT->point(i).setY(y, ey); } } //@} private: /// @name Analysis data //@{ /// Jet \f$ p_\perp\f$ bins. vector _ptedges; // This can't be a raw array if we want to initialise it non-painfully /// JetShape projection name for each \f$p_\perp\f$ bin. 
string _jsnames_pT[18]; //@} /// @name Histograms //@{ Profile1DPtr _profhistRho_pT[18]; Profile1DPtr _profhistPsi_pT[18]; Scatter2DPtr _profhistPsi_vs_pT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2005_S6217184); } diff --git a/analyses/pluginCDF/CDF_2006_S6653332.cc b/analyses/pluginCDF/CDF_2006_S6653332.cc --- a/analyses/pluginCDF/CDF_2006_S6653332.cc +++ b/analyses/pluginCDF/CDF_2006_S6653332.cc @@ -1,180 +1,180 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedLeptons.hh" namespace Rivet { /// @brief CDF Run II analysis: jet \f$ p_T \f$ and \f$ \eta \f$ /// distributions in Z + (b) jet production /// @author Lars Sonnenschein /// /// This CDF analysis provides \f$ p_T \f$ and \f$ \eta \f$ distributions of /// jets in Z + (b) jet production, before and after tagging. 
class CDF_2006_S6653332 : public Analysis { public: /// Constructor CDF_2006_S6653332() : Analysis("CDF_2006_S6653332"), _Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(1.1) { } /// @name Analysis methods //@{ void init() { - const FinalState fs(-3.6, 3.6); + const FinalState fs((Cuts::etaIn(-3.6, 3.6))); declare(fs, "FS"); // Create a final state with any e+e- or mu+mu- pair with // invariant mass 76 -> 106 GeV and ET > 20 (Z decay products) vector > vids; vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON)); vids.push_back(make_pair(PID::MUON, PID::ANTIMUON)); - FinalState fs2(-3.6, 3.6); + FinalState fs2((Cuts::etaIn(-3.6, 3.6))); InvMassFinalState invfs(fs2, vids, 66*GeV, 116*GeV); declare(invfs, "INVFS"); // Make a final state without the Z decay products for jet clustering VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(invfs); declare(vfs, "VFS"); declare(FastJets(vfs, FastJets::CDFMIDPOINT, 0.7), "Jets"); // Book histograms book(_sigmaBJet ,1, 1, 1); book(_ratioBJetToZ ,2, 1, 1); book(_ratioBJetToJet ,3, 1, 1); book(_sumWeightsWithZ, "sumWeightsWithZ"); book(_sumWeightsWithZJet, "sumWeightsWithZJet"); } /// Do the analysis void analyze(const Event& event) { // Check we have an l+l- pair that passes the kinematic cuts // Get the Z decay products (mu+mu- or e+e- pair) const InvMassFinalState& invMassFinalState = apply(event, "INVFS"); const Particles& ZDecayProducts = invMassFinalState.particles(); // Make sure we have at least 2 Z decay products (mumu or ee) if (ZDecayProducts.size() < 2) vetoEvent; // double Lep1Pt = ZDecayProducts[0].pT(); double Lep2Pt = ZDecayProducts[1].pT(); double Lep1Eta = ZDecayProducts[0].absrap(); ///< @todo This is y... should be abseta()? double Lep2Eta = ZDecayProducts[1].absrap(); ///< @todo This is y... should be abseta()? if (Lep1Eta > _LepEtaCut && Lep2Eta > _LepEtaCut) vetoEvent; if (ZDecayProducts[0].abspid()==13 && Lep1Eta > 1. && Lep2Eta > 1.) 
vetoEvent; if (Lep1Pt < _Lep1PtCut && Lep2Pt < _Lep2PtCut) vetoEvent; _sumWeightsWithZ->fill(); /// @todo Write out a warning if there are more than two decay products FourMomentum Zmom = ZDecayProducts[0].momentum() + ZDecayProducts[1].momentum(); // Put all b-quarks in a vector /// @todo Use jet contents rather than accessing quarks directly Particles bquarks; /// @todo Use nicer looping for (GenEvent::particle_const_iterator p = event.genEvent()->particles_begin(); p != event.genEvent()->particles_end(); ++p) { if ( std::abs((*p)->pdg_id()) == PID::BQUARK ) { bquarks.push_back(Particle(**p)); } } // Get jets const FastJets& jetpro = apply(event, "Jets"); MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size()); const PseudoJets& jets = jetpro.pseudoJetsByPt(); MSG_DEBUG("jetlist size = " << jets.size()); int numBJet = 0; int numJet = 0; // for each b-jet plot the ET and the eta of the jet, normalise to the total cross section at the end // for each event plot N jet and pT(Z), normalise to the total cross section at the end for (PseudoJets::const_iterator jt = jets.begin(); jt != jets.end(); ++jt) { // select jets that pass the kinematic cuts if (jt->perp() > _JetPtCut && fabs(jt->rapidity()) <= _JetEtaCut) { ++numJet; // Does the jet contain a b-quark? 
/// @todo Use jet contents rather than accessing quarks directly bool bjet = false; for (const Particle& bquark : bquarks) { if (deltaR(jt->rapidity(), jt->phi(), bquark.rapidity(), bquark.phi()) <= _Rjet) { bjet = true; break; } } // end loop around b-jets if (bjet) { numBJet++; } } } // end loop around jets if (numJet > 0) _sumWeightsWithZJet->fill(); if (numBJet > 0) { _sigmaBJet->fill(1960.0); _ratioBJetToZ->fill(1960.0); _ratioBJetToJet->fill(1960.0); } } /// Finalize void finalize() { MSG_DEBUG("Total sum of weights = " << sumOfWeights()); MSG_DEBUG("Sum of weights for Z production in mass range = " << dbl(*_sumWeightsWithZ)); MSG_DEBUG("Sum of weights for Z+jet production in mass range = " << dbl(*_sumWeightsWithZJet)); scale(_sigmaBJet, crossSection()/sumOfWeights()); scale(_ratioBJetToZ, 1.0/ *_sumWeightsWithZ); scale(_ratioBJetToJet, 1.0/ *_sumWeightsWithZJet); } //@} private: /// @name Cuts and counters //@{ double _Rjet; double _JetPtCut; double _JetEtaCut; double _Lep1PtCut; double _Lep2PtCut; double _LepEtaCut; CounterPtr _sumWeightsWithZ; CounterPtr _sumWeightsWithZJet; //@} /// @name Histograms //@{ Histo1DPtr _sigmaBJet; Histo1DPtr _ratioBJetToZ; Histo1DPtr _ratioBJetToJet; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2006_S6653332); } diff --git a/analyses/pluginCDF/CDF_2008_S7540469.cc b/analyses/pluginCDF/CDF_2008_S7540469.cc --- a/analyses/pluginCDF/CDF_2008_S7540469.cc +++ b/analyses/pluginCDF/CDF_2008_S7540469.cc @@ -1,175 +1,175 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Measurement differential Z/\f$ \gamma^* \f$ + jet + \f$ X \f$ cross sections /// @author Frank Siegert class CDF_2008_S7540469 : public Analysis { public: /// Constructor CDF_2008_S7540469() : Analysis("CDF_2008_S7540469") { } /// @name 
Analysis methods //@{ /// Book histograms void init() { // Full final state - FinalState fs(-5.0, 5.0); + FinalState fs((Cuts::etaIn(-5.0, 5.0))); declare(fs, "FS"); // Leading electrons in tracking acceptance IdentifiedFinalState elfs(Cuts::abseta < 5 && Cuts::pT > 25*GeV); elfs.acceptIdPair(PID::ELECTRON); declare(elfs, "LeadingElectrons"); book(_h_jet_multiplicity ,1, 1, 1); book(_h_jet_pT_cross_section_incl_1jet ,2, 1, 1); book(_h_jet_pT_cross_section_incl_2jet ,3, 1, 1); } /// Do the analysis void analyze(const Event & event) { // Skip if the event is empty const FinalState& fs = apply(event, "FS"); if (fs.empty()) { MSG_DEBUG("Skipping event " << numEvents() << " because no final state pair found"); vetoEvent; } // Find the Z candidates const FinalState & electronfs = apply(event, "LeadingElectrons"); std::vector > Z_candidates; Particles all_els=electronfs.particles(); for (size_t i=0; i 116.0) { candidate = false; } double abs_eta_0 = fabs(all_els[i].eta()); double abs_eta_1 = fabs(all_els[j].eta()); if (abs_eta_1 < abs_eta_0) { double tmp = abs_eta_0; abs_eta_0 = abs_eta_1; abs_eta_1 = tmp; } if (abs_eta_0 > 1.0) { candidate = false; } if (!(abs_eta_1 < 1.0 || (inRange(abs_eta_1, 1.2, 2.8)))) { candidate = false; } if (candidate) { Z_candidates.push_back(make_pair(all_els[i], all_els[j])); } } } if (Z_candidates.size() != 1) { MSG_DEBUG("Skipping event " << numEvents() << " because no unique electron pair found "); vetoEvent; } // Now build the jets on a FS without the electrons from the Z (including QED radiation) Particles jetparts; for (const Particle& p : fs.particles()) { bool copy = true; if (p.pid() == PID::PHOTON) { FourMomentum p_e0 = Z_candidates[0].first.momentum(); FourMomentum p_e1 = Z_candidates[0].second.momentum(); FourMomentum p_P = p.momentum(); if (deltaR(p_e0, p_P) < 0.2) copy = false; if (deltaR(p_e1, p_P) < 0.2) copy = false; } else { if (p.genParticle()->barcode() == Z_candidates[0].first.genParticle()->barcode()) copy = false; if 
(p.genParticle()->barcode() == Z_candidates[0].second.genParticle()->barcode()) copy = false; } if (copy) jetparts.push_back(p); } // Proceed to lepton dressing const PseudoJets pjs = mkPseudoJets(jetparts); const auto jplugin = make_shared(0.7, 0.5, 1.0); const Jets jets_all = mkJets(fastjet::ClusterSequence(pjs, jplugin.get()).inclusive_jets()); const Jets jets_cut = sortByPt(filterBy(jets_all, Cuts::pT > 30*GeV && Cuts::abseta < 2.1)); // FastJets jetpro(FastJets::CDFMIDPOINT, 0.7); // jetpro.calc(jetparts); // // Take jets with pt > 30, |eta| < 2.1: // const Jets& jets = jetpro.jets(); // Jets jets_cut; // for (const Jet& j, jets) { // if (j.pT()/GeV > 30.0 && j.abseta() < 2.1) { // jets_cut.push_back(j); // } // } // // Sort by pT: // sort(jets_cut.begin(), jets_cut.end(), cmpMomByPt); // Return if there are no jets: MSG_DEBUG("Num jets above 30 GeV = " << jets_cut.size()); if (jets_cut.empty()) { MSG_DEBUG("No jets pass cuts "); vetoEvent; } // Cut on Delta R between Z electrons and *all* jets for (const Jet& j : jets_cut) { if (deltaR(Z_candidates[0].first, j) < 0.7) vetoEvent; if (deltaR(Z_candidates[0].second, j) < 0.7) vetoEvent; } // Fill histograms for (size_t njet=1; njet<=jets_cut.size(); ++njet) { _h_jet_multiplicity->fill(njet); } for (const Jet& j : jets_cut) { if (jets_cut.size() > 0) { _h_jet_pT_cross_section_incl_1jet->fill(j.pT()); } if (jets_cut.size() > 1) { _h_jet_pT_cross_section_incl_2jet->fill(j.pT()); } } } /// Rescale histos void finalize() { const double invlumi = crossSection()/femtobarn/sumOfWeights(); scale(_h_jet_multiplicity, invlumi); scale(_h_jet_pT_cross_section_incl_1jet, invlumi); scale(_h_jet_pT_cross_section_incl_2jet, invlumi); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_jet_multiplicity; Histo1DPtr _h_jet_pT_cross_section_incl_1jet; Histo1DPtr _h_jet_pT_cross_section_incl_2jet; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2008_S7540469); } diff --git 
a/analyses/pluginCDF/CDF_2008_S7541902.cc b/analyses/pluginCDF/CDF_2008_S7541902.cc --- a/analyses/pluginCDF/CDF_2008_S7541902.cc +++ b/analyses/pluginCDF/CDF_2008_S7541902.cc @@ -1,194 +1,194 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include namespace Rivet { /// @brief CDF jet pT and multiplicity distributions in W + jets events /// /// This CDF analysis provides jet pT distributions for 4 jet multiplicity bins /// as well as the jet multiplicity distribution in W + jets events. /// e-Print: arXiv:0711.4044 [hep-ex] class CDF_2008_S7541902 : public Analysis { public: /// Constructor CDF_2008_S7541902() : Analysis("CDF_2008_S7541902"), _electronETCut(20.0*GeV), _electronETACut(1.1), _eTmissCut(30.0*GeV), _mTCut(20.0*GeV), _jetEtCutA(20.0*GeV), _jetEtCutB(25.0*GeV), _jetETA(2.0) { } /// @name Analysis methods //@{ void init() { // Set up projections // Basic FS - FinalState fs(-3.6, 3.6); + FinalState fs((Cuts::etaIn(-3.6, 3.6))); declare(fs, "FS"); // Create a final state with any e-nu pair with invariant mass 65 -> 95 GeV and ET > 20 (W decay products) vector > vids; vids += make_pair(PID::ELECTRON, PID::NU_EBAR); vids += make_pair(PID::POSITRON, PID::NU_E); - FinalState fs2(-3.6, 3.6, 20*GeV); + FinalState fs2((Cuts::etaIn(-3.6, 3.6) && Cuts::pT >= 20*GeV)); InvMassFinalState invfs(fs2, vids, 65*GeV, 95*GeV); declare(invfs, "INVFS"); // Make a final state without the W decay products for jet clustering VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(invfs); declare(vfs, "VFS"); declare(FastJets(vfs, FastJets::CDFJETCLU, 0.4), "Jets"); // Book histograms for (int i = 0 ; i < 4 ; ++i) { book(_histJetEt[i] ,1+i, 1, 1); book(_histJetMultRatio[i], 5, 1, i+1, true); /// @todo These would be better off as YODA::Counter until finalize() book(_histJetMult[i] ,6+i, 1, 1); // _sumW 
is essentially the 0th "histo" counter } book(_sumW,"sumW"); } /// Do the analysis void analyze(const Event& event) { // Get the W decay products (electron and neutrino) const InvMassFinalState& invMassFinalState = apply(event, "INVFS"); const Particles& wDecayProducts = invMassFinalState.particles(); FourMomentum electronP, neutrinoP; bool gotElectron(false), gotNeutrino(false); for (const Particle& p : wDecayProducts) { FourMomentum p4 = p.momentum(); if (p4.Et() > _electronETCut && fabs(p4.eta()) < _electronETACut && p.abspid() == PID::ELECTRON) { electronP = p4; gotElectron = true; } else if (p4.Et() > _eTmissCut && p.abspid() == PID::NU_E) { neutrinoP = p4; gotNeutrino = true; } } // Veto event if the electron or MET cuts fail if (!gotElectron || !gotNeutrino) vetoEvent; // Veto event if the MTR cut fails double mT2 = 2.0 * ( electronP.pT()*neutrinoP.pT() - electronP.px()*neutrinoP.px() - electronP.py()*neutrinoP.py() ); if (sqrt(mT2) < _mTCut ) vetoEvent; // Get the jets const JetAlg& jetProj = apply(event, "Jets"); Jets theJets = jetProj.jets(cmpMomByEt, Cuts::Et > _jetEtCutA); size_t njetsA(0), njetsB(0); for (const Jet& j : theJets) { const FourMomentum pj = j.momentum(); if (fabs(pj.rapidity()) < _jetETA) { // Fill differential histograms for top 4 jets with Et > 20 if (njetsA < 4 && pj.Et() > _jetEtCutA) { ++njetsA; _histJetEt[njetsA-1]->fill(pj.Et()); } // Count number of jets with Et > 25 (for multiplicity histograms) if (pj.Et() > _jetEtCutB) ++njetsB; } } // Increment event counter _sumW->fill(); // Jet multiplicity for (size_t i = 1; i <= njetsB; ++i) { /// @todo This isn't really a histogram: replace with a YODA::Counter when we have one! _histJetMult[i-1]->fill(1960.); if (i == 4) break; } } /// Finalize void finalize() { // Fill the 0th ratio histogram specially /// @todo This special case for 1-to-0 will disappear if we use Counters for all mults including 0. 
if (_sumW->val() > 0) { const YODA::Histo1D::Bin& b0 = _histJetMult[0]->bin(0); double ratio = b0.area()/dbl(*_sumW); double frac_err = 1/dbl(*_sumW); ///< This 1/sqrt{N} error treatment isn't right for weighted events: use YODA::Counter if (b0.area() > 0) frac_err = sqrt( sqr(frac_err) + sqr(b0.areaErr()/b0.area()) ); _histJetMultRatio[0]->point(0).setY(ratio, ratio*frac_err); } // Loop over the non-zero multiplicities for (size_t i = 0; i < 3; ++i) { const YODA::Histo1D::Bin& b1 = _histJetMult[i]->bin(0); const YODA::Histo1D::Bin& b2 = _histJetMult[i+1]->bin(0); if (b1.area() == 0.0) continue; double ratio = b2.area()/b1.area(); double frac_err = b1.areaErr()/b1.area(); if (b2.area() > 0) frac_err = sqrt( sqr(frac_err) + sqr(b2.areaErr()/b2.area()) ); _histJetMultRatio[i+1]->point(0).setY(ratio, ratio*frac_err); } // Normalize the non-ratio histograms for (size_t i = 0; i < 4; ++i) { scale(_histJetEt[i], crossSection()/picobarn/sumOfWeights()); scale(_histJetMult[i], crossSection()/picobarn/sumOfWeights()); } } //@} private: /// @name Cuts //@{ /// Cut on the electron ET: double _electronETCut; /// Cut on the electron ETA: double _electronETACut; /// Cut on the missing ET double _eTmissCut; /// Cut on the transverse mass squared double _mTCut; /// Cut on the jet ET for differential cross sections double _jetEtCutA; /// Cut on the jet ET for jet multiplicity double _jetEtCutB; /// Cut on the jet ETA double _jetETA; //@} /// @name Histograms //@{ Histo1DPtr _histJetEt[4]; Histo1DPtr _histJetMultNorm; Scatter2DPtr _histJetMultRatio[4]; Histo1DPtr _histJetMult[4]; CounterPtr _sumW; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2008_S7541902); } diff --git a/analyses/pluginCDF/CDF_2008_S7782535.cc b/analyses/pluginCDF/CDF_2008_S7782535.cc --- a/analyses/pluginCDF/CDF_2008_S7782535.cc +++ b/analyses/pluginCDF/CDF_2008_S7782535.cc @@ -1,140 +1,140 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include 
"Rivet/Projections/FastJets.hh" #include "Rivet/Projections/JetShape.hh" namespace Rivet { /// @brief CDF Run II b-jet shape paper class CDF_2008_S7782535 : public Analysis { public: /// Constructor CDF_2008_S7782535() : Analysis("CDF_2008_S7782535") { } /// @name Analysis methods //@{ void init() { // Set up projections - const FinalState fs(-3.6, 3.6); + const FinalState fs((Cuts::etaIn(-3.6, 3.6))); declare(fs, "FS"); FastJets jetproj(fs, FastJets::CDFMIDPOINT, 0.7); jetproj.useInvisibles(); declare(jetproj, "Jets"); // Book histograms and corresponding jet shape projections _ptedges = {{ 52, 80, 104, 142, 300 }}; for (size_t i = 0; i < 4; ++i) { stringstream ss; ss << "JetShape" << i; const string pname = ss.str(); _jsnames_pT[i] = pname; const JetShape jsp(jetproj, 0.0, 0.7, 7, _ptedges[i], _ptedges[i+1], 0.0, 0.7, RAPIDITY); declare(jsp, pname); book(_h_Psi_pT[i] ,i+1, 2, 1); } book(_h_OneMinusPsi_vs_pT, 5, 1, 1); } // Do the analysis void analyze(const Event& event) { const FastJets& fjs = apply(event, "Jets"); const Jets& jets = fjs.jets(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) && Cuts::absrap < 0.7); if (jets.size() == 0) { MSG_DEBUG("No jets found in required pT range"); vetoEvent; } // Filter to just get a vector of b-jets Jets bjets; for (const Jet& j : jets) { if (j.bTagged()) bjets += j; } if (bjets.empty()) { MSG_DEBUG("No b-jet axes in acceptance"); vetoEvent; } // Bin b-jets in pT Jets bjets_ptbinned[4]; for (const Jet& bj : bjets) { const FourMomentum pbj = bj.momentum(); const int ipt = binIndex(pbj.pT(), _ptedges); if (ipt == -1) continue; ///< Out of pT range (somehow!) 
bjets_ptbinned[ipt] += bj; } // Loop over jet pT bins and fill shape profiles for (size_t ipt = 0; ipt < 4; ++ipt) { if (bjets_ptbinned[ipt].empty()) continue; // Don't use the cached result: copy construct and calculate for provided b-jets only JetShape jsipt = apply(event, _jsnames_pT[ipt]); jsipt.calc(bjets_ptbinned[ipt]); for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) { const double r_Psi = jsipt.rBinMax(rbin); _h_Psi_pT[ipt]->fill(r_Psi/0.7, jsipt.intJetShape(ijet, rbin)); } } } } /// Finalize void finalize() { // Construct final 1-Psi(0.3/0.7) profile from Psi profiles for (size_t i = 0; i < _ptedges.size()-1; ++i) { // Get entry for rad_Psi = 0.2 bin Profile1DPtr ph_i = _h_Psi_pT[i]; const double ex = 0.5*(_ptedges[i+1] - _ptedges[i]); const double x = _ptedges[i] + ex; double y = 0; // This is to protect against exceptions double ey = 0; // thrown by YODA when calling mean and if (ph_i->bin(1).effNumEntries() > 1) { // stdErr at y = 1.0 - ph_i->bin(1).mean(); // low stats ey= ph_i->bin(1).stdErr(); } _h_OneMinusPsi_vs_pT->addPoint(x, y, ex, ey); } } //@} private: /// @name Analysis data //@{ /// Jet \f$ p_\perp\f$ bins. vector _ptedges; // This can't be a raw array if we want to initialise it non-painfully /// JetShape projection name for each \f$p_\perp\f$ bin. 
string _jsnames_pT[4]; //@} /// @name Histograms //@{ Profile1DPtr _h_Psi_pT[4]; Scatter2DPtr _h_OneMinusPsi_vs_pT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2008_S7782535); } diff --git a/analyses/pluginCDF/CDF_2008_S8095620.cc b/analyses/pluginCDF/CDF_2008_S8095620.cc --- a/analyses/pluginCDF/CDF_2008_S8095620.cc +++ b/analyses/pluginCDF/CDF_2008_S8095620.cc @@ -1,187 +1,187 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" namespace Rivet { /// @brief CDF Run II Z + b-jet cross-section measurement class CDF_2008_S8095620 : public Analysis { public: /// Constructor. /// jet cuts: |eta| <= 1.5 CDF_2008_S8095620() : Analysis("CDF_2008_S8095620"), _Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(3.2) { } /// @name Analysis methods //@{ void init() { // Set up projections - const FinalState fs(-3.2, 3.2); + const FinalState fs((Cuts::etaIn(-3.2, 3.2))); declare(fs, "FS"); // Create a final state with any e+e- or mu+mu- pair with // invariant mass 76 -> 106 GeV and ET > 18 (Z decay products) vector > vids; vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON)); vids.push_back(make_pair(PID::MUON, PID::ANTIMUON)); - FinalState fs2(-3.2, 3.2); + FinalState fs2((Cuts::etaIn(-3.2, 3.2))); InvMassFinalState invfs(fs2, vids, 76*GeV, 106*GeV); declare(invfs, "INVFS"); // Make a final state without the Z decay products for jet clustering VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(invfs); declare(vfs, "VFS"); declare(FastJets(vfs, FastJets::CDFMIDPOINT, 0.7), "Jets"); // Book histograms book(_dStot ,1, 1, 1); book(_dSdET ,2, 1, 1); book(_dSdETA ,3, 1, 1); book(_dSdZpT ,4, 1, 1); book(_dSdNJet ,5, 1, 1); book(_dSdNbJet ,6, 1, 1); book(_sumWeightSelected,"sumWeightSelected"); } // Do the analysis void analyze(const Event& 
event) { // Check we have an l+l- pair that passes the kinematic cuts // Get the Z decay products (mu+mu- or e+e- pair) const InvMassFinalState& invMassFinalState = apply(event, "INVFS"); const Particles& ZDecayProducts = invMassFinalState.particles(); // make sure we have 2 Z decay products (mumu or ee) if (ZDecayProducts.size() < 2) vetoEvent; //new cuts double Lep1Pt = ZDecayProducts[0].perp(); double Lep2Pt = ZDecayProducts[1].perp(); double Lep1Eta = fabs(ZDecayProducts[0].rapidity()); double Lep2Eta = fabs(ZDecayProducts[1].rapidity()); if (Lep1Eta > _LepEtaCut || Lep2Eta > _LepEtaCut) vetoEvent; if (ZDecayProducts[0].abspid()==13 && ((Lep1Eta > 1.5 || Lep2Eta > 1.5) || (Lep1Eta > 1.0 && Lep2Eta > 1.0))) { vetoEvent; } if (Lep1Pt > Lep2Pt) { if (Lep1Pt < _Lep1PtCut || Lep2Pt < _Lep2PtCut) vetoEvent; } else { if (Lep1Pt < _Lep2PtCut || Lep2Pt < _Lep1PtCut) vetoEvent; } _sumWeightSelected->fill(); /// @todo: write out a warning if there are more than two decay products FourMomentum Zmom = ZDecayProducts[0].momentum() + ZDecayProducts[1].momentum(); // Put all b-quarks in a vector /// @todo Use a b-hadron search rather than b-quarks for tagging Particles bquarks; for (const GenParticle* p : particles(event.genEvent())) { if (std::abs(p->pdg_id()) == PID::BQUARK) { bquarks += Particle(*p); } } // Get jets const FastJets& jetpro = apply(event, "Jets"); MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size()); const PseudoJets& jets = jetpro.pseudoJetsByPt(); MSG_DEBUG("jetlist size = " << jets.size()); int numBJet = 0; int numJet = 0; // for each b-jet plot the ET and the eta of the jet, normalise to the total cross section at the end // for each event plot N jet and pT(Z), normalise to the total cross section at the end for (PseudoJets::const_iterator jt = jets.begin(); jt != jets.end(); ++jt) { // select jets that pass the kinematic cuts if (jt->perp() > _JetPtCut && fabs(jt->rapidity()) <= _JetEtaCut) { numJet++; // does the jet contain a b-quark? 
bool bjet = false; for (const Particle& bquark : bquarks) { if (deltaR(jt->rapidity(), jt->phi(), bquark.rapidity(),bquark.phi()) <= _Rjet) { bjet = true; break; } } // end loop around b-jets if (bjet) { numBJet++; _dSdET->fill(jt->perp()); _dSdETA->fill(fabs(jt->rapidity())); } } } // end loop around jets // wasn't asking for b-jets before!!!! if(numJet > 0 && numBJet > 0) _dSdNJet->fill(numJet); if(numBJet > 0) { _dStot->fill(1960.0); _dSdNbJet->fill(numBJet); _dSdZpT->fill(Zmom.pT()); } } // Finalize void finalize() { // normalise histograms // scale by 1 / the sum-of-weights of events that pass the Z cuts // since the cross sections are normalized to the inclusive // Z cross sections. double Scale = 1.0; if (_sumWeightSelected->val() != 0.0) Scale = 1.0/dbl(*_sumWeightSelected); scale(_dStot,Scale); scale(_dSdET,Scale); scale(_dSdETA,Scale); scale(_dSdNJet,Scale); scale(_dSdNbJet,Scale); scale(_dSdZpT,Scale); } //@} private: double _Rjet; double _JetPtCut; double _JetEtaCut; double _Lep1PtCut; double _Lep2PtCut; double _LepEtaCut; CounterPtr _sumWeightSelected; //@{ /// Histograms Histo1DPtr _dStot; Histo1DPtr _dSdET; Histo1DPtr _dSdETA; Histo1DPtr _dSdNJet; Histo1DPtr _dSdNbJet; Histo1DPtr _dSdZpT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2008_S8095620); } diff --git a/analyses/pluginCDF/CDF_2009_NOTE_9936.cc b/analyses/pluginCDF/CDF_2009_NOTE_9936.cc --- a/analyses/pluginCDF/CDF_2009_NOTE_9936.cc +++ b/analyses/pluginCDF/CDF_2009_NOTE_9936.cc @@ -1,70 +1,70 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerCDFRun2.hh" namespace Rivet { class CDF_2009_NOTE_9936 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_2009_NOTE_9936() : Analysis("CDF_2009_NOTE_9936") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { declare(TriggerCDFRun2(), "Trigger"); - declare(ChargedFinalState(-1.0, 1.0, 0.4*GeV), "CFS"); + declare(ChargedFinalState((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.4*GeV)), "CFS"); book(_hist_nch ,1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // MinBias Trigger const bool trigger = apply(event, "Trigger").minBiasDecision(); if (!trigger) vetoEvent; // Get events charged multiplicity and fill histogram const ChargedFinalState& cfs = apply(event, "CFS"); _hist_nch->fill(cfs.size()); } /// Normalise histograms etc., after the run void finalize() { normalize(_hist_nch); } //@} private: Histo1DPtr _hist_nch; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2009_NOTE_9936); } diff --git a/analyses/pluginCDF/CDF_2009_S8233977.cc b/analyses/pluginCDF/CDF_2009_S8233977.cc --- a/analyses/pluginCDF/CDF_2009_S8233977.cc +++ b/analyses/pluginCDF/CDF_2009_S8233977.cc @@ -1,122 +1,122 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerCDFRun2.hh" namespace Rivet { /// @brief CDF Run II min-bias cross-section /// @author Hendrik Hoeth /// /// Measurement of \f$ \langle p_T \rangle \f$ vs. \f$ n_\text{ch} \f$, /// the track \f$ p_T \f$ distribution, and the \f$ \sum E_T \f$ distribution. /// Particles are selected within |eta|<1 and with pT>0.4 GeV. /// There is no pT cut for the \f$ \sum E_T \f$ measurement. /// /// @par Run conditions /// /// @arg \f$ \sqrt{s} = \f$ 1960 GeV /// @arg Run with generic QCD events. 
/// @arg Set particles with c*tau > 10 mm stable class CDF_2009_S8233977 : public Analysis { public: /// Constructor CDF_2009_S8233977() : Analysis("CDF_2009_S8233977") { } /// @name Analysis methods //@{ /// Book histograms and projections void init() { declare(TriggerCDFRun2(), "Trigger"); - declare(FinalState(-1.0, 1.0, 0.0*GeV), "EtFS"); - declare(ChargedFinalState(-1.0, 1.0, 0.4*GeV), "CFS"); + declare(FinalState((Cuts::etaIn(-1.0, 1.0))), "EtFS"); + declare(ChargedFinalState((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.4*GeV)), "CFS"); book(_hist_pt ,1, 1, 1); book(_hist_pt_vs_multiplicity ,2, 1, 1); book(_hist_sumEt ,3, 1, 1); book(_sumWeightSelected,"_sumWeightSelected"); } /// Do the analysis void analyze(const Event& evt) { // MinBias Trigger const bool trigger = apply(evt, "Trigger").minBiasDecision(); if (!trigger) vetoEvent; /// @todo The pT and sum(ET) distributions look slightly different from /// Niccolo's Monte Carlo plots. Still waiting for his answer. const ChargedFinalState& trackfs = apply(evt, "CFS"); const size_t numParticles = trackfs.size(); for (const Particle& p : trackfs.particles()) { const double pT = p.pT() / GeV; _hist_pt_vs_multiplicity->fill(numParticles, pT); // The weight for entries in the pT distribution should be weight/(pT*dPhi*dy). // // - dPhi = 2*PI // // - dy depends on the pT: They calculate y assuming the particle has the // pion mass and assuming that eta=1: // dy = 2 * 1/2 * ln [(sqrt(m^2 + (a+1)*pT^2) + a*pT) / (sqrt(m^2 + (a+1)*pT^2) - a*pT)] // with a = sinh(1). 
// // sinh(1) = 1.1752012 // m(charged pion)^2 = (139.57 MeV)^2 = 0.019479785 GeV^2 const double sinh1 = 1.1752012; const double apT = sinh1 * pT; const double mPi = 139.57*MeV; const double root = sqrt(mPi*mPi + (1+sinh1)*pT*pT); const double dy = std::log((root+apT)/(root-apT)); const double dphi = TWOPI; _hist_pt->fill(pT, 1.0/(pT*dphi*dy)); } // Calc sum(Et) from calo particles const FinalState& etfs = apply(evt, "EtFS"); double sumEt = 0.0; for (const Particle& p : etfs.particles()) { sumEt += p.Et(); } _hist_sumEt->fill(sumEt); _sumWeightSelected->fill(); } /// Normalize histos void finalize() { scale(_hist_sumEt, crossSection()/millibarn/(4*M_PI*dbl(*_sumWeightSelected))); scale(_hist_pt, crossSection()/millibarn/dbl(*_sumWeightSelected)); MSG_DEBUG("sumOfWeights() = " << sumOfWeights()); MSG_DEBUG("_sumWeightSelected = " << dbl(*_sumWeightSelected)); } //@} private: CounterPtr _sumWeightSelected; Profile1DPtr _hist_pt_vs_multiplicity; Histo1DPtr _hist_pt; Histo1DPtr _hist_sumEt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2009_S8233977); } diff --git a/analyses/pluginCDF/CDF_2009_S8436959.cc b/analyses/pluginCDF/CDF_2009_S8436959.cc --- a/analyses/pluginCDF/CDF_2009_S8436959.cc +++ b/analyses/pluginCDF/CDF_2009_S8436959.cc @@ -1,87 +1,87 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { /// @brief CDF inclusive isolated prompt photon cross-section class CDF_2009_S8436959 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CDF_2009_S8436959() : Analysis("CDF_2009_S8436959") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; declare(fs, "FS"); - LeadingParticlesFinalState photonfs(FinalState(-1.0, 1.0, 30.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 30.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); book(_h_Et_photon ,1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { Particles fs = apply(event, "FS").particles(); Particles photons = apply(event, "LeadingPhoton").particles(); if (photons.size()!=1) { vetoEvent; } FourMomentum leadingPhoton = photons[0].momentum(); double eta_P = leadingPhoton.eta(); double phi_P = leadingPhoton.phi(); FourMomentum mom_in_cone; for (const Particle& p : fs) { if (deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.4) { mom_in_cone += p.momentum(); } } if ( (mom_in_cone.Et() - leadingPhoton.Et()) > 2.0*GeV) { vetoEvent; } _h_Et_photon->fill(leadingPhoton.Et()); } /// Normalise histograms etc., after the run void finalize() { scale(_h_Et_photon, crossSection()/sumOfWeights()/2.0); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_Et_photon; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2009_S8436959); } diff --git a/analyses/pluginCDF/CDF_2010_S8591881_DY.cc b/analyses/pluginCDF/CDF_2010_S8591881_DY.cc --- a/analyses/pluginCDF/CDF_2010_S8591881_DY.cc +++ b/analyses/pluginCDF/CDF_2010_S8591881_DY.cc @@ -1,209 +1,209 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/ChargedLeptons.hh" namespace Rivet { /// @brief CDF Run II underlying event in Drell-Yan /// @author Hendrik Hoeth /// /// Measurement of the underlying event in Drell-Yan /// \f$ Z/\gamma^* \to e^+ e^- \f$ and /// \f$ Z/\gamma^* \to \mu^+ \mu^- \f$ events. 
The reconstructed /// Z defines the \f$ \phi \f$ orientation. A Z mass window cut is applied. /// /// @par Run conditions /// /// @arg \f$ \sqrt{s} = \f$ 1960 GeV /// @arg produce Drell-Yan events /// @arg Set particles with c*tau > 10 mm stable /// @arg Z decay mode: Z -> e+e- and Z -> mu+mu- /// @arg gamma decay mode: gamma -> e+e- and gamma -> mu+mu- /// @arg minimum invariant mass of the fermion pair coming from the Z/gamma: 70 GeV class CDF_2010_S8591881_DY : public Analysis { public: /// Constructor CDF_2010_S8591881_DY() : Analysis("CDF_2010_S8591881_DY") { } /// @name Analysis methods //@{ void init() { // Set up projections - const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV); - const ChargedFinalState clfs(-1.0, 1.0, 20*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.5*GeV)); + const ChargedFinalState clfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 20*GeV)); declare(cfs, "FS"); declare(ChargedLeptons(clfs), "CL"); // Book histograms book(_hist_tnchg , 1, 1, 1); book(_hist_pnchg , 1, 1, 2); book(_hist_anchg , 1, 1, 3); book(_hist_pmaxnchg , 2, 1, 1); book(_hist_pminnchg , 2, 1, 2); book(_hist_pdifnchg , 2, 1, 3); book(_hist_tcptsum , 3, 1, 1); book(_hist_pcptsum , 3, 1, 2); book(_hist_acptsum , 3, 1, 3); book(_hist_pmaxcptsum , 4, 1, 1); book(_hist_pmincptsum , 4, 1, 2); book(_hist_pdifcptsum , 4, 1, 3); book(_hist_tcptave , 5, 1, 1); book(_hist_pcptave , 5, 1, 2); book(_hist_tcptmax , 6, 1, 1); book(_hist_pcptmax , 6, 1, 2); book(_hist_zptvsnchg , 7, 1, 1); book(_hist_cptavevsnchg , 8, 1, 1); book(_hist_cptavevsnchgsmallzpt , 9, 1, 1); } /// Do the analysis void analyze(const Event& e) { const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 1) { MSG_DEBUG("Failed multiplicity cut"); vetoEvent; } // Get the leptons const Particles& leptons = apply(e, "CL").chargedLeptons(); // We want exactly two leptons of the same flavour. MSG_DEBUG("lepton multiplicity = " << leptons.size()); if (leptons.size() != 2 || leptons[0].pid() != -leptons[1].pid() ) vetoEvent; // Lepton pT > 20 GeV if (leptons[0].pT()/GeV <= 20 || leptons[1].pT()/GeV <= 20) vetoEvent; // Lepton pair should have an invariant mass between 70 and 110 and |eta| < 6 const FourMomentum dilepton = leptons[0].momentum() + leptons[1].momentum(); if (!inRange(dilepton.mass()/GeV, 70., 110.) || fabs(dilepton.eta()) >= 6) vetoEvent; MSG_DEBUG("Dilepton mass = " << dilepton.mass()/GeV << " GeV"); MSG_DEBUG("Dilepton pT = " << dilepton.pT()/GeV << " GeV"); // Calculate the observables size_t numToward(0), numAway(0); long int numTrans1(0), numTrans2(0); double ptSumToward(0.0), ptSumTrans1(0.0), ptSumTrans2(0.0), ptSumAway(0.0); double ptMaxToward(0.0), ptMaxTrans1(0.0), ptMaxTrans2(0.0), ptMaxAway(0.0); const double phiZ = dilepton.azimuthalAngle(); const double pTZ = dilepton.pT(); /// @todo Replace with for for (Particles::const_iterator p = fs.particles().begin(); p != fs.particles().end(); ++p) { // Don't use the leptons /// @todo Replace with PID::isLepton if (abs(p->pid()) < 20) continue; const double dPhi = deltaPhi(p->momentum().phi(), phiZ); const double pT = p->pT(); double rotatedphi = p->momentum().phi() - phiZ; while (rotatedphi < 0) rotatedphi += 2*PI; if (dPhi < PI/3.0) { ptSumToward += pT; ++numToward; if (pT > ptMaxToward) ptMaxToward = pT; } else if (dPhi < 2*PI/3.0) { if (rotatedphi <= PI) { ptSumTrans1 += pT; ++numTrans1; if (pT > ptMaxTrans1) ptMaxTrans1 = pT; } else { ptSumTrans2 += pT; ++numTrans2; if (pT > ptMaxTrans2) ptMaxTrans2 = pT; } } else { ptSumAway += pT; ++numAway; if (pT > ptMaxAway) ptMaxAway = pT; } // We need to subtract the two leptons from the number of particles to get the correct multiplicity 
_hist_cptavevsnchg->fill(numParticles-2, pT); if (pTZ < 10) _hist_cptavevsnchgsmallzpt->fill(numParticles-2, pT); } // Fill the histograms _hist_tnchg->fill(pTZ, numToward/(4*PI/3)); _hist_pnchg->fill(pTZ, (numTrans1+numTrans2)/(4*PI/3)); _hist_pmaxnchg->fill(pTZ, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3)); _hist_pminnchg->fill(pTZ, (numTrans1fill(pTZ, abs(numTrans1-numTrans2)/(2*PI/3)); _hist_anchg->fill(pTZ, numAway/(4*PI/3)); _hist_tcptsum->fill(pTZ, ptSumToward/(4*PI/3)); _hist_pcptsum->fill(pTZ, (ptSumTrans1+ptSumTrans2)/(4*PI/3)); _hist_pmaxcptsum->fill(pTZ, (ptSumTrans1>ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/(2*PI/3)); _hist_pmincptsum->fill(pTZ, (ptSumTrans1fill(pTZ, fabs(ptSumTrans1-ptSumTrans2)/(2*PI/3)); _hist_acptsum->fill(pTZ, ptSumAway/(4*PI/3)); if (numToward > 0) { _hist_tcptave->fill(pTZ, ptSumToward/numToward); _hist_tcptmax->fill(pTZ, ptMaxToward); } if ((numTrans1+numTrans2) > 0) { _hist_pcptave->fill(pTZ, (ptSumTrans1+ptSumTrans2)/(numTrans1+numTrans2)); _hist_pcptmax->fill(pTZ, (ptMaxTrans1 > ptMaxTrans2 ? 
ptMaxTrans1 : ptMaxTrans2)); } // We need to subtract the two leptons from the number of particles to get the correct multiplicity _hist_zptvsnchg->fill(numParticles-2, pTZ); } void finalize() { } //@} private: Profile1DPtr _hist_tnchg; Profile1DPtr _hist_pnchg; Profile1DPtr _hist_pmaxnchg; Profile1DPtr _hist_pminnchg; Profile1DPtr _hist_pdifnchg; Profile1DPtr _hist_anchg; Profile1DPtr _hist_tcptsum; Profile1DPtr _hist_pcptsum; Profile1DPtr _hist_pmaxcptsum; Profile1DPtr _hist_pmincptsum; Profile1DPtr _hist_pdifcptsum; Profile1DPtr _hist_acptsum; Profile1DPtr _hist_tcptave; Profile1DPtr _hist_pcptave; Profile1DPtr _hist_tcptmax; Profile1DPtr _hist_pcptmax; Profile1DPtr _hist_zptvsnchg; Profile1DPtr _hist_cptavevsnchg; Profile1DPtr _hist_cptavevsnchgsmallzpt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2010_S8591881_DY); } diff --git a/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc b/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc --- a/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc +++ b/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc @@ -1,190 +1,190 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief CDF Run II underlying event in leading jet events /// @author Hendrik Hoeth /// /// Rick Field's measurement of the underlying event in "leading jet" events. /// The leading jet (CDF midpoint \f$ R = 0.7 \f$) must be within \f$|\eta| < 2 \f$ /// and defines the "toward" phi direction. Particles are selected in /// \f$ |\eta| < 1 \f$. For the \f$ p_\perp \f$-related observables there /// is a \f$ p_\perp > 0.5 \f$ GeV cut. For \f$ \sum E_\perp \f$ there is no /// \f$ p_\perp \f$ cut. /// /// @par Run conditions /// @arg \f$ \sqrt{s} = \f$ 1960 GeV /// @arg Run with generic QCD events. 
/// @arg Set particles with c*tau > 10 mm stable /// @arg Several \f$ p_\perp^\text{min} \f$ cutoffs are probably required to fill the profile histograms: /// @arg \f$ p_\perp^\text{min} = \f$ 0 (min bias), 10, 20, 50, 100, 150 GeV /// @arg The corresponding merging points are at \f$ p_T = \f$ 0, 30, 50, 80, 130, 180 GeV class CDF_2010_S8591881_QCD : public Analysis { public: /// Constructor CDF_2010_S8591881_QCD() : Analysis("CDF_2010_S8591881_QCD") { } /// @name Analysis methods //@{ void init() { // Final state for the jet finding - const FinalState fsj(-4.0, 4.0, 0.0*GeV); + const FinalState fsj((Cuts::etaIn(-4.0, 4.0))); declare(fsj, "FSJ"); declare(FastJets(fsj, FastJets::CDFMIDPOINT, 0.7), "MidpointJets"); // Charged final state for the distributions - const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.5*GeV)); declare(cfs, "CFS"); // Book histograms book(_hist_tnchg ,10, 1, 1); book(_hist_pnchg ,10, 1, 2); book(_hist_anchg ,10, 1, 3); book(_hist_pmaxnchg ,11, 1, 1); book(_hist_pminnchg ,11, 1, 2); book(_hist_pdifnchg ,11, 1, 3); book(_hist_tcptsum ,12, 1, 1); book(_hist_pcptsum ,12, 1, 2); book(_hist_acptsum ,12, 1, 3); book(_hist_pmaxcptsum ,13, 1, 1); book(_hist_pmincptsum ,13, 1, 2); book(_hist_pdifcptsum ,13, 1, 3); book(_hist_pcptave ,14, 1, 1); book(_hist_pcptmax ,15, 1, 1); } // Do the analysis void analyze(const Event& e) { /// @todo Implement Run II min bias trigger cf. CDF_2009? 
const FinalState& fsj = apply(e, "FSJ"); if (fsj.particles().size() < 1) { MSG_DEBUG("Failed multiplicity cut"); vetoEvent; } const Jets& jets = apply(e, "MidpointJets").jetsByPt(); MSG_DEBUG("Jet multiplicity = " << jets.size()); // We require the leading jet to be within |eta|<2 if (jets.size() < 1 || fabs(jets[0].eta()) >= 2) { MSG_DEBUG("Failed leading jet cut"); vetoEvent; } const double jetphi = jets[0].phi(); const double jeteta = jets[0].eta(); const double jetpT = jets[0].pT(); MSG_DEBUG("Leading jet: pT = " << jetpT << ", eta = " << jeteta << ", phi = " << jetphi); // Get the final states to work with for filling the distributions const FinalState& cfs = apply(e, "CFS"); size_t numOverall(0), numToward(0), numAway(0) ; long int numTrans1(0), numTrans2(0); double ptSumOverall(0.0), ptSumToward(0.0), ptSumTrans1(0.0), ptSumTrans2(0.0), ptSumAway(0.0); double ptMaxOverall(0.0), ptMaxToward(0.0), ptMaxTrans1(0.0), ptMaxTrans2(0.0), ptMaxAway(0.0); // Calculate all the charged stuff for (const Particle& p : cfs.particles()) { const double dPhi = deltaPhi(p.phi(), jetphi); const double pT = p.pT(); const double phi = p.phi(); double rotatedphi = phi - jetphi; while (rotatedphi < 0) rotatedphi += 2*PI; ptSumOverall += pT; ++numOverall; if (pT > ptMaxOverall) { ptMaxOverall = pT; } if (dPhi < PI/3.0) { ptSumToward += pT; ++numToward; if (pT > ptMaxToward) ptMaxToward = pT; } else if (dPhi < 2*PI/3.0) { if (rotatedphi <= PI) { ptSumTrans1 += pT; ++numTrans1; if (pT > ptMaxTrans1) ptMaxTrans1 = pT; } else { ptSumTrans2 += pT; ++numTrans2; if (pT > ptMaxTrans2) ptMaxTrans2 = pT; } } else { ptSumAway += pT; ++numAway; if (pT > ptMaxAway) ptMaxAway = pT; } } // end charged particle loop // Fill the histograms _hist_tnchg->fill(jetpT/GeV, numToward/(4*PI/3)); _hist_pnchg->fill(jetpT/GeV, (numTrans1+numTrans2)/(4*PI/3)); _hist_pmaxnchg->fill(jetpT/GeV, (numTrans1>numTrans2 ? 
numTrans1 : numTrans2)/(2*PI/3)); _hist_pminnchg->fill(jetpT/GeV, (numTrans1fill(jetpT/GeV, abs(numTrans1-numTrans2)/(2*PI/3)); _hist_anchg->fill(jetpT/GeV, numAway/(4*PI/3)); _hist_tcptsum->fill(jetpT/GeV, ptSumToward/GeV/(4*PI/3)); _hist_pcptsum->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(4*PI/3)); _hist_pmaxcptsum->fill(jetpT/GeV, (ptSumTrans1>ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/GeV/(2*PI/3)); _hist_pmincptsum->fill(jetpT/GeV, (ptSumTrans1fill(jetpT/GeV, fabs(ptSumTrans1-ptSumTrans2)/GeV/(2*PI/3)); _hist_acptsum->fill(jetpT/GeV, ptSumAway/GeV/(4*PI/3)); if ((numTrans1+numTrans2) > 0) { _hist_pcptave->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(numTrans1+numTrans2)); _hist_pcptmax->fill(jetpT/GeV, (ptMaxTrans1 > ptMaxTrans2 ? ptMaxTrans1 : ptMaxTrans2)/GeV); } } void finalize() { } //@} private: Profile1DPtr _hist_tnchg; Profile1DPtr _hist_pnchg; Profile1DPtr _hist_anchg; Profile1DPtr _hist_pmaxnchg; Profile1DPtr _hist_pminnchg; Profile1DPtr _hist_pdifnchg; Profile1DPtr _hist_tcptsum; Profile1DPtr _hist_pcptsum; Profile1DPtr _hist_acptsum; Profile1DPtr _hist_pmaxcptsum; Profile1DPtr _hist_pmincptsum; Profile1DPtr _hist_pdifcptsum; Profile1DPtr _hist_pcptave; Profile1DPtr _hist_pcptmax; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2010_S8591881_QCD); } diff --git a/analyses/pluginCDF/CDF_2012_NOTE10874.cc b/analyses/pluginCDF/CDF_2012_NOTE10874.cc --- a/analyses/pluginCDF/CDF_2012_NOTE10874.cc +++ b/analyses/pluginCDF/CDF_2012_NOTE10874.cc @@ -1,92 +1,92 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class CDF_2012_NOTE10874 : public Analysis { public: CDF_2012_NOTE10874() : Analysis("CDF_2012_NOTE10874") {} public: void init() { - const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.5*GeV)); declare(cfs, "CFS"); int isqrts = -1; if (fuzzyEquals(sqrtS(), 300*GeV)) isqrts = 1; else if 
(fuzzyEquals(sqrtS(), 900*GeV)) isqrts = 2; else if (fuzzyEquals(sqrtS(), 1960*GeV)) isqrts = 3; assert(isqrts >= 0); book(_h_nch_transverse ,1,1,isqrts); book(_h_ptSumDen ,2,1,isqrts); book(_h_avePt ,3,1,isqrts); } // Little helper function to identify Delta(phi) regions inline int region_index(double dphi) { assert(inRange(dphi, 0.0, PI, CLOSED, CLOSED)); if (dphi < PI/3.0) return 0; if (dphi < 2*PI/3.0) return 1; return 2; } void analyze(const Event& event) { const ChargedFinalState& cfs = apply(event, "CFS"); if (cfs.size() < 1) { vetoEvent; } Particles particles = cfs.particlesByPt(); Particle p_lead = particles[0]; const double philead = p_lead.phi(); const double pTlead = p_lead.pT(); int tNch = 0; double ptSum = 0.0; for (const Particle& p : particles) { const double pT = p.pT(); const double dPhi = deltaPhi(philead, p.phi()); const int ir = region_index(dPhi); if (ir==1) { tNch++; ptSum += pT; } } const double dEtadPhi = 4.0*PI/3.0; _h_nch_transverse->fill(pTlead/GeV, tNch/dEtadPhi); _h_ptSumDen->fill(pTlead/GeV, ptSum/dEtadPhi); if (tNch > 0) { _h_avePt->fill(pTlead/GeV, ptSum/tNch); } } void finalize() { } private: Profile1DPtr _h_nch_transverse, _h_ptSumDen, _h_avePt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CDF_2012_NOTE10874); } diff --git a/analyses/pluginCMS/CMS_2010_PAS_QCD_10_024.cc b/analyses/pluginCMS/CMS_2010_PAS_QCD_10_024.cc --- a/analyses/pluginCMS/CMS_2010_PAS_QCD_10_024.cc +++ b/analyses/pluginCMS/CMS_2010_PAS_QCD_10_024.cc @@ -1,84 +1,84 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Particle.hh" namespace Rivet { class CMS_2010_PAS_QCD_10_024 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor CMS_2010_PAS_QCD_10_024() : Analysis("CMS_2010_PAS_QCD_10_024"), _weight_pt05_eta08(0.), _weight_pt10_eta08(0.), _weight_pt05_eta24(0.), _weight_pt10_eta24(0.) 
{ } void init() { - declare(ChargedFinalState(-0.8, 0.8, 0.5*GeV), "CFS_08_05"); - declare(ChargedFinalState(-0.8, 0.8, 1.0*GeV), "CFS_08_10"); - declare(ChargedFinalState(-2.4, 2.4, 0.5*GeV), "CFS_24_05"); - declare(ChargedFinalState(-2.4, 2.4, 1.0*GeV), "CFS_24_10"); + declare(ChargedFinalState((Cuts::etaIn(-0.8, 0.8) && Cuts::pT >= 0.5*GeV)), "CFS_08_05"); + declare(ChargedFinalState((Cuts::etaIn(-0.8, 0.8) && Cuts::pT >= 1.0*GeV)), "CFS_08_10"); + declare(ChargedFinalState((Cuts::etaIn(-2.4, 2.4) && Cuts::pT >= 0.5*GeV)), "CFS_24_05"); + declare(ChargedFinalState((Cuts::etaIn(-2.4, 2.4) && Cuts::pT >= 1.0*GeV)), "CFS_24_10"); size_t offset = 0; if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) offset = 0; if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) offset = 4; book(_hist_dNch_deta_pt05_eta08 ,1+offset, 1, 1); book(_hist_dNch_deta_pt10_eta08 ,2+offset, 1, 1); book(_hist_dNch_deta_pt05_eta24 ,3+offset, 1, 1); book(_hist_dNch_deta_pt10_eta24 ,4+offset, 1, 1); } void analyze(const Event& event) { const double weight = 1.0; const ChargedFinalState& cfs_08_05 = apply(event, "CFS_08_05"); const ChargedFinalState& cfs_08_10 = apply(event, "CFS_08_10"); const ChargedFinalState& cfs_24_05 = apply(event, "CFS_24_05"); const ChargedFinalState& cfs_24_10 = apply(event, "CFS_24_10"); // Plot distributions if(!cfs_08_05.particles().empty()) _weight_pt05_eta08 += weight; if(!cfs_24_05.particles().empty()) _weight_pt05_eta24 += weight; for (const Particle& p : cfs_24_05.particles()) { _hist_dNch_deta_pt05_eta24->fill(p.eta(), weight); if(!cfs_08_05.particles().empty()) _hist_dNch_deta_pt05_eta08->fill(p.eta(), weight); } if(!cfs_08_10.particles().empty()) _weight_pt10_eta08 += weight; if(!cfs_24_10.particles().empty()) _weight_pt10_eta24 += weight; for (const Particle& p : cfs_24_10.particles()) { _hist_dNch_deta_pt10_eta24->fill(p.eta(), weight); if(!cfs_08_10.particles().empty()) _hist_dNch_deta_pt10_eta08->fill(p.eta(), weight); } } /// Normalise histograms etc., after the run void 
finalize() { scale(_hist_dNch_deta_pt05_eta08,1./_weight_pt05_eta08); scale(_hist_dNch_deta_pt10_eta08,1./_weight_pt10_eta08); scale(_hist_dNch_deta_pt05_eta24,1./_weight_pt05_eta24); scale(_hist_dNch_deta_pt10_eta24,1./_weight_pt10_eta24); } private: Histo1DPtr _hist_dNch_deta_pt05_eta08; Histo1DPtr _hist_dNch_deta_pt10_eta08; Histo1DPtr _hist_dNch_deta_pt05_eta24; Histo1DPtr _hist_dNch_deta_pt10_eta24; double _weight_pt05_eta08,_weight_pt10_eta08,_weight_pt05_eta24,_weight_pt10_eta24; }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2010_PAS_QCD_10_024); } diff --git a/analyses/pluginCMS/CMS_2010_S8547297.cc b/analyses/pluginCMS/CMS_2010_S8547297.cc --- a/analyses/pluginCMS/CMS_2010_S8547297.cc +++ b/analyses/pluginCMS/CMS_2010_S8547297.cc @@ -1,99 +1,99 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class CMS_2010_S8547297 : public Analysis { public: CMS_2010_S8547297() : Analysis("CMS_2010_S8547297") {} void init() { - ChargedFinalState cfs(-2.5, 2.5, 0.0*GeV); + ChargedFinalState cfs((Cuts::etaIn(-2.5, 2.5))); declare(cfs, "CFS"); if (fuzzyEquals(sqrtS()/GeV, 900)) { for (int d=1; d<=3; d++) { for (int y=1; y<=4; y++) { _h_dNch_dpT.push_back(Histo1DPtr()); book(_h_dNch_dpT.back(), d, 1, y); } } book(_h_dNch_dpT_all ,7, 1, 1); book(_h_dNch_dEta ,8, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 2360)) { for (int d=4; d<=6; d++) { for (int y=1; y<=4; y++) { _h_dNch_dpT.push_back(Histo1DPtr()); book(_h_dNch_dpT.back(), d, 1, y); } } book(_h_dNch_dpT_all ,7, 1, 2); book(_h_dNch_dEta ,8, 1, 2); } } void analyze(const Event& event) { const double weight = 1.0; //charged particles const ChargedFinalState& charged = apply(event, "CFS"); for (const Particle& p : charged.particles()) { //selecting only charged hadrons if (! PID::isHadron(p.pid())) continue; const double pT = p.pT(); const double eta = p.eta(); // The data is actually a duplicated folded distribution. This should mimic it. 
_h_dNch_dEta->fill(eta, 0.5*weight); _h_dNch_dEta->fill(-eta, 0.5*weight); if (fabs(eta) < 2.4 && pT > 0.1*GeV) { if (pT < 4.0*GeV) { _h_dNch_dpT_all->fill(pT/GeV, weight/(pT/GeV)); if (pT < 2.0*GeV) { int ietabin = int(fabs(eta)/0.2); _h_dNch_dpT[ietabin]->fill(pT/GeV, weight); } } } } } void finalize() { const double normfac = 1.0/sumOfWeights(); // Normalizing to unit eta is automatic // The pT distributions in bins of eta must be normalized to unit eta. This is a factor of 2 // for the |eta| times 0.2 (eta range). // The pT distributions over all eta are normalized to unit eta (2.0*2.4) and by 1/2*pi*pT. // The 1/pT part is taken care of in the filling. The 1/2pi is taken care of here. const double normpT = normfac/(2.0*0.2); const double normpTall = normfac/(2.0*M_PI*2.0*2.4); for (size_t ietabin=0; ietabin < _h_dNch_dpT.size(); ietabin++){ scale(_h_dNch_dpT[ietabin], normpT); } scale(_h_dNch_dpT_all, normpTall); scale(_h_dNch_dEta, normfac); } private: std::vector _h_dNch_dpT; Histo1DPtr _h_dNch_dpT_all; Histo1DPtr _h_dNch_dEta; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2010_S8547297); } diff --git a/analyses/pluginCMS/CMS_2010_S8656010.cc b/analyses/pluginCMS/CMS_2010_S8656010.cc --- a/analyses/pluginCMS/CMS_2010_S8656010.cc +++ b/analyses/pluginCMS/CMS_2010_S8656010.cc @@ -1,89 +1,89 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class CMS_2010_S8656010 : public Analysis { public: CMS_2010_S8656010() : Analysis("CMS_2010_S8656010") {} void init() { - ChargedFinalState cfs(-2.5, 2.5, 0.0*GeV); + ChargedFinalState cfs((Cuts::etaIn(-2.5, 2.5))); declare(cfs, "CFS"); for (int d=1; d<=3; d++) { for (int y=1; y<=4; y++) { _h_dNch_dpT.push_back(Histo1DPtr()); book(_h_dNch_dpT.back(), d, 1, y); } } book(_h_dNch_dpT_all ,4, 1, 1); book(_h_dNch_dEta ,5, 1, 1); } void analyze(const Event& event) { const double weight = 1.0; //charged particles const ChargedFinalState& charged = 
apply(event, "CFS"); for (const Particle& p : charged.particles()) { //selecting only charged hadrons if (! PID::isHadron(p.pid())) continue; const double pT = p.pT(); const double eta = p.eta(); // The data is actually a duplicated folded distribution. This should mimic it. _h_dNch_dEta->fill(eta, 0.5*weight); _h_dNch_dEta->fill(-eta, 0.5*weight); if (fabs(eta) < 2.4 && pT > 0.1*GeV) { if (pT < 6.0*GeV) { _h_dNch_dpT_all->fill(pT/GeV, weight/(pT/GeV)); if (pT < 2.0*GeV) { int ietabin = int(fabs(eta)/0.2); _h_dNch_dpT[ietabin]->fill(pT/GeV, weight); } } } } } void finalize() { const double normfac = 1.0/sumOfWeights(); // Normalizing to unit eta is automatic // The pT distributions in bins of eta must be normalized to unit eta. This is a factor of 2 // for the |eta| times 0.2 (eta range). // The pT distributions over all eta are normalized to unit eta (2.0*2.4) and by 1/2*pi*pT. // The 1/pT part is taken care of in the filling. The 1/2pi is taken care of here. const double normpT = normfac/(2.0*0.2); const double normpTall = normfac/(2.0*M_PI*2.0*2.4); for (size_t ietabin=0; ietabin < _h_dNch_dpT.size(); ietabin++){ scale(_h_dNch_dpT[ietabin], normpT); } scale(_h_dNch_dpT_all, normpTall); scale(_h_dNch_dEta, normfac); } private: std::vector _h_dNch_dpT; Histo1DPtr _h_dNch_dpT_all; Histo1DPtr _h_dNch_dEta; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2010_S8656010); } diff --git a/analyses/pluginCMS/CMS_2011_S8884919.cc b/analyses/pluginCMS/CMS_2011_S8884919.cc --- a/analyses/pluginCMS/CMS_2011_S8884919.cc +++ b/analyses/pluginCMS/CMS_2011_S8884919.cc @@ -1,124 +1,124 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Beam.hh" using namespace std; namespace Rivet { class CMS_2011_S8884919 : public Analysis { public: CMS_2011_S8884919() : Analysis("CMS_2011_S8884919") { } void init() { - ChargedFinalState cfs(-2.4, 2.4, 0.0*GeV); + ChargedFinalState cfs((Cuts::etaIn(-2.4, 
2.4))); declare(cfs, "CFS"); // eta bins _etabins.push_back(0.5); _etabins.push_back(1.0); _etabins.push_back(1.5); _etabins.push_back(2.0); _etabins.push_back(2.4) ; if (fuzzyEquals(sqrtS()/GeV, 900)) { for (size_t ietabin=0; ietabin < _etabins.size(); ietabin++) { _h_dNch_dn.push_back( Histo1DPtr() ); book( _h_dNch_dn.back(), 2 + ietabin, 1, 1); } book(_h_dNch_dn_pt500_eta24 ,20, 1, 1); book(_h_dmpt_dNch_eta24 ,23, 1, 1); } if (fuzzyEquals(sqrtS()/GeV, 2360)) { for (size_t ietabin=0; ietabin < _etabins.size(); ietabin++) { _h_dNch_dn.push_back( Histo1DPtr() ); book(_h_dNch_dn.back(), 7 + ietabin, 1, 1); } book(_h_dNch_dn_pt500_eta24 ,21, 1, 1); book(_h_dmpt_dNch_eta24 ,24, 1, 1); } if (fuzzyEquals(sqrtS()/GeV, 7000)) { for (size_t ietabin=0; ietabin < _etabins.size(); ietabin++) { _h_dNch_dn.push_back( Histo1DPtr() ); book(_h_dNch_dn.back(), 12 + ietabin, 1, 1); } book(_h_dNch_dn_pt500_eta24 ,22, 1, 1); book(_h_dmpt_dNch_eta24 ,25, 1, 1); } } void analyze(const Event& event) { const double weight = 1.0; // Get the charged particles const ChargedFinalState& charged = apply(event, "CFS"); // Resetting the multiplicity for the event to 0; vector _nch_in_Evt; vector _nch_in_Evt_pt500; _nch_in_Evt.assign(_etabins.size(), 0); _nch_in_Evt_pt500.assign(_etabins.size(), 0); double sumpt = 0; // Loop over particles in event for (const Particle& p : charged.particles()) { // Selecting only charged hadrons if (! 
PID::isHadron(p.pid())) continue; double pT = p.pT(); double eta = p.eta(); sumpt += pT; for (size_t ietabin = _etabins.size(); ietabin > 0; --ietabin) { if (fabs(eta) > _etabins[ietabin-1]) break; ++_nch_in_Evt[ietabin-1]; if (pT > 0.5/GeV) ++_nch_in_Evt_pt500[ietabin-1]; } } // Filling multiplicity-dependent histogramms for (size_t ietabin = 0; ietabin < _etabins.size(); ietabin++) { _h_dNch_dn[ietabin]->fill(_nch_in_Evt[ietabin], weight); } // Do only if eta bins are the needed ones if (_etabins[4] == 2.4 && _etabins[0] == 0.5) { if (_nch_in_Evt[4] != 0) { _h_dmpt_dNch_eta24->fill(_nch_in_Evt[4], sumpt/GeV / _nch_in_Evt[4], weight); } _h_dNch_dn_pt500_eta24->fill(_nch_in_Evt_pt500[4], weight); } else { MSG_WARNING("You changed the number of eta bins, but forgot to propagate it everywhere !!"); } } void finalize() { for (size_t ietabin = 0; ietabin < _etabins.size(); ietabin++){ normalize(_h_dNch_dn[ietabin]); } normalize(_h_dNch_dn_pt500_eta24); } private: vector _h_dNch_dn; Histo1DPtr _h_dNch_dn_pt500_eta24; Profile1DPtr _h_dmpt_dNch_eta24; vector _etabins; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2011_S8884919); } diff --git a/analyses/pluginCMS/CMS_2011_S8957746.cc b/analyses/pluginCMS/CMS_2011_S8957746.cc --- a/analyses/pluginCMS/CMS_2011_S8957746.cc +++ b/analyses/pluginCMS/CMS_2011_S8957746.cc @@ -1,100 +1,100 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/Thrust.hh" namespace Rivet { /// Rivet analysis class for CMS_2011_S8957746 dataset class CMS_2011_S8957746 : public Analysis { public: /// Constructor CMS_2011_S8957746() : Analysis("CMS_2011_S8957746") { } /// Initialization, called once before running void init() { // Projections - const FastJets jets(FinalState(-5.0, 5.0, 0.0*GeV), FastJets::ANTIKT, 0.5); + const FastJets jets(FinalState((Cuts::etaIn(-5.0, 5.0))), FastJets::ANTIKT, 0.5); declare(jets, "Jets"); // Book 
histograms book(_hist_T_90 ,1, 1, 1); book(_hist_m_90 ,2, 1, 1); book(_hist_T_125 ,3, 1, 1); book(_hist_m_125 ,4, 1, 1); book(_hist_T_200 ,5, 1, 1); book(_hist_m_200 ,6, 1, 1); } void analyze(const Event& event) { const double weight = 1.0; const Jets& jets = apply(event, "Jets").jetsByPt(30.0*GeV); if (jets.size() < 2 || fabs(jets[0].eta()) >= 1.3 || fabs(jets[1].eta()) >= 1.3 || jets[0].pT() < 90*GeV) { vetoEvent; } std::vector momenta; for (const Jet& j : jets) { if (j.abseta() < 1.3) { Vector3 mom = j.p3(); mom.setZ(0.0); momenta.push_back(mom); } } if (momenta.size() == 2) { // We need to use a ghost so that Thrust.calc() doesn't return 1. momenta.push_back(Vector3(1e-10*MeV, 0., 0.)); } Thrust thrust; thrust.calc(momenta); // The lowest bin also includes the underflow: const double T = max(log(1-thrust.thrust()), -12.0); const double M = max(log(thrust.thrustMajor()), -6.0); if (jets[0].pT()/GeV > 200) { _hist_T_200->fill(T, weight); _hist_m_200->fill(M, weight); } else if (jets[0].pT()/GeV > 125) { _hist_T_125->fill(T, weight); _hist_m_125->fill(M, weight); } else if (jets[0].pT()/GeV > 90) { _hist_T_90->fill(T, weight); _hist_m_90->fill(M, weight); } } void finalize() { normalize(_hist_T_90); normalize(_hist_m_90); normalize(_hist_T_125); normalize(_hist_m_125); normalize(_hist_T_200); normalize(_hist_m_200); } private: Histo1DPtr _hist_T_90; Histo1DPtr _hist_m_90; Histo1DPtr _hist_T_125; Histo1DPtr _hist_m_125; Histo1DPtr _hist_T_200; Histo1DPtr _hist_m_200; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2011_S8957746); } diff --git a/analyses/pluginCMS/CMS_2011_S9120041.cc b/analyses/pluginCMS/CMS_2011_S9120041.cc --- a/analyses/pluginCMS/CMS_2011_S9120041.cc +++ b/analyses/pluginCMS/CMS_2011_S9120041.cc @@ -1,144 +1,144 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" using namespace std; namespace Rivet { // 
UE charged particles vs. leading jet class CMS_2011_S9120041 : public Analysis { public: /// Constructor CMS_2011_S9120041() : Analysis("CMS_2011_S9120041") {} void init() { - const ChargedFinalState cfs(-2.0, 2.0, 500*MeV); + const ChargedFinalState cfs((Cuts::etaIn(-2.0, 2.0) && Cuts::pT >= 500*MeV)); declare(cfs, "CFS"); - const ChargedFinalState cfsforjet(-2.5, 2.5, 500*MeV); + const ChargedFinalState cfsforjet((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 500*MeV)); const FastJets jetpro(cfsforjet, FastJets::SISCONE, 0.5); declare(jetpro, "Jets"); if (fuzzyEquals(sqrtS(), 7.0*TeV)) { book(_h_Nch_vs_pT ,1, 1, 1); // Nch vs. pT_max book(_h_Sum_vs_pT ,2, 1, 1); // sum(pT) vs. pT_max book(_h_pT3_Nch ,5, 1, 1); // transverse Nch, pT_max > 3GeV book(_h_pT3_Sum ,6, 1, 1); // transverse sum(pT), pT_max > 3GeV book(_h_pT3_pT ,7, 1, 1); // transverse pT, pT_max > 3GeV book(_h_pT20_Nch ,8, 1, 1); // transverse Nch, pT_max > 20GeV book(_h_pT20_Sum ,9, 1, 1); // transverse sum(pT), pT_max > 20GeV book(_h_pT20_pT ,10, 1, 1); // transverse pT, pT_max > 20GeV } if (fuzzyEquals(sqrtS(), 0.9*TeV)) { book(_h_Nch_vs_pT ,3, 1, 1); // Nch vs. pT_max book(_h_Sum_vs_pT ,4, 1, 1); // sum(pT) vs. pT_max book(_h_pT3_Nch ,11, 1, 1); // transverse Nch, pT_max > 3GeV book(_h_pT3_Sum ,12, 1, 1); // transverse sum(pT), pT_max > 3GeV book(_h_pT3_pT ,13, 1, 1); // transverse pT, pT_max > 3GeV } book(sumOfWeights3, "TMP/sumOfWeights3"); book(sumOfWeights20, "TMP/sumOfWeights20"); book(_nch_tot_pT3, "TMP/nch_tot_pT3"); book(_nch_tot_pT20, "TMP/nch_tot_pT20"); } /// Perform the per-event analysis void analyze(const Event& event) { // Find the lead jet, applying a restriction that the jets must be within |eta| < 2. 
FourMomentum p_lead; for (const Jet& j : apply(event, "Jets").jetsByPt(1.0*GeV)) { if (j.abseta() < 2.0) { p_lead = j.momentum(); break; } } if (p_lead.isZero()) vetoEvent; const double philead = p_lead.phi(); const double pTlead = p_lead.pT(); Particles particles = apply(event, "CFS").particlesByPt(); int nTransverse = 0; double ptSumTransverse = 0.; for (const Particle& p : particles) { double dphi = fabs(deltaPhi(philead, p.phi())); if (dphi>PI/3. && dphi 3.0*GeV) _h_pT3_pT->fill(pT); if (fuzzyEquals(sqrtS(), 7.0*TeV) && pTlead > 20.0*GeV) _h_pT20_pT->fill(pT); } } const double area = 8./3. * PI; _h_Nch_vs_pT->fill(pTlead/GeV, 1./area*nTransverse); _h_Sum_vs_pT->fill(pTlead/GeV, 1./area*ptSumTransverse); if(pTlead > 3.0*GeV) { _h_pT3_Nch->fill(nTransverse); _h_pT3_Sum->fill(ptSumTransverse); sumOfWeights3->fill(); _nch_tot_pT3->fill(nTransverse); } if (fuzzyEquals(sqrtS(), 7.0*TeV) && pTlead > 20.0*GeV) { _h_pT20_Nch->fill(nTransverse); _h_pT20_Sum->fill(ptSumTransverse); sumOfWeights20->fill(); _nch_tot_pT20->fill(nTransverse); } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_pT3_Nch); normalize(_h_pT3_Sum); if (sumOfWeights3->val() != 0.0) normalize(_h_pT3_pT, *_nch_tot_pT3 / *sumOfWeights3); if (fuzzyEquals(sqrtS(), 7.0*TeV)) { normalize(_h_pT20_Nch); normalize(_h_pT20_Sum); if (sumOfWeights20->val() != 0.0) normalize(_h_pT20_pT, *_nch_tot_pT20 / *sumOfWeights20); } } private: CounterPtr sumOfWeights3; CounterPtr sumOfWeights20; CounterPtr _nch_tot_pT3; CounterPtr _nch_tot_pT20; Profile1DPtr _h_Nch_vs_pT; Profile1DPtr _h_Sum_vs_pT; Histo1DPtr _h_pT3_Nch; Histo1DPtr _h_pT3_Sum; Histo1DPtr _h_pT3_pT; Histo1DPtr _h_pT20_Nch; Histo1DPtr _h_pT20_Sum; Histo1DPtr _h_pT20_pT; }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2011_S9120041); } diff --git a/analyses/pluginCMS/CMS_2011_S9215166.cc b/analyses/pluginCMS/CMS_2011_S9215166.cc --- a/analyses/pluginCMS/CMS_2011_S9215166.cc +++ 
b/analyses/pluginCMS/CMS_2011_S9215166.cc @@ -1,112 +1,112 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class CMS_2011_S9215166 : public Analysis { public: /// Constructor CMS_2011_S9215166() : Analysis("CMS_2011_S9215166") { } void init() { - const FinalState fs(-6.0, 6.0, 0.0*GeV); + const FinalState fs((Cuts::etaIn(-6.0, 6.0))); declare(fs, "FS"); declare(FastJets(fs, FastJets::ANTIKT, 0.5), "Jets"); VetoedFinalState fsv(fs); fsv.vetoNeutrinos(); fsv.addVetoPairDetail(PID::MUON, 0.0*GeV, 99999.9*GeV); declare(fsv, "fsv"); // For the MB ND selection - const ChargedFinalState fschrgd(-6.0,6.0,0.0*GeV); + const ChargedFinalState fschrgd((Cuts::etaIn(-6.0,6.0))); declare(fschrgd, "fschrgd"); VetoedFinalState fschrgdv(fschrgd); fschrgdv.vetoNeutrinos(); declare(fschrgdv, "fschrgdv"); if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { book(_hist_mb ,1, 1, 1); // energy flow in MB, 0.9 TeV book(_hist_dijet ,2, 1, 1); // energy flow in dijet events, 0.9 TeV } else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) { book(_hist_mb ,3, 1, 1); // energy flow in MB, 7 TeV book(_hist_dijet ,4, 1, 1); // energy flow in dijet events, 7 TeV } book(_weightMB, "/tmp/weightMB"); book(_weightDiJet, "/tmp/weightDijet"); } void analyze(const Event& event) { // Skip if the event is empty const FinalState& fsv = apply(event, "fsv"); if (fsv.empty()) vetoEvent; // Veto diffractive topologies according to defined hadron level double count_chrg_forward = 0; double count_chrg_backward = 0; const FinalState& fschrgdv = apply(event, "fschrgdv"); for (const Particle& p : fschrgdv.particles()) { if (3.9 < p.eta() && p.eta() < 4.4) count_chrg_forward++; if (-4.4 < p.eta() && p.eta() < -3.9) count_chrg_backward++; } if (count_chrg_forward == 0 || count_chrg_backward == 0) vetoEvent; /// @todo "Diffractive" veto 
should really also veto dijet events? // MINIMUM BIAS EVENTS _weightMB->fill(); for (const Particle& p: fsv.particles()) { _hist_mb->fill(p.abseta(), p.E()/GeV); } // DIJET EVENTS double PTCUT = -1.0; if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) PTCUT = 8.0*GeV; else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) PTCUT = 20.0*GeV; const FastJets& jetpro = apply(event, "Jets"); const Jets jets = jetpro.jetsByPt(PTCUT); if (jets.size() >= 2) { // eta cut for the central jets if (fabs(jets[0].eta()) < 2.5 && fabs(jets[1].eta()) < 2.5) { // Back to back condition of the jets const double diffphi = deltaPhi(jets[1].phi(), jets[0].phi()); if (diffphi-PI < 1.0) { _weightDiJet->fill(); for (const Particle& p: fsv.particles()) { _hist_dijet->fill(p.abseta(), p.E()/GeV); } } } } } void finalize() { scale(_hist_mb , 0.5/_weightMB->sumW()); scale(_hist_dijet, 0.5/_weightDiJet->sumW()); } private: Histo1DPtr _hist_mb, _hist_dijet; CounterPtr _weightMB, _weightDiJet; }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2011_S9215166); } diff --git a/analyses/pluginCMS/CMS_2012_I1107658.cc b/analyses/pluginCMS/CMS_2012_I1107658.cc --- a/analyses/pluginCMS/CMS_2012_I1107658.cc +++ b/analyses/pluginCMS/CMS_2012_I1107658.cc @@ -1,172 +1,172 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// Underlying event activity in the Drell-Yan process at 7 TeV class CMS_2012_I1107658 : public Analysis { public: /// Constructor CMS_2012_I1107658() : Analysis("CMS_2012_I1107658") { } /// Initialization void init() { /// @note Using a bare muon Z (but with a clustering radius!?) 
Cut cut = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; ZFinder zfinder(FinalState(), cut, PID::MUON, 4*GeV, 140*GeV, 0.2, ZFinder::ClusterPhotons::NONE); declare(zfinder, "ZFinder"); - ChargedFinalState cfs(-2, 2, 500*MeV); + ChargedFinalState cfs((Cuts::etaIn(-2, 2) && Cuts::pT >= 500*MeV)); VetoedFinalState nonmuons(cfs); nonmuons.addVetoPairId(PID::MUON); declare(nonmuons, "nonmuons"); book(_h_Nchg_towards_pTmumu ,1, 1, 1); book(_h_Nchg_transverse_pTmumu ,2, 1, 1); book(_h_Nchg_away_pTmumu ,3, 1, 1); book(_h_pTsum_towards_pTmumu ,4, 1, 1); book(_h_pTsum_transverse_pTmumu ,5, 1, 1); book(_h_pTsum_away_pTmumu ,6, 1, 1); book(_h_avgpT_towards_pTmumu ,7, 1, 1); book(_h_avgpT_transverse_pTmumu ,8, 1, 1); book(_h_avgpT_away_pTmumu ,9, 1, 1); book(_h_Nchg_towards_plus_transverse_Mmumu ,10, 1, 1); book(_h_pTsum_towards_plus_transverse_Mmumu ,11, 1, 1); book(_h_avgpT_towards_plus_transverse_Mmumu ,12, 1, 1); book(_h_Nchg_towards_zmass_81_101 ,13, 1, 1); book(_h_Nchg_transverse_zmass_81_101 ,14, 1, 1); book(_h_Nchg_away_zmass_81_101 ,15, 1, 1); book(_h_pT_towards_zmass_81_101 ,16, 1, 1); book(_h_pT_transverse_zmass_81_101 ,17, 1, 1); book(_h_pT_away_zmass_81_101 ,18, 1, 1); book(_h_Nchg_transverse_zpt_5 ,19, 1, 1); book(_h_pT_transverse_zpt_5 ,20, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; const ZFinder& zfinder = apply(event, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; double Zpt = zfinder.bosons()[0].pT()/GeV; double Zphi = zfinder.bosons()[0].phi(); double Zmass = zfinder.bosons()[0].mass()/GeV; Particles particles = apply(event, "nonmuons").particles(); int nTowards = 0; int nTransverse = 0; int nAway = 0; double ptSumTowards = 0; double ptSumTransverse = 0; double ptSumAway = 0; for (const Particle& p : particles) { double dphi = fabs(deltaPhi(Zphi, p.phi())); double pT = p.pT(); if ( dphi < M_PI/3 ) { nTowards++; ptSumTowards += pT; if (Zmass > 81. && Zmass < 101.) 
_h_pT_towards_zmass_81_101->fill(pT, weight); } else if ( dphi < 2.*M_PI/3 ) { nTransverse++; ptSumTransverse += pT; if (Zmass > 81. && Zmass < 101.) _h_pT_transverse_zmass_81_101->fill(pT, weight); if (Zpt < 5.) _h_pT_transverse_zpt_5->fill(pT, weight); } else { nAway++; ptSumAway += pT; if (Zmass > 81. && Zmass < 101.) _h_pT_away_zmass_81_101->fill(pT, weight); } } // Loop over particles const double area = 8./3.*M_PI; if (Zmass > 81. && Zmass < 101.) { _h_Nchg_towards_pTmumu-> fill(Zpt, 1./area * nTowards, weight); _h_Nchg_transverse_pTmumu-> fill(Zpt, 1./area * nTransverse, weight); _h_Nchg_away_pTmumu-> fill(Zpt, 1./area * nAway, weight); _h_pTsum_towards_pTmumu-> fill(Zpt, 1./area * ptSumTowards, weight); _h_pTsum_transverse_pTmumu-> fill(Zpt, 1./area * ptSumTransverse, weight); _h_pTsum_away_pTmumu-> fill(Zpt, 1./area * ptSumAway, weight); if (nTowards > 0) _h_avgpT_towards_pTmumu-> fill(Zpt, ptSumTowards/nTowards, weight); if (nTransverse > 0) _h_avgpT_transverse_pTmumu-> fill(Zpt, ptSumTransverse/nTransverse, weight); if (nAway > 0) _h_avgpT_away_pTmumu-> fill(Zpt, ptSumAway/nAway, weight); _h_Nchg_towards_zmass_81_101-> fill(nTowards, weight); _h_Nchg_transverse_zmass_81_101->fill(nTransverse, weight); _h_Nchg_away_zmass_81_101-> fill(nAway, weight); } if (Zpt < 5.) 
{ _h_Nchg_towards_plus_transverse_Mmumu->fill(Zmass, (nTowards + nTransverse)/(2.*area), weight); _h_pTsum_towards_plus_transverse_Mmumu->fill(Zmass, (ptSumTowards + ptSumTransverse)/(2.*area), weight); if ((nTowards + nTransverse) > 0) _h_avgpT_towards_plus_transverse_Mmumu->fill(Zmass, (ptSumTowards + ptSumTransverse)/(nTowards + nTransverse), weight); _h_Nchg_transverse_zpt_5->fill(nTransverse, weight); } } /// Normalise histograms etc., after the run void finalize() { scale(_h_pT_towards_zmass_81_101, safediv(1, _h_Nchg_towards_zmass_81_101->integral(), 0)); scale(_h_pT_transverse_zmass_81_101, safediv(1, _h_Nchg_transverse_zmass_81_101->integral(), 0)); scale(_h_pT_away_zmass_81_101, safediv(1, _h_Nchg_away_zmass_81_101->integral(), 0)); scale(_h_pT_transverse_zpt_5, safediv(1, _h_Nchg_transverse_zpt_5->integral(), 0)); normalize(_h_Nchg_towards_zmass_81_101); normalize(_h_Nchg_transverse_zmass_81_101); normalize(_h_Nchg_away_zmass_81_101); normalize(_h_Nchg_transverse_zpt_5); } private: /// @name Histogram objects //@{ Profile1DPtr _h_Nchg_towards_pTmumu; Profile1DPtr _h_Nchg_transverse_pTmumu; Profile1DPtr _h_Nchg_away_pTmumu; Profile1DPtr _h_pTsum_towards_pTmumu; Profile1DPtr _h_pTsum_transverse_pTmumu; Profile1DPtr _h_pTsum_away_pTmumu; Profile1DPtr _h_avgpT_towards_pTmumu; Profile1DPtr _h_avgpT_transverse_pTmumu; Profile1DPtr _h_avgpT_away_pTmumu; Profile1DPtr _h_Nchg_towards_plus_transverse_Mmumu; Profile1DPtr _h_pTsum_towards_plus_transverse_Mmumu; Profile1DPtr _h_avgpT_towards_plus_transverse_Mmumu; Histo1DPtr _h_Nchg_towards_zmass_81_101; Histo1DPtr _h_Nchg_transverse_zmass_81_101; Histo1DPtr _h_Nchg_away_zmass_81_101; Histo1DPtr _h_pT_towards_zmass_81_101; Histo1DPtr _h_pT_transverse_zmass_81_101; Histo1DPtr _h_pT_away_zmass_81_101; Histo1DPtr _h_Nchg_transverse_zpt_5; Histo1DPtr _h_pT_transverse_zpt_5; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_I1107658); } diff --git a/analyses/pluginCMS/CMS_2012_I1111014.cc 
b/analyses/pluginCMS/CMS_2012_I1111014.cc --- a/analyses/pluginCMS/CMS_2012_I1111014.cc +++ b/analyses/pluginCMS/CMS_2012_I1111014.cc @@ -1,185 +1,185 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/JetShape.hh" namespace Rivet { /// @brief CMS jet shape analysis /// @author Andreas Hinzmann class CMS_2012_I1111014 : public Analysis { public: /// Constructor CMS_2012_I1111014() : Analysis("CMS_2012_I1111014") { } /// @name Analysis methods //@{ void init() { // Set up projections - const FinalState fs(-5.0, 5.0); + const FinalState fs((Cuts::etaIn(-5.0, 5.0))); declare(fs, "FS"); FastJets fj5(fs, FastJets::ANTIKT, 0.5); declare(fj5, "Jets5"); FastJets fj7(fs, FastJets::ANTIKT, 0.7); declare(fj7, "Jets7"); // Specify pT bins _ptedges = {{ 20.,25.,30.,40.,50.,60.,70.,80.,90.,100.,110.,125.,140.,160.,180.,200.,225.,250.,300.,400.,500.,600.,1000. 
}}; _yedges = {{ 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0 }}; // Register a jet shape projection and histogram for each pT bin unsigned int histo_counter=1; for (size_t j = 0; j < 6; ++j) { for (size_t i = 0; i < 22; ++i) { if (i > 20 && j == 3) continue; if (i > 18 && j >= 4) continue; // Set up projections for each (pT,y) bin _jsnames_pT[i][j] = "JetShape" + to_str(i) + "_" + to_str(j); const JetShape jsp(fj7, 0.0, 0.7, 7, _ptedges[i], _ptedges[i+1], _yedges[j], _yedges[j+1], RAPIDITY); declare(jsp, _jsnames_pT[i][j]); // Book profile histograms for each (pT,y) bin book(_profhistRho_pT[i][j], histo_counter, 1, 1); histo_counter+=1; } } book(_profhistNch[0], 126, 1, 1); book(_profhistNch[1], 126, 1, 2); book(_profhistDr[0], 127, 1, 1); book(_profhistDr[1], 127, 1, 2); book(_profhistDeta, "TMP/Deta", refData(127,1,1)); book(_profhistDphi, "TMP/Dphi", refData(127,1,1)); book(_profhistAsym, "d128-x01-y01", true); } /// Do the analysis void analyze(const Event& evt) { // Get jets and require at least one to pass pT and y cuts Jets jets7 = apply(evt, "Jets7") .jetsByPt(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) && Cuts::absrap < 3.0); if(jets7.size()>2) jets7.resize(2); // Use only the first two jets MSG_DEBUG("Jet (R=0.7) multiplicity before cuts = " << jets7.size()); if (jets7.size() == 0) { MSG_DEBUG("No jets (R=0.7) found in required pT and rapidity range"); vetoEvent; } // Calculate and histogram jet shapes for (size_t jy = 0; jy < 6; ++jy) { for (size_t ipt = 0; ipt < 22; ++ipt) { if (ipt > 20 && jy == 3) continue; if (ipt > 18 && jy >= 4) continue; JetShape jsipt = apply(evt, _jsnames_pT[ipt][jy]); jsipt.calc(jets7); for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) { const double r_rho = jsipt.rBinMid(rbin); _profhistRho_pT[ipt][jy]->fill(r_rho, (1./0.1)*jsipt.diffJetShape(ijet, rbin)); } } } } // Get jets and require at least one to pass pT and y cuts Jets jets5 = apply(evt, "Jets5") 
.jetsByPt(Cuts::ptIn(50*GeV, 1000*GeV) && Cuts::absrap < 2.0); // Calculate and histogram charged jet shapes for (const Jet& jet : jets5) { double ncharge = 0; double eta=0; double phi=0; double sumpt=0; for (const Particle& p : jet.particles()) { if ((p.pT() < 0.5) || (p.charge3()==0) || (abs(p.pid())==11) || (abs(p.pid())==13)) continue; ncharge++; sumpt+=p.pT(); eta+=p.pT()*p.eta(); phi+=p.pT()*mapAngleMPiToPi(p.phi()-jet.phi()); } if (jet.absrap()<1.0) { _profhistNch[0]->fill(jet.pT(), ncharge ); } else if (jet.absrap()<2.0) { _profhistNch[1]->fill(jet.pT(), ncharge ); } if (sumpt==0) continue; eta/=sumpt; phi/=sumpt; double deta=0; double dphi=0; for (const Particle& p : jet.particles()) { if ((p.pT() < 0.5) || (p.charge3()==0) || (abs(p.pid())==11) || (abs(p.pid())==13)) continue; deta+=p.pT()*pow(p.eta()-eta,2); dphi+=p.pT()*pow(mapAngleMPiToPi(p.phi()-phi-jet.phi()),2); } deta/=sumpt; dphi/=sumpt; if ((dphi==0)||(deta==0)) continue; if (jet.absrap()<1.0) { _profhistDr[0]->fill(jet.pT(), deta+dphi ); _profhistDeta->fill(jet.pT(), deta ); _profhistDphi->fill(jet.pT(), dphi ); } else if (jet.absrap()<2.0) { _profhistDr[1]->fill(jet.pT(), deta+dphi ); } } } // Finalize void finalize() { for (unsigned int i = 0; i < _profhistAsym->numPoints(); ++i) { if((_profhistDeta->bin(i).numEntries()<2)||(_profhistDphi->bin(i).numEntries()<2)) continue; if((_profhistDeta->bin(i).mean()==0)||(_profhistDphi->bin(i).mean()==0)) continue; double mean_ratio=_profhistDeta->bin(i).mean() / _profhistDphi->bin(i).mean(); double mean_error=mean_ratio*sqrt(pow(_profhistDeta->bin(i).stdErr()/_profhistDeta->bin(i).mean(),2)+pow(_profhistDphi->bin(i).stdErr()/_profhistDphi->bin(i).mean(),2)); _profhistAsym->point(i).setY(mean_ratio,mean_error); } } //@} private: /// @name Analysis data //@{ /// Jet \f$ p_\perp\f$ bins. vector _ptedges; // This can't be a raw array if we want to initialise it non-painfully vector _yedges; /// JetShape projection name for each \f$p_\perp\f$ bin. 
string _jsnames_pT[22][6]; //@} /// @name Histograms //@{ Profile1DPtr _profhistRho_pT[22][6]; Profile1DPtr _profhistNch[2]; Profile1DPtr _profhistDr[2]; Profile1DPtr _profhistDeta; Profile1DPtr _profhistDphi; Scatter2DPtr _profhistAsym; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_I1111014); } diff --git a/analyses/pluginCMS/CMS_2012_I1184941.cc b/analyses/pluginCMS/CMS_2012_I1184941.cc --- a/analyses/pluginCMS/CMS_2012_I1184941.cc +++ b/analyses/pluginCMS/CMS_2012_I1184941.cc @@ -1,70 +1,70 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { class CMS_2012_I1184941 : public Analysis { public: CMS_2012_I1184941() : Analysis("CMS_2012_I1184941") { } void init() { FinalState fs; declare(fs, "FS"); - const FastJets jets(FinalState(-4.9, 4.9, 0.0*GeV), FastJets::ANTIKT, 0.5); + const FastJets jets(FinalState((Cuts::etaIn(-4.9, 4.9))), FastJets::ANTIKT, 0.5); declare(jets, "AntiKtJets05"); book(_h_xi ,1, 1, 1); } void analyze(const Event& event) { double xiM = 0.; double xiP = 0.; const Jets jets = apply(event, "AntiKtJets05").jetsByPt(20.*GeV); if (jets.size() < 2) vetoEvent; // require a dijet system with a 20 GeV cut on both jets if (fabs(jets[0].eta()) > 4.4 || fabs(jets[1].eta()) > 4.4) vetoEvent; const FinalState& fsp = apply(event, "FS"); for (const Particle& p : fsp.particles(cmpMomByEta)) { const double eta = p.eta(); const double energy = p.E(); const double costheta = cos(p.theta()); // Yes, they really correct to +/- infinity, using Pythia 8 ... if (eta < 4.9) xiP += (energy + energy*costheta); if (eta > -4.9 ) xiM += (energy - energy*costheta); } xiP = xiP / (sqrtS()/GeV); xiM = xiM / (sqrtS()/GeV); const double weight = 1.0; _h_xi->fill( xiM, weight ); // Fill the histogram both with xiP and xiM, and get the average in the endjob. 
_h_xi->fill( xiP, weight ); } void finalize() { scale( _h_xi, crossSection()/microbarn/sumOfWeights() / 2.); } private: Histo1DPtr _h_xi; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_I1184941); } diff --git a/analyses/pluginCMS/CMS_2012_I1193338.cc b/analyses/pluginCMS/CMS_2012_I1193338.cc --- a/analyses/pluginCMS/CMS_2012_I1193338.cc +++ b/analyses/pluginCMS/CMS_2012_I1193338.cc @@ -1,82 +1,82 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { class CMS_2012_I1193338 : public Analysis { public: CMS_2012_I1193338() : Analysis("CMS_2012_I1193338") { } void init() { - declare(ChargedFinalState(-2.4, 2.4, 0.2*GeV), "CFS"); + declare(ChargedFinalState((Cuts::etaIn(-2.4, 2.4) && Cuts::pT >= 0.2*GeV)), "CFS"); declare(FinalState(), "FS"); book(_h_sigma ,1, 1, 1); } void analyze(const Event& event) { const double weight = 1.0; const ChargedFinalState& cfs = apply(event, "CFS"); if (cfs.size() > 1) {_h_sigma->fill(1.5, weight);} if (cfs.size() > 2) {_h_sigma->fill(2.5, weight);} if (cfs.size() > 3) {_h_sigma->fill(3.5, weight);} const FinalState& fs = apply(event, "FS"); if (fs.size() < 2) vetoEvent; // need at least two particles to calculate gaps double gapcenter = 0.; double LRG = 0.; double etapre = 0.; bool first = true; for(const Particle& p : fs.particles(cmpMomByEta)) { // sorted from minus to plus if (first) { // First particle first = false; etapre = p.eta(); } else { double gap = fabs(p.eta()-etapre); if (gap > LRG) { LRG = gap; // largest gap gapcenter = (p.eta()+etapre)/2.; // find the center of the gap to separate the X and Y systems. } etapre = p.eta(); } } FourMomentum mxFourVector, myFourVector; for(const Particle& p : fs.particles(cmpMomByEta)) { ((p.eta() > gapcenter) ? 
mxFourVector : myFourVector) += p.momentum(); } const double M2 = max(mxFourVector.mass2(), myFourVector.mass2()); const double xi = M2/sqr(sqrtS()); // sqrt(s)=7000 GeV, note that units cancel if (xi < 5e-6) vetoEvent; _h_sigma->fill(0.5, weight); } void finalize() { scale(_h_sigma, crossSection()/millibarn/sumOfWeights()); } private: Histo1DPtr _h_sigma; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_I1193338); } diff --git a/analyses/pluginCMS/CMS_2013_I1209721.cc b/analyses/pluginCMS/CMS_2013_I1209721.cc --- a/analyses/pluginCMS/CMS_2013_I1209721.cc +++ b/analyses/pluginCMS/CMS_2013_I1209721.cc @@ -1,173 +1,173 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/Thrust.hh" namespace Rivet { /// CMS Z+jets delta(phi) and jet thrust measurement at 7 TeV class CMS_2013_I1209721 : public Analysis { public: CMS_2013_I1209721() : Analysis("CMS_2013_I1209721") { } /// Book projections and histograms void init() { // Full final state - const FinalState fs(-5.0,5.0); + const FinalState fs((Cuts::etaIn(-5.0,5.0))); declare(fs, "FS"); // Z finders for electrons and muons Cut cuts = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; const ZFinder zfe(fs, cuts, PID::ELECTRON, 71*GeV, 111*GeV); const ZFinder zfm(fs, cuts, PID::MUON, 71*GeV, 111*GeV); declare(zfe, "ZFE"); declare(zfm, "ZFM"); // Jets const FastJets jets(fs, FastJets::ANTIKT, 0.5); declare(jets, "JETS"); // Book histograms from data for (size_t i = 0; i < 2; ++i) { book(_histDeltaPhiZJ1_1[i] ,1+i*9, 1, 1); book(_histDeltaPhiZJ1_2[i] ,2+i*9, 1, 1); book(_histDeltaPhiZJ1_3[i] ,4+i*9, 1, 1); book(_histDeltaPhiZJ2_3[i] ,5+i*9, 1, 1); book(_histDeltaPhiZJ3_3[i] ,3+i*9, 1, 1); book(_histDeltaPhiJ1J2_3[i] ,6+i*9, 1, 1); book(_histDeltaPhiJ1J3_3[i] ,7+i*9, 1, 1); book(_histDeltaPhiJ2J3_3[i] ,8+i*9, 1, 1); book(_histTransvThrust[i] ,9+i*9, 1, 1); } } void analyze(const Event& 
event) { const double weight = 1.0; // Apply the Z finders const ZFinder& zfe = apply(event, "ZFE"); const ZFinder& zfm = apply(event, "ZFM"); // Choose the Z candidate (there must be one) if (zfe.empty() && zfm.empty()) vetoEvent; const Particles& z = !zfm.empty() ? zfm.bosons() : zfe.bosons(); const Particles& leptons = !zfm.empty() ? zfm.constituents() : zfe.constituents(); // Determine whether we are in the boosted regime const bool is_boosted = (z[0].pT() > 150*GeV); // Build the jets const FastJets& jetfs = apply(event, "JETS"); const Jets& jets = jetfs.jetsByPt(Cuts::pT > 50*GeV && Cuts::abseta < 2.5); // Clean the jets against the lepton candidates, as in the paper, with a deltaR cut of 0.4 against the clustered leptons vector cleanedJets; for (size_t i = 0; i < jets.size(); ++i) { bool isolated = true; for (size_t j = 0; j < 2; ++j) { if (deltaR(leptons[j], jets[i]) < 0.4) { isolated = false; break; } } if (isolated) cleanedJets.push_back(&jets[i]); } // Require at least 1 jet const unsigned int Njets = cleanedJets.size(); if (Njets < 1) vetoEvent; // Now compute the thrust // Collect Z and jets transverse momenta to calculate transverse thrust vector momenta; momenta.clear(); Vector3 mom = z[0].p3(); mom.setZ(0); momenta.push_back(mom); for (size_t i = 0; i < cleanedJets.size(); ++i) { Vector3 mj = cleanedJets[i]->momentum().p3(); mj.setZ(0); momenta.push_back(mj); } if (momenta.size() <= 2){ // We need to use a ghost so that Thrust.calc() doesn't return 1. 
momenta.push_back(Vector3(0.0000001,0.0000001,0.)); } Thrust thrust; thrust.calc(momenta); const double T = thrust.thrust(); FILLx2(_histTransvThrust, is_boosted, log(max(1-T, 1e-6)), weight); const double dphiZJ1 = deltaPhi(z[0], *cleanedJets[0]); FILLx2(_histDeltaPhiZJ1_1, is_boosted, dphiZJ1, weight); if (Njets > 1) { FILLx2(_histDeltaPhiZJ1_2, is_boosted, dphiZJ1, weight); if (Njets > 2) { FILLx2(_histDeltaPhiZJ1_3, is_boosted, dphiZJ1, weight); FILLx2(_histDeltaPhiZJ2_3, is_boosted, deltaPhi(z[0], *cleanedJets[1]), weight); FILLx2(_histDeltaPhiZJ3_3, is_boosted, deltaPhi(z[0], *cleanedJets[2]), weight); FILLx2(_histDeltaPhiJ1J2_3, is_boosted, deltaPhi(*cleanedJets[0], *cleanedJets[1]), weight); FILLx2(_histDeltaPhiJ1J3_3, is_boosted, deltaPhi(*cleanedJets[0], *cleanedJets[2]), weight); FILLx2(_histDeltaPhiJ2J3_3, is_boosted, deltaPhi(*cleanedJets[1], *cleanedJets[2]), weight); } } } /// Normalizations /// @note Most of these data normalizations neglect the overflow bins void finalize() { for (size_t i = 0; i < 2; ++i) { normalize(_histDeltaPhiZJ1_1[i], 1, false); normalize(_histDeltaPhiZJ1_2[i], 1, false); normalize(_histDeltaPhiZJ1_3[i], 1, false); normalize(_histDeltaPhiZJ2_3[i], 1, false); normalize(_histDeltaPhiZJ3_3[i], 1, false); normalize(_histDeltaPhiJ1J2_3[i], 1, false); normalize(_histDeltaPhiJ1J3_3[i], 1, false); normalize(_histDeltaPhiJ2J3_3[i], 1, false); normalize(_histTransvThrust[i]); } } private: // Define a helper to appropriately fill both unboosted and boosted histo versions void FILLx2(Histo1DPtr* HNAME, bool is_boosted, double VAL, double weight) { double x = VAL; for (size_t i = 0; i < 2; ++i) { if (i == 0 || is_boosted) HNAME[i]->fill(x, weight); } } // Arrays of unboosted/boosted histos Histo1DPtr _histDeltaPhiZJ1_1[2]; Histo1DPtr _histDeltaPhiZJ1_2[2]; Histo1DPtr _histDeltaPhiZJ1_3[2]; Histo1DPtr _histDeltaPhiZJ2_3[2]; Histo1DPtr _histDeltaPhiZJ3_3[2]; Histo1DPtr _histDeltaPhiJ1J2_3[2]; Histo1DPtr _histDeltaPhiJ1J3_3[2]; Histo1DPtr 
_histDeltaPhiJ2J3_3[2]; Histo1DPtr _histTransvThrust[2]; }; DECLARE_RIVET_PLUGIN(CMS_2013_I1209721); } diff --git a/analyses/pluginCMS/CMS_2013_I1218372.cc b/analyses/pluginCMS/CMS_2013_I1218372.cc --- a/analyses/pluginCMS/CMS_2013_I1218372.cc +++ b/analyses/pluginCMS/CMS_2013_I1218372.cc @@ -1,162 +1,165 @@ // Samantha Dooling DESY // February 2012 // // -*- C++ -*- // ============================= // // Ratio of the energy deposited in the pseudorapidity range // -6.6 < eta < -5.2 for events with a charged particle jet // // ============================= #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class CMS_2013_I1218372 : public Analysis { public: /// Constructor CMS_2013_I1218372() : Analysis("CMS_2013_I1218372") { } void init() { // gives the range of eta and min pT for the final state from which I get the jets - FastJets jetpro (ChargedFinalState(-2.5, 2.5, 0.3*GeV), FastJets::ANTIKT, 0.5); + FastJets jetpro (ChargedFinalState((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 0.3*GeV)), FastJets::ANTIKT, 0.5); declare(jetpro, "Jets"); // skip Neutrinos and Muons - VetoedFinalState fsv(FinalState(-7.0, -4.0, 0.*GeV)); + VetoedFinalState fsv(FinalState((Cuts::etaIn(-7.0, -4.0)))); fsv.vetoNeutrinos(); fsv.addVetoPairId(PID::MUON); declare(fsv, "fsv"); + FinalState a,b; + a = b; + // for the hadron level selection - VetoedFinalState sfsv(FinalState(-DBL_MAX, DBL_MAX, 0.*GeV)); + VetoedFinalState sfsv; sfsv.vetoNeutrinos(); sfsv.addVetoPairId(PID::MUON); declare(sfsv, "sfsv"); //counters book(passedSumOfWeights, "passedSumOfWeights"); book(inclEflow, "inclEflow"); // Temporary histograms to fill the energy flow for leading jet events. // Ratios are calculated in finalyze(). 
int id = 0; if (fuzzyEquals(sqrtS()/GeV, 900, 1e-3)) id=1; if (fuzzyEquals(sqrtS()/GeV, 2760, 1e-3)) id=2; if (fuzzyEquals(sqrtS()/GeV, 7000, 1e-3)) id=3; book(_h_ratio, id, 1, 1); book(_tmp_jet , "TMP/eflow_jet" ,refData(id, 1, 1)); // Leading jet energy flow in pt book(_tmp_njet, "TMP/number_jet" ,refData(id, 1, 1)); // Number of events in pt } /// Perform the per-event analysis void analyze(const Event& event) { // Skip if the event is empty const FinalState& fsv = apply(event, "fsv"); if (fsv.empty()) vetoEvent; // ====================== Minimum Bias selection const FinalState& sfsv = apply(event, "sfsv"); Particles parts = sfsv.particles(cmpMomByRap); if (parts.empty()) vetoEvent; // find dymax double dymax = 0; int gap_pos = -1; for (size_t i = 0; i < parts.size()-1; ++i) { double dy = parts[i+1].rapidity() - parts[i].rapidity(); if (dy > dymax) { dymax = dy; gap_pos = i; } } // calculate mx2 and my2 FourMomentum xmom; for (int i=0; i<=gap_pos; ++i) { xmom += parts[i].momentum(); } double mx2 = xmom.mass2(); if (mx2<0) vetoEvent; FourMomentum ymom; for (size_t i=gap_pos+1; i 0.1 || xiy > 0.4 || xidd > 0.5)) passedHadronCuts = true; if (fuzzyEquals(sqrtS()/GeV, 2760, 1e-3) && (xix > 0.07 || xiy > 0.2 || xidd > 0.5)) passedHadronCuts = true; if (fuzzyEquals(sqrtS()/GeV, 7000, 1e-3) && (xix > 0.04 || xiy > 0.1 || xidd > 0.5)) passedHadronCuts = true; if (!passedHadronCuts) vetoEvent; // ============================== MINIMUM BIAS EVENTS // loop over particles to calculate the energy passedSumOfWeights->fill(); for (const Particle& p : fsv.particles()) { if (-5.2 > p.eta() && p.eta() > -6.6) inclEflow->fill(p.E()/GeV); } // ============================== JET EVENTS const FastJets& jetpro = apply(event, "Jets"); const Jets& jets = jetpro.jetsByPt(1.0*GeV); if (jets.size()<1) vetoEvent; if (fabs(jets[0].eta()) < 2.0) { _tmp_njet->fill(jets[0].pT()/GeV); // energy flow for (const Particle& p : fsv.particles()) { if (p.eta() > -6.6 && p.eta() < -5.2) { // ask for the 
CASTOR region _tmp_jet->fill(jets[0].pT()/GeV, p.E()/GeV); } } } }// analysis void finalize() { scale(_tmp_jet, *passedSumOfWeights / *inclEflow); divide(_tmp_jet, _tmp_njet, _h_ratio); } private: // counters CounterPtr passedSumOfWeights; CounterPtr inclEflow; // histograms Scatter2DPtr _h_ratio; Histo1DPtr _tmp_jet; Histo1DPtr _tmp_njet; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1218372); } diff --git a/analyses/pluginCMS/CMS_2013_I1224539_DIJET.cc b/analyses/pluginCMS/CMS_2013_I1224539_DIJET.cc --- a/analyses/pluginCMS/CMS_2013_I1224539_DIJET.cc +++ b/analyses/pluginCMS/CMS_2013_I1224539_DIJET.cc @@ -1,143 +1,143 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ZFinder.hh" #include "fastjet/tools/Filter.hh" #include "fastjet/tools/Pruner.hh" namespace Rivet { class CMS_2013_I1224539_DIJET : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor CMS_2013_I1224539_DIJET() : Analysis("CMS_2013_I1224539_DIJET"), _filter(fastjet::Filter(fastjet::JetDefinition(fastjet::cambridge_algorithm, 0.3), fastjet::SelectorNHardest(3))), _trimmer(fastjet::Filter(fastjet::JetDefinition(fastjet::kt_algorithm, 0.2), fastjet::SelectorPtFractionMin(0.03))), _pruner(fastjet::Pruner(fastjet::cambridge_algorithm, 0.1, 0.5)) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FinalState fs(-2.4, 2.4, 0*GeV); + FinalState fs((Cuts::etaIn(-2.4, 2.4))); declare(fs, "FS"); // Jet collections declare(FastJets(fs, FastJets::ANTIKT, 0.7), "JetsAK7"); declare(FastJets(fs, FastJets::CAM, 0.8), "JetsCA8"); declare(FastJets(fs, FastJets::CAM, 1.2), "JetsCA12"); // Histograms for (size_t i = 0; i < N_PT_BINS_dj; ++i ) { book(_h_ungroomedAvgJetMass_dj[i] ,i+1+0*N_PT_BINS_dj, 1, 1); book(_h_filteredAvgJetMass_dj[i] ,i+1+1*N_PT_BINS_dj, 1, 1); book(_h_trimmedAvgJetMass_dj[i] ,i+1+2*N_PT_BINS_dj, 1, 1); book(_h_prunedAvgJetMass_dj[i] ,i+1+3*N_PT_BINS_dj, 1, 1); } } // Find the pT histogram bin index for value pt (in GeV), to hack a 2D histogram equivalent /// @todo Use a YODA axis/finder alg when available size_t findPtBin(double ptJ) { const double ptBins_dj[N_PT_BINS_dj+1] = { 220.0, 300.0, 450.0, 500.0, 600.0, 800.0, 1000.0, 1500.0}; for (size_t ibin = 0; ibin < N_PT_BINS_dj; ++ibin) { if (inRange(ptJ, ptBins_dj[ibin], ptBins_dj[ibin+1])) return ibin; } return N_PT_BINS_dj; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Look at events with >= 2 jets const PseudoJets& psjetsAK7 = apply(event, "JetsAK7").pseudoJetsByPt( 50.0*GeV ); if (psjetsAK7.size() < 2) vetoEvent; // Get the leading two jets and find their average pT const fastjet::PseudoJet& j0 = psjetsAK7[0]; const fastjet::PseudoJet& j1 = psjetsAK7[1]; double ptAvg = 0.5 * (j0.pt() + j1.pt()); // Find the appropriate mean pT bin and escape 
if needed const size_t njetBin = findPtBin(ptAvg/GeV); if (njetBin >= N_PT_BINS_dj) vetoEvent; // Now run the substructure algs... fastjet::PseudoJet filtered0 = _filter(j0); fastjet::PseudoJet filtered1 = _filter(j1); fastjet::PseudoJet trimmed0 = _trimmer(j0); fastjet::PseudoJet trimmed1 = _trimmer(j1); fastjet::PseudoJet pruned0 = _pruner(j0); fastjet::PseudoJet pruned1 = _pruner(j1); // ... and fill the histograms _h_ungroomedAvgJetMass_dj[njetBin]->fill(0.5*(j0.m() + j1.m())/GeV, weight); _h_filteredAvgJetMass_dj[njetBin]->fill(0.5*(filtered0.m() + filtered1.m())/GeV, weight); _h_trimmedAvgJetMass_dj[njetBin]->fill(0.5*(trimmed0.m() + trimmed1.m())/GeV, weight); _h_prunedAvgJetMass_dj[njetBin]->fill(0.5*(pruned0.m() + pruned1.m())/GeV, weight); } /// Normalise histograms etc., after the run void finalize() { const double normalizationVal = 1000; for (size_t i = 0; i < N_PT_BINS_dj; ++i) { normalize(_h_ungroomedAvgJetMass_dj[i], normalizationVal); normalize(_h_filteredAvgJetMass_dj[i], normalizationVal); normalize(_h_trimmedAvgJetMass_dj[i], normalizationVal); normalize(_h_prunedAvgJetMass_dj[i], normalizationVal); } } //@} private: /// @name FastJet grooming tools (configured in constructor init list) //@{ const fastjet::Filter _filter; const fastjet::Filter _trimmer; const fastjet::Pruner _pruner; //@} /// @name Histograms //@{ enum BINS_dj { PT_220_300_dj=0, PT_300_450_dj, PT_450_500_dj, PT_500_600_dj, PT_600_800_dj, PT_800_1000_dj, PT_1000_1500_dj, N_PT_BINS_dj }; Histo1DPtr _h_ungroomedJet0pt, _h_ungroomedJet1pt; Histo1DPtr _h_ungroomedAvgJetMass_dj[N_PT_BINS_dj]; Histo1DPtr _h_filteredAvgJetMass_dj[N_PT_BINS_dj]; Histo1DPtr _h_trimmedAvgJetMass_dj[N_PT_BINS_dj]; Histo1DPtr _h_prunedAvgJetMass_dj[N_PT_BINS_dj]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1224539_DIJET); } diff --git a/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc b/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc --- a/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc 
+++ b/analyses/pluginCMS/CMS_2013_I1224539_WJET.cc @@ -1,193 +1,193 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ZFinder.hh" #include "fastjet/tools/Filter.hh" #include "fastjet/tools/Pruner.hh" namespace Rivet { class CMS_2013_I1224539_WJET : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor CMS_2013_I1224539_WJET() : Analysis("CMS_2013_I1224539_WJET"), _filter(fastjet::Filter(fastjet::JetDefinition(fastjet::cambridge_algorithm, 0.3), fastjet::SelectorNHardest(3))), _trimmer(fastjet::Filter(fastjet::JetDefinition(fastjet::kt_algorithm, 0.2), fastjet::SelectorPtFractionMin(0.03))), _pruner(fastjet::Pruner(fastjet::cambridge_algorithm, 0.1, 0.5)) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FinalState fs(-2.4, 2.4, 0*GeV); + FinalState fs((Cuts::etaIn(-2.4, 2.4))); declare(fs, "FS"); // Find W's with pT > 120, MET > 50 WFinder wfinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 80*GeV, PID::ELECTRON, 50*GeV, 1000*GeV, 50.0*GeV, 0.2, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wfinder, "WFinder"); // W+jet jet collections declare(FastJets(wfinder.remainingFinalState(), FastJets::ANTIKT, 0.7), "JetsAK7_wj"); declare(FastJets(wfinder.remainingFinalState(), FastJets::CAM, 0.8), "JetsCA8_wj"); declare(FastJets(wfinder.remainingFinalState(), FastJets::CAM, 1.2), "JetsCA12_wj"); // Histograms /// @note These are 2D histos rendered into slices const int wjetsOffset = 51; for (size_t i = 0; i < N_PT_BINS_vj; ++i) { book(_h_ungroomedJetMass_AK7_wj[i] ,wjetsOffset+i+1+0*N_PT_BINS_vj, 1, 1); book(_h_filteredJetMass_AK7_wj[i] ,wjetsOffset+i+1+1*N_PT_BINS_vj, 1, 1); book(_h_trimmedJetMass_AK7_wj[i] ,wjetsOffset+i+1+2*N_PT_BINS_vj, 1, 1); 
book(_h_prunedJetMass_AK7_wj[i] ,wjetsOffset+i+1+3*N_PT_BINS_vj, 1, 1); book(_h_prunedJetMass_CA8_wj[i] ,wjetsOffset+i+1+4*N_PT_BINS_vj, 1, 1); if (i > 0) book(_h_filteredJetMass_CA12_wj[i] ,wjetsOffset+i+5*N_PT_BINS_vj, 1, 1); } } bool isBackToBack_wj(const WFinder& wf, const fastjet::PseudoJet& psjet) { const FourMomentum w = wf.bosons()[0]; const FourMomentum l1 = wf.constituentLeptons()[0]; const FourMomentum l2 = wf.constituentNeutrinos()[0]; /// @todo We should make FourMomentum know how to construct itself from a PseudoJet const FourMomentum jmom(psjet.e(), psjet.px(), psjet.py(), psjet.pz()); return (deltaPhi(w, jmom) > 2.0 && deltaR(l1, jmom) > 1.0 && deltaPhi(l2, jmom) > 0.4); } // Find the pT histogram bin index for value pt (in GeV), to hack a 2D histogram equivalent /// @todo Use a YODA axis/finder alg when available size_t findPtBin(double ptJ) { const double ptBins_vj[N_PT_BINS_vj+1] = { 125.0, 150.0, 220.0, 300.0, 450.0 }; for (size_t ibin = 0; ibin < N_PT_BINS_vj; ++ibin) { if (inRange(ptJ, ptBins_vj[ibin], ptBins_vj[ibin+1])) return ibin; } return N_PT_BINS_vj; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Get the W const WFinder& wfinder = apply(event, "WFinder"); if (wfinder.bosons().size() != 1) vetoEvent; const Particle w = wfinder.bosons()[0]; const Particle l = wfinder.constituentLeptons()[0]; // Require a fairly high-pT W and charged lepton if (l.pT() < 80*GeV || w.pT() < 120*GeV) vetoEvent; // Get the pseudojets. 
const PseudoJets psjetsCA8_wj = apply(event, "JetsCA8_wj").pseudoJetsByPt( 50.0*GeV ); const PseudoJets psjetsCA12_wj = apply(event, "JetsCA12_wj").pseudoJetsByPt( 50.0*GeV ); // AK7 jets const PseudoJets psjetsAK7_wj = apply(event, "JetsAK7_wj").pseudoJetsByPt( 50.0*GeV ); if (!psjetsAK7_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsAK7_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj) { fastjet::PseudoJet filtered0 = _filter(j0); fastjet::PseudoJet trimmed0 = _trimmer(j0); fastjet::PseudoJet pruned0 = _pruner(j0); _h_ungroomedJetMass_AK7_wj[njetBin]->fill(j0.m()/GeV, weight); _h_filteredJetMass_AK7_wj[njetBin]->fill(filtered0.m()/GeV, weight); _h_trimmedJetMass_AK7_wj[njetBin]->fill(trimmed0.m()/GeV, weight); _h_prunedJetMass_AK7_wj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA8 jets if (!psjetsCA8_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsCA8_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj) { fastjet::PseudoJet pruned0 = _pruner(j0); _h_prunedJetMass_CA8_wj[njetBin]->fill(pruned0.m()/GeV, weight); } } } // CA12 jets if (!psjetsCA12_wj.empty()) { // Get the leading jet and make sure it's back-to-back with the W const fastjet::PseudoJet& j0 = psjetsCA12_wj[0]; if (isBackToBack_wj(wfinder, j0)) { const size_t njetBin = findPtBin(j0.pt()/GeV); if (njetBin < N_PT_BINS_vj&&njetBin>0) { fastjet::PseudoJet filtered0 = _filter(j0); _h_filteredJetMass_CA12_wj[njetBin]->fill( filtered0.m() / GeV, weight); } } } } /// Normalise histograms etc., after the run void finalize() { const double normalizationVal = 1000; for (size_t i = 0; i < N_PT_BINS_vj; ++i) { normalize(_h_ungroomedJetMass_AK7_wj[i], normalizationVal); normalize(_h_filteredJetMass_AK7_wj[i], normalizationVal); 
normalize(_h_trimmedJetMass_AK7_wj[i], normalizationVal); normalize(_h_prunedJetMass_AK7_wj[i], normalizationVal); normalize(_h_prunedJetMass_CA8_wj[i], normalizationVal); if (i > 0) normalize( _h_filteredJetMass_CA12_wj[i], normalizationVal); } } //@} private: /// @name FastJet grooming tools (configured in constructor init list) //@{ const fastjet::Filter _filter; const fastjet::Filter _trimmer; const fastjet::Pruner _pruner; //@} /// @name Histograms //@{ enum BINS_vj { PT_125_150_vj=0, PT_150_220_vj, PT_220_300_vj, PT_300_450_vj, N_PT_BINS_vj }; Histo1DPtr _h_ungroomedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_trimmedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_AK7_wj[N_PT_BINS_vj]; Histo1DPtr _h_prunedJetMass_CA8_wj[N_PT_BINS_vj]; Histo1DPtr _h_filteredJetMass_CA12_wj[N_PT_BINS_vj]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1224539_WJET); } diff --git a/analyses/pluginCMS/CMS_2013_I1258128.cc b/analyses/pluginCMS/CMS_2013_I1258128.cc --- a/analyses/pluginCMS/CMS_2013_I1258128.cc +++ b/analyses/pluginCMS/CMS_2013_I1258128.cc @@ -1,165 +1,165 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { /// CMS Z rapidity measurement class CMS_2013_I1258128 : public Analysis { public: // Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1258128); void init() { // Full final state const FinalState fs(Cuts::abseta < 5); declare(fs, "FS"); // Z finders for electrons and muons Cut cuts = Cuts::abseta < 2.1 && Cuts::pT > 20*GeV; const ZFinder zfe(fs, cuts, PID::ELECTRON, 76*GeV, 106*GeV); const ZFinder zfm(fs, cuts, PID::MUON, 76*GeV, 106*GeV); declare(zfe, "ZFE"); declare(zfm, "ZFM"); // Try to get the leading photon - LeadingParticlesFinalState 
photonfs(FinalState(-2.5, 2.5, 40.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 40.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // Jets const FastJets jets(fs, FastJets::ANTIKT, 0.5); declare(jets, "JETS"); // Histograms book(_hist1YZ ,1, 1, 1); book(_hist1YJet ,2, 1, 1); book(_hist1YSum ,3, 1, 1); book( _hist1YDif ,4, 1, 1); book( _hist2YPhoton ,5, 1, 1); book( _hist2YJet ,6, 1, 1); book( _hist2YSum ,7, 1, 1); book( _hist2YDif ,8, 1, 1); } void makeZCut(const Event& event) { // Apply the Z finders and veto if no Z found const ZFinder& zfe = apply(event, "ZFE"); const ZFinder& zfm = apply(event, "ZFM"); if (zfe.empty() && zfm.empty()) vetoEvent; // Choose the Z candidate const Particles& z = (!zfm.empty()) ? zfm.bosons() : zfe.bosons(); const Particles& clusteredConstituents = (!zfm.empty()) ? zfm.constituents() : zfe.constituents(); // Insist that the Z is in a high-pT (boosted) regime if (z[0].pT() < 40*GeV) return; // Build the jets const FastJets& jetfs = apply(event, "JETS"); Jets jets = jetfs.jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 2.4); if (jets.empty()) return; // Clean the jets against the lepton candidates with a DeltaR cut of 0.5 vector cleanedJets; for (const Jet& j : jets) { bool isolated = true; for (const Particle& p : clusteredConstituents) { if (deltaR(p, j) < 0.5) { isolated = false; break; } } if (isolated) cleanedJets.push_back(&j); } // Require exactly 1 isolated jet if (cleanedJets.size() != 1) return; // Fill histos const double weight = 1.0; const double yz = z[0].rapidity(); const double yjet = cleanedJets[0]->momentum().rapidity(); _hist1YZ->fill(fabs(yz), weight); _hist1YJet->fill(fabs(yjet), weight); _hist1YSum->fill(0.5*fabs(yz + yjet), weight); _hist1YDif->fill(0.5*fabs(yz - yjet), weight); } void makePhotonCut(const Event& event) { // Get the photon const FinalState& photonfs = apply(event, "LeadingPhoton"); if (photonfs.particles().size() < 
1) return; const Particle& photon = photonfs.particles().front(); if (photon.pT() < 40*GeV) return; if (fabs(photon.eta()) > 1.4442 ) return; // Build the jets const FastJets& jetfs = apply(event, "JETS"); Jets jets = jetfs.jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 2.4); if (jets.empty()) return; // Clean the jets against the photon candidate with a DeltaR cut of 0.5 vector cleanedJets; for (const Jet& j : jets) if (deltaR(photon, j) > 0.5) cleanedJets.push_back(&j); // Require exactly 1 jet if (cleanedJets.size() != 1) return; // Fill histos const double weight = 1.0; const double ypho = photon.rapidity(); const double yjet = cleanedJets[0]->momentum().rapidity(); _hist2YPhoton->fill(fabs(ypho), weight); _hist2YJet->fill(fabs(yjet), weight); _hist2YSum->fill(0.5*fabs(ypho + yjet), weight); _hist2YDif->fill(0.5*fabs(ypho - yjet), weight); } void analyze(const Event& event) { makeZCut(event); makePhotonCut(event); } void finalize() { normalizeByContents(_hist1YZ); normalizeByContents(_hist1YJet); normalizeByContents(_hist1YSum); normalizeByContents(_hist1YDif); normalizeByContents(_hist2YPhoton); normalizeByContents(_hist2YJet); normalizeByContents(_hist2YSum); normalizeByContents(_hist2YDif); } // The CMS normalization in this analysis is that the sum over bin contents // is equal to 1. This function normalizes to area = area*bin_width. / // @note This is a strange definition... why? 
void normalizeByContents(Histo1DPtr h) { normalize(h, h->bin(0).xWidth()); } private: Histo1DPtr _hist1YZ, _hist1YJet, _hist1YSum, _hist1YDif; Histo1DPtr _hist2YPhoton, _hist2YJet, _hist2YSum, _hist2YDif; }; // Plugin system hook DECLARE_RIVET_PLUGIN(CMS_2013_I1258128); } diff --git a/analyses/pluginCMS/CMS_2013_I1265659.cc b/analyses/pluginCMS/CMS_2013_I1265659.cc --- a/analyses/pluginCMS/CMS_2013_I1265659.cc +++ b/analyses/pluginCMS/CMS_2013_I1265659.cc @@ -1,83 +1,83 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class CMS_2013_I1265659 : public Analysis { public: /// Constructor CMS_2013_I1265659() : Analysis("CMS_2013_I1265659") { } /// Book histograms and initialise projections before the run void init() { - const FastJets jets(FinalState(-10, 10, 0.0*GeV), FastJets::ANTIKT, 0.5); + const FastJets jets(FinalState((Cuts::etaIn(-10, 10))), FastJets::ANTIKT, 0.5); declare(jets, "Jets"); book(_h_hTotD ,1, 1, 1); book(_h_hTotDF ,1, 1, 2); } /// Perform the per-event analysis void analyze(const Event& event) { const Jets& jets = apply(event, "Jets").jetsByPt(30.0*GeV); if (jets.size() < 3) vetoEvent; const FourMomentum jet1 = jets[0].momentum(); const FourMomentum jet2 = jets[1].momentum(); const FourMomentum jet3 = jets[2].momentum(); // Cut on lead jet pT and lead/sublead jet centrality if (jet1.pT() < 100*GeV) vetoEvent; if (jet1.abseta() > 2.5 || jet2.abseta() > 2.5) vetoEvent; // Construct eta & phi distances between 2nd and 3rd jets double dEta23 = jet3.eta() - jet2.eta(); ///< Note not abs double dPhi23 = jet3.phi() - jet2.phi(); ///< Note not abs if (dPhi23 > M_PI) dPhi23 -= 2*M_PI; ///< @todo Use mapTo... functions? if (dPhi23 < -M_PI) dPhi23 += 2*M_PI; ///< @todo Use mapTo... functions? 
// Cut on distance between 2nd and 3rd jets const double R23 = add_quad(dPhi23, dEta23); if (!inRange(R23, 0.5, 1.5)) vetoEvent; // Cut on dijet mass const FourMomentum diJet = jet1 + jet2; if (diJet.mass() < 220*GeV) vetoEvent; // Calc beta and fill histogram (choose central or fwd histo inline) double beta = fabs(atan2(dPhi23, sign(jet2.eta())*dEta23)); ((jet2.abseta() < 0.8) ? _h_hTotD : _h_hTotDF)->fill(beta, 1.0); } /// Normalise histograms etc., after the run void finalize() { const double width = _h_hTotD->bin(0).xWidth(); normalize(_h_hTotD, width); normalize(_h_hTotDF, width); } private: /// @name Histograms Histo1DPtr _h_hTotD; Histo1DPtr _h_hTotDF; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1265659); } diff --git a/analyses/pluginCMS/CMS_2013_I1273574.cc b/analyses/pluginCMS/CMS_2013_I1273574.cc --- a/analyses/pluginCMS/CMS_2013_I1273574.cc +++ b/analyses/pluginCMS/CMS_2013_I1273574.cc @@ -1,102 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// CMS 4-jet production at 7 TeV class CMS_2013_I1273574 : public Analysis { public: /// Constructor CMS_2013_I1273574() : Analysis("CMS_2013_I1273574") { } /// Book histograms and initialise projections before the run void init() { - const FinalState cnfs(-4.7, 4.7); + const FinalState cnfs((Cuts::etaIn(-4.7, 4.7))); declare(FastJets(cnfs, FastJets::ANTIKT, 0.5), "Jets"); book(_h_jetetas[0] ,1,1,1); book(_h_jetpts[0] ,2,1,1); book(_h_DeltaS ,3,1,1); book(_h_DeltaPhiSoft ,4,1,1); book(_h_DeltaPtRelSoft ,5,1,1); book(_h_jetetas[2] ,6,1,1); book(_h_jetpts[2] ,7,1,1); book(_h_jetetas[3] ,8,1,1); book(_h_jetpts[3] ,9,1,1); book(_h_jetetas[1] ,10,1,1); book(_h_jetpts[1] ,11,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { /// @todo Use jetsByPt(ptGtr(20*GeV) & absetaIn(4.7)), then no need for the lower loop; const Jets jets = apply(event, 
"Jets").jetsByPt(20*GeV); if (jets.size() < 4) vetoEvent; // Ensure that there are exactly 4 jets > 20 GeV, with two above 50 GeV Jets hardjets, alljets; for (const Jet& j : jets) { if (j.abseta() > 4.7) continue; if (j.pT() > 50*GeV) hardjets.push_back(j); if (j.pT() > 20*GeV) alljets.push_back(j); } if (hardjets.size() < 2 || alljets.size() != 4) vetoEvent; const double weight = 1.0; // Histogram pT and eta of all 4 jets for (size_t i = 0; i < 4; ++i) { _h_jetpts[i]->fill(alljets[i].pT()/GeV, weight); _h_jetetas[i]->fill(alljets[i].eta(), weight); } // Create vector sums of the hard and soft pairs of jets const FourMomentum p12 = alljets[0].momentum() + alljets[1].momentum(); const FourMomentum p34 = alljets[2].momentum() + alljets[3].momentum(); // Fill the delta(phi) between the soft jets const double dphisoft = deltaPhi(alljets[2], alljets[3]); _h_DeltaPhiSoft->fill(dphisoft, weight); // Fill the pT balance between the soft jets const double ptbalanceSoft = p34.pT() / (alljets[2].pT() + alljets[3].pT()); _h_DeltaPtRelSoft->fill(ptbalanceSoft, weight); // Fill the azimuthal angle difference between the two jet pairs const double p12p34_trans = p12.px()*p34.px() + p12.py()*p34.py(); const double DeltaS = acos( p12p34_trans / p12.pT() / p34.pT() ); _h_DeltaS->fill(DeltaS, weight); } /// Normalise histograms (mostly to cross-section) void finalize() { const double invlumi = crossSection()/picobarn/sumOfWeights(); for (size_t i = 0; i < 4; ++i) { scale(_h_jetpts[i], invlumi); scale(_h_jetetas[i], invlumi); } normalize(_h_DeltaPtRelSoft); normalize(_h_DeltaPhiSoft); normalize(_h_DeltaS); } private: Histo1DPtr _h_jetpts[4], _h_jetetas[4]; Histo1DPtr _h_DeltaS, _h_DeltaPhiSoft, _h_DeltaPtRelSoft; }; DECLARE_RIVET_PLUGIN(CMS_2013_I1273574); } diff --git a/analyses/pluginCMS/CMS_2014_I1266056.cc b/analyses/pluginCMS/CMS_2014_I1266056.cc --- a/analyses/pluginCMS/CMS_2014_I1266056.cc +++ b/analyses/pluginCMS/CMS_2014_I1266056.cc @@ -1,141 +1,141 @@ // -*- C++ -*- #include 
"Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Measurement of gamma + jets + X triple differential cross-sections /// /// @author David Grellscheid class CMS_2014_I1266056 : public Analysis { public: // Constructor CMS_2014_I1266056() : Analysis("CMS_2014_I1266056") { } // Book histograms and initialise projections before the run void init() { // Final state - FinalState fs(-3, 3); + FinalState fs((Cuts::etaIn(-3, 3))); declare(fs, "FS"); // Leading photon - LeadingParticlesFinalState photonfs(FinalState(-2.5, 2.5, 40.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-2.5, 2.5) && Cuts::pT >= 40.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "JetFS"); // Jets FastJets jetpro(vfs, FastJets::ANTIKT, 0.5); //jetpro.useInvisibles(); declare(jetpro, "Jets"); book(_h_phverycentral_jetcentral, 1, 1, 1); book(_h_phcentral_jetcentral , 2, 1, 1); book(_h_phforward_jetcentral , 3, 1, 1); book(_h_phveryforward_jetcentral, 4, 1, 1); book(_h_phverycentral_jetforward, 1, 1, 2); book(_h_phcentral_jetforward , 2, 1, 2); book(_h_phforward_jetforward , 3, 1, 2); book(_h_phveryforward_jetforward, 4, 1, 2); } // Perform the per-event analysis void analyze(const Event& event) { // Get the photon const FinalState& photonfs = applyProjection(event, "LeadingPhoton"); if (photonfs.particles().empty()) vetoEvent; const FourMomentum photon = photonfs.particles().front().momentum(); // Get the jet Jets jets = applyProjection(event, "Jets").jetsByPt(30.0*GeV); if (jets.empty()) vetoEvent; FourMomentum leadingJet; for ( const Jet & j : jets ) { leadingJet = j.momentum(); // keep the first separated jet if (deltaR(photon, leadingJet) > 
0.5) break; } if (deltaR(photon, leadingJet) < 0.5) vetoEvent; // Veto if leading jet is outside plotted rapidity regions if (leadingJet.abseta() > 2.5) vetoEvent; // TODO: photon isolation 'IsoGamma' needed? // Fill histos const double abs_jet_eta = leadingJet.abseta(); const double photon_pt = photon.pT()/GeV; const double abs_photon_eta = photon.abseta(); if (abs_jet_eta < 1.5) { if (abs_photon_eta < 0.9) _h_phverycentral_jetcentral->fill(photon_pt); else if (abs_photon_eta < 1.44) _h_phcentral_jetcentral->fill( photon_pt); else if (abs_photon_eta < 1.57) {} else if (abs_photon_eta < 2.1) _h_phforward_jetcentral->fill( photon_pt); else if (abs_photon_eta < 2.5) _h_phveryforward_jetcentral->fill(photon_pt); } else if (abs_jet_eta < 2.5) { if (abs_photon_eta < 0.9) _h_phverycentral_jetforward->fill(photon_pt); else if (abs_photon_eta < 1.44) _h_phcentral_jetforward->fill( photon_pt); else if (abs_photon_eta < 1.57) {} else if (abs_photon_eta < 2.1) _h_phforward_jetforward->fill( photon_pt); else if (abs_photon_eta < 2.5) _h_phveryforward_jetforward->fill(photon_pt); } } /// Normalise histograms etc., after the run void finalize() { const double scale_jetcentral = crossSection()/sumOfWeights(); // *3 (jet eta < 1.5) scale(_h_phverycentral_jetcentral, scale_jetcentral); // * 1.8 (photon eta < 0.9) scale(_h_phcentral_jetcentral , scale_jetcentral); // * 1.08 (0.9 .. 1.44) scale(_h_phforward_jetcentral , scale_jetcentral); // * 1.06 (1.57 .. 2.1) scale(_h_phveryforward_jetcentral, scale_jetcentral); // * 0.8 (2.1 .. 2.5) const double scale_jetforward = crossSection()/sumOfWeights(); // *2 (1.5 < eta < 2.5) scale(_h_phverycentral_jetforward, scale_jetforward); // .. as above .. scale(_h_phcentral_jetforward , scale_jetforward); // .. as above .. scale(_h_phforward_jetforward , scale_jetforward); // .. as above .. scale(_h_phveryforward_jetforward, scale_jetforward); // .. as above .. 
} private: Histo1DPtr _h_phverycentral_jetcentral; Histo1DPtr _h_phcentral_jetcentral ; Histo1DPtr _h_phforward_jetcentral ; Histo1DPtr _h_phveryforward_jetcentral; Histo1DPtr _h_phverycentral_jetforward; Histo1DPtr _h_phcentral_jetforward ; Histo1DPtr _h_phforward_jetforward ; Histo1DPtr _h_phveryforward_jetforward; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2014_I1266056); } diff --git a/analyses/pluginCMS/CMS_2015_I1370682.cc b/analyses/pluginCMS/CMS_2015_I1370682.cc --- a/analyses/pluginCMS/CMS_2015_I1370682.cc +++ b/analyses/pluginCMS/CMS_2015_I1370682.cc @@ -1,604 +1,604 @@ #include "Rivet/Analysis.hh" #include "Rivet/Math/LorentzTrans.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { namespace { //< only visible in this compilation unit /// @brief Pseudo top finder /// /// Find top quark in the particle level. /// The definition is based on the agreement at the LHC working group. class PseudoTop : public FinalState { public: /// @name Standard constructors and destructors. //@{ /// The default constructor. May specify the minimum and maximum /// pseudorapidity \f$ \eta \f$ and the min \f$ p_T \f$ (in GeV). 
PseudoTop(double lepR = 0.1, double lepMinPt = 20, double lepMaxEta = 2.4, double jetR = 0.4, double jetMinPt = 30, double jetMaxEta = 4.7) - : FinalState(-DBL_MAX, DBL_MAX, 0*GeV), + : FinalState(), _lepR(lepR), _lepMinPt(lepMinPt), _lepMaxEta(lepMaxEta), _jetR(jetR), _jetMinPt(jetMinPt), _jetMaxEta(jetMaxEta) { setName("PseudoTop"); } enum TTbarMode {CH_NONE=-1, CH_FULLHADRON = 0, CH_SEMILEPTON, CH_FULLLEPTON}; enum DecayMode {CH_HADRON = 0, CH_MUON, CH_ELECTRON}; TTbarMode mode() const { if (!_isValid) return CH_NONE; if (_mode1 == CH_HADRON && _mode2 == CH_HADRON) return CH_FULLHADRON; else if ( _mode1 != CH_HADRON && _mode2 != CH_HADRON) return CH_FULLLEPTON; else return CH_SEMILEPTON; } DecayMode mode1() const {return _mode1;} DecayMode mode2() const {return _mode2;} /// Clone on the heap. virtual unique_ptr clone() const { return unique_ptr(new PseudoTop(*this)); } //@} public: Particle t1() const {return _t1;} Particle t2() const {return _t2;} Particle b1() const {return _b1;} Particle b2() const {return _b2;} Particles wDecays1() const {return _wDecays1;} Particles wDecays2() const {return _wDecays2;} Jets jets() const {return _jets;} Jets bjets() const {return _bjets;} Jets ljets() const {return _ljets;} protected: // Apply the projection to the event void project(const Event& e); // override; ///< @todo Re-enable when C++11 allowed void cleanup(std::map >& v, const bool doCrossCleanup=false) const; private: const double _lepR, _lepMinPt, _lepMaxEta; const double _jetR, _jetMinPt, _jetMaxEta; //constexpr ///< @todo Re-enable when C++11 allowed static double _tMass; // = 172.5*GeV; ///< @todo Re-enable when C++11 allowed //constexpr ///< @todo Re-enable when C++11 allowed static double _wMass; // = 80.4*GeV; ///< @todo Re-enable when C++11 allowed private: bool _isValid; DecayMode _mode1, _mode2; Particle _t1, _t2; Particle _b1, _b2; Particles _wDecays1, _wDecays2; Jets _jets, _bjets, _ljets; }; // More implementation below the analysis code } /// 
Pseudo-top analysis from CMS class CMS_2015_I1370682 : public Analysis { public: CMS_2015_I1370682() : Analysis("CMS_2015_I1370682"), _applyCorrection(true), _doShapeOnly(true) { } void init() { declare(PseudoTop(0.1, 20, 2.4, 0.5, 30, 2.4), "ttbar"); // Lepton + Jet channel book(_hSL_topPt ,"d15-x01-y01"); // 1/sigma dsigma/dpt(top) book(_hSL_topPtTtbarSys ,"d16-x01-y01"); // 1/sigma dsigma/dpt*(top) book(_hSL_topY ,"d17-x01-y01"); // 1/sigma dsigma/dy(top) book(_hSL_ttbarDelPhi ,"d18-x01-y01"); // 1/sigma dsigma/ddeltaphi(t,tbar) book(_hSL_topPtLead ,"d19-x01-y01"); // 1/sigma dsigma/dpt(t1) book(_hSL_topPtSubLead ,"d20-x01-y01"); // 1/sigma dsigma/dpt(t2) book(_hSL_ttbarPt ,"d21-x01-y01"); // 1/sigma dsigma/dpt(ttbar) book(_hSL_ttbarY ,"d22-x01-y01"); // 1/sigma dsigma/dy(ttbar) book(_hSL_ttbarMass ,"d23-x01-y01"); // 1/sigma dsigma/dm(ttbar) // Dilepton channel book(_hDL_topPt ,"d24-x01-y01"); // 1/sigma dsigma/dpt(top) book(_hDL_topPtTtbarSys ,"d25-x01-y01"); // 1/sigma dsigma/dpt*(top) book(_hDL_topY ,"d26-x01-y01"); // 1/sigma dsigma/dy(top) book(_hDL_ttbarDelPhi ,"d27-x01-y01"); // 1/sigma dsigma/ddeltaphi(t,tbar) book(_hDL_topPtLead ,"d28-x01-y01"); // 1/sigma dsigma/dpt(t1) book(_hDL_topPtSubLead ,"d29-x01-y01"); // 1/sigma dsigma/dpt(t2) book(_hDL_ttbarPt ,"d30-x01-y01"); // 1/sigma dsigma/dpt(ttbar) book(_hDL_ttbarY ,"d31-x01-y01"); // 1/sigma dsigma/dy(ttbar) book(_hDL_ttbarMass ,"d32-x01-y01"); // 1/sigma dsigma/dm(ttbar) } void analyze(const Event& event) { // Get the ttbar candidate const PseudoTop& ttbar = apply(event, "ttbar"); if ( ttbar.mode() == PseudoTop::CH_NONE ) vetoEvent; const FourMomentum& t1P4 = ttbar.t1().momentum(); const FourMomentum& t2P4 = ttbar.t2().momentum(); const double pt1 = std::max(t1P4.pT(), t2P4.pT()); const double pt2 = std::min(t1P4.pT(), t2P4.pT()); const double dPhi = deltaPhi(t1P4, t2P4); const FourMomentum ttP4 = t1P4 + t2P4; const FourMomentum t1P4AtCM = 
LorentzTransform::mkFrameTransformFromBeta(ttP4.betaVec()).transform(t1P4); const double weight = 1.0; if ( ttbar.mode() == PseudoTop::CH_SEMILEPTON ) { const Particle lCand1 = ttbar.wDecays1()[0]; // w1 dau0 is the lepton in the PseudoTop if (lCand1.pT() < 33*GeV || lCand1.abseta() > 2.1) vetoEvent; _hSL_topPt->fill(t1P4.pT(), weight); _hSL_topPt->fill(t2P4.pT(), weight); _hSL_topPtTtbarSys->fill(t1P4AtCM.pT(), weight); _hSL_topY->fill(t1P4.rapidity(), weight); _hSL_topY->fill(t2P4.rapidity(), weight); _hSL_ttbarDelPhi->fill(dPhi, weight); _hSL_topPtLead->fill(pt1, weight); _hSL_topPtSubLead->fill(pt2, weight); _hSL_ttbarPt->fill(ttP4.pT(), weight); _hSL_ttbarY->fill(ttP4.rapidity(), weight); _hSL_ttbarMass->fill(ttP4.mass(), weight); } else if ( ttbar.mode() == PseudoTop::CH_FULLLEPTON ) { const Particle lCand1 = ttbar.wDecays1()[0]; // dau0 are the lepton in the PseudoTop const Particle lCand2 = ttbar.wDecays2()[0]; // dau0 are the lepton in the PseudoTop if (lCand1.pT() < 20*GeV || lCand1.abseta() > 2.4) vetoEvent; if (lCand2.pT() < 20*GeV || lCand2.abseta() > 2.4) vetoEvent; _hDL_topPt->fill(t1P4.pT(), weight); _hDL_topPt->fill(t2P4.pT(), weight); _hDL_topPtTtbarSys->fill(t1P4AtCM.pT(), weight); _hDL_topY->fill(t1P4.rapidity(), weight); _hDL_topY->fill(t2P4.rapidity(), weight); _hDL_ttbarDelPhi->fill(dPhi, weight); _hDL_topPtLead->fill(pt1, weight); _hDL_topPtSubLead->fill(pt2, weight); _hDL_ttbarPt->fill(ttP4.pT(), weight); _hDL_ttbarY->fill(ttP4.rapidity(), weight); _hDL_ttbarMass->fill(ttP4.mass(), weight); } } void finalize() { if ( _applyCorrection ) { // Correction functions for TOP-12-028 paper, (parton bin height)/(pseudotop bin height) const double ch15[] = { 5.473609, 4.941048, 4.173346, 3.391191, 2.785644, 2.371346, 2.194161, 2.197167, }; const double ch16[] = { 5.470905, 4.948201, 4.081982, 3.225532, 2.617519, 2.239217, 2.127878, 2.185918, }; const double ch17[] = { 10.003667, 4.546519, 3.828115, 3.601018, 3.522194, 3.524694, 3.600951, 3.808553, 
4.531891, 9.995370, }; const double ch18[] = { 4.406683, 4.054041, 3.885393, 4.213646, }; const double ch19[] = { 6.182537, 5.257703, 4.422280, 3.568402, 2.889408, 2.415878, 2.189974, 2.173210, }; const double ch20[] = { 5.199874, 4.693318, 3.902882, 3.143785, 2.607877, 2.280189, 2.204124, 2.260829, }; const double ch21[] = { 6.053523, 3.777506, 3.562251, 3.601356, 3.569347, 3.410472, }; const double ch22[] = { 11.932351, 4.803773, 3.782709, 3.390775, 3.226806, 3.218982, 3.382678, 3.773653, 4.788191, 11.905338, }; const double ch23[] = { 7.145255, 5.637595, 4.049882, 3.025917, 2.326430, 1.773824, 1.235329, }; const double ch24[] = { 2.268193, 2.372063, 2.323975, 2.034655, 1.736793, }; const double ch25[] = { 2.231852, 2.383086, 2.341894, 2.031318, 1.729672, 1.486993, }; const double ch26[] = { 3.993526, 2.308249, 2.075136, 2.038297, 2.036302, 2.078270, 2.295817, 4.017713, }; const double ch27[] = { 2.205978, 2.175010, 2.215376, 2.473144, }; const double ch28[] = { 2.321077, 2.371895, 2.338871, 2.057821, 1.755382, }; const double ch29[] = { 2.222707, 2.372591, 2.301688, 1.991162, 1.695343, }; const double ch30[] = { 2.599677, 2.026855, 2.138620, 2.229553, }; const double ch31[] = { 5.791779, 2.636219, 2.103642, 1.967198, 1.962168, 2.096514, 2.641189, 5.780828, }; const double ch32[] = { 2.006685, 2.545525, 2.477745, 2.335747, 2.194226, 2.076500, }; applyCorrection(_hSL_topPt, ch15); applyCorrection(_hSL_topPtTtbarSys, ch16); applyCorrection(_hSL_topY, ch17); applyCorrection(_hSL_ttbarDelPhi, ch18); applyCorrection(_hSL_topPtLead, ch19); applyCorrection(_hSL_topPtSubLead, ch20); applyCorrection(_hSL_ttbarPt, ch21); applyCorrection(_hSL_ttbarY, ch22); applyCorrection(_hSL_ttbarMass, ch23); applyCorrection(_hDL_topPt, ch24); applyCorrection(_hDL_topPtTtbarSys, ch25); applyCorrection(_hDL_topY, ch26); applyCorrection(_hDL_ttbarDelPhi, ch27); applyCorrection(_hDL_topPtLead, ch28); applyCorrection(_hDL_topPtSubLead, ch29); applyCorrection(_hDL_ttbarPt, ch30); 
applyCorrection(_hDL_ttbarY, ch31); applyCorrection(_hDL_ttbarMass, ch32); } if ( _doShapeOnly ) { normalize(_hSL_topPt ); normalize(_hSL_topPtTtbarSys); normalize(_hSL_topY ); normalize(_hSL_ttbarDelPhi ); normalize(_hSL_topPtLead ); normalize(_hSL_topPtSubLead ); normalize(_hSL_ttbarPt ); normalize(_hSL_ttbarY ); normalize(_hSL_ttbarMass ); normalize(_hDL_topPt ); normalize(_hDL_topPtTtbarSys); normalize(_hDL_topY ); normalize(_hDL_ttbarDelPhi ); normalize(_hDL_topPtLead ); normalize(_hDL_topPtSubLead ); normalize(_hDL_ttbarPt ); normalize(_hDL_ttbarY ); normalize(_hDL_ttbarMass ); } else { const double s = 1./sumOfWeights(); scale(_hSL_topPt , s); scale(_hSL_topPtTtbarSys, s); scale(_hSL_topY , s); scale(_hSL_ttbarDelPhi , s); scale(_hSL_topPtLead , s); scale(_hSL_topPtSubLead , s); scale(_hSL_ttbarPt , s); scale(_hSL_ttbarY , s); scale(_hSL_ttbarMass , s); scale(_hDL_topPt , s); scale(_hDL_topPtTtbarSys, s); scale(_hDL_topY , s); scale(_hDL_ttbarDelPhi , s); scale(_hDL_topPtLead , s); scale(_hDL_topPtSubLead , s); scale(_hDL_ttbarPt , s); scale(_hDL_ttbarY , s); scale(_hDL_ttbarMass , s); } } void applyCorrection(Histo1DPtr h, const double* cf) { vector& bins = h->bins(); for (size_t i=0, n=bins.size(); i >& v, const bool doCrossCleanup) const { vector >::iterator> toErase; set usedLeg1, usedLeg2; if ( !doCrossCleanup ) { /// @todo Reinstate when C++11 allowed: for (auto key = v.begin(); key != v.end(); ++key) { for (map >::iterator key = v.begin(); key != v.end(); ++key) { const size_t leg1 = key->second.first; const size_t leg2 = key->second.second; if (usedLeg1.find(leg1) == usedLeg1.end() and usedLeg2.find(leg2) == usedLeg2.end()) { usedLeg1.insert(leg1); usedLeg2.insert(leg2); } else { toErase.push_back(key); } } } else { /// @todo Reinstate when C++11 allowed: for (auto key = v.begin(); key != v.end(); ++key) { for (map >::iterator key = v.begin(); key != v.end(); ++key) { const size_t leg1 = key->second.first; const size_t leg2 = key->second.second; if 
(usedLeg1.find(leg1) == usedLeg1.end() and usedLeg1.find(leg2) == usedLeg1.end()) { usedLeg1.insert(leg1); usedLeg1.insert(leg2); } else { toErase.push_back(key); } } } /// @todo Reinstate when C++11 allowed: for (auto& key : toErase) v.erase(key); for (size_t i = 0; i < toErase.size(); ++i) v.erase(toErase[i]); } void PseudoTop::project(const Event& e) { // Leptons : do the lepton clustering anti-kt R=0.1 using stable photons and leptons not from hadron decay // Neutrinos : neutrinos not from hadron decay // MET : vector sum of all invisible particles in x-y plane // Jets : anti-kt R=0.4 using all particles excluding neutrinos and particles used in lepton clustering // add ghost B hadrons during the jet clustering to identify B jets. // W->lv : dressed lepton and neutrino pairs // W->jj : light flavored dijet // W candidate : select lv or jj pairs which minimise |mW1-80.4|+|mW2-80.4| // lepton-neutrino pair will be selected with higher priority // t->Wb : W candidate + b jet // t candidate : select Wb pairs which minimise |mtop1-172.5|+|mtop2-172.5| _isValid = false; _theParticles.clear(); _wDecays1.clear(); _wDecays2.clear(); _jets.clear(); _bjets.clear(); _ljets.clear(); _mode1 = _mode2 = CH_HADRON; // Collect final state particles Particles pForLep, pForJet; Particles neutrinos; // Prompt neutrinos /// @todo Avoid this unsafe jump into HepMC -- all this can be done properly via VisibleFS and HeavyHadrons projections for (const GenParticle* p : Rivet::particles(e.genEvent())) { const int status = p->status(); const int pid = p->pdg_id(); if (status == 1) { Particle rp = *p; if (!PID::isHadron(pid) && !rp.fromHadron()) { // Collect particles not from hadron decay if (rp.isNeutrino()) { // Prompt neutrinos are kept in separate collection neutrinos.push_back(rp); } else if (pid == 22 || rp.isLepton()) { // Leptons and photons for the dressing pForLep.push_back(rp); } } else if (!rp.isNeutrino()) { // Use all particles from hadron decay pForJet.push_back(rp); } } 
else if (PID::isHadron(pid) && PID::hasBottom(pid)) { // NOTE: Consider B hadrons with pT > 5GeV - not in CMS proposal //if ( p->momentum().perp() < 5 ) continue; // Do unstable particles, to be used in the ghost B clustering // Use last B hadrons only bool isLast = true; for (const GenParticlePtr pp : Rivet::particles(p->end_vertex(), HepMC::children)) { if (PID::hasBottom(pp->pdg_id())) { isLast = false; break; } } if (!isLast) continue; // Rescale momentum by 10^-20 Particle ghost(pid, FourMomentum(p->momentum())*1e-20/p->momentum().rho()); pForJet.push_back(ghost); } } // Start object building from trivial thing - prompt neutrinos sortByPt(neutrinos); // Proceed to lepton dressing FastJets fjLep(FinalState(), FastJets::ANTIKT, _lepR); fjLep.calc(pForLep); Jets leptons; vector leptonsId; set dressedIdxs; for (const Jet& lep : fjLep.jetsByPt(_lepMinPt)) { if (lep.abseta() > _lepMaxEta) continue; double leadingPt = -1; int leptonId = 0; for (const Particle& p : lep.particles()) { /// @warning Barcodes aren't future-proof in HepMC dressedIdxs.insert(p.genParticle()->barcode()); if (p.isLepton() && p.pT() > leadingPt) { leadingPt = p.pT(); leptonId = p.pid(); } } if (leptonId == 0) continue; leptons.push_back(lep); leptonsId.push_back(leptonId); } // Re-use particles not used in lepton dressing for (const Particle& rp : pForLep) { /// @warning Barcodes aren't future-proof in HepMC const int barcode = rp.genParticle()->barcode(); // Skip if the particle is used in dressing if (dressedIdxs.find(barcode) != dressedIdxs.end()) continue; // Put back to be used in jet clustering pForJet.push_back(rp); } // Then do the jet clustering FastJets fjJet(FinalState(), FastJets::ANTIKT, _jetR); //fjJet.useInvisibles(); // NOTE: CMS proposal to remove neutrinos (AB: wouldn't work anyway, since they were excluded from clustering inputs) fjJet.calc(pForJet); for (const Jet& jet : fjJet.jetsByPt(_jetMinPt)) { if (jet.abseta() > _jetMaxEta) continue; _jets.push_back(jet); bool isBJet 
= false; for (const Particle& rp : jet.particles()) { if (PID::hasBottom(rp.pid())) { isBJet = true; break; } } if ( isBJet ) _bjets.push_back(jet); else _ljets.push_back(jet); } // Every building blocks are ready. Continue to pseudo-W and pseudo-top combination if (_bjets.size() < 2) return; // Ignore single top for now map > wLepCandIdxs; map > wHadCandIdxs; // Collect leptonic-decaying W's for (size_t iLep = 0, nLep = leptons.size(); iLep < nLep; ++iLep) { const Jet& lep = leptons.at(iLep); for (size_t iNu = 0, nNu = neutrinos.size(); iNu < nNu; ++iNu) { const Particle& nu = neutrinos.at(iNu); const double m = (lep.momentum()+nu.momentum()).mass(); const double dm = std::abs(m-_wMass); wLepCandIdxs[dm] = make_pair(iLep, iNu); } } // Continue to hadronic decaying W's for (size_t i = 0, nLjet = _ljets.size(); i < nLjet; ++i) { const Jet& ljet1 = _ljets[i]; for (size_t j = i+1; j < nLjet; ++j) { const Jet& ljet2 = _ljets[j]; const double m = (ljet1.momentum()+ljet2.momentum()).mass(); const double dm = std::abs(m-_wMass); wHadCandIdxs[dm] = make_pair(i, j); } } // Cleanup W candidate, choose pairs with minimum dm if they share decay products cleanup(wLepCandIdxs); cleanup(wHadCandIdxs, true); const size_t nWLepCand = wLepCandIdxs.size(); const size_t nWHadCand = wHadCandIdxs.size(); if (nWLepCand + nWHadCand < 2) return; // We skip single top int w1Q = 1, w2Q = -1; int w1dau1Id = 1, w2dau1Id = -1; FourMomentum w1dau1LVec, w1dau2LVec; FourMomentum w2dau1LVec, w2dau2LVec; if (nWLepCand == 0) { // Full hadronic case const pair& idPair1 = wHadCandIdxs.begin()->second; const pair& idPair2 = (++wHadCandIdxs.begin())->second; ///< @todo Reinstate std::next const Jet& w1dau1 = _ljets[idPair1.first]; const Jet& w1dau2 = _ljets[idPair1.second]; const Jet& w2dau1 = _ljets[idPair2.first]; const Jet& w2dau2 = _ljets[idPair2.second]; w1dau1LVec = w1dau1.momentum(); w1dau2LVec = w1dau2.momentum(); w2dau1LVec = w2dau1.momentum(); w2dau2LVec = w2dau2.momentum(); } else if 
(nWLepCand == 1) { // Semi-leptonic case const pair& idPair1 = wLepCandIdxs.begin()->second; const pair& idPair2 = wHadCandIdxs.begin()->second; const Jet& w1dau1 = leptons[idPair1.first]; const Particle& w1dau2 = neutrinos[idPair1.second]; const Jet& w2dau1 = _ljets[idPair2.first]; const Jet& w2dau2 = _ljets[idPair2.second]; w1dau1LVec = w1dau1.momentum(); w1dau2LVec = w1dau2.momentum(); w2dau1LVec = w2dau1.momentum(); w2dau2LVec = w2dau2.momentum(); w1dau1Id = leptonsId[idPair1.first]; w1Q = w1dau1Id > 0 ? -1 : 1; w2Q = -w1Q; switch (w1dau1Id) { case 13: case -13: _mode1 = CH_MUON; break; case 11: case -11: _mode1 = CH_ELECTRON; break; } } else { // Full leptonic case const pair& idPair1 = wLepCandIdxs.begin()->second; const pair& idPair2 = (++wLepCandIdxs.begin())->second; ///< @todo Reinstate std::next const Jet& w1dau1 = leptons[idPair1.first]; const Particle& w1dau2 = neutrinos[idPair1.second]; const Jet& w2dau1 = leptons[idPair2.first]; const Particle& w2dau2 = neutrinos[idPair2.second]; w1dau1LVec = w1dau1.momentum(); w1dau2LVec = w1dau2.momentum(); w2dau1LVec = w2dau1.momentum(); w2dau2LVec = w2dau2.momentum(); w1dau1Id = leptonsId[idPair1.first]; w2dau1Id = leptonsId[idPair2.first]; w1Q = w1dau1Id > 0 ? -1 : 1; w2Q = w2dau1Id > 0 ? -1 : 1; switch (w1dau1Id) { case 13: case -13: _mode1 = CH_MUON; break; case 11: case -11: _mode1 = CH_ELECTRON; break; } switch (w2dau1Id) { case 13: case -13: _mode2 = CH_MUON; break; case 11: case -11: _mode2 = CH_ELECTRON; break; } } const FourMomentum w1LVec = w1dau1LVec+w1dau2LVec; const FourMomentum w2LVec = w2dau1LVec+w2dau2LVec; // Combine b jets double sumDm = 1e9; FourMomentum b1LVec, b2LVec; for (size_t i = 0, n = _bjets.size(); i < n; ++i) { const Jet& bjet1 = _bjets[i]; const double mtop1 = (w1LVec+bjet1.momentum()).mass(); const double dmtop1 = std::abs(mtop1-_tMass); for (size_t j=0; j= 1e9) return; // Failed to make top, but this should not happen. 
const FourMomentum t1LVec = w1LVec + b1LVec; const FourMomentum t2LVec = w2LVec + b2LVec; // Put all of them into candidate collection _t1 = Particle(w1Q*6, t1LVec); _b1 = Particle(w1Q*5, b1LVec); _wDecays1.push_back(Particle(w1dau1Id, w1dau1LVec)); _wDecays1.push_back(Particle(-w1dau1Id+w1Q, w1dau2LVec)); _t2 = Particle(w2Q*6, t2LVec); _b2 = Particle(w2Q*5, b2LVec); _wDecays2.push_back(Particle(w2dau1Id, w2dau1LVec)); _wDecays2.push_back(Particle(-w2dau1Id+w2Q, w2dau2LVec)); _isValid = true; } } } diff --git a/analyses/pluginCMS/CMS_2015_I1380605.cc b/analyses/pluginCMS/CMS_2015_I1380605.cc --- a/analyses/pluginCMS/CMS_2015_I1380605.cc +++ b/analyses/pluginCMS/CMS_2015_I1380605.cc @@ -1,100 +1,100 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// Per-event yield of the highest transverse momentum charged particle and charged-particle jet class CMS_2015_I1380605 : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2015_I1380605); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - const ChargedFinalState cfs(-7., 7., 0.0*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-7., 7.))); declare(cfs, "CFS"); declare(FastJets(cfs, FastJets::ANTIKT, 0.5), "Jets"); book(_h_tracks, 1, 1, 1); book(_h_jets , 2, 1, 1); book(_ntracks, "ntracks"); } /// Perform the per-event analysis void analyze(const Event& event) { // Veto events without forward activity on both sides const ChargedFinalState& cfs = apply(event, "CFS"); const size_t count_plus = cfs.particles(Cuts::eta > 5.3 && Cuts::eta < 6.5).size(); const size_t count_minus = cfs.particles(Cuts::eta < -5.3 && Cuts::eta > -6.5).size(); if (!(count_plus > 0 || count_minus > 0)) vetoEvent; //< @todo Should this really also veto the jet analysis? 
/// @warning Needs migration to an AO Counter /// @note This isn't the number of tracks, it's the sum of event weights passing the veto _ntracks->fill(); // Do track analysis here // Find pttrackmax double track_ptmax = 0; for (const Particle& p : cfs.particles(Cuts::abseta < 2.4)) track_ptmax = max(track_ptmax, p.pT()); // Fill track analysis histograms for (size_t i = 0; i < _h_tracks->numBins(); ++i) { const double binlimitlow_t = _h_tracks->bin(i).xMin(); const double weightbw_t = _h_tracks->bin(i).xWidth(); const double xbin_t = _h_tracks->bin(i).xMid(); if (track_ptmax > binlimitlow_t) _h_tracks -> fill(xbin_t, weightbw_t); } // Do jet analysis here const Jets jetsdeta = apply(event,"Jets").jets(Cuts::pT > 1*GeV && Cuts::pT < 60*GeV && Cuts::abseta < 1.9); // Find ptjetmax double jet_ptmax = 0; for (const Jet& j : jetsdeta) jet_ptmax = max(jet_ptmax, j.pT()); // Fill jet analysis histograms for (size_t i = 0; i < _h_jets->numBins(); ++i) { const double binlimitlow_j = _h_jets->bin(i).xMin(); const double weightbw_j = _h_jets->bin(i).xWidth(); const double xbin_j = _h_jets->bin(i).xMid(); if (jet_ptmax > binlimitlow_j) _h_jets -> fill(xbin_j, weightbw_j); } } /// Normalise histograms etc., after the run void finalize() { const double norm_t0 = _h_tracks->bin(7).height()/2.056170e-03; const double norm_t1 = _h_tracks->bin(7).sumW()/2.056170e-03; const double norm_j0 = _h_jets->bin(13).height()/3.575290e-03; const double norm_j1 = _h_jets->bin(13).sumW()/3.575290e-03; MSG_DEBUG("Norm track " << norm_t0 << " " << norm_t1); MSG_DEBUG("Norm jets " << norm_j0 << " " << norm_j1); if (norm_t0 > 0 ) scale(_h_tracks, 1./ norm_t0); if (norm_j0 > 0 ) scale(_h_jets, 1./ norm_j0); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_tracks, _h_jets; CounterPtr _ntracks; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2015_I1380605); } diff --git a/analyses/pluginCMS/CMS_2016_I1413748.cc b/analyses/pluginCMS/CMS_2016_I1413748.cc --- 
a/analyses/pluginCMS/CMS_2016_I1413748.cc +++ b/analyses/pluginCMS/CMS_2016_I1413748.cc @@ -1,328 +1,328 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// CMS 8 TeV dilepton channel ttbar spin correlations and polarisation analysis class CMS_2016_I1413748 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1413748); /// Book histograms and initialise projections void init() { // Complete final state - FinalState fs(-DBL_MAX, DBL_MAX, 0*GeV); + FinalState fs; // Projection for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); declare(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); declare(dressed_electrons, "DressedElectrons"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); declare(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); declare(dressed_muons, "DressedMuons"); // Parton-level top quarks declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); // Booking of histograms // This histogram is independent of the parton-level information, and is an addition to the original analysis. // It is compared to the same data as the parton-level delta_phi histogram d02-x01-y01. 
book(_h_dphidressedleptons, "d00-x01-y01", _bins_dphi); // The remaining histos use parton-level information book(_h_dphi, "d02-x01-y01", _bins_dphi); book(_h_cos_opening_angle, "d05-x01-y01", _bins_cos_opening_angle); book(_h_c1c2, "d08-x01-y01", _bins_c1c2); book(_h_lep_costheta, "d11-x01-y01", _bins_lep_costheta); book(_h_lep_costheta_CPV, "d14-x01-y01", _bins_lep_costheta_CPV); // 2D histos book(_h_dphi_var[0], "d20-x01-y01", _bins_dphi, _bins_tt_mass); book(_h_cos_opening_angle_var[0], "d26-x01-y01", _bins_cos_opening_angle, _bins_tt_mass); book(_h_c1c2_var[0], "d32-x01-y01", _bins_c1c2, _bins_tt_mass); book(_h_lep_costheta_var[0], "d38-x01-y01", _bins_lep_costheta, _bins_tt_mass); book(_h_lep_costheta_CPV_var[0], "d44-x01-y01", _bins_lep_costheta_CPV, _bins_tt_mass); book(_h_dphi_var[1], "d50-x01-y01", _bins_dphi, _bins_tt_pT); book(_h_cos_opening_angle_var[1], "d56-x01-y01", _bins_cos_opening_angle, _bins_tt_pT); book(_h_c1c2_var[1], "d62-x01-y01", _bins_c1c2, _bins_tt_pT); book(_h_lep_costheta_var[1], "d68-x01-y01", _bins_lep_costheta, _bins_tt_pT); book(_h_lep_costheta_CPV_var[1], "d74-x01-y01", _bins_lep_costheta_CPV, _bins_tt_pT); book(_h_dphi_var[2], "d80-x01-y01", _bins_dphi, _bins_tt_absrapidity); book(_h_cos_opening_angle_var[2], "d86-x01-y01", _bins_cos_opening_angle, _bins_tt_absrapidity); book(_h_c1c2_var[2], "d92-x01-y01", _bins_c1c2, _bins_tt_absrapidity); book(_h_lep_costheta_var[2], "d98-x01-y01", _bins_lep_costheta, _bins_tt_absrapidity); book(_h_lep_costheta_CPV_var[2], "d104-x01-y01", _bins_lep_costheta_CPV, _bins_tt_absrapidity); // Profile histos for asymmetries book(_h_dphi_profile[0], "d17-x01-y01", _bins_tt_mass); book(_h_cos_opening_angle_profile[0], "d23-x01-y01", _bins_tt_mass); book(_h_c1c2_profile[0], "d29-x01-y01", _bins_tt_mass); book(_h_lep_costheta_profile[0], "d35-x01-y01", _bins_tt_mass); book(_h_lep_costheta_CPV_profile[0], "d41-x01-y01", _bins_tt_mass); book(_h_dphi_profile[1], "d47-x01-y01", _bins_tt_pT); 
book(_h_cos_opening_angle_profile[1], "d53-x01-y01", _bins_tt_pT); book(_h_c1c2_profile[1], "d59-x01-y01", _bins_tt_pT); book(_h_lep_costheta_profile[1], "d65-x01-y01", _bins_tt_pT); book(_h_lep_costheta_CPV_profile[1], "d71-x01-y01", _bins_tt_pT); book(_h_dphi_profile[2], "d77-x01-y01", _bins_tt_absrapidity); book(_h_cos_opening_angle_profile[2], "d83-x01-y01", _bins_tt_absrapidity); book(_h_c1c2_profile[2], "d89-x01-y01", _bins_tt_absrapidity); book(_h_lep_costheta_profile[2], "d95-x01-y01", _bins_tt_absrapidity); book(_h_lep_costheta_CPV_profile[2], "d101-x01-y01", _bins_tt_absrapidity); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Use particle-level leptons for the first histogram const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); const vector dressedels = dressed_electrons.dressedLeptons(); const vector dressedmus = dressed_muons.dressedLeptons(); const size_t ndressedel = dressedels.size(); const size_t ndressedmu = dressedmus.size(); // For the particle-level histogram, require exactly one electron and exactly one muon, to select // the ttbar->emu channel. Note this means ttbar->emu events with additional PromptFinalState // dilepton pairs from the shower are vetoed - for PYTHIA8, this affects ~0.5% of events, so the // effect is well below the level of sensitivity of the measured distribution. if ( ndressedel == 1 && ndressedmu == 1 ) { const int electrontouse = 0, muontouse = 0; // Opposite-charge leptons only if ( sameSign(dressedels[electrontouse],dressedmus[muontouse]) ) { MSG_INFO("Error, e and mu have same charge, skipping event"); } else { //Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = dressedels[electrontouse].charge() > 0 ? 
dressedels[electrontouse] : dressedmus[muontouse]; FourMomentum lepMinus = dressedels[electrontouse].charge() > 0 ? dressedmus[muontouse] : dressedels[electrontouse]; // Now calculate the variable double dphi_temp = deltaPhi(lepPlus,lepMinus); fillWithUFOF( _h_dphidressedleptons, dphi_temp, weight ); } } // The remaining variables use parton-level information. // Get the leptonically decaying tops const Particles& leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); Particles chargedleptons; unsigned int ntrueleptonictops = 0; bool oppositesign = false; if ( leptonicpartontops.size() == 2 ) { for (size_t k = 0; k < leptonicpartontops.size(); ++k) { // Get the lepton const Particle lepTop = leptonicpartontops[k]; const auto isPromptChargedLepton = [](const Particle& p){return (isChargedLepton(p) && isPrompt(p, false, false));}; Particles lepton_candidates = lepTop.allDescendants(firstParticleWith(isPromptChargedLepton), false); if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::DecayMode::E_MU top quark had no daughter lepton candidate, skipping event."); // In some cases there is no lepton from the W decay but only leptons from the decay of a radiated gamma. // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::DecayMode::E_MU (as of April 2017), and need to be rejected. // PartonicTops::DecayMode::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. 
/// @todo Should no longer be necessary -- remove bool istrueleptonictop = false; for (size_t i = 0; i < lepton_candidates.size(); ++i) { const Particle& lepton_candidate = lepton_candidates[i]; if ( lepton_candidate.hasParent(PID::PHOTON) ) { MSG_DEBUG("Found gamma parent, top: " << k+1 << " of " << leptonicpartontops.size() << " , lepton: " << i+1 << " of " << lepton_candidates.size()); continue; } if ( !istrueleptonictop && sameSign(lepTop,lepton_candidate) ) { chargedleptons.push_back(lepton_candidate); istrueleptonictop = true; } else MSG_WARNING("Found extra prompt charged lepton from top decay (and without gamma parent), ignoring it."); } if ( istrueleptonictop ) ++ntrueleptonictops; } } if ( ntrueleptonictops == 2 ) { oppositesign = !( sameSign(chargedleptons[0],chargedleptons[1]) ); if ( !oppositesign ) MSG_WARNING("error, same charge tops, skipping event."); } if ( ntrueleptonictops == 2 && oppositesign ) { // Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = chargedleptons[0].charge() > 0 ? chargedleptons[0] : chargedleptons[1]; FourMomentum lepMinus = chargedleptons[0].charge() > 0 ? chargedleptons[1] : chargedleptons[0]; const double dphi_temp = deltaPhi(lepPlus,lepMinus); // Get the four-momenta of the positively- and negatively-charged tops FourMomentum topPlus_p4 = leptonicpartontops[0].pid() > 0 ? leptonicpartontops[0] : leptonicpartontops[1]; FourMomentum topMinus_p4 = leptonicpartontops[0].pid() > 0 ? 
leptonicpartontops[1] : leptonicpartontops[0]; const FourMomentum ttbar_p4 = topPlus_p4 + topMinus_p4; const double tt_mass_temp = ttbar_p4.mass(); const double tt_absrapidity_temp = ttbar_p4.absrapidity(); const double tt_pT_temp = ttbar_p4.pT(); // Lorentz transformations to calculate the spin observables in the helicity basis // Transform everything to the ttbar CM frame LorentzTransform ttCM; ttCM.setBetaVec(-ttbar_p4.betaVec()); topPlus_p4 = ttCM.transform(topPlus_p4); topMinus_p4 = ttCM.transform(topMinus_p4); lepPlus = ttCM.transform(lepPlus); lepMinus = ttCM.transform(lepMinus); // Now boost the leptons to their parent top CM frames LorentzTransform topPlus, topMinus; topPlus.setBetaVec(-topPlus_p4.betaVec()); topMinus.setBetaVec(-topMinus_p4.betaVec()); lepPlus = topPlus.transform(lepPlus); lepMinus = topMinus.transform(lepMinus); const double lepPlus_costheta_temp = lepPlus.vector3().dot(topPlus_p4.vector3()) / (lepPlus.vector3().mod() * topPlus_p4.vector3().mod()); const double lepMinus_costheta_temp = lepMinus.vector3().dot(topMinus_p4.vector3()) / (lepMinus.vector3().mod() * topMinus_p4.vector3().mod()); const double c1c2_temp = lepPlus_costheta_temp * lepMinus_costheta_temp; const double cos_opening_angle_temp = lepPlus.vector3().dot(lepMinus.vector3()) / (lepPlus.vector3().mod() * lepMinus.vector3().mod()); // Fill parton-level histos fillWithUFOF( _h_dphi, dphi_temp, weight ); fillWithUFOF( _h_cos_opening_angle, cos_opening_angle_temp, weight ); fillWithUFOF( _h_c1c2, c1c2_temp, weight ); fillWithUFOF( _h_lep_costheta, lepPlus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta, lepMinus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta_CPV, lepPlus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta_CPV, -lepMinus_costheta_temp, weight ); // Now fill the same variables in the 2D and profile histos vs ttbar invariant mass, pT, and absolute rapidity for (int i_var = 0; i_var < 3; ++i_var) { double var; if ( i_var == 0 ) { var = 
tt_mass_temp; } else if ( i_var == 1 ) { var = tt_pT_temp; } else { var = tt_absrapidity_temp; } fillWithUFOF( _h_dphi_var[i_var], dphi_temp, var, weight ); fillWithUFOF( _h_cos_opening_angle_var[i_var], cos_opening_angle_temp, var, weight ); fillWithUFOF( _h_c1c2_var[i_var], c1c2_temp, var, weight ); fillWithUFOF( _h_lep_costheta_var[i_var], lepPlus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_var[i_var], lepMinus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_CPV_var[i_var], lepPlus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_CPV_var[i_var], -lepMinus_costheta_temp, var, weight ); fillWithUFOF( _h_dphi_profile[i_var], dphi_temp, var, weight, (_h_dphi->xMax() + _h_dphi->xMin())/2. ); fillWithUFOF( _h_cos_opening_angle_profile[i_var], cos_opening_angle_temp, var, weight, (_h_cos_opening_angle->xMax() + _h_cos_opening_angle->xMin())/2. ); fillWithUFOF( _h_c1c2_profile[i_var], c1c2_temp, var, weight, (_h_c1c2->xMax() + _h_c1c2->xMin())/2. ); fillWithUFOF( _h_lep_costheta_profile[i_var], lepPlus_costheta_temp, var, weight, (_h_lep_costheta->xMax() + _h_lep_costheta->xMin())/2. ); fillWithUFOF( _h_lep_costheta_profile[i_var], lepMinus_costheta_temp, var, weight, (_h_lep_costheta->xMax() + _h_lep_costheta->xMin())/2. ); fillWithUFOF( _h_lep_costheta_CPV_profile[i_var], lepPlus_costheta_temp, var, weight, (_h_lep_costheta_CPV->xMax() + _h_lep_costheta_CPV->xMin())/2. ); fillWithUFOF( _h_lep_costheta_CPV_profile[i_var], -lepMinus_costheta_temp, var, weight, (_h_lep_costheta_CPV->xMax() + _h_lep_costheta_CPV->xMin())/2. 
); } } } /// Normalise histograms to unit area void finalize() { normalize(_h_dphidressedleptons); normalize(_h_dphi); normalize(_h_cos_opening_angle); normalize(_h_c1c2); normalize(_h_lep_costheta); normalize(_h_lep_costheta_CPV); for (int i_var = 0; i_var < 3; ++i_var) { normalize(_h_dphi_var[i_var]); normalize(_h_cos_opening_angle_var[i_var]); normalize(_h_c1c2_var[i_var]); normalize(_h_lep_costheta_var[i_var]); normalize(_h_lep_costheta_CPV_var[i_var]); } } private: Histo1DPtr _h_dphidressedleptons, _h_dphi, _h_lep_costheta, _h_lep_costheta_CPV, _h_c1c2, _h_cos_opening_angle; Histo2DPtr _h_dphi_var[3], _h_lep_costheta_var[3], _h_lep_costheta_CPV_var[3], _h_c1c2_var[3], _h_cos_opening_angle_var[3]; Profile1DPtr _h_dphi_profile[3], _h_lep_costheta_profile[3], _h_lep_costheta_CPV_profile[3], _h_c1c2_profile[3], _h_cos_opening_angle_profile[3]; const vector _bins_tt_mass = {300., 430., 530., 1200.}; const vector _bins_tt_pT = {0., 41., 92., 300.}; const vector _bins_tt_absrapidity = {0., 0.34, 0.75, 1.5}; const vector _bins_dphi = {0., 5.*M_PI/60., 10.*M_PI/60., 15.*M_PI/60., 20.*M_PI/60., 25.*M_PI/60., 30.*M_PI/60., 35.*M_PI/60., 40.*M_PI/60., 45.*M_PI/60., 50.*M_PI/60., 55.*M_PI/60., M_PI}; const vector _bins_lep_costheta = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; const vector _bins_lep_costheta_CPV = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; const vector _bins_c1c2 = {-1., -0.4, -10./60., 0., 10./60., 0.4, 1.}; const vector _bins_cos_opening_angle = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; void fillWithUFOF(Histo1DPtr h, double x, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), w); } void fillWithUFOF(Histo2DPtr h, double x, double y, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), std::max(std::min(y, h->yMax()-1e-9),h->yMin()+1e-9), w); } void fillWithUFOF(Profile1DPtr h, double x, double y, double w, double c) { h->fill(std::max(std::min(y, h->xMax()-1e-9),h->xMin()+1e-9), float(x > c) - float(x < 
c), w); } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1413748); } diff --git a/analyses/pluginCMS/CMS_2016_I1430892.cc b/analyses/pluginCMS/CMS_2016_I1430892.cc --- a/analyses/pluginCMS/CMS_2016_I1430892.cc +++ b/analyses/pluginCMS/CMS_2016_I1430892.cc @@ -1,259 +1,259 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// CMS 8 TeV dilepton channel ttbar charge asymmetry analysis class CMS_2016_I1430892 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1430892); /// Book histograms and initialise projections void init() { // Complete final state - FinalState fs(-DBL_MAX, DBL_MAX, 0*GeV); + FinalState fs; // Projection for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); declare(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); declare(dressed_electrons, "DressedElectrons"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); declare(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); declare(dressed_muons, "DressedMuons"); // Parton-level top quarks declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); // Booking of histograms // This histogram is independent of the parton-level information, and is an // addition to the original analysis. It is compared to the same data as // the parton-level delta_abseta histogram d05-x01-y01. 
book(_h_dabsetadressedleptons, "d00-x01-y01", _bins_dabseta); // The remaining histos use parton-level information book(_h_dabseta, "d05-x01-y01", _bins_dabseta); book(_h_dabsrapidity, "d02-x01-y01", _bins_dabsrapidity); // 2D histos book(_h_dabsrapidity_var[0], "d11-x01-y01", _bins_dabsrapidity, _bins_tt_mass); book(_h_dabseta_var[0], "d17-x01-y01", _bins_dabseta, _bins_tt_mass); book(_h_dabsrapidity_var[1], "d23-x01-y01", _bins_dabsrapidity, _bins_tt_pT); book(_h_dabseta_var[1], "d29-x01-y01", _bins_dabseta, _bins_tt_pT); book(_h_dabsrapidity_var[2], "d35-x01-y01", _bins_dabsrapidity, _bins_tt_absrapidity); book(_h_dabseta_var[2], "d41-x01-y01", _bins_dabseta, _bins_tt_absrapidity); // Profile histos for asymmetries book(_h_dabsrapidity_profile[0], "d08-x01-y01", _bins_tt_mass); book(_h_dabseta_profile[0], "d14-x01-y01", _bins_tt_mass); book(_h_dabsrapidity_profile[1], "d20-x01-y01", _bins_tt_pT); book(_h_dabseta_profile[1], "d26-x01-y01", _bins_tt_pT); book(_h_dabsrapidity_profile[2], "d32-x01-y01", _bins_tt_absrapidity); book(_h_dabseta_profile[2], "d38-x01-y01", _bins_tt_absrapidity); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Use particle-level leptons for the first histogram const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); const vector dressedels = dressed_electrons.dressedLeptons(); const vector dressedmus = dressed_muons.dressedLeptons(); const size_t ndressedel = dressedels.size(); const size_t ndressedmu = dressedmus.size(); // For the particle-level histogram, require exactly one electron and exactly one muon, to select the ttbar->emu channel. // Note this means ttbar->emu events with additional PromptFinalState dilepton pairs from the shower are vetoed - for PYTHIA8, // this affects ~0.5% of events, so the effect is well below the level of sensitivity of the measured distribution. 
if ( ndressedel == 1 && ndressedmu == 1 ) { const int electrontouse = 0, muontouse = 0; // Opposite-charge leptons only if ( sameSign(dressedels[electrontouse], dressedmus[muontouse]) ) { MSG_INFO("Error, e and mu have same charge, skipping event"); } else { // Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = dressedels[electrontouse].charge() > 0 ? dressedels[electrontouse] : dressedmus[muontouse]; FourMomentum lepMinus = dressedels[electrontouse].charge() > 0 ? dressedmus[muontouse] : dressedels[electrontouse]; // Now calculate the variable double dabseta_temp = lepPlus.abseta() - lepMinus.abseta(); fillWithUFOF( _h_dabsetadressedleptons, dabseta_temp, weight ); } } // The remaining variables use parton-level information. // Get the leptonically decaying tops const Particles leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); Particles chargedleptons; unsigned int ntrueleptonictops = 0; bool oppositesign = false; if ( leptonicpartontops.size() == 2 ) { for (size_t k = 0; k < leptonicpartontops.size(); ++k) { // Get the lepton const Particle lepTop = leptonicpartontops[k]; const auto isPromptChargedLepton = [](const Particle& p){return (isChargedLepton(p) && isPrompt(p, false, false));}; Particles lepton_candidates = lepTop.allDescendants(firstParticleWith(isPromptChargedLepton), false); if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::DecayMode::E_MU top quark had no daughter lepton candidate, skipping event."); // In some cases there is no lepton from the W decay but only leptons from the decay of a radiated gamma. // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::DecayMode::E_MU (as of April 2017), and need to be rejected. // PartonicTops::DecayMode::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. 
/// @todo Should no longer be necessary -- remove bool istrueleptonictop = false; for (size_t i = 0; i < lepton_candidates.size(); ++i) { const Particle& lepton_candidate = lepton_candidates[i]; if ( lepton_candidate.hasParent(PID::PHOTON) ) { MSG_DEBUG("Found gamma parent, top: " << k+1 << " of " << leptonicpartontops.size() << " , lepton: " << i+1 << " of " << lepton_candidates.size()); continue; } if ( !istrueleptonictop && sameSign(lepTop,lepton_candidate) ) { chargedleptons.push_back(lepton_candidate); istrueleptonictop = true; } else MSG_WARNING("Error, found extra prompt charged lepton from top decay (and without gamma parent), ignoring it."); } if ( istrueleptonictop ) ++ntrueleptonictops; } } if ( ntrueleptonictops == 2 ) { oppositesign = !( sameSign(chargedleptons[0],chargedleptons[1]) ); if ( !oppositesign ) MSG_WARNING("error, same charge tops, skipping event."); } if ( ntrueleptonictops == 2 && oppositesign ) { // Get the four-momenta of the positively- and negatively-charged leptons const FourMomentum lepPlus = chargedleptons[0].charge() > 0 ? chargedleptons[0] : chargedleptons[1]; const FourMomentum lepMinus = chargedleptons[0].charge() > 0 ? chargedleptons[1] : chargedleptons[0]; const double dabseta_temp = lepPlus.abseta() - lepMinus.abseta(); // Get the four-momenta of the positively- and negatively-charged tops const FourMomentum topPlus_p4 = leptonicpartontops[0].pid() > 0 ? leptonicpartontops[0] : leptonicpartontops[1]; const FourMomentum topMinus_p4 = leptonicpartontops[0].pid() > 0 ? 
leptonicpartontops[1] : leptonicpartontops[0]; const FourMomentum ttbar_p4 = topPlus_p4 + topMinus_p4; const double tt_mass_temp = ttbar_p4.mass(); const double tt_absrapidity_temp = ttbar_p4.absrapidity(); const double tt_pT_temp = ttbar_p4.pT(); const double dabsrapidity_temp = topPlus_p4.absrapidity() - topMinus_p4.absrapidity(); // Fill parton-level histos fillWithUFOF( _h_dabseta, dabseta_temp, weight ); fillWithUFOF( _h_dabsrapidity, dabsrapidity_temp, weight ); // Now fill the same variables in the 2D and profile histos vs ttbar invariant mass, pT, and absolute rapidity for (int i_var = 0; i_var < 3; ++i_var) { double var; if ( i_var == 0 ) { var = tt_mass_temp; } else if ( i_var == 1 ) { var = tt_pT_temp; } else { var = tt_absrapidity_temp; } fillWithUFOF( _h_dabsrapidity_var[i_var], dabsrapidity_temp, var, weight ); fillWithUFOF( _h_dabseta_var[i_var], dabseta_temp, var, weight ); fillWithUFOF( _h_dabsrapidity_profile[i_var], dabsrapidity_temp, var, weight, (_h_dabsrapidity->xMax() + _h_dabsrapidity->xMin())/2. ); fillWithUFOF( _h_dabseta_profile[i_var], dabseta_temp, var, weight, (_h_dabseta->xMax() + _h_dabseta->xMin())/2. 
); } } } /// Normalise histograms to unit area void finalize() { normalize(_h_dabsetadressedleptons); normalize(_h_dabseta); normalize(_h_dabsrapidity); for (int i_var = 0; i_var < 3; ++i_var) { normalize(_h_dabsrapidity_var[i_var]); normalize(_h_dabseta_var[i_var]); } } private: Histo1DPtr _h_dabsetadressedleptons, _h_dabseta, _h_dabsrapidity; Histo2DPtr _h_dabseta_var[3], _h_dabsrapidity_var[3]; Profile1DPtr _h_dabseta_profile[3], _h_dabsrapidity_profile[3]; const vector _bins_tt_mass = {300., 430., 530., 1200.}; const vector _bins_tt_pT = {0., 41., 92., 300.}; const vector _bins_tt_absrapidity = {0., 0.34, 0.75, 1.5}; const vector _bins_dabseta = { -2., -68./60., -48./60., -32./60., -20./60., -8./60., 0., 8./60., 20./60., 32./60., 48./60., 68./60., 2.}; const vector _bins_dabsrapidity = {-2., -44./60., -20./60., 0., 20./60., 44./60., 2.}; void fillWithUFOF(Histo1DPtr h, double x, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), w); } void fillWithUFOF(Histo2DPtr h, double x, double y, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), std::max(std::min(y, h->yMax()-1e-9),h->yMin()+1e-9), w); } void fillWithUFOF(Profile1DPtr h, double x, double y, double w, double c) { h->fill(std::max(std::min(y, h->xMax()-1e-9),h->xMin()+1e-9), float(x > c) - float(x < c), w); } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1430892); } diff --git a/analyses/pluginCMS/CMS_2016_I1473674.cc b/analyses/pluginCMS/CMS_2016_I1473674.cc --- a/analyses/pluginCMS/CMS_2016_I1473674.cc +++ b/analyses/pluginCMS/CMS_2016_I1473674.cc @@ -1,124 +1,124 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include 
"Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { class CMS_2016_I1473674 : public Analysis { public: // Minimal constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1473674); // Set up projections and book histograms void init() { // Complete final state FinalState fs; // Parton level top quarks declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicPartonTops"); // Projections for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); // IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); declare(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); declare(dressed_electrons, "DressedElectrons"); // IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); declare(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); declare(dressed_muons, "DressedMuons"); // Projection for jets - VetoedFinalState fs_jets(FinalState(-DBL_MAX, DBL_MAX, 0*GeV)); + VetoedFinalState fs_jets; fs_jets.addVetoOnThisFinalState(dressed_muons); declare(FastJets(fs_jets, FastJets::ANTIKT, 0.5), "Jets"); // Projections for MET declare(MissingMomentum(), "MET"); // Booking of histograms book(_hist_met ,5, 1, 1); book(_hist_ht ,6, 1, 1); book(_hist_st ,7, 1, 1); book(_hist_wpt ,8, 1, 1); } /// Per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Select ttbar -> lepton+jets at parton level, removing tau decays const Particles leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); if (leptonicpartontops.size() != 1) vetoEvent; const Particles hadronicpartontops = apply(event, "HadronicPartonTops").particlesByPt(); if (hadronicpartontops.size() != 1) vetoEvent; // Select ttbar -> lepton+jets at particle level const DressedLeptons& 
dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); if (dressed_electrons.dressedLeptons().size() + dressed_muons.dressedLeptons().size() != 1) vetoEvent; const FourMomentum lepton = (dressed_electrons.dressedLeptons().empty() ? dressed_muons : dressed_electrons).dressedLeptons()[0]; // MET const MissingMomentum& met = applyProjection(event, "MET"); _hist_met->fill(met.visibleMomentum().pT()/GeV, weight); // HT and ST const FastJets& jetpro = applyProjection(event, "Jets"); const Jets jets = jetpro.jetsByPt(20*GeV); double ht = 0.0; for (const Jet& j : jets) { if (deltaR(j.momentum(), lepton) > 0.3) { ht += j.pT(); } } double st = ht + lepton.pT() + met.visibleMomentum().pT(); _hist_ht->fill(ht/GeV, weight); _hist_st->fill(st/GeV, weight); // WPT const FourMomentum w = lepton - met.visibleMomentum(); _hist_wpt->fill(w.pT()/GeV, weight); } /// Normalize histograms void finalize() { normalize(_hist_met); normalize(_hist_ht); normalize(_hist_st); normalize(_hist_wpt); } private: Histo1DPtr _hist_met, _hist_ht, _hist_st, _hist_wpt; }; DECLARE_RIVET_PLUGIN(CMS_2016_I1473674); } diff --git a/analyses/pluginCMS/CMS_2017_I1467451.cc b/analyses/pluginCMS/CMS_2017_I1467451.cc --- a/analyses/pluginCMS/CMS_2017_I1467451.cc +++ b/analyses/pluginCMS/CMS_2017_I1467451.cc @@ -1,103 +1,103 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedLeptons.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// Higgs -> WW -> emu + MET in 8 TeV pp collisions class CMS_2017_I1467451 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1467451); /// Book histograms and initialise projections before the run void init() { const double 
lepConeSize = 0.1; const double lepMaxEta = 2.5; const Cut lepton_cut = (Cuts::abseta < lepMaxEta); // Initialise and register projections - FinalState fs(-2.5,2.5,0.0*GeV); - FinalState fsm(-5,5,0.0*GeV); + FinalState fs((Cuts::etaIn(-2.5,2.5))); + FinalState fsm((Cuts::etaIn(-5,5))); declare(fs, "FS"); declare(fsm, "FSM"); ChargedLeptons charged_leptons(fs); IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); PromptFinalState prompt_leptons(charged_leptons); prompt_leptons.acceptMuonDecays(true); prompt_leptons.acceptTauDecays(false); PromptFinalState prompt_photons(photons); prompt_photons.acceptMuonDecays(true); prompt_photons.acceptTauDecays(false); DressedLeptons dressed_leptons = DressedLeptons(prompt_photons, prompt_leptons, lepConeSize, lepton_cut, true); declare(dressed_leptons, "DressedLeptons"); MissingMomentum Met(fsm); declare(Met, "MET"); // Book histograms book(histoPtH , 1,1,1); book(histoXsec, 2,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; Particles leptons = applyProjection(event, "DressedLeptons").particlesByPt(10.0*GeV); if (leptons.size() < 2) vetoEvent; if (leptons[0].pT() < 20*GeV || leptons[1].pT() < 10*GeV) vetoEvent; if (leptons[0].charge() == leptons[1].charge()) vetoEvent; if (leptons[0].abspid() == leptons[1].abspid()) vetoEvent; FourMomentum LL = (leptons[0].momentum() + leptons[1].momentum()); if (LL.mass() < 12*GeV) vetoEvent; if (LL.pT() < 30*GeV) vetoEvent; FourMomentum EtMiss = applyProjection(event,"MET").missingMomentum(); FourMomentum P4H = LL + EtMiss; double dphi = deltaPhi(LL, EtMiss); double mT = sqrt(2*LL.pT()*EtMiss.pT()*(1-cos(dphi))); if (mT < 50*GeV) vetoEvent; histoPtH->fill(min(P4H.pT()/GeV, 199.), weight); histoXsec->fill(8000, weight); ///< @todo Should probably be a Counter } /// Normalise histograms etc., after the run void finalize() { scale(histoPtH, crossSection()/sumOfWeights()); scale(histoXsec, 
(histoXsec->xMax()-histoXsec->xMin())*crossSection()/sumOfWeights()); } private: Histo1DPtr histoPtH, histoXsec; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1467451); } diff --git a/analyses/pluginCMS/CMS_2017_I1605749.cc b/analyses/pluginCMS/CMS_2017_I1605749.cc --- a/analyses/pluginCMS/CMS_2017_I1605749.cc +++ b/analyses/pluginCMS/CMS_2017_I1605749.cc @@ -1,145 +1,145 @@ // -*- C++ -*- // Rivet framework #include "Rivet/Analysis.hh" // Projections #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { using namespace Cuts; class CMS_2017_I1605749 : public Analysis { public: // Constructor CMS_2017_I1605749() : Analysis("CMS_2017_I1605749") { } // Book histograms and initialise projections before the run void init() { // Projections - const FinalState fs(-5.0, 5.0, 0.0*GeV); + const FinalState fs((Cuts::etaIn(-5.0, 5.0))); declare(FastJets(fs, FastJets::ANTIKT, 0.5), "Jets"); // Jet Charge Histos for (int i = 1; i <= 18; i++) { book(_h_Charge[i - 1], i, 1, 1); } } // Perform the per-event analysis void analyze(const Event& event) { const Jets& jets = applyProjection(event, "Jets").jetsByPt(10.0*GeV); if (jets.size() < 2) vetoEvent; double leadingpt = jets[0].pt()/GeV; double subleadingpt = jets[1].pt()/GeV; if (jets.size() < 2 || jets[0].abseta() >= 1.5 || jets[1].abseta() >= 1.5 || leadingpt < 400.0 || subleadingpt < 100.0) { vetoEvent; } vector constituents1 = jets[0].constituents(); std::vector numerator(9, 0), denominator(9, 0); double t_jetcharge1, t_jetcharge1k6, t_jetcharge1k3; double t_jetchargeL1, t_jetchargeL1k6, t_jetchargeL1k3; double t_jetchargeT1, t_jetchargeT1k6, t_jetchargeT1k3; denominator[0] = leadingpt; denominator[1] = std::pow(leadingpt, 0.6); denominator[2] = std::pow(leadingpt, 0.3); if (constituents1.size() > 0) { for (unsigned j = 0; j < constituents1.size(); j++) { if (std::abs(constituents1[j].pid()) > 9 && std::abs(constituents1[j].pid())!= 21) { if 
(constituents1[j].pt() > 1*GeV) { double charge = constituents1[j].charge(); double mom = constituents1[j].pt(); double dotproduct = constituents1[j].p3().dot(jets[0].p3()) / jets[0].p(); double crossproduct = constituents1[j].p3().cross(jets[0].p3()).mod() / jets[0].p(); numerator[0] += (mom * charge); numerator[1] += ((std::pow(mom, 0.6)) * charge); numerator[2] += ((std::pow(mom, 0.3)) * charge); numerator[3] += (dotproduct * charge); numerator[4] += ((std::pow(dotproduct, 0.6)) * charge); numerator[5] += ((std::pow(dotproduct, 0.3)) * charge); denominator[3] += dotproduct; denominator[4] += (std::pow(dotproduct, 0.6)); denominator[5] += (std::pow(dotproduct, 0.3)); numerator[6] += (crossproduct * charge); numerator[7] += ((std::pow(crossproduct, 0.6)) * charge); numerator[8] += ((std::pow(crossproduct, 0.3)) * charge); denominator[6] += crossproduct; denominator[7] += (std::pow(crossproduct, 0.6)); denominator[8] += (std::pow(crossproduct, 0.3)); } } } } t_jetcharge1 = (denominator[0] > 0) ? numerator[0] / denominator[0] : 0; t_jetcharge1k6 = (denominator[1] > 0) ? numerator[1] / denominator[1] : 0; t_jetcharge1k3 = (denominator[2] > 0) ? numerator[2] / denominator[2] : 0; t_jetchargeL1 = (denominator[3] > 0) ? numerator[3] / denominator[3] : 0; t_jetchargeL1k6 = (denominator[4] > 0) ? numerator[4] / denominator[4] : 0; t_jetchargeL1k3 = (denominator[5] > 0) ? numerator[5] / denominator[5] : 0; t_jetchargeT1 = (denominator[6] > 0) ? numerator[6] / denominator[6] : 0; t_jetchargeT1k6 = (denominator[7] > 0) ? numerator[7] / denominator[7] : 0; t_jetchargeT1k3 = (denominator[8] > 0) ? 
numerator[8] / denominator[8] : 0; _h_Charge[0]->fill(t_jetcharge1); _h_Charge[1]->fill(t_jetcharge1k6); _h_Charge[2]->fill(t_jetcharge1k3); _h_Charge[3]->fill(t_jetchargeL1); _h_Charge[4]->fill(t_jetchargeL1k6); _h_Charge[5]->fill(t_jetchargeL1k3); _h_Charge[6]->fill(t_jetchargeT1); _h_Charge[7]->fill(t_jetchargeT1k6); _h_Charge[8]->fill(t_jetchargeT1k3); if (leadingpt > 400 && leadingpt < 700) { _h_Charge[9]->fill(t_jetcharge1k6); _h_Charge[12]->fill(t_jetchargeL1k6); _h_Charge[15]->fill(t_jetchargeT1k6); } else if (leadingpt > 700 && leadingpt < 1000) { _h_Charge[10]->fill(t_jetcharge1k6); _h_Charge[13]->fill(t_jetchargeL1k6); _h_Charge[16]->fill(t_jetchargeT1k6); } else if (leadingpt > 1000 && leadingpt < 1800) { _h_Charge[11]->fill(t_jetcharge1k6); _h_Charge[14]->fill(t_jetchargeL1k6); _h_Charge[17]->fill(t_jetchargeT1k6); } } // Normalise histograms etc., after the run void finalize() { for (int j = 0; j < 18; j++) { normalize(_h_Charge[j]); for (size_t i = 0; i < _h_Charge[j]-> numBins(); i++) { _h_Charge[j]->bin(i).scaleW(1.0 / _h_Charge[j]->bin(i).width()); } } } private: Histo1DPtr _h_Charge[18]; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1605749); } diff --git a/analyses/pluginD0/D0_1995_I398175.cc b/analyses/pluginD0/D0_1995_I398175.cc --- a/analyses/pluginD0/D0_1995_I398175.cc +++ b/analyses/pluginD0/D0_1995_I398175.cc @@ -1,136 +1,136 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/JetShape.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief D0 Run-1 jet shapes measurement class D0_1995_I398175 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(D0_1995_I398175); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - const FinalState fs(-4.0, 4.0); + const FinalState fs((Cuts::etaIn(-4.0, 4.0))); declare(fs, "FS"); // FastJets jets(fs, FastJets::ANTIKT, 0.6); 
FastJets jets(fs, FastJets::D0ILCONE, 1.0); jets.useInvisibles(); declare(jets, "Jets"); // Specify jets pT bins _ptedges = {{ 45.0, 70.0, 105.0, 140.0, 1800.0}}; // Book histograms for (size_t ptbin = 0; ptbin < 4; ++ptbin) { _jsnames_pT[ptbin] = "JetShape" + to_str(ptbin) ; const JetShape jsp(jets, 0.0, 1.0, 10, _ptedges[ptbin], _ptedges[ptbin+1], 0.0, 0.2, PSEUDORAPIDITY); declare(jsp, _jsnames_pT[ptbin]); book( _h_Rho_pT_central[ptbin] ,ptbin+1, 1, 1); } const JetShape jspfwd0(jets, 0.0, 1.0, 10, 45, 70, 2.5, 3.5, PSEUDORAPIDITY); declare(jspfwd0, "JetShapeFwd0"); const JetShape jspfwd1(jets, 0.0, 1.0, 10, 70, 105, 2.5, 3.5, PSEUDORAPIDITY); declare(jspfwd1, "JetShapeFwd1"); book( _h_Rho_pT_forward[0] ,5, 1, 1); book( _h_Rho_pT_forward[1] ,6, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // Get jets and require at least one to pass pT and y cuts const Jets jets = apply(event, "Jets").jetsByPt(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) ); MSG_DEBUG("Selecting jets with pT> "<<_ptedges.front()); MSG_DEBUG("Jet multiplicity before cuts = " << jets.size()); if (jets.size() == 0){ MSG_DEBUG("No jets found in required pT and rapidity range"); vetoEvent; } // Calculate and histogram jet shapes for (size_t ipt = 0; ipt < 4; ++ipt) { const JetShape& jsipt = apply(event, _jsnames_pT[ipt]); for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) { const double r_rho = jsipt.rBinMid(rbin); MSG_DEBUG(ipt << " " << rbin << " (" << r_rho << ") " << jsipt.diffJetShape(ijet, rbin)); /// @note Bin width Jacobian factor of 0.7/0.1 = 7 in the differential shapes plot // _profhistRho_pT[ipt]->fill(r_rho/0.7, (0.7/0.1)*jsipt.diffJetShape(ijet, rbin)); const double r_Psi = jsipt.rBinMax(rbin); MSG_DEBUG(ipt << " " << rbin << " (" << r_rho << ") " << jsipt.intJetShape(ijet, rbin)); _h_Rho_pT_central[ipt]->fill(r_Psi/1.0, jsipt.intJetShape(ijet, rbin)); } } } const JetShape& jsiptfwd0 = 
apply(event, "JetShapeFwd0"); for (size_t ijet = 0; ijet < jsiptfwd0.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsiptfwd0.numBins(); ++rbin) { const double r_Psi = jsiptfwd0.rBinMax(rbin); _h_Rho_pT_forward[0]->fill(r_Psi/1.0, jsiptfwd0.intJetShape(ijet, rbin)); } } const JetShape& jsiptfwd1 = apply(event, "JetShapeFwd1"); for (size_t ijet = 0; ijet < jsiptfwd1.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsiptfwd1.numBins(); ++rbin) { const double r_Psi = jsiptfwd1.rBinMax(rbin); _h_Rho_pT_forward[1]->fill(r_Psi/1.0, jsiptfwd1.intJetShape(ijet, rbin)); } } } /// Normalise histograms etc., after the run void finalize() { // scale(_h_YYYY, crossSection()/sumOfWeights()); // norm to cross section // normalize(_h_YYYY); // normalize to unity } //@} private: vector _ptedges; string _jsnames_pT[4]; /// @name Histograms //@{ Profile1DPtr _h_Rho_pT_central[4]; Profile1DPtr _h_Rho_pT_forward[2]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_1995_I398175); } diff --git a/analyses/pluginD0/D0_2001_S4674421.cc b/analyses/pluginD0/D0_2001_S4674421.cc --- a/analyses/pluginD0/D0_2001_S4674421.cc +++ b/analyses/pluginD0/D0_2001_S4674421.cc @@ -1,189 +1,189 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief D0 Run I differential W/Z boson cross-section analysis /// @author Lars Sonnenschein /// @author Andy Buckley class D0_2001_S4674421 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor. 
D0_2001_S4674421() : Analysis("D0_2001_S4674421") { } /// @name Analysis methods //@{ void init() { // Final state projection - FinalState fs(-5.0, 5.0); // corrected for detector acceptance + FinalState fs((Cuts::etaIn(-5.0, 5.0))); // corrected for detector acceptance declare(fs, "FS"); // Z -> e- e+ - LeadingParticlesFinalState eeFS(FinalState(-5.0, 5.0, 0.)); //20.); + LeadingParticlesFinalState eeFS(FinalState((Cuts::etaIn(-5.0, 5.0)))); //20.); eeFS.addParticleIdPair(PID::ELECTRON); declare(eeFS, "eeFS"); // W- -> e- nu_e~ - LeadingParticlesFinalState enuFS(FinalState(-5.0, 5.0, 0.)); //25.); + LeadingParticlesFinalState enuFS(FinalState((Cuts::etaIn(-5.0, 5.0)))); //25.); enuFS.addParticleId(PID::ELECTRON).addParticleId(PID::NU_EBAR); declare(enuFS, "enuFS"); // W+ -> e+ nu_e - LeadingParticlesFinalState enubFS(FinalState(-5.0, 5.0, 0.)); //25.); + LeadingParticlesFinalState enubFS(FinalState((Cuts::etaIn(-5.0, 5.0)))); //25.); enubFS.addParticleId(PID::POSITRON).addParticleId(PID::NU_E); declare(enubFS, "enubFS"); // Remove neutrinos for isolation of final state particles VetoedFinalState vfs(fs); vfs.vetoNeutrinos(); declare(vfs, "VFS"); // Counters book(_eventsFilledW,"eventsFilledW"); book(_eventsFilledZ,"eventsFilledZ"); // Histograms book(_h_dsigdpt_w ,1, 1, 1); book(_h_dsigdpt_z ,1, 1, 2); book(_h_dsigdpt_scaled_z, 2, 1, 1); } void analyze(const Event& event) { const LeadingParticlesFinalState& eeFS = apply(event, "eeFS"); // Z boson analysis if (eeFS.particles().size() >= 2) { // If there is a Z candidate: // Fill Z pT distributions double deltaM2=1e30,mass2(0.); double pT=-1.; const Particles& Zdaughters = eeFS.particles(); for (size_t ix = 0; ix < Zdaughters.size(); ++ix) { for (size_t iy = ix+1; iy < Zdaughters.size(); ++iy) { if (Zdaughters[ix].pid()!=-Zdaughters[iy].pid()) continue; const FourMomentum pmom = Zdaughters[ix].momentum() + Zdaughters[iy].momentum(); double mz2 = pmom.mass2(); double dm2 = fabs(mz2 - sqr(91.118*GeV)); if (dm2 < 
deltaM2) { pT = pmom.pT(); deltaM2 = dm2; mass2 = mz2; } } } if (pT > 0. && mass2 > 0. && inRange(sqrt(mass2)/GeV, 75.0, 105.0)) { _eventsFilledZ->fill(); MSG_DEBUG("Z pmom.pT() = " << pT/GeV << " GeV"); _h_dsigdpt_z->fill(pT/GeV); // return if found a Z return; } } // There is no Z -> ee candidate... so this might be a W event const LeadingParticlesFinalState& enuFS = apply(event, "enuFS"); const LeadingParticlesFinalState& enubFS = apply(event, "enubFS"); double deltaM2=1e30; double pT=-1.; for (size_t iw = 0; iw < 2; ++iw) { Particles Wdaughters; Wdaughters = (iw == 0) ? enuFS.particles() : enubFS.particles(); for (size_t ix = 0; ix < Wdaughters.size(); ++ix) { for (size_t iy = ix+1; iy < Wdaughters.size(); ++iy) { if (Wdaughters[ix].pid() == Wdaughters[iy].pid()) continue; const FourMomentum pmom = Wdaughters[0].momentum() + Wdaughters[1].momentum(); double dm2 = abs(pmom.mass2() - sqr(80.4*GeV)); if (dm2 < deltaM2) { pT = pmom.pT(); deltaM2 = dm2; } } } } if (pT > 0.) { _eventsFilledW->fill(); _h_dsigdpt_w->fill(pT/GeV); } } void finalize() { // Get cross-section per event (i.e. per unit weight) from generator const double xSecPerEvent = crossSectionPerEvent()/picobarn; // Correct W pT distribution to W cross-section const double xSecW = xSecPerEvent * dbl(*_eventsFilledW); // Correct Z pT distribution to Z cross-section const double xSecZ = xSecPerEvent * dbl(*_eventsFilledZ); // Get W and Z pT integrals const double wpt_integral = _h_dsigdpt_w->integral(); const double zpt_integral = _h_dsigdpt_z->integral(); // Divide and scale ratio histos if (xSecW == 0 || wpt_integral == 0 || xSecZ == 0 || zpt_integral == 0) { MSG_WARNING("Not filling ratio plot because input histos are empty"); } else { // Scale factor converts event counts to cross-sections, and inverts the // branching ratios since only one decay channel has been analysed for each boson. // Oh, and we put MW/MZ in, like they do in the paper. 
const double MW_MZ = 0.8820; // Ratio M_W/M_Z const double BRZEE_BRWENU = 0.033632 / 0.1073; // Ratio of branching fractions const double scalefactor = (xSecW / wpt_integral) / (xSecZ / zpt_integral) * MW_MZ * BRZEE_BRWENU; for (size_t ibin = 0; ibin < _h_dsigdpt_w->numBins(); ibin++) { const double xval = _h_dsigdpt_w->bin(ibin).xMid(); const double xerr = _h_dsigdpt_w->bin(ibin).xWidth() / 2.; double yval(0), yerr(0); if (_h_dsigdpt_w->bin(ibin).sumW() != 0 && _h_dsigdpt_z->bin(ibin).sumW() != 0) { yval = scalefactor * _h_dsigdpt_w->bin(ibin).sumW() / _h_dsigdpt_z->bin(ibin).sumW(); yerr = yval * sqrt( sqr(_h_dsigdpt_w->bin(ibin).relErr()) + sqr(_h_dsigdpt_z->bin(ibin).areaErr()) ); } _h_dsigdpt_scaled_z->addPoint(xval, yval, xerr, yerr); } } // Normalize non-ratio histos normalize(_h_dsigdpt_w, xSecW); normalize(_h_dsigdpt_z, xSecZ); } //@} private: /// @name Event counters for cross section normalizations //@{ CounterPtr _eventsFilledW; CounterPtr _eventsFilledZ; //@} //@{ /// Histograms Histo1DPtr _h_dsigdpt_w; Histo1DPtr _h_dsigdpt_z; Scatter2DPtr _h_dsigdpt_scaled_z; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2001_S4674421); } diff --git a/analyses/pluginD0/D0_2004_S5992206.cc b/analyses/pluginD0/D0_2004_S5992206.cc --- a/analyses/pluginD0/D0_2004_S5992206.cc +++ b/analyses/pluginD0/D0_2004_S5992206.cc @@ -1,137 +1,137 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /* @brief D0 Run II angular correlations in di-jet events * @author Lars Sonnenschein * * Measurement of angular correlations in di-jet events. * * @par Run conditions * * @arg \f$ \sqrt{s} = \f$ 1960 GeV * @arg Run with generic QCD events. 
* @arg Several \f$ p_\perp^\text{min} \f$ cutoffs are probably required to fill the histograms: * @arg \f$ p_\perp^\text{min} = \f$ 50, 75, 100, 150 GeV for the four pT ranges respectively * */ class D0_2004_S5992206 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor. D0_2004_S5992206() : Analysis("D0_2004_S5992206") { } //@} /// @name Analysis methods //@{ void init() { // Final state for jets, mET etc. - const FinalState fs(-3.0, 3.0); + const FinalState fs((Cuts::etaIn(-3.0, 3.0))); declare(fs, "FS"); // Veto neutrinos, and muons with pT above 1.0 GeV VetoedFinalState vfs(fs); vfs.vetoNeutrinos(); vfs.addVetoPairDetail(PID::MUON, 1.0*GeV, DBL_MAX); declare(vfs, "VFS"); declare(FastJets(vfs, FastJets::D0ILCONE, 0.7), "Jets"); declare(MissingMomentum(vfs), "CalMET"); // Book histograms book(_histJetAzimuth_pTmax75_100 ,1, 2, 1); book(_histJetAzimuth_pTmax100_130 ,2, 2, 1); book(_histJetAzimuth_pTmax130_180 ,3, 2, 1); book(_histJetAzimuth_pTmax180_ ,4, 2, 1); } /// Do the analysis void analyze(const Event& event) { // Analyse and print some info const JetAlg& jetpro = apply(event, "Jets"); MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size()); const Jets jets = jetpro.jetsByPt(40.0*GeV); if (jets.size() >= 2) { MSG_DEBUG("Jet multiplicity after pT > 40 GeV cut = " << jets.size()); } else { vetoEvent; } const double rap1 = jets[0].rapidity(); const double rap2 = jets[1].rapidity(); if (fabs(rap1) > 0.5 || fabs(rap2) > 0.5) { vetoEvent; } MSG_DEBUG("Jet eta and pT requirements fulfilled"); const double pT1 = jets[0].pT(); const MissingMomentum& caloMissEt = apply(event, "CalMET"); MSG_DEBUG("Missing vector Et = " << caloMissEt.vectorEt()/GeV << " GeV"); if (caloMissEt.vectorEt().mod() > 0.7*pT1) { MSG_DEBUG("Vetoing event with too much missing ET: " << caloMissEt.vectorEt()/GeV << " GeV > " << 0.7*pT1/GeV << " GeV"); vetoEvent; } if (pT1/GeV >= 75.0) { const double dphi = deltaPhi(jets[0].phi(), jets[1].phi()); if 
(inRange(pT1/GeV, 75.0, 100.0)) { _histJetAzimuth_pTmax75_100->fill(dphi); } else if (inRange(pT1/GeV, 100.0, 130.0)) { _histJetAzimuth_pTmax100_130->fill(dphi); } else if (inRange(pT1/GeV, 130.0, 180.0)) { _histJetAzimuth_pTmax130_180->fill(dphi); } else if (pT1/GeV > 180.0) { _histJetAzimuth_pTmax180_->fill(dphi); } } } // Finalize void finalize() { // Normalize histograms to unit area normalize(_histJetAzimuth_pTmax75_100); normalize(_histJetAzimuth_pTmax100_130); normalize(_histJetAzimuth_pTmax130_180); normalize(_histJetAzimuth_pTmax180_); } //@} private: /// @name Histograms //@{ Histo1DPtr _histJetAzimuth_pTmax75_100; Histo1DPtr _histJetAzimuth_pTmax100_130; Histo1DPtr _histJetAzimuth_pTmax130_180; Histo1DPtr _histJetAzimuth_pTmax180_; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2004_S5992206); } diff --git a/analyses/pluginD0/D0_2006_S6438750.cc b/analyses/pluginD0/D0_2006_S6438750.cc --- a/analyses/pluginD0/D0_2006_S6438750.cc +++ b/analyses/pluginD0/D0_2006_S6438750.cc @@ -1,101 +1,101 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief D0 inclusive isolated photon cross-section vs. \f$ p_\perp(gamma) \f$. /// @author Andy Buckley /// @author Gavin Hesketh class D0_2006_S6438750 : public Analysis { public: /// @name Constructors etc. //@{ /// Default constructor. 
D0_2006_S6438750() : Analysis("D0_2006_S6438750") { } //@} /// @name Analysis methods //@{ void init() { // General FS for photon isolation FinalState fs; declare(fs, "AllFS"); // Get leading photon - LeadingParticlesFinalState photonfs(FinalState(-0.9, 0.9, 23.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-0.9, 0.9) && Cuts::pT >= 23.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // Book histograms book(_h_pTgamma ,1, 1, 1); } /// Do the analysis void analyze(const Event& event) { // Get the photon const FinalState& photonfs = apply(event, "LeadingPhoton"); if (photonfs.particles().size() != 1) { vetoEvent; } const FourMomentum photon = photonfs.particles().front().momentum(); // Isolate photon by ensuring that a 0.4 cone around it contains less than 10% of the photon's energy double E_P = photon.E(); double eta_P = photon.eta(); double phi_P = photon.phi(); double econe = 0.0; for (const Particle& p : apply(event, "AllFS").particles()) { if (deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.4) { econe += p.E(); if (econe/E_P > 1.1) { vetoEvent; } } } // Fill histo _h_pTgamma->fill(photon.pT()); } // Finalize void finalize() { const double lumi_gen = sumOfWeights()/crossSection(); // Divide by effective lumi, plus rapidity bin width of 1.8 scale(_h_pTgamma, 1/lumi_gen * 1/1.8); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_pTgamma; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2006_S6438750); } diff --git a/analyses/pluginD0/D0_2008_S7719523.cc b/analyses/pluginD0/D0_2008_S7719523.cc --- a/analyses/pluginD0/D0_2008_S7719523.cc +++ b/analyses/pluginD0/D0_2008_S7719523.cc @@ -1,200 +1,200 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { // A local scope function for division, 
handling the div-by-zero case /// @todo Why isn't the math divide() function being found? namespace { inline double _safediv(double a, double b, double result_if_err) { return (b != 0) ? a/b : result_if_err; } } /// @brief Measurement of isolated gamma + jet + X differential cross-sections /// /// Inclusive isolated gamma + jet cross-sections, differential in pT(gamma), for /// various photon and jet rapidity bins. /// /// @author Andy Buckley /// @author Gavin Hesketh class D0_2008_S7719523 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor D0_2008_S7719523() : Analysis("D0_2008_S7719523") { } //@} /// @name Analysis methods //@{ /// Set up projections and book histograms void init() { // General FS FinalState fs; declare(fs, "FS"); // Get leading photon - LeadingParticlesFinalState photonfs(FinalState(-1.0, 1.0, 30.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 30.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "JetFS"); // Jets FastJets jetpro(vfs, FastJets::D0ILCONE, 0.7); declare(jetpro, "Jets"); // Histograms book(_h_central_same_cross_section ,1, 1, 1); book(_h_central_opp_cross_section ,2, 1, 1); book(_h_forward_same_cross_section ,3, 1, 1); book(_h_forward_opp_cross_section ,4, 1, 1); // Ratio histos to be filled by divide() book(_h_cen_opp_same, 5, 1, 1); book(_h_fwd_opp_same, 8, 1, 1); // Ratio histos to be filled manually, since the num/denom inputs don't match book(_h_cen_same_fwd_same, 6, 1, 1, true); book(_h_cen_opp_fwd_same, 7, 1, 1, true); book(_h_cen_same_fwd_opp, 9, 1, 1, true); book(_h_cen_opp_fwd_opp, 10, 1, 1, true); } /// Do the analysis void analyze(const Event& event) { // Get the photon const FinalState& photonfs = apply(event, "LeadingPhoton"); if (photonfs.particles().size() != 1) { vetoEvent; } const FourMomentum 
photon = photonfs.particles().front().momentum(); // Isolate photon by ensuring that a 0.4 cone around it contains less than 7% of the photon's energy double egamma = photon.E(); double eta_P = photon.eta(); double phi_P = photon.phi(); double econe = 0.0; for (const Particle& p : apply(event, "JetFS").particles()) { if (deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.4) { econe += p.E(); // Veto as soon as E_cone gets larger if (econe/egamma > 0.07) { MSG_DEBUG("Vetoing event because photon is insufficiently isolated"); vetoEvent; } } } Jets jets = apply(event, "Jets").jetsByPt(15.0*GeV); if (jets.empty()) vetoEvent; FourMomentum leadingJet = jets[0].momentum(); if (deltaR(eta_P, phi_P, leadingJet.eta(), leadingJet.phi()) < 0.7) { vetoEvent; } int photon_jet_sign = sign( leadingJet.rapidity() * photon.rapidity() ); // Veto if leading jet is outside plotted rapidity regions const double abs_y1 = fabs(leadingJet.rapidity()); if (inRange(abs_y1, 0.8, 1.5) || abs_y1 > 2.5) { MSG_DEBUG("Leading jet falls outside acceptance range; |y1| = " << abs_y1); vetoEvent; } // Fill histos if (fabs(leadingJet.rapidity()) < 0.8) { Histo1DPtr h = (photon_jet_sign >= 1) ? _h_central_same_cross_section : _h_central_opp_cross_section; h->fill(photon.pT()); } else if (inRange( fabs(leadingJet.rapidity()), 1.5, 2.5)) { Histo1DPtr h = (photon_jet_sign >= 1) ? _h_forward_same_cross_section : _h_forward_opp_cross_section; h->fill(photon.pT()); } } /// Finalize void finalize() { const double lumi_gen = sumOfWeights()/crossSection(); const double dy_photon = 2.0; const double dy_jet_central = 1.6; const double dy_jet_forward = 2.0; // Cross-section ratios (6 plots) // Central/central and forward/forward ratios divide(_h_central_opp_cross_section, _h_central_same_cross_section, _h_cen_opp_same); divide(_h_forward_opp_cross_section, _h_forward_same_cross_section, _h_fwd_opp_same); // Central/forward ratio combinations /// @note The central/forward histo binnings are not the same! 
Hence the need to do these by hand :-( for (size_t i = 0; i < _h_cen_same_fwd_same->numPoints(); ++i) { const YODA::HistoBin1D& cen_same_bini = _h_central_same_cross_section->bin(i); const YODA::HistoBin1D& cen_opp_bini = _h_central_opp_cross_section->bin(i); const YODA::HistoBin1D& fwd_same_bini = _h_central_same_cross_section->bin(i); const YODA::HistoBin1D& fwd_opp_bini = _h_central_opp_cross_section->bin(i); _h_cen_same_fwd_same->point(i).setY(_safediv(cen_same_bini.sumW(), fwd_same_bini.sumW(), 0), add_quad(cen_same_bini.relErr(), fwd_same_bini.relErr())); _h_cen_opp_fwd_same->point(i).setY(_safediv(cen_opp_bini.sumW(), fwd_same_bini.sumW(), 0), add_quad(cen_opp_bini.relErr(), fwd_same_bini.relErr())); _h_cen_same_fwd_opp->point(i).setY(_safediv(cen_same_bini.sumW(), fwd_opp_bini.sumW(), 0), add_quad(cen_same_bini.relErr(), fwd_opp_bini.relErr())); _h_cen_opp_fwd_opp->point(i).setY(_safediv(cen_opp_bini.sumW(), fwd_opp_bini.sumW(), 0), add_quad(cen_opp_bini.relErr(), fwd_opp_bini.relErr())); } // Use generator cross section for remaining histograms // Each of these needs the additional factor 2 because the // y_photon * y_jet requirement reduces the corresponding 2D "bin width" // by a factor 1/2. 
scale(_h_central_same_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_central); scale(_h_central_opp_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_central); scale(_h_forward_same_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_forward); scale(_h_forward_opp_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_forward); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_central_same_cross_section; Histo1DPtr _h_central_opp_cross_section; Histo1DPtr _h_forward_same_cross_section; Histo1DPtr _h_forward_opp_cross_section; Scatter2DPtr _h_cen_opp_same; Scatter2DPtr _h_fwd_opp_same; Scatter2DPtr _h_cen_same_fwd_same; Scatter2DPtr _h_cen_opp_fwd_same; Scatter2DPtr _h_cen_same_fwd_opp; Scatter2DPtr _h_cen_opp_fwd_opp; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2008_S7719523); } diff --git a/analyses/pluginD0/D0_2011_I895662.cc b/analyses/pluginD0/D0_2011_I895662.cc --- a/analyses/pluginD0/D0_2011_I895662.cc +++ b/analyses/pluginD0/D0_2011_I895662.cc @@ -1,93 +1,93 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class D0_2011_I895662 : public Analysis { public: D0_2011_I895662() : Analysis("D0_2011_I895662") { } public: void init() { - FastJets jets(FinalState(-3.6, 3.6, 0.*GeV), FastJets::D0ILCONE, 0.7); + FastJets jets(FinalState((Cuts::etaIn(-3.6, 3.6))), FastJets::D0ILCONE, 0.7); jets.useInvisibles(); declare(jets, "Jets"); book(_h_m3j_08_40 ,1, 1, 1); book(_h_m3j_16_40 ,2, 1, 1); book(_h_m3j_24_40 ,3, 1, 1); book(_h_m3j_24_70 ,4, 1, 1); book(_h_m3j_24_100 ,5, 1, 1); } void analyze(const Event& event) { Jets jets = apply(event, "Jets").jetsByPt(40.*GeV); // Need three jets, leading jet above 150 GeV if (jets.size() < 3 || jets[0].pT() <= 150.*GeV) vetoEvent; std::vector p; for (size_t i=0; i<3; i++) { p.push_back(jets[i].momentum()); } // Jets need to be separated by 2*Rcone if (deltaR(p[0], p[1], RAPIDITY) < 
1.4 || deltaR(p[0], p[2], RAPIDITY) < 1.4 || deltaR(p[1], p[2], RAPIDITY) < 1.4) vetoEvent; // Leading three jets need to be within |y|<2.4 double ymax = fabs(p[0].rapidity()); for (size_t i=1; i<3; i++) { if (ymax < fabs(p[i].rapidity())) ymax = fabs(p[i].rapidity()); } if (ymax >= 2.4) vetoEvent; double m3jet = (p[0]+p[1]+p[2]).mass()/GeV; if (ymax < 0.8) _h_m3j_08_40->fill(m3jet); if (ymax < 1.6) _h_m3j_16_40->fill(m3jet); if (ymax < 2.4) { _h_m3j_24_40->fill(m3jet); if (p[2].pT() > 70.*GeV) _h_m3j_24_70->fill(m3jet); if (p[2].pT() > 100.*GeV) _h_m3j_24_100->fill(m3jet); } } void finalize() { // Factor of 1000 is based on GeV <-> TeV mismatch between paper and Hepdata table scale(_h_m3j_08_40, 1000*crossSection()/picobarn/sumOfWeights()); scale(_h_m3j_16_40, 1000*crossSection()/picobarn/sumOfWeights()); scale(_h_m3j_24_40, 1000*crossSection()/picobarn/sumOfWeights()); scale(_h_m3j_24_70, 1000*crossSection()/picobarn/sumOfWeights()); scale(_h_m3j_24_100, 1000*crossSection()/picobarn/sumOfWeights()); } private: Histo1DPtr _h_m3j_08_40; Histo1DPtr _h_m3j_16_40; Histo1DPtr _h_m3j_24_40; Histo1DPtr _h_m3j_24_70; Histo1DPtr _h_m3j_24_100; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2011_I895662); } diff --git a/analyses/pluginLHCb/LHCB_2012_I1119400.cc b/analyses/pluginLHCb/LHCB_2012_I1119400.cc --- a/analyses/pluginLHCb/LHCB_2012_I1119400.cc +++ b/analyses/pluginLHCb/LHCB_2012_I1119400.cc @@ -1,356 +1,356 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class LHCB_2012_I1119400 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor LHCB_2012_I1119400() : Analysis("LHCB_2012_I1119400"), _p_min(5.0), _pt_min(0.0),_pt1_edge(0.8), _pt2_edge(1.2), //_eta_nbins(4), _eta_min(2.5), _eta_max(4.5) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { fillMap(_partLftMap); int id_shift = 0; if (fuzzyEquals(sqrtS(), 7*TeV)) id_shift = 1; // define ratios if second pdgid in pair is -1, it means that is a antiparticle/particle ratio _ratiotype["pbarp"] = make_pair(2212, -1); _ratiotype["kminuskplus"] = make_pair(321, -1); _ratiotype["piminuspiplus"] = make_pair(211, -1); _ratiotype["ppi"] = make_pair(2212, 211); _ratiotype["kpi"] = make_pair(321, 211); _ratiotype["pk"] = make_pair(2212, 321); std::map _hepdataid; _hepdataid["pbarp"] = 1 + id_shift; _hepdataid["kminuskplus"] = 3 + id_shift; _hepdataid["piminuspiplus"] = 5 + id_shift; _hepdataid["ppi"] = 7 + id_shift; _hepdataid["kpi"] = 9 + id_shift; _hepdataid["pk"] = 11 + id_shift; std::map >::iterator it; // booking histograms for (it=_ratiotype.begin(); it!=_ratiotype.end(); it++) { book(_h_ratio_lowpt [it->first], _hepdataid[it->first], 1, 1); book(_h_ratio_midpt [it->first], _hepdataid[it->first], 1, 2); book(_h_ratio_highpt[it->first], _hepdataid[it->first], 1, 3); book(_h_num_lowpt [it->first], "TMP/num_l_"+it->first,refData(_hepdataid[it->first], 1, 1)); book(_h_num_midpt [it->first], "TMP/num_m_"+it->first,refData(_hepdataid[it->first], 1, 2)); book(_h_num_highpt [it->first], "TMP/num_h_"+it->first,refData(_hepdataid[it->first], 1, 3)); book(_h_den_lowpt [it->first], "TMP/den_l_"+it->first,refData(_hepdataid[it->first], 1, 1)); book(_h_den_midpt [it->first], "TMP/den_m_"+it->first,refData(_hepdataid[it->first], 1, 2)); book(_h_den_highpt [it->first], "TMP/den_h_"+it->first,refData(_hepdataid[it->first], 1, 3)); } - declare(ChargedFinalState(_eta_min, _eta_max, _pt_min*GeV), "CFS"); + declare(ChargedFinalState(Cuts::etaIn(_eta_min, _eta_max) && Cuts::pT >= 
_pt_min*GeV), "CFS"); } // Perform the per-event analysis void analyze(const Event& event) { const ChargedFinalState& cfs = apply(event, "CFS"); for (const Particle& p : cfs.particles()) { int id = p.pid(); // continue if particle not a proton, a kaon or a pion if ( !( (abs(id) == 211) || (abs(id) == 321) || (abs(id) == 2212))) { continue; } // cut in momentum const FourMomentum& qmom = p.momentum(); if (qmom.p3().mod() < _p_min) continue; // Lifetime cut: ctau sum of all particle ancestors < 10^-9 m according to the paper (see eq. 5) const double MAX_CTAU = 1.0e-9; // [m] double ancestor_lftsum = getMotherLifeTimeSum(p); if ( (ancestor_lftsum < 0.0) || (ancestor_lftsum > MAX_CTAU) ) continue; double eta = qmom.eta(); double pT = qmom.pT(); std::map >::iterator it; for (it=_ratiotype.begin(); it!=_ratiotype.end(); it++) { // check what type of ratio is if ((it->second.second)==-1) { // check ptbin if (pT < _pt1_edge) { // filling histos for numerator and denominator if (id == -abs(it->second.first)) _h_num_lowpt[it->first]->fill(eta); if (id == abs(it->second.first)) _h_den_lowpt[it->first]->fill(eta); } else if (pT < _pt2_edge) { // filling histos for numerator and denominator if (id == -abs(it->second.first)) _h_num_midpt[it->first]->fill(eta); if (id == abs(it->second.first)) _h_den_midpt[it->first]->fill(eta); } else { // filling histos for numerator and denominator if (id == -abs(it->second.first)) _h_num_highpt[it->first]->fill(eta); if (id == abs(it->second.first)) _h_den_highpt[it->first]->fill(eta); } } else { // check what type of ratio is if (pT < _pt1_edge) { // filling histos for numerator and denominator if (abs(id) == abs(it->second.first)) _h_num_lowpt[it->first]->fill(eta); if (abs(id) == abs(it->second.second)) _h_den_lowpt[it->first]->fill(eta); } else if (pT < _pt2_edge) { // filling histos for numerator and denominator if (abs(id) == abs(it->second.first)) _h_num_midpt[it->first]->fill(eta); if (abs(id) == abs(it->second.second)) 
_h_den_midpt[it->first]->fill(eta); } else { // filling histos for numerator and denominator if (abs(id) == abs(it->second.first)) _h_num_highpt[it->first]->fill(eta); if (abs(id) == abs(it->second.second)) _h_den_highpt[it->first]->fill(eta); } } } } } // Generate the ratio histograms void finalize() { std::map >::iterator it; // booking histograms for (it=_ratiotype.begin(); it!=_ratiotype.end(); it++) { divide(_h_num_lowpt[it->first], _h_den_lowpt[it->first], _h_ratio_lowpt[it->first]); divide(_h_num_midpt[it->first], _h_den_midpt[it->first], _h_ratio_midpt[it->first]); divide(_h_num_highpt[it->first], _h_den_highpt[it->first], _h_ratio_highpt[it->first]); } } //@} private: // Get particle lifetime from hardcoded data double getLifeTime(int pid) { pid = abs(pid); double lft = -1.0; map::iterator pPartLft = _partLftMap.find(pid); // search stable particle list if (pPartLft == _partLftMap.end()) { if (pid <= 100) return 0.0; for (size_t i=0; i < sizeof(_stablePDGIds)/sizeof(unsigned int); i++) { if (pid == _stablePDGIds[i]) { lft = 0.0; break; } } } else { lft = (*pPartLft).second; } if (lft < 0.0 && PID::isHadron(pid)) { MSG_WARNING("Lifetime map imcomplete --- " << pid << "... assume zero lifetime"); lft = 0.0; } return lft; } // Data members like post-cuts event weight counters go here const double getMotherLifeTimeSum(const Particle& p) { if (p.genParticle() == NULL) return -1.; double lftSum = 0.; double plft = 0.; const GenParticle* part = p.genParticle(); const GenVertex* ivtx = part->production_vertex(); while(ivtx) { if (ivtx->particles_in_size() < 1) { lftSum = -1.; break; }; const GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin(); part = (*iPart_invtx); if ( !(part) ) { lftSum = -1.; break; }; ivtx = part->production_vertex(); if ( (part->pdg_id() == 2212) || !(ivtx) ) break; // reached beam plft = getLifeTime(part->pdg_id()); if (plft < 0.) 
{ lftSum = -1.; break; }; lftSum += plft; }; return (lftSum * c_light); } /// @name Private variables // Momentum threshold double _p_min; // The edges of the intervals of transversal momentum double _pt_min; double _pt1_edge; double _pt2_edge; // The limits of the pseudorapidity window //int _eta_nbins; double _eta_min; double _eta_max; // Map between PDG id and particle lifetimes in seconds std::map _partLftMap; // Set of PDG Ids for stable particles (PDG Id <= 100 are considered stable) static const int _stablePDGIds[205]; // Define histograms // ratio std::map _h_ratio_lowpt; std::map _h_ratio_midpt; std::map _h_ratio_highpt; // numerator std::map _h_num_lowpt; std::map _h_num_midpt; std::map _h_num_highpt; // denominator std::map _h_den_lowpt; std::map _h_den_midpt; std::map _h_den_highpt; // Map of ratios and IDs of numerator and denominator std::map > _ratiotype; // Fill the PDG Id to Lifetime[seconds] map // Data was extracted from LHCb Particle Table through LHCb::ParticlePropertySvc bool fillMap(map &m) { m[6] = 4.707703E-25; m[11] = 1.E+16; m[12] = 1.E+16; m[13] = 2.197019E-06; m[14] = 1.E+16; m[15] = 2.906E-13; m[16] = 1.E+16; m[22] = 1.E+16; m[23] = 2.637914E-25; m[24] = 3.075758E-25; m[25] = 9.4E-26; m[35] = 9.4E-26; m[36] = 9.4E-26; m[37] = 9.4E-26; m[84] = 3.335641E-13; m[85] = 1.290893E-12; m[111] = 8.4E-17; m[113] = 4.405704E-24; m[115] = 6.151516E-24; m[117] = 4.088275E-24; m[119] = 2.102914E-24; m[130] = 5.116E-08; m[150] = 1.525E-12; m[211] = 2.6033E-08; m[213] = 4.405704E-24; m[215] = 6.151516E-24; m[217] = 4.088275E-24; m[219] = 2.102914E-24; m[221] = 5.063171E-19; m[223] = 7.752794E-23; m[225] = 3.555982E-24; m[227] = 3.91793E-24; m[229] = 2.777267E-24; m[310] = 8.953E-11; m[313] = 1.308573E-23; m[315] = 6.038644E-24; m[317] = 4.139699E-24; m[319] = 3.324304E-24; m[321] = 1.238E-08; m[323] = 1.295693E-23; m[325] = 6.682357E-24; m[327] = 4.139699E-24; m[329] = 3.324304E-24; m[331] = 3.210791E-21; m[333] = 1.545099E-22; m[335] = 9.016605E-24; 
m[337] = 7.565657E-24; m[350] = 1.407125E-12; m[411] = 1.04E-12; m[413] = 6.856377E-21; m[415] = 1.778952E-23; m[421] = 4.101E-13; m[423] = 1.000003E-19; m[425] = 1.530726E-23; m[431] = 5.E-13; m[433] = 1.000003E-19; m[435] = 3.291061E-23; m[441] = 2.465214E-23; m[443] = 7.062363E-21; m[445] = 3.242425E-22; m[510] = 1.525E-12; m[511] = 1.525E-12; m[513] = 1.000019E-19; m[515] = 1.31E-23; m[521] = 1.638E-12; m[523] = 1.000019E-19; m[525] = 1.31E-23; m[530] = 1.536875E-12; m[531] = 1.472E-12; m[533] = 1.E-19; m[535] = 1.31E-23; m[541] = 4.5E-13; m[553] = 1.218911E-20; m[1112] = 4.539394E-24; m[1114] = 5.578069E-24; m[1116] = 1.994582E-24; m[1118] = 2.269697E-24; m[1212] = 4.539394E-24; m[1214] = 5.723584E-24; m[1216] = 1.994582E-24; m[1218] = 1.316424E-24; m[2112] = 8.857E+02; m[2114] = 5.578069E-24; m[2116] = 4.388081E-24; m[2118] = 2.269697E-24; m[2122] = 4.539394E-24; m[2124] = 5.723584E-24; m[2126] = 1.994582E-24; m[2128] = 1.316424E-24; m[2212] = 1.E+16; m[2214] = 5.578069E-24; m[2216] = 4.388081E-24; m[2218] = 2.269697E-24; m[2222] = 4.539394E-24; m[2224] = 5.578069E-24; m[2226] = 1.994582E-24; m[2228] = 2.269697E-24; m[3112] = 1.479E-10; m[3114] = 1.670589E-23; m[3116] = 5.485102E-24; m[3118] = 3.656734E-24; m[3122] = 2.631E-10; m[3124] = 4.219309E-23; m[3126] = 8.227653E-24; m[3128] = 3.291061E-24; m[3212] = 7.4E-20; m[3214] = 1.828367E-23; m[3216] = 5.485102E-24; m[3218] = 3.656734E-24; m[3222] = 8.018E-11; m[3224] = 1.838582E-23; m[3226] = 5.485102E-24; m[3228] = 3.656734E-24; m[3312] = 1.639E-10; m[3314] = 6.648608E-23; m[3322] = 2.9E-10; m[3324] = 7.233101E-23; m[3334] = 8.21E-11; m[4112] = 2.991874E-22; m[4114] = 4.088274E-23; m[4122] = 2.E-13; m[4132] = 1.12E-13; m[4212] = 3.999999E-22; m[4214] = 3.291061E-22; m[4222] = 2.951624E-22; m[4224] = 4.417531E-23; m[4232] = 4.42E-13; m[4332] = 6.9E-14; m[4412] = 3.335641E-13; m[4422] = 3.335641E-13; m[4432] = 3.335641E-13; m[5112] = 1.E-19; m[5122] = 1.38E-12; m[5132] = 1.42E-12; m[5142] = 1.290893E-12; 
m[5212] = 1.E-19; m[5222] = 1.E-19; m[5232] = 1.42E-12; m[5242] = 1.290893E-12; m[5312] = 1.E-19; m[5322] = 1.E-19; m[5332] = 1.55E-12; m[5342] = 1.290893E-12; m[5442] = 1.290893E-12; m[5512] = 1.290893E-12; m[5522] = 1.290893E-12; m[5532] = 1.290893E-12; m[5542] = 1.290893E-12; m[10111] = 2.48382E-24; m[10113] = 4.635297E-24; m[10115] = 2.54136E-24; m[10211] = 2.48382E-24; m[10213] = 4.635297E-24; m[10215] = 2.54136E-24; m[10223] = 1.828367E-24; m[10225] = 3.636531E-24; m[10311] = 2.437823E-24; m[10313] = 7.313469E-24; m[10315] = 3.538775E-24; m[10321] = 2.437823E-24; m[10323] = 7.313469E-24; m[10325] = 3.538775E-24; m[10331] = 4.804469E-24; m[10411] = 4.38E-24; m[10413] = 3.29E-23; m[10421] = 4.38E-24; m[10423] = 3.22653E-23; m[10431] = 6.5821E-22; m[10433] = 6.5821E-22; m[10441] = 6.453061E-23; m[10511] = 4.39E-24; m[10513] = 1.65E-23; m[10521] = 4.39E-24; m[10523] = 1.65E-23; m[10531] = 4.39E-24; m[10533] = 1.65E-23; m[11114] = 2.194041E-24; m[11116] = 1.828367E-24; m[11212] = 1.880606E-24; m[11216] = 1.828367E-24; m[12112] = 2.194041E-24; m[12114] = 2.194041E-24; m[12116] = 5.063171E-24; m[12126] = 1.828367E-24; m[12212] = 2.194041E-24; m[12214] = 2.194041E-24; m[12216] = 5.063171E-24; m[12224] = 2.194041E-24; m[12226] = 1.828367E-24; m[13112] = 6.582122E-24; m[13114] = 1.09702E-23; m[13116] = 5.485102E-24; m[13122] = 1.316424E-23; m[13124] = 1.09702E-23; m[13126] = 6.928549E-24; m[13212] = 6.582122E-24; m[13214] = 1.09702E-23; m[13216] = 5.485102E-24; m[13222] = 6.582122E-24; m[13224] = 1.09702E-23; m[13226] = 5.485102E-24; m[13314] = 2.742551E-23; m[13324] = 2.742551E-23; m[14122] = 1.828367E-22; m[20022] = 1.E+16; m[20113] = 1.567172E-24; m[20213] = 1.567172E-24; m[20223] = 2.708692E-23; m[20313] = 3.782829E-24; m[20315] = 2.384827E-24; m[20323] = 3.782829E-24; m[20325] = 2.384827E-24; m[20333] = 1.198929E-23; m[20413] = 2.63E-24; m[20423] = 2.63E-24; m[20433] = 6.5821E-22; m[20443] = 7.395643E-22; m[20513] = 2.63E-24; m[20523] = 2.63E-24; m[20533] = 
2.63E-24; m[21112] = 2.632849E-24; m[21114] = 3.291061E-24; m[21212] = 2.632849E-24; m[21214] = 6.582122E-24; m[22112] = 4.388081E-24; m[22114] = 3.291061E-24; m[22122] = 2.632849E-24; m[22124] = 6.582122E-24; m[22212] = 4.388081E-24; m[22214] = 3.291061E-24; m[22222] = 2.632849E-24; m[22224] = 3.291061E-24; m[23112] = 7.313469E-24; m[23114] = 2.991874E-24; m[23122] = 4.388081E-24; m[23124] = 6.582122E-24; m[23126] = 3.291061E-24; m[23212] = 7.313469E-24; m[23214] = 2.991874E-24; m[23222] = 7.313469E-24; m[23224] = 2.991874E-24; m[30113] = 2.632849E-24; m[30213] = 2.632849E-24; m[30221] = 1.880606E-24; m[30223] = 2.089563E-24; m[30313] = 2.056913E-24; m[30323] = 2.056913E-24; m[30443] = 2.419898E-23; m[31114] = 1.880606E-24; m[31214] = 3.291061E-24; m[32112] = 3.989164E-24; m[32114] = 1.880606E-24; m[32124] = 3.291061E-24; m[32212] = 3.989164E-24; m[32214] = 1.880606E-24; m[32224] = 1.880606E-24; m[33122] = 1.880606E-23; m[42112] = 6.582122E-24; m[42212] = 6.582122E-24; m[43122] = 2.194041E-24; m[53122] = 4.388081E-24; m[100111] = 1.645531E-24; m[100113] = 1.64553E-24; m[100211] = 1.645531E-24; m[100213] = 1.64553E-24; m[100221] = 1.196749E-23; m[100223] = 3.061452E-24; m[100313] = 2.837122E-24; m[100323] = 2.837122E-24; m[100331] = 4.459432E-25; m[100333] = 4.388081E-24; m[100441] = 4.701516E-23; m[100443] = 2.076379E-21; m[100553] = 2.056913E-20; m[200553] = 3.242425E-20; m[300553] = 3.210791E-23; m[9000111] = 8.776163E-24; m[9000211] = 8.776163E-24; m[9000443] = 8.227652E-24; m[9000553] = 5.983747E-24; m[9010111] = 3.164482E-24; m[9010211] = 3.164482E-24; m[9010221] = 9.403031E-24; m[9010443] = 8.438618E-24; m[9010553] = 8.3318E-24; m[9020443] = 1.061633E-23; m[9030221] = 6.038644E-24; m[9042413] = 2.07634E-21; m[9050225] = 1.394517E-24; m[9060225] = 3.291061E-24; m[9080225] = 4.388081E-24; m[9090225] = 2.056913E-24; m[9910445] = 2.07634E-21; m[9920443] = 2.07634E-21; return true; } }; const int LHCB_2012_I1119400::_stablePDGIds[205] = { 311, 543, 545, 551, 555, 
557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303, 4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414, 4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324, 5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534, 5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112, 12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343, 30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321, 100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555, 120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002, 1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015, 1000016, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039, 2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013, 2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223, 3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001, 4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023, 9900024, 9900041, 9900042 }; // Plugin hook DECLARE_RIVET_PLUGIN(LHCB_2012_I1119400); } diff --git a/analyses/pluginLHCb/LHCB_2013_I1208105.cc b/analyses/pluginLHCb/LHCB_2013_I1208105.cc --- a/analyses/pluginLHCb/LHCB_2013_I1208105.cc +++ b/analyses/pluginLHCb/LHCB_2013_I1208105.cc @@ -1,235 +1,235 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class LHCB_2013_I1208105 : public Analysis { public: LHCB_2013_I1208105() : Analysis("LHCB_2013_I1208105") { } void init() { // Projections - declare(FinalState(1.9, 4.9), "forwardFS"); - declare(FinalState(-3.5,-1.5), "backwardFS"); - declare(ChargedFinalState(1.9, 4.9), "forwardCFS"); - 
declare(ChargedFinalState(-3.5,-1.5), "backwardCFS"); + declare(FinalState((Cuts::etaIn(1.9, 4.9))), "forwardFS"); + declare(FinalState((Cuts::etaIn(-3.5,-1.5))), "backwardFS"); + declare(ChargedFinalState((Cuts::etaIn(1.9, 4.9))), "forwardCFS"); + declare(ChargedFinalState((Cuts::etaIn(-3.5,-1.5))), "backwardCFS"); // Histos book(_s_chEF_minbias, 1, 1, 1, true); book(_s_chEF_hard, 2, 1, 1, true); book(_s_chEF_diff, 3, 1, 1, true); book(_s_chEF_nondiff, 4, 1, 1, true); book(_s_totEF_minbias, 5, 1, 1, true); book(_s_totEF_hard, 6, 1, 1, true); book(_s_totEF_diff, 7, 1, 1, true); book(_s_totEF_nondiff, 8, 1, 1, true); // Temporary profiles and histos /// @todo Convert to declared/registered temp histos book(_tp_chEF_minbias, "TMP/chEF_minbias", refData(1,1,1)); book(_tp_chEF_hard, "TMP/chEF_hard", refData(2,1,1)); book(_tp_chEF_diff, "TMP/chEF_diff", refData(3,1,1)); book(_tp_chEF_nondiff, "TMP/chEF_nondiff", refData(4,1,1)); book(_tp_totEF_minbias, "TMP/totEF_minbias", refData(5,1,1)); book(_tp_totEF_hard, "TMP/totEF_hard", refData(6,1,1)); book(_tp_totEF_diff, "TMP/totEF_diff", refData(7,1,1)); book(_tp_totEF_nondiff, "TMP/totEF_nondiff", refData(8,1,1)); book(_th_chN_minbias, "TMP/chN_minbias", refData(1,1,1)); book(_th_chN_hard, "TMP/chN_hard", refData(2,1,1)); book(_th_chN_diff, "TMP/chN_diff", refData(3,1,1)); book(_th_chN_nondiff, "TMP/chN_nondiff", refData(4,1,1)); book(_th_totN_minbias, "TMP/totN_minbias", refData(5,1,1)); book(_th_totN_hard, "TMP/totN_hard", refData(6,1,1)); book(_th_totN_diff, "TMP/totN_diff", refData(7,1,1)); book(_th_totN_nondiff, "TMP/totN_nondiff", refData(8,1,1)); // Counters book(_mbSumW, "TMP/mbSumW"); book(_hdSumW, "TMP/hdSumW"); book(_dfSumW, "TMP/dfSumW"); book(_ndSumW, "TMP/ndSumW"); book(_mbchSumW, "TMP/mbchSumW"); book(_hdchSumW, "TMP/hdchSumW"); book(_dfchSumW, "TMP/dfchSumW"); book(_ndchSumW, "TMP/ndchSumW"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& ffs = apply(event, 
"forwardFS"); const FinalState& bfs = apply(event, "backwardFS"); const ChargedFinalState& fcfs = apply(event, "forwardCFS"); const ChargedFinalState& bcfs = apply(event, "backwardCFS"); // Veto this event completely if there are no forward *charged* particles if (fcfs.empty()) vetoEvent; // Charged and neutral version { // Decide empirically if this is a "hard" or "diffractive" event bool ishardEvt = false; for (const Particle& p : ffs.particles()) { if (p.pT() > 3.0*GeV) { ishardEvt = true; break; } } // Decide empirically if this is a "diffractive" event /// @todo Can be "diffractive" *and* "hard"? bool isdiffEvt = (bfs.size() == 0); // Update event-type weight counters _mbSumW->fill(); (isdiffEvt ? _dfSumW : _ndSumW)->fill(); if (ishardEvt) _hdSumW->fill(); // Plot energy flow for (const Particle& p : ffs.particles()) { const double eta = p.eta(); const double energy = p.E(); _tp_totEF_minbias->fill(eta, energy); _th_totN_minbias->fill(eta); if (ishardEvt) { _tp_totEF_hard->fill(eta, energy); _th_totN_hard->fill(eta); } if (isdiffEvt) { _tp_totEF_diff->fill(eta, energy); _th_totN_diff->fill(eta); } else { _tp_totEF_nondiff->fill(eta, energy); _th_totN_nondiff->fill(eta); } } } // Charged-only version { bool ishardEvt = false; for (const Particle& p : fcfs.particles()) { if (p.pT() > 3.0*GeV) { ishardEvt = true; break; } } // Decide empirically if this is a "diffractive" event /// @todo Can be "diffractive" *and* "hard"? bool isdiffEvt = (bcfs.size() == 0); // Update event-type weight counters _mbchSumW->fill(); (isdiffEvt ? 
_dfchSumW : _ndchSumW)->fill(); if (ishardEvt) _hdchSumW->fill(); // Plot energy flow for (const Particle& p : fcfs.particles()) { const double eta = p.eta(); const double energy = p.E(); _tp_chEF_minbias->fill(eta, energy); _th_chN_minbias->fill(eta); if (ishardEvt) { _tp_chEF_hard->fill(eta, energy); _th_chN_hard->fill(eta); } if (isdiffEvt) { _tp_chEF_diff->fill(eta, energy); _th_chN_diff->fill(eta); } else { _tp_chEF_nondiff->fill(eta, energy); _th_chN_nondiff->fill(eta); } } } } void finalize() { for (size_t i = 0; i < _s_totEF_minbias->numPoints(); ++i) { const double val = _tp_totEF_minbias->bin(i).mean() * _th_totN_minbias->bin(i).height(); const double err = (_tp_totEF_minbias->bin(i).mean() * _th_totN_minbias->bin(i).heightErr() + _tp_totEF_minbias->bin(i).stdErr() * _th_totN_minbias->bin(i).height()); _s_totEF_minbias->point(i).setY(val/_mbSumW->val(), err/_mbSumW->val()); } for (size_t i = 0; i < _s_totEF_hard->numPoints(); ++i) { const double val = _tp_totEF_hard->bin(i).mean() * _th_totN_hard->bin(i).height(); const double err = (_tp_totEF_hard->bin(i).mean() * _th_totN_hard->bin(i).heightErr() + _tp_totEF_hard->bin(i).stdErr() * _th_totN_hard->bin(i).height()); _s_totEF_hard->point(i).setY(val/_hdSumW->val(), err/_hdSumW->val()); } for (size_t i = 0; i < _s_totEF_diff->numPoints(); ++i) { const double val = _tp_totEF_diff->bin(i).mean() * _th_totN_diff->bin(i).height(); const double err = (_tp_totEF_diff->bin(i).mean() * _th_totN_diff->bin(i).heightErr() + _tp_totEF_diff->bin(i).stdErr() * _th_totN_diff->bin(i).height()); _s_totEF_diff->point(i).setY(val/_dfSumW->val(), err/_dfSumW->val()); } for (size_t i = 0; i < _s_totEF_nondiff->numPoints(); ++i) { const double val = _tp_totEF_nondiff->bin(i).mean() * _th_totN_nondiff->bin(i).height(); const double err = (_tp_totEF_nondiff->bin(i).mean() * _th_totN_nondiff->bin(i).heightErr() + _tp_totEF_nondiff->bin(i).stdErr() * _th_totN_nondiff->bin(i).height()); 
_s_totEF_nondiff->point(i).setY(val/_ndSumW->val(), err/_ndSumW->val()); } for (size_t i = 0; i < _s_chEF_minbias->numPoints(); ++i) { const double val = _tp_chEF_minbias->bin(i).mean() * _th_chN_minbias->bin(i).height(); const double err = (_tp_chEF_minbias->bin(i).mean() * _th_chN_minbias->bin(i).heightErr() + _tp_chEF_minbias->bin(i).stdErr() * _th_chN_minbias->bin(i).height()); _s_chEF_minbias->point(i).setY(val/_mbchSumW->val(), err/_mbchSumW->val()); } for (size_t i = 0; i < _s_chEF_hard->numPoints(); ++i) { const double val = _tp_chEF_hard->bin(i).mean() * _th_chN_hard->bin(i).height(); const double err = (_tp_chEF_hard->bin(i).mean() * _th_chN_hard->bin(i).heightErr() + _tp_chEF_hard->bin(i).stdErr() * _th_chN_hard->bin(i).height()); _s_chEF_hard->point(i).setY(val/_hdchSumW->val(), err/_hdchSumW->val()); } for (size_t i = 0; i < _s_chEF_diff->numPoints(); ++i) { const double val = _tp_chEF_diff->bin(i).mean() * _th_chN_diff->bin(i).height(); const double err = (_tp_chEF_diff->bin(i).mean() * _th_chN_diff->bin(i).heightErr() + _tp_chEF_diff->bin(i).stdErr() * _th_chN_diff->bin(i).height()); _s_chEF_diff->point(i).setY(val/_dfchSumW->val(), err/_dfchSumW->val()); } for (size_t i = 0; i < _s_chEF_nondiff->numPoints(); ++i) { const double val = _tp_chEF_nondiff->bin(i).mean() * _th_chN_nondiff->bin(i).height(); const double err = (_tp_chEF_nondiff->bin(i).mean() * _th_chN_nondiff->bin(i).heightErr() + _tp_chEF_nondiff->bin(i).stdErr() * _th_chN_nondiff->bin(i).height()); _s_chEF_nondiff->point(i).setY(val/_ndchSumW->val(), err/_ndchSumW->val()); } } private: /// @name Histograms and counters /// /// @note Histograms correspond to charged and total EF for each class of events: /// minimum bias, hard scattering, diffractive enriched and non-diffractive enriched. 
//@{ // Scatters to be filled in finalize with 1/d_eta Scatter2DPtr _s_totEF_minbias, _s_totEF_hard, _s_totEF_diff, _s_totEF_nondiff; Scatter2DPtr _s_chEF_minbias, _s_chEF_hard, _s_chEF_diff, _s_chEF_nondiff; // Temp profiles containing Profile1DPtr _tp_totEF_minbias, _tp_totEF_hard, _tp_totEF_diff, _tp_totEF_nondiff; Profile1DPtr _tp_chEF_minbias, _tp_chEF_hard, _tp_chEF_diff, _tp_chEF_nondiff; // Temp profiles containing Histo1DPtr _th_totN_minbias, _th_totN_hard, _th_totN_diff, _th_totN_nondiff; Histo1DPtr _th_chN_minbias, _th_chN_hard, _th_chN_diff, _th_chN_nondiff; // Sums of weights (~ #events) in each event class CounterPtr _mbSumW, _hdSumW, _dfSumW, _ndSumW; CounterPtr _mbchSumW, _hdchSumW, _dfchSumW, _ndchSumW; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(LHCB_2013_I1208105); } diff --git a/analyses/pluginLHCb/LHCB_2014_I1281685.cc b/analyses/pluginLHCb/LHCB_2014_I1281685.cc --- a/analyses/pluginLHCb/LHCB_2014_I1281685.cc +++ b/analyses/pluginLHCb/LHCB_2014_I1281685.cc @@ -1,1177 +1,1178 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// Charged particle multiplicities and densities in $pp$ collisions at $\sqrt{s} = 7$ TeV class LHCB_2014_I1281685 : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor LHCB_2014_I1281685() : Analysis("LHCB_2014_I1281685"), _p_min(2.0), _pt_min(0.2), _eta_min(2.0), _eta_max(4.8), _maxlft(1.0e-11) { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { fillMap(_partLftMap); // Projections - declare(ChargedFinalState(_eta_min, _eta_max, _pt_min*GeV), "CFS"); + declare(ChargedFinalState(Cuts::etaIn(_eta_min, _eta_max) && Cuts::pT >= _pt_min*GeV), + "CFS"); // Book histograms book(_h_mult_total ,"d03-x01-y01", 50, 0.5, 50.5); book(_h_mult_eta[0] ,"d04-x01-y01", 21, -0.5, 20.5); //eta=[2.0,2.5] book(_h_mult_eta[1] ,"d04-x01-y02", 21, -0.5, 20.5); //eta=[2.5,3.0] book(_h_mult_eta[2] ,"d04-x01-y03", 21, -0.5, 20.5); //eta=[3.0,3.5] book(_h_mult_eta[3] ,"d04-x01-y04", 21, -0.5, 20.5); //eta=[3.5,4.0] book(_h_mult_eta[4] ,"d04-x01-y05", 21, -0.5, 20.5); //eta=[4.0,4.5] book(_h_mult_pt[0] ,"d05-x01-y01", 21, -0.5, 20.5); //pT=[0.2,0.3]GeV book(_h_mult_pt[1] ,"d05-x01-y02", 21, -0.5, 20.5); //pT=[0.3,0.4]GeV book(_h_mult_pt[2] ,"d05-x01-y03", 21, -0.5, 20.5); //pT=[0.4,0.6]GeV book(_h_mult_pt[3] ,"d05-x01-y04", 21, -0.5, 20.5); //pT=[0.6,1.0]GeV book(_h_mult_pt[4] ,"d05-x01-y05", 21, -0.5, 20.5); //pT=[1.0,2.0]GeV book(_h_dndeta ,"d01-x01-y01", 14, 2.0, 4.8); //eta=[2,4.8] book(_h_dndpt ,"d02-x01-y01", 18, 0.2, 2.0); //pT =[0,2]GeV // Counters book(_sumW, "TMP/sumW"); } /// Perform the per-event analysis void analyze(const Event& event) { // Variable to store multiplicities per event int LHCbcountAll = 0; //count particles fulfiling all requirements int LHCbcountEta[8] = {0,0,0,0,0,0,0,0}; //count per eta-bin int LHCbcountPt[7] = {0,0,0,0,0,0,0}; //count per pT-bin vector val_dNdEta; vector val_dNdPt; val_dNdEta.clear(); val_dNdPt.clear(); const ChargedFinalState& cfs = apply(event, "CFS"); for (const Particle& p : cfs.particles()) { int id = p.pid(); // continue if particle is not a pion, kaon, proton, muon or electron if ( !( (abs(id) == 211) || (abs(id) == 
321) || (abs(id) == 2212) || (abs(id) == 13) || (abs(id) == 11)) ) { continue; } const FourMomentum& qmom = p.momentum(); const double eta = p.momentum().eta(); const double pT = p.momentum().pT(); //minimum momentum if (qmom.p3().mod() < _p_min) continue; //minimum tr. momentum if (pT < _pt_min) continue; //eta range if ((eta < _eta_min) || (eta > _eta_max)) continue; /* Select only prompt particles via lifetime */ //Sum of all mother lifetimes (PDG lifetime) < 10ps double ancestors_sumlft = getAncestorSumLifetime(p); if( (ancestors_sumlft > _maxlft) || (ancestors_sumlft < 0) ) continue; //after all cuts; LHCbcountAll++; //count particles in whole kin. range //in eta bins if( eta >2.0 && eta <= 2.5) LHCbcountEta[0]++; if( eta >2.5 && eta <= 3.0) LHCbcountEta[1]++; if( eta >3.0 && eta <= 3.5) LHCbcountEta[2]++; if( eta >3.5 && eta <= 4.0) LHCbcountEta[3]++; if( eta >4.0 && eta <= 4.5) LHCbcountEta[4]++; if( eta >2.0 && eta <= 4.8) LHCbcountEta[5]++; //cross-check //in pT bins if( pT > 0.2 && pT <= 0.3) LHCbcountPt[0]++; if( pT > 0.3 && pT <= 0.4) LHCbcountPt[1]++; if( pT > 0.4 && pT <= 0.6) LHCbcountPt[2]++; if( pT > 0.6 && pT <= 1.0) LHCbcountPt[3]++; if( pT > 1.0 && pT <= 2.0) LHCbcountPt[4]++; if( pT > 0.2) LHCbcountPt[5]++; //cross-check //particle densities -> need proper normalization (finalize) val_dNdPt.push_back( pT ); val_dNdEta.push_back( eta ); }//end for // Fill histograms only, if at least 1 particle pre event was within the // kinematic range of the analysis! 
if (LHCbcountAll) { _sumW->fill(); _h_mult_total->fill(LHCbcountAll); _h_mult_eta[0]->fill(LHCbcountEta[0]); _h_mult_eta[1]->fill(LHCbcountEta[1]); _h_mult_eta[2]->fill(LHCbcountEta[2]); _h_mult_eta[3]->fill(LHCbcountEta[3]); _h_mult_eta[4]->fill(LHCbcountEta[4]); _h_mult_pt[0]->fill(LHCbcountPt[0]); _h_mult_pt[1]->fill(LHCbcountPt[1]); _h_mult_pt[2]->fill(LHCbcountPt[2]); _h_mult_pt[3]->fill(LHCbcountPt[3]); _h_mult_pt[4]->fill(LHCbcountPt[4]); for (size_t part = 0; part < val_dNdEta.size(); part++) _h_dndeta->fill(val_dNdEta[part]); for (size_t part = 0; part < val_dNdPt.size(); part++) _h_dndpt->fill(val_dNdPt[part]); } } /// Normalise histograms etc., after the run void finalize() { const double scalefactor = 1.0/_sumW->val(); // normalize multiplicity histograms by nEvents const double scale1k = 1000.; // to match '10^3' scale in reference histograms scale( _h_dndeta, scalefactor ); scale( _h_dndpt, scalefactor*0.1 ); //additional factor 0.1 for [0.1 GeV/c] scale( _h_mult_total, scalefactor*scale1k); _h_mult_eta[0]->scaleW( scalefactor*scale1k ); _h_mult_eta[1]->scaleW( scalefactor*scale1k ); _h_mult_eta[2]->scaleW( scalefactor*scale1k ); _h_mult_eta[3]->scaleW( scalefactor*scale1k ); _h_mult_eta[4]->scaleW( scalefactor*scale1k ); _h_mult_pt[0]->scaleW( scalefactor*scale1k ); _h_mult_pt[1]->scaleW( scalefactor*scale1k ); _h_mult_pt[2]->scaleW( scalefactor*scale1k ); _h_mult_pt[3]->scaleW( scalefactor*scale1k ); _h_mult_pt[4]->scaleW( scalefactor*scale1k ); } //@} private: // Get mean PDG lifetime for particle with PID double getLifetime(int pid) { double lft = 0.; map::iterator pPartLft = _partLftMap.find(pid); if (pPartLft != _partLftMap.end()) { lft = (*pPartLft).second; } else { // allow identifying missing life times only in debug mode MSG_DEBUG("Could not determine lifetime for particle with PID " << pid << "... 
Assume non-prompt particle"); lft = -1; } return lft; } // Get sum of all ancestor particles const double getAncestorSumLifetime(const Particle& p) { double lftSum = 0.; double plft = 0.; const GenParticle* part = p.genParticle(); if ( 0 == part ) return -1; const GenVertex* ivtx = part->production_vertex(); while(ivtx) { if (ivtx->particles_in_size() < 1) { lftSum = -1.; break; }; const GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin(); part = (*iPart_invtx); if ( !(part) ) { lftSum = -1.; break; }; ivtx = part->production_vertex(); if ( (part->pdg_id() == 2212) || !(ivtx) ) break; // reached beam plft = getLifetime(part->pdg_id()); if (plft < 0.) { lftSum = -1.; break; }; lftSum += plft; } return (lftSum); } /// Hard-coded map linking PDG ID with PDG lifetime[s] (converted from ParticleTable.txt) bool fillMap(map& m) { // PDGID = LIFETIME m[22] = 1.000000e+016; m[-11] = 1.000000e+016; m[11] = 1.000000e+016; m[12] = 1.000000e+016; m[-13] = 2.197036e-006; m[13] = 2.197036e-006; m[111] = 8.438618e-017; m[211] = 2.603276e-008; m[-211] = 2.603276e-008; m[130] = 5.174624e-008; m[321] = 1.238405e-008; m[-321] = 1.238405e-008; m[2112] = 885.646128; m[2212] = 1.000000e+016; m[-2212] = 1.000000e+016; m[310] = 8.934603e-011; m[221] = 5.578070e-019; m[3122] = 2.631796e-010; m[3222] = 8.018178e-011; m[3212] = 7.395643e-020; m[3112] = 1.479129e-010; m[3322] = 2.899613e-010; m[3312] = 1.637344e-010; m[3334] = 8.207135e-011; m[-2112] = 885.646128; m[-3122] = 2.631796e-010; m[-3222] = 8.018178e-011; m[-3212] = 7.395643e-020; m[-3112] = 1.479129e-010; m[-3322] = 2.899613e-010; m[-3312] = 1.637344e-010; m[-3334] = 8.207135e-011; m[113] = 4.411610e-024; m[213] = 4.411610e-024; m[-213] = 4.411610e-024; m[223] = 7.798723e-023; m[333] = 1.545099e-022; m[323] = 1.295693e-023; m[-323] = 1.295693e-023; m[313] = 1.298249e-023; m[-313] = 1.298249e-023; m[20213] = 1.500000e-024; m[-20213] = 1.500000e-024; m[450000000] = 1.000000e+015; m[460000000] = 
1.000000e+015; m[470000000] = 1.000000e+015; m[480000000] = 1.000000e+015; m[490000000] = 1.000000e+015; m[20022] = 1.000000e+016; m[-15] = 2.906014e-013; m[15] = 2.906014e-013; m[24] = 3.104775e-025; m[-24] = 3.104775e-025; m[23] = 2.637914e-025; m[411] = 1.051457e-012; m[-411] = 1.051457e-012; m[421] = 4.116399e-013; m[-421] = 4.116399e-013; m[431] = 4.904711e-013; m[-431] = 4.904711e-013; m[4122] = 1.994582e-013; m[-4122] = 1.994582e-013; m[443] = 7.565657e-021; m[413] = 6.856377e-021; m[-413] = 6.856377e-021; m[423] = 1.000003e-019; m[-423] = 1.000003e-019; m[433] = 1.000003e-019; m[-433] = 1.000003e-019; m[521] = 1.671000e-012; m[-521] = 1.671000e-012; m[511] = 1.536000e-012; m[-511] = 1.536000e-012; m[531] = 1.461000e-012; m[-531] = 1.461000e-012; m[541] = 4.600000e-013; m[-541] = 4.600000e-013; m[5122] = 1.229000e-012; m[-5122] = 1.229000e-012; m[4112] = 4.388081e-022; m[-4112] = 4.388081e-022; m[4212] = 3.999999e-022; m[-4212] = 3.999999e-022; m[4222] = 3.291060e-022; m[-4222] = 3.291060e-022; m[25] = 9.400000e-026; m[35] = 9.400000e-026; m[36] = 9.400000e-026; m[37] = 9.400000e-026; m[-37] = 9.400000e-026; m[4312] = 9.800002e-014; m[-4312] = 9.800002e-014; m[4322] = 3.500001e-013; m[-4322] = 3.500001e-013; m[4332] = 6.453061e-014; m[-4332] = 6.453061e-014; m[4132] = 9.824063e-014; m[-4132] = 9.824063e-014; m[4232] = 4.417532e-013; m[-4232] = 4.417532e-013; m[5222] = 1.000000e-019; m[-5222] = 1.000000e-019; m[5212] = 1.000000e-019; m[-5212] = 1.000000e-019; m[5112] = 1.000000e-019; m[-5112] = 1.000000e-019; m[5312] = 1.000000e-019; m[-5312] = 1.000000e-019; m[5322] = 1.000000e-019; m[-5322] = 1.000000e-019; m[5332] = 1.550000e-012; m[-5332] = 1.550000e-012; m[5132] = 1.390000e-012; m[-5132] = 1.390000e-012; m[5232] = 1.390000e-012; m[-5232] = 1.390000e-012; m[100443] = 2.194041e-021; m[331] = 3.258476e-021; m[441] = 4.113826e-023; m[10441] = 4.063038e-023; m[20443] = 7.154480e-022; m[445] = 3.164482e-022; m[9000111] = 1.149997e-023; m[9000211] = 
1.149997e-023; m[-9000211] = 1.149997e-023; m[20113] = 1.500000e-024; m[115] = 6.151516e-024; m[215] = 6.151516e-024; m[-215] = 6.151516e-024; m[10323] = 7.313469e-024; m[-10323] = 7.313469e-024; m[10313] = 7.313469e-024; m[-10313] = 7.313469e-024; m[20323] = 3.782829e-024; m[-20323] = 3.782829e-024; m[20313] = 3.782829e-024; m[-20313] = 3.782829e-024; m[10321] = 2.238817e-024; m[-10321] = 2.238817e-024; m[10311] = 2.238817e-024; m[-10311] = 2.238817e-024; m[325] = 6.682357e-024; m[-325] = 6.682357e-024; m[315] = 6.038644e-024; m[-315] = 6.038644e-024; m[10411] = 4.380000e-024; m[20413] = 2.630000e-024; m[10413] = 3.290000e-023; m[-415] = 2.632849e-023; m[-10411] = 4.380000e-024; m[-20413] = 2.630000e-024; m[-10413] = 3.290000e-023; m[415] = 2.632849e-023; m[10421] = 4.380000e-024; m[20423] = 2.630000e-024; m[10423] = 3.482604e-023; m[-425] = 2.861792e-023; m[-10421] = 4.380000e-024; m[-20423] = 2.630000e-024; m[-10423] = 3.482604e-023; m[425] = 2.861792e-023; m[10431] = 6.582100e-022; m[20433] = 6.582100e-022; m[10433] = 6.582100e-022; m[435] = 4.388100e-023; m[-10431] = 6.582100e-022; m[-20433] = 6.582100e-022; m[-10433] = 6.582100e-022; m[-435] = 4.388100e-023; m[2224] = 5.485102e-024; m[2214] = 5.485102e-024; m[2114] = 5.485102e-024; m[1114] = 5.485102e-024; m[-2224] = 5.485102e-024; m[-2214] = 5.485102e-024; m[-2114] = 5.485102e-024; m[-1114] = 5.485102e-024; m[-523] = 1.000019e-019; m[523] = 1.000019e-019; m[513] = 1.000019e-019; m[-513] = 1.000019e-019; m[533] = 1.000000e-019; m[-533] = 1.000000e-019; m[10521] = 4.390000e-024; m[20523] = 2.630000e-024; m[10523] = 1.650000e-023; m[525] = 1.310000e-023; m[-10521] = 4.390000e-024; m[-20523] = 2.630000e-024; m[-10523] = 1.650000e-023; m[-525] = 1.310000e-023; m[10511] = 4.390000e-024; m[20513] = 2.630000e-024; m[10513] = 1.650000e-023; m[515] = 1.310000e-023; m[-10511] = 4.390000e-024; m[-20513] = 2.630000e-024; m[-10513] = 1.650000e-023; m[-515] = 1.310000e-023; m[10531] = 4.390000e-024; m[20533] = 
2.630000e-024; m[10533] = 1.650000e-023; m[535] = 1.310000e-023; m[-10531] = 4.390000e-024; m[-20533] = 2.630000e-024; m[-10533] = 1.650000e-023; m[-535] = 1.310000e-023; m[14] = 1.000000e+016; m[-14] = 1.000000e+016; m[-12] = 1.000000e+016; m[1] = 0.000000e+000; m[-1] = 0.000000e+000; m[2] = 0.000000e+000; m[-2] = 0.000000e+000; m[3] = 0.000000e+000; m[-3] = 0.000000e+000; m[4] = 0.000000e+000; m[-4] = 0.000000e+000; m[5] = 0.000000e+000; m[-5] = 0.000000e+000; m[6] = 4.707703e-025; m[-6] = 4.707703e-025; m[7] = 0.000000e+000; m[-7] = 0.000000e+000; m[8] = 0.000000e+000; m[-8] = 0.000000e+000; m[16] = 1.000000e+016; m[-16] = 1.000000e+016; m[17] = 0.000000e+000; m[-17] = 0.000000e+000; m[18] = 0.000000e+000; m[-18] = 0.000000e+000; m[21] = 0.000000e+000; m[32] = 0.000000e+000; m[33] = 0.000000e+000; m[34] = 0.000000e+000; m[-34] = 0.000000e+000; m[39] = 0.000000e+000; m[41] = 0.000000e+000; m[-41] = 0.000000e+000; m[42] = 0.000000e+000; m[-42] = 0.000000e+000; m[43] = 0.000000e+000; m[44] = 0.000000e+000; m[-44] = 0.000000e+000; m[81] = 0.000000e+000; m[82] = 0.000000e+000; m[-82] = 0.000000e+000; m[83] = 0.000000e+000; m[84] = 3.335641e-013; m[-84] = 3.335641e-013; m[85] = 1.290893e-012; m[-85] = 1.290893e-012; m[86] = 0.000000e+000; m[-86] = 0.000000e+000; m[87] = 0.000000e+000; m[-87] = 0.000000e+000; m[88] = 0.000000e+000; m[90] = 0.000000e+000; m[91] = 0.000000e+000; m[92] = 0.000000e+000; m[93] = 0.000000e+000; m[94] = 0.000000e+000; m[95] = 0.000000e+000; m[96] = 0.000000e+000; m[97] = 0.000000e+000; m[98] = 0.000000e+000; m[99] = 0.000000e+000; m[117] = 4.088275e-024; m[119] = 1.828367e-024; m[217] = 4.088275e-024; m[-217] = 4.088275e-024; m[219] = 1.828367e-024; m[-219] = 1.828367e-024; m[225] = 3.555982e-024; m[227] = 3.917930e-024; m[229] = 3.392846e-024; m[311] = 1.000000e+016; m[-311] = 1.000000e+016; m[317] = 4.139699e-024; m[-317] = 4.139699e-024; m[319] = 3.324304e-024; m[-319] = 3.324304e-024; m[327] = 4.139699e-024; m[-327] = 4.139699e-024; 
m[329] = 3.324304e-024; m[-329] = 3.324304e-024; m[335] = 8.660687e-024; m[337] = 7.565657e-024; m[543] = 0.000000e+000; m[-543] = 0.000000e+000; m[545] = 0.000000e+000; m[-545] = 0.000000e+000; m[551] = 0.000000e+000; m[553] = 1.253738e-020; m[555] = 1.000000e+016; m[557] = 0.000000e+000; m[-450000000] = 0.000000e+000; m[-490000000] = 0.000000e+000; m[-460000000] = 0.000000e+000; m[-470000000] = 0.000000e+000; m[1103] = 0.000000e+000; m[-1103] = 0.000000e+000; m[1112] = 4.388081e-024; m[-1112] = 4.388081e-024; m[1116] = 1.880606e-024; m[-1116] = 1.880606e-024; m[1118] = 2.194041e-024; m[-1118] = 2.194041e-024; m[1212] = 4.388081e-024; m[-1212] = 4.388081e-024; m[1214] = 5.485102e-024; m[-1214] = 5.485102e-024; m[1216] = 1.880606e-024; m[-1216] = 1.880606e-024; m[1218] = 1.462694e-024; m[-1218] = 1.462694e-024; m[2101] = 0.000000e+000; m[-2101] = 0.000000e+000; m[2103] = 0.000000e+000; m[-2103] = 0.000000e+000; m[2116] = 4.388081e-024; m[-2116] = 4.388081e-024; m[2118] = 2.194041e-024; m[-2118] = 2.194041e-024; m[2122] = 4.388081e-024; m[-2122] = 4.388081e-024; m[2124] = 5.485102e-024; m[-2124] = 5.485102e-024; m[2126] = 1.880606e-024; m[-2126] = 1.880606e-024; m[2128] = 1.462694e-024; m[-2128] = 1.462694e-024; m[2203] = 0.000000e+000; m[-2203] = 0.000000e+000; m[2216] = 4.388081e-024; m[-2216] = 4.388081e-024; m[2218] = 2.194041e-024; m[-2218] = 2.194041e-024; m[2222] = 4.388081e-024; m[-2222] = 4.388081e-024; m[2226] = 1.880606e-024; m[-2226] = 1.880606e-024; m[2228] = 2.194041e-024; m[-2228] = 2.194041e-024; m[3101] = 0.000000e+000; m[-3101] = 0.000000e+000; m[3103] = 0.000000e+000; m[-3103] = 0.000000e+000; m[3114] = 1.670589e-023; m[-3114] = 1.670589e-023; m[3116] = 5.485102e-024; m[-3116] = 5.485102e-024; m[3118] = 3.656734e-024; m[-3118] = 3.656734e-024; m[3124] = 4.219309e-023; m[-3124] = 4.219309e-023; m[3126] = 8.227653e-024; m[-3126] = 8.227653e-024; m[3128] = 3.291061e-024; m[-3128] = 3.291061e-024; m[3201] = 0.000000e+000; m[-3201] = 0.000000e+000; 
m[3203] = 0.000000e+000; m[-3203] = 0.000000e+000; m[3214] = 1.828367e-023; m[-3214] = 1.828367e-023; m[3216] = 5.485102e-024; m[-3216] = 5.485102e-024; m[3218] = 3.656734e-024; m[-3218] = 3.656734e-024; m[3224] = 1.838582e-023; m[-3224] = 1.838582e-023; m[3226] = 5.485102e-024; m[-3226] = 5.485102e-024; m[3228] = 3.656734e-024; m[-3228] = 3.656734e-024; m[3303] = 0.000000e+000; m[-3303] = 0.000000e+000; m[3314] = 6.648608e-023; m[-3314] = 6.648608e-023; m[3324] = 7.233101e-023; m[-3324] = 7.233101e-023; m[4101] = 0.000000e+000; m[-4101] = 0.000000e+000; m[4103] = 0.000000e+000; m[-4103] = 0.000000e+000; m[4114] = 0.000000e+000; m[-4114] = 0.000000e+000; m[4201] = 0.000000e+000; m[-4201] = 0.000000e+000; m[4203] = 0.000000e+000; m[-4203] = 0.000000e+000; m[4214] = 3.291061e-022; m[-4214] = 3.291061e-022; m[4224] = 0.000000e+000; m[-4224] = 0.000000e+000; m[4301] = 0.000000e+000; m[-4301] = 0.000000e+000; m[4303] = 0.000000e+000; m[-4303] = 0.000000e+000; m[4314] = 0.000000e+000; m[-4314] = 0.000000e+000; m[4324] = 0.000000e+000; m[-4324] = 0.000000e+000; m[4334] = 0.000000e+000; m[-4334] = 0.000000e+000; m[4403] = 0.000000e+000; m[-4403] = 0.000000e+000; m[4412] = 3.335641e-013; m[-4412] = 3.335641e-013; m[4414] = 3.335641e-013; m[-4414] = 3.335641e-013; m[4422] = 3.335641e-013; m[-4422] = 3.335641e-013; m[4424] = 3.335641e-013; m[-4424] = 3.335641e-013; m[4432] = 3.335641e-013; m[-4432] = 3.335641e-013; m[4434] = 3.335641e-013; m[-4434] = 3.335641e-013; m[4444] = 3.335641e-013; m[-4444] = 3.335641e-013; m[5101] = 0.000000e+000; m[-5101] = 0.000000e+000; m[5103] = 0.000000e+000; m[-5103] = 0.000000e+000; m[5114] = 0.000000e+000; m[-5114] = 0.000000e+000; m[5142] = 1.290893e-012; m[-5142] = 1.290893e-012; m[5201] = 0.000000e+000; m[-5201] = 0.000000e+000; m[5203] = 0.000000e+000; m[-5203] = 0.000000e+000; m[5214] = 0.000000e+000; m[-5214] = 0.000000e+000; m[5224] = 0.000000e+000; m[-5224] = 0.000000e+000; m[5242] = 1.290893e-012; m[-5242] = 1.290893e-012; m[5301] = 
0.000000e+000; m[-5301] = 0.000000e+000; m[5303] = 0.000000e+000; m[-5303] = 0.000000e+000; m[5314] = 0.000000e+000; m[-5314] = 0.000000e+000; m[5324] = 0.000000e+000; m[-5324] = 0.000000e+000; m[5334] = 0.000000e+000; m[-5334] = 0.000000e+000; m[5342] = 1.290893e-012; m[-5342] = 1.290893e-012; m[5401] = 0.000000e+000; m[-5401] = 0.000000e+000; m[5403] = 0.000000e+000; m[-5403] = 0.000000e+000; m[5412] = 1.290893e-012; m[-5412] = 1.290893e-012; m[5414] = 1.290893e-012; m[-5414] = 1.290893e-012; m[5422] = 1.290893e-012; m[-5422] = 1.290893e-012; m[5424] = 1.290893e-012; m[-5424] = 1.290893e-012; m[5432] = 1.290893e-012; m[-5432] = 1.290893e-012; m[5434] = 1.290893e-012; m[-5434] = 1.290893e-012; m[5442] = 1.290893e-012; m[-5442] = 1.290893e-012; m[5444] = 1.290893e-012; m[-5444] = 1.290893e-012; m[5503] = 0.000000e+000; m[-5503] = 0.000000e+000; m[5512] = 1.290893e-012; m[-5512] = 1.290893e-012; m[5514] = 1.290893e-012; m[-5514] = 1.290893e-012; m[5522] = 1.290893e-012; m[-5522] = 1.290893e-012; m[5524] = 1.290893e-012; m[-5524] = 1.290893e-012; m[5532] = 1.290893e-012; m[-5532] = 1.290893e-012; m[5534] = 1.290893e-012; m[-5534] = 1.290893e-012; m[5542] = 1.290893e-012; m[-5542] = 1.290893e-012; m[5544] = 1.290893e-012; m[-5544] = 1.290893e-012; m[5554] = 1.290893e-012; m[-5554] = 1.290893e-012; m[10022] = 0.000000e+000; m[10111] = 2.483820e-024; m[10113] = 4.635297e-024; m[10115] = 2.541360e-024; m[10211] = 2.483820e-024; m[-10211] = 2.483820e-024; m[10213] = 4.635297e-024; m[-10213] = 4.635297e-024; m[10215] = 2.541360e-024; m[-10215] = 2.541360e-024; m[9010221] = 1.316424e-023; m[10223] = 1.828367e-024; m[10225] = 0.000000e+000; m[10315] = 3.538775e-024; m[-10315] = 3.538775e-024; m[10325] = 3.538775e-024; m[-10325] = 3.538775e-024; m[10331] = 5.265698e-024; m[10333] = 0.000000e+000; m[10335] = 0.000000e+000; m[10443] = 0.000000e+000; m[10541] = 0.000000e+000; m[-10541] = 0.000000e+000; m[10543] = 0.000000e+000; m[-10543] = 0.000000e+000; m[10551] = 
1.000000e+016; m[10553] = 0.000000e+000; m[10555] = 0.000000e+000; m[11112] = 0.000000e+000; m[-11112] = 0.000000e+000; m[11114] = 2.194041e-024; m[-11114] = 2.194041e-024; m[11116] = 1.880606e-024; m[-11116] = 1.880606e-024; m[11212] = 1.880606e-024; m[-11212] = 1.880606e-024; m[11216] = 0.000000e+000; m[-11216] = 0.000000e+000; m[12112] = 1.880606e-024; m[-12112] = 1.880606e-024; m[12114] = 2.194041e-024; m[-12114] = 2.194041e-024; m[12116] = 5.063171e-024; m[-12116] = 5.063171e-024; m[12118] = 0.000000e+000; m[-12118] = 0.000000e+000; m[12122] = 0.000000e+000; m[-12122] = 0.000000e+000; m[12126] = 1.880606e-024; m[-12126] = 1.880606e-024; m[12212] = 1.880606e-024; m[-12212] = 1.880606e-024; m[12214] = 2.194041e-024; m[-12214] = 2.194041e-024; m[12216] = 5.063171e-024; m[-12216] = 5.063171e-024; m[12218] = 0.000000e+000; m[-12218] = 0.000000e+000; m[12222] = 0.000000e+000; m[-12222] = 0.000000e+000; m[12224] = 2.194041e-024; m[-12224] = 2.194041e-024; m[12226] = 1.880606e-024; m[-12226] = 1.880606e-024; m[13112] = 6.582122e-024; m[-13112] = 6.582122e-024; m[13114] = 1.097020e-023; m[-13114] = 1.097020e-023; m[13116] = 5.485102e-024; m[-13116] = 5.485102e-024; m[13122] = 1.316424e-023; m[-13122] = 1.316424e-023; m[13124] = 1.097020e-023; m[-13124] = 1.097020e-023; m[13126] = 6.928549e-024; m[-13126] = 6.928549e-024; m[13212] = 6.582122e-024; m[-13212] = 6.582122e-024; m[13214] = 1.097020e-023; m[-13214] = 1.097020e-023; m[13216] = 5.485102e-024; m[-13216] = 5.485102e-024; m[13222] = 6.582122e-024; m[-13222] = 6.582122e-024; m[13224] = 1.097020e-023; m[-13224] = 1.097020e-023; m[13226] = 5.485102e-024; m[-13226] = 5.485102e-024; m[13314] = 2.742551e-023; m[-13314] = 2.742551e-023; m[13316] = 0.000000e+000; m[-13316] = 0.000000e+000; m[13324] = 2.742551e-023; m[-13324] = 2.742551e-023; m[13326] = 0.000000e+000; m[-13326] = 0.000000e+000; m[14122] = 1.828367e-022; m[-14122] = 1.828367e-022; m[14124] = 0.000000e+000; m[-14124] = 0.000000e+000; m[10221] = 
2.194040e-024; m[20223] = 2.742551e-023; m[20315] = 2.384827e-024; m[-20315] = 2.384827e-024; m[20325] = 2.384827e-024; m[-20325] = 2.384827e-024; m[20333] = 1.185968e-023; m[20543] = 0.000000e+000; m[-20543] = 0.000000e+000; m[20553] = 1.000000e+016; m[20555] = 0.000000e+000; m[21112] = 2.632849e-024; m[-21112] = 2.632849e-024; m[21114] = 3.291061e-024; m[-21114] = 3.291061e-024; m[21212] = 2.632849e-024; m[-21212] = 2.632849e-024; m[21214] = 6.582122e-024; m[-21214] = 6.582122e-024; m[22112] = 4.388081e-024; m[-22112] = 4.388081e-024; m[22114] = 3.291061e-024; m[-22114] = 3.291061e-024; m[22122] = 2.632849e-024; m[-22122] = 2.632849e-024; m[22124] = 6.582122e-024; m[-22124] = 6.582122e-024; m[22212] = 4.388081e-024; m[-22212] = 4.388081e-024; m[22214] = 3.291061e-024; m[-22214] = 3.291061e-024; m[22222] = 2.632849e-024; m[-22222] = 2.632849e-024; m[22224] = 3.291061e-024; m[-22224] = 3.291061e-024; m[23112] = 7.313469e-024; m[-23112] = 7.313469e-024; m[23114] = 2.991874e-024; m[-23114] = 2.991874e-024; m[23122] = 4.388081e-024; m[-23122] = 4.388081e-024; m[23124] = 6.582122e-024; m[-23124] = 6.582122e-024; m[23126] = 3.291061e-024; m[-23126] = 3.291061e-024; m[23212] = 7.313469e-024; m[-23212] = 7.313469e-024; m[23214] = 2.991874e-024; m[-23214] = 2.991874e-024; m[23222] = 7.313469e-024; m[-23222] = 7.313469e-024; m[23224] = 2.991874e-024; m[-23224] = 2.991874e-024; m[23314] = 0.000000e+000; m[-23314] = 0.000000e+000; m[23324] = 0.000000e+000; m[-23324] = 0.000000e+000; m[30113] = 2.742551e-024; m[30213] = 2.742551e-024; m[-30213] = 2.742551e-024; m[30223] = 2.991874e-024; m[30313] = 2.056913e-024; m[-30313] = 2.056913e-024; m[30323] = 2.056913e-024; m[-30323] = 2.056913e-024; m[30343] = 0.000000e+000; m[-30343] = 0.000000e+000; m[30353] = 0.000000e+000; m[-30353] = 0.000000e+000; m[30363] = 0.000000e+000; m[-30363] = 0.000000e+000; m[30411] = 0.000000e+000; m[-30411] = 0.000000e+000; m[30413] = 0.000000e+000; m[-30413] = 0.000000e+000; m[30421] = 0.000000e+000; 
m[-30421] = 0.000000e+000; m[30423] = 0.000000e+000; m[-30423] = 0.000000e+000; m[30443] = 2.789035e-023; m[30553] = 0.000000e+000; m[31114] = 1.880606e-024; m[-31114] = 1.880606e-024; m[31214] = 4.388081e-024; m[-31214] = 4.388081e-024; m[32112] = 4.388081e-024; m[-32112] = 4.388081e-024; m[32114] = 1.880606e-024; m[-32114] = 1.880606e-024; m[32124] = 4.388081e-024; m[-32124] = 4.388081e-024; m[32212] = 4.388081e-024; m[-32212] = 4.388081e-024; m[32214] = 1.880606e-024; m[-32214] = 1.880606e-024; m[32224] = 1.880606e-024; m[-32224] = 1.880606e-024; m[33122] = 1.880606e-023; m[-33122] = 1.880606e-023; m[33314] = 0.000000e+000; m[-33314] = 0.000000e+000; m[33324] = 0.000000e+000; m[-33324] = 0.000000e+000; m[41214] = 0.000000e+000; m[-41214] = 0.000000e+000; m[42112] = 6.582122e-024; m[-42112] = 6.582122e-024; m[42124] = 0.000000e+000; m[-42124] = 0.000000e+000; m[42212] = 6.582122e-024; m[-42212] = 6.582122e-024; m[43122] = 2.194041e-024; m[-43122] = 2.194041e-024; m[52114] = 0.000000e+000; m[-52114] = 0.000000e+000; m[52214] = 0.000000e+000; m[-52214] = 0.000000e+000; m[53122] = 4.388081e-024; m[-53122] = 4.388081e-024; m[100111] = 1.645531e-024; m[100113] = 2.123265e-024; m[100211] = 1.645531e-024; m[-100211] = 1.645531e-024; m[100213] = 2.123265e-024; m[-100213] = 2.123265e-024; m[100221] = 1.196749e-023; m[100223] = 3.871836e-024; m[100225] = 0.000000e+000; m[100311] = 0.000000e+000; m[-100311] = 0.000000e+000; m[100313] = 2.837122e-024; m[-100313] = 2.837122e-024; m[100315] = 0.000000e+000; m[-100315] = 0.000000e+000; m[100321] = 0.000000e+000; m[-100321] = 0.000000e+000; m[100323] = 2.837122e-024; m[-100323] = 2.837122e-024; m[100325] = 0.000000e+000; m[-100325] = 0.000000e+000; m[100331] = 0.000000e+000; m[100333] = 4.388081e-024; m[100335] = 3.291061e-024; m[100441] = 0.000000e+000; m[100551] = 0.000000e+000; m[100553] = 1.495937e-020; m[100555] = 1.000000e+016; m[100557] = 0.000000e+000; m[110551] = 1.000000e+016; m[110553] = 0.000000e+000; m[110555] = 
0.000000e+000; m[120553] = 1.000000e+016; m[120555] = 0.000000e+000; m[130553] = 0.000000e+000; m[200111] = 3.134344e-024; m[200211] = 3.134344e-024; m[-200211] = 3.134344e-024; m[200551] = 0.000000e+000; m[200553] = 2.502708e-020; m[200555] = 0.000000e+000; m[210551] = 0.000000e+000; m[210553] = 0.000000e+000; m[220553] = 0.000000e+000; m[300553] = 4.701516e-023; m[9000221] = 0.000000e+000; m[9000443] = 1.265793e-023; m[9000553] = 5.983747e-024; m[9010443] = 8.438618e-024; m[9010553] = 8.331800e-024; m[9020221] = 6.038644e-024; m[9020443] = 1.530726e-023; m[9060225] = 4.388081e-024; m[9070225] = 2.056913e-024; m[1000001] = 0.000000e+000; m[-1000001] = 0.000000e+000; m[1000002] = 0.000000e+000; m[-1000002] = 0.000000e+000; m[1000003] = 0.000000e+000; m[-1000003] = 0.000000e+000; m[1000004] = 0.000000e+000; m[-1000004] = 0.000000e+000; m[1000005] = 0.000000e+000; m[-1000005] = 0.000000e+000; m[1000006] = 0.000000e+000; m[-1000006] = 0.000000e+000; m[1000011] = 0.000000e+000; m[-1000011] = 0.000000e+000; m[1000012] = 0.000000e+000; m[-1000012] = 0.000000e+000; m[1000013] = 0.000000e+000; m[-1000013] = 0.000000e+000; m[1000014] = 0.000000e+000; m[-1000014] = 0.000000e+000; m[1000015] = 0.000000e+000; m[-1000015] = 0.000000e+000; m[1000016] = 0.000000e+000; m[-1000016] = 0.000000e+000; m[1000021] = 0.000000e+000; m[1000022] = 0.000000e+000; m[1000023] = 0.000000e+000; m[1000024] = 0.000000e+000; m[-1000024] = 0.000000e+000; m[1000025] = 0.000000e+000; m[1000035] = 0.000000e+000; m[1000037] = 0.000000e+000; m[-1000037] = 0.000000e+000; m[1000039] = 0.000000e+000; m[2000001] = 0.000000e+000; m[-2000001] = 0.000000e+000; m[2000002] = 0.000000e+000; m[-2000002] = 0.000000e+000; m[2000003] = 0.000000e+000; m[-2000003] = 0.000000e+000; m[2000004] = 0.000000e+000; m[-2000004] = 0.000000e+000; m[2000005] = 0.000000e+000; m[-2000005] = 0.000000e+000; m[2000006] = 0.000000e+000; m[-2000006] = 0.000000e+000; m[2000011] = 0.000000e+000; m[-2000011] = 0.000000e+000; m[2000012] = 
0.000000e+000; m[-2000012] = 0.000000e+000; m[2000013] = 0.000000e+000; m[-2000013] = 0.000000e+000; m[2000014] = 0.000000e+000; m[-2000014] = 0.000000e+000; m[2000015] = 0.000000e+000; m[-2000015] = 0.000000e+000; m[2000016] = 0.000000e+000; m[-2000016] = 0.000000e+000; m[3000111] = 0.000000e+000; m[3000113] = 0.000000e+000; m[3000211] = 0.000000e+000; m[-3000211] = 0.000000e+000; m[3000213] = 0.000000e+000; m[-3000213] = 0.000000e+000; m[3000221] = 0.000000e+000; m[3000223] = 0.000000e+000; m[3000331] = 0.000000e+000; m[3100021] = 0.000000e+000; m[3100111] = 0.000000e+000; m[3100113] = 0.000000e+000; m[3200111] = 0.000000e+000; m[3200113] = 0.000000e+000; m[3300113] = 0.000000e+000; m[3400113] = 0.000000e+000; m[4000001] = 0.000000e+000; m[-4000001] = 0.000000e+000; m[4000002] = 0.000000e+000; m[-4000002] = 0.000000e+000; m[4000011] = 0.000000e+000; m[-4000011] = 0.000000e+000; m[4000012] = 0.000000e+000; m[-4000012] = 0.000000e+000; m[5000039] = 0.000000e+000; m[9900012] = 0.000000e+000; m[9900014] = 0.000000e+000; m[9900016] = 0.000000e+000; m[9900023] = 0.000000e+000; m[9900024] = 0.000000e+000; m[-9900024] = 0.000000e+000; m[9900041] = 0.000000e+000; m[-9900041] = 0.000000e+000; m[9900042] = 0.000000e+000; m[-9900042] = 0.000000e+000; m[1027013000] = 0.000000e+000; m[1012006000] = 0.000000e+000; m[1063029000] = 0.000000e+000; m[1014007000] = 0.000000e+000; m[1016008000] = 0.000000e+000; m[1028014000] = 0.000000e+000; m[1065029000] = 0.000000e+000; m[1009004000] = 0.000000e+000; m[1019009000] = 0.000000e+000; m[1056026000] = 0.000000e+000; m[1207082000] = 0.000000e+000; m[1208082000] = 0.000000e+000; m[1029014000] = 0.000000e+000; m[1206082000] = 0.000000e+000; m[1054026000] = 0.000000e+000; m[1018008000] = 0.000000e+000; m[1030014000] = 0.000000e+000; m[1057026000] = 0.000000e+000; m[1204082000] = 0.000000e+000; m[-99000000] = 0.000000e+000; m[1028013000] = 0.000000e+000; m[1040018000] = 0.000000e+000; m[1011005000] = 0.000000e+000; m[1012005000] = 
0.000000e+000; m[1013006000] = 0.000000e+000; m[1014006000] = 0.000000e+000; m[1052024000] = 0.000000e+000; m[1024012000] = 0.000000e+000; m[1026012000] = 0.000000e+000; m[1027012000] = 0.000000e+000; m[1015007000] = 0.000000e+000; m[1022010000] = 0.000000e+000; m[1058028000] = 0.000000e+000; m[1060028000] = 0.000000e+000; m[1062028000] = 0.000000e+000; m[1064028000] = 0.000000e+000; m[1007003000] = 0.000000e+000; m[1025012000] = 0.000000e+000; m[1053024000] = 0.000000e+000; m[1055025000] = 0.000000e+000; m[1008004000] = 0.000000e+000; m[1010004000] = 0.000000e+000; m[1010005000] = 0.000000e+000; m[1016007000] = 0.000000e+000; m[1017008000] = 0.000000e+000; m[1019008000] = 0.000000e+000; m[1023010000] = 0.000000e+000; m[1024011000] = 0.000000e+000; m[1031015000] = 0.000000e+000; m[1039017000] = 0.000000e+000; m[1040017000] = 0.000000e+000; m[1036018000] = 0.000000e+000; m[1050024000] = 0.000000e+000; m[1054024000] = 0.000000e+000; m[1059026000] = 0.000000e+000; m[1061028000] = 0.000000e+000; m[1063028000] = 0.000000e+000; m[1092042000] = 0.000000e+000; m[1095042000] = 0.000000e+000; m[1096042000] = 0.000000e+000; m[1097042000] = 0.000000e+000; m[1098042000] = 0.000000e+000; m[1100042000] = 0.000000e+000; m[1108046000] = 0.000000e+000; // Added by hand: m[9902210] = 0.000000e+000; //diffractive p-state -> assume no lifetime return true; } private: /// @name Histograms //@{ Histo1DPtr _h_mult_total; // full kinematic range Histo1DPtr _h_mult_eta[5]; // in eta bins Histo1DPtr _h_mult_pt[5]; // in pT bins Histo1DPtr _h_dndeta; // density dn/deta Histo1DPtr _h_dndpt; // density dn/dpT //@} /// @name Private variables double _p_min; double _pt_min; double _eta_min; double _eta_max; double _maxlft; /// Count selected events CounterPtr _sumW; map _partLftMap; // Map }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCB_2014_I1281685); } diff --git a/analyses/pluginMC/MC_HFJETS.cc b/analyses/pluginMC/MC_HFJETS.cc --- a/analyses/pluginMC/MC_HFJETS.cc +++ 
b/analyses/pluginMC/MC_HFJETS.cc @@ -1,151 +1,151 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/PrimaryHadrons.hh" #include "Rivet/Projections/HeavyHadrons.hh" namespace Rivet { class MC_HFJETS : public Analysis { public: /// Constructor MC_HFJETS() : Analysis("MC_HFJETS") { } public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - FastJets fj(FinalState(-5, 5), FastJets::ANTIKT, 0.6); + FastJets fj(FinalState((Cuts::etaIn(-5, 5))), FastJets::ANTIKT, 0.6); fj.useInvisibles(); declare(fj, "Jets"); declare(HeavyHadrons(Cuts::abseta < 5 && Cuts::pT > 500*MeV), "BCHadrons"); book(_h_ptCJetLead ,"ptCJetLead", linspace(5, 0, 20, false) + logspace(25, 20, 200)); book(_h_ptCHadrLead ,"ptCHadrLead", linspace(5, 0, 10, false) + logspace(25, 10, 200)); book(_h_ptFracC ,"ptfracC", 50, 0, 1.5); book(_h_eFracC ,"efracC", 50, 0, 1.5); book(_h_ptBJetLead ,"ptBJetLead", linspace(5, 0, 20, false) + logspace(25, 20, 200)); book(_h_ptBHadrLead ,"ptBHadrLead", linspace(5, 0, 10, false) + logspace(25, 10, 200)); book(_h_ptFracB ,"ptfracB", 50, 0, 1.5); book(_h_eFracB ,"efracB", 50, 0, 1.5); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Get jets and heavy hadrons const Jets& jets = apply(event, "Jets").jetsByPt(); const Particles bhadrons = sortByPt(apply(event, "BCHadrons").bHadrons()); const Particles chadrons = sortByPt(apply(event, "BCHadrons").cHadrons()); MSG_DEBUG("# b hadrons = " << bhadrons.size() << ", # c hadrons = " << chadrons.size()); // Max HF hadron--jet axis dR to be regarded as a jet tag const double MAX_DR = 0.3; // Tag the leading b and c jets with a deltaR < 0.3 match // b-tagged jet are excluded from also being considered as c-tagged /// @todo Do this again with the ghost match? 
MSG_DEBUG("Getting b/c-tags"); bool gotLeadingB = false, gotLeadingC = false;; for (const Jet& j : jets) { if (!gotLeadingB) { FourMomentum leadBJet, leadBHadr; double dRmin = MAX_DR; for (const Particle& b : bhadrons) { const double dRcand = min(dRmin, deltaR(j, b)); if (dRcand < dRmin) { dRmin = dRcand; leadBJet = j.momentum(); leadBHadr = b.momentum(); MSG_DEBUG("New closest b-hadron jet tag candidate: dR = " << dRmin << " for jet pT = " << j.pT()/GeV << " GeV, " << " b hadron pT = " << b.pT()/GeV << " GeV, PID = " << b.pid()); } } if (dRmin < MAX_DR) { // A jet has been tagged, so fill the histos and break the loop _h_ptBJetLead->fill(leadBJet.pT()/GeV, weight); _h_ptBHadrLead->fill(leadBHadr.pT()/GeV, weight); _h_ptFracB->fill(leadBHadr.pT() / leadBJet.pT(), weight); _h_eFracB->fill(leadBHadr.E() / leadBJet.E(), weight); gotLeadingB = true; continue; // escape this loop iteration so the same jet isn't c-tagged } } if (!gotLeadingC) { FourMomentum leadCJet, leadCHadr; double dRmin = MAX_DR; for (const Particle& c : chadrons) { const double dRcand = min(dRmin, deltaR(j, c)); if (dRcand < dRmin) { dRmin = dRcand; leadCJet = j.momentum(); leadCHadr = c.momentum(); MSG_DEBUG("New closest c-hadron jet tag candidate: dR = " << dRmin << " for jet pT = " << j.pT()/GeV << " GeV, " << " c hadron pT = " << c.pT()/GeV << " GeV, PID = " << c.pid()); } } if (dRmin < MAX_DR) { // A jet has been tagged, so fill the histos and break the loop _h_ptCJetLead->fill(leadCJet.pT()/GeV, weight); _h_ptCHadrLead->fill(leadCHadr.pT()/GeV, weight); _h_ptFracC->fill(leadCHadr.pT() / leadCJet.pT(), weight); _h_eFracC->fill(leadCHadr.E() / leadCJet.E(), weight); gotLeadingB = true; } } // If we've found both a leading b and a leading c jet, break the loop over jets if (gotLeadingB && gotLeadingC) break; } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_ptCJetLead); normalize(_h_ptCHadrLead); normalize(_h_ptFracC); normalize(_h_eFracC); normalize(_h_ptBJetLead); 
normalize(_h_ptBHadrLead); normalize(_h_ptFracB); normalize(_h_eFracB); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_ptCJetLead, _h_ptCHadrLead, _h_ptFracC, _h_eFracC; Histo1DPtr _h_ptBJetLead, _h_ptBHadrLead, _h_ptFracB, _h_eFracB; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_HFJETS); } diff --git a/analyses/pluginMC/MC_LEADJETUE.cc b/analyses/pluginMC/MC_LEADJETUE.cc --- a/analyses/pluginMC/MC_LEADJETUE.cc +++ b/analyses/pluginMC/MC_LEADJETUE.cc @@ -1,167 +1,167 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief MC validation analysis for underlying event in jet events /// @author Andy Buckley class MC_LEADJETUE : public Analysis { public: /// Constructor MC_LEADJETUE() : Analysis("MC_LEADJETUE") { } /// @name Analysis methods //@{ // Book histograms void init() { // Final state for the jet finding - const FinalState fsj(-4.0, 4.0, 0.0*GeV); + const FinalState fsj((Cuts::etaIn(-4.0, 4.0))); declare(fsj, "FSJ"); declare(FastJets(fsj, FastJets::KT, 0.7), "Jets"); // Charged final state for the distributions - const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.5*GeV)); declare(cfs, "CFS"); const double maxpt1 = 500.0; book(_hist_pnchg ,"trans-nchg", 50, 0.0, maxpt1); book(_hist_pmaxnchg ,"trans-maxnchg", 50, 0.0, maxpt1); book(_hist_pminnchg ,"trans-minnchg", 50, 0.0, maxpt1); book(_hist_pcptsum ,"trans-ptsum", 50, 0.0, maxpt1); book(_hist_pmaxcptsum ,"trans-maxptsum", 50, 0.0, maxpt1); book(_hist_pmincptsum ,"trans-minptsum", 50, 0.0, maxpt1); book(_hist_pcptave ,"trans-ptavg", 50, 0.0, maxpt1); } // Do the analysis void analyze(const Event& e) { const FinalState& fsj = apply(e, "FSJ"); if (fsj.particles().empty()) { MSG_DEBUG("Failed multiplicity cut"); vetoEvent; } const FastJets& jetpro = apply(e, "Jets"); 
const Jets jets = jetpro.jetsByPt(); MSG_DEBUG("Jet multiplicity = " << jets.size()); // Require the leading jet to be within |eta| < 2 if (jets.size() < 1 || fabs(jets[0].eta()) > 2) { MSG_DEBUG("Failed jet cut"); vetoEvent; } const double jetphi = jets[0].phi(); const double jetpT = jets[0].pT(); MSG_DEBUG("Leading jet: pT = " << jetpT/GeV << " GeV" << ", eta = " << jets[0].eta() << ", phi = " << jetphi); // Get the final states to work with for filling the distributions const FinalState& cfs = apply(e, "CFS"); size_t numOverall(0), numToward(0), numTrans1(0), numTrans2(0), numAway(0); double ptSumOverall(0.0), ptSumToward(0.0), ptSumTrans1(0.0), ptSumTrans2(0.0), ptSumAway(0.0); double ptMaxOverall(0.0), ptMaxToward(0.0), ptMaxTrans1(0.0), ptMaxTrans2(0.0), ptMaxAway(0.0); // Calculate all the charged stuff for (const Particle& p : cfs.particles()) { const double dPhi = deltaPhi(p.phi(), jetphi); const double pT = p.pT(); const double phi = p.phi(); const double rotatedphi = phi - jetphi; ptSumOverall += pT; ++numOverall; if (pT > ptMaxOverall) ptMaxOverall = pT; if (dPhi < PI/3.0) { ptSumToward += pT; ++numToward; if (pT > ptMaxToward) ptMaxToward = pT; } else if (dPhi < 2*PI/3.0) { if (rotatedphi <= PI) { ptSumTrans1 += pT; ++numTrans1; if (pT > ptMaxTrans1) ptMaxTrans1 = pT; } else { ptSumTrans2 += pT; ++numTrans2; if (pT > ptMaxTrans2) ptMaxTrans2 = pT; } } else { ptSumAway += pT; ++numAway; if (pT > ptMaxAway) ptMaxAway = pT; } } // Fill the histograms //_hist_tnchg->fill(jetpT/GeV, numToward/(4*PI/3)); _hist_pnchg->fill(jetpT/GeV, (numTrans1+numTrans2)/(4*PI/3)); _hist_pmaxnchg->fill(jetpT/GeV, (numTrans1>numTrans2 ? 
numTrans1 : numTrans2)/(2*PI/3)); _hist_pminnchg->fill(jetpT/GeV, (numTrans1fill(jetpT/GeV, abs(numTrans1-numTrans2)/(2*PI/3)); //_hist_anchg->fill(jetpT/GeV, numAway/(4*PI/3)); //_hist_tcptsum->fill(jetpT/GeV, ptSumToward/GeV/(4*PI/3)); _hist_pcptsum->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(4*PI/3)); _hist_pmaxcptsum->fill(jetpT/GeV, (ptSumTrans1>ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/GeV/(2*PI/3)); _hist_pmincptsum->fill(jetpT/GeV, (ptSumTrans1fill(jetpT/GeV, fabs(ptSumTrans1-ptSumTrans2)/GeV/(2*PI/3)); //_hist_acptsum->fill(jetpT/GeV, ptSumAway/GeV/(4*PI/3)); //if (numToward > 0) { // _hist_tcptave->fill(jetpT/GeV, ptSumToward/GeV/numToward); // _hist_tcptmax->fill(jetpT/GeV, ptMaxToward/GeV); //} if ((numTrans1+numTrans2) > 0) { _hist_pcptave->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(numTrans1+numTrans2)); //_hist_pcptmax->fill(jetpT/GeV, (ptMaxTrans1 > ptMaxTrans2 ? ptMaxTrans1 : ptMaxTrans2)/GeV); } //if (numAway > 0) { // _hist_acptave->fill(jetpT/GeV, ptSumAway/GeV/numAway); // _hist_acptmax->fill(jetpT/GeV, ptMaxAway/GeV); //} } void finalize() { // } private: Profile1DPtr _hist_pnchg; Profile1DPtr _hist_pmaxnchg; Profile1DPtr _hist_pminnchg; Profile1DPtr _hist_pcptsum; Profile1DPtr _hist_pmaxcptsum; Profile1DPtr _hist_pmincptsum; Profile1DPtr _hist_pcptave; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_LEADJETUE); } diff --git a/analyses/pluginMC/MC_PDFS.cc b/analyses/pluginMC/MC_PDFS.cc --- a/analyses/pluginMC/MC_PDFS.cc +++ b/analyses/pluginMC/MC_PDFS.cc @@ -1,96 +1,96 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" // #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// Generic analysis looking at various distributions of final state particles class MC_PDFS : public Analysis { public: /// Constructor MC_PDFS() : Analysis("MC_PDFS") { } public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Projections - // declare(ChargedFinalState(-5.0, 5.0, 
500*MeV), "CFS"); + // declare(ChargedFinalState((Cuts::etaIn(-5.0, 5.0) && Cuts::pT >= 500*MeV)), "CFS"); // Histograms book(_histPdfX ,"PdfX", logspace(50, 0.000001, 1.0)); book(_histPdfXmin ,"PdfXmin", logspace(50, 0.000001, 1.0)); book(_histPdfXmax ,"PdfXmax", logspace(50, 0.000001, 1.0)); book(_histPdfQ ,"PdfQ", 50, 0.0, 30.0); book(_histPdfXQ,"PdfXQ", logspace(50, 0.000001, 1.0), linspace(50, 0.0, 30.0)); //book( _histPdfTrackptVsX ,"PdfTrackptVsX", logspace(50, 0.000001, 1.0)); //book( _histPdfTrackptVsQ ,"PdfTrackptVsQ", 50, 0.0, 30.0); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // This analysis needs a valid HepMC PDF info object to do anything if (event.genEvent()->pdf_info() == 0) vetoEvent; HepMC::PdfInfo pdfi = *(event.genEvent()->pdf_info()); MSG_DEBUG("PDF Q = " << pdfi.scalePDF() << " for (id, x) = " << "(" << pdfi.id1() << ", " << pdfi.x1() << ") " << "(" << pdfi.id2() << ", " << pdfi.x2() << ")"); _histPdfX->fill(pdfi.x1(), weight); _histPdfX->fill(pdfi.x2(), weight); _histPdfXmin->fill(std::min(pdfi.x1(), pdfi.x2()), weight); _histPdfXmax->fill(std::max(pdfi.x1(), pdfi.x2()), weight); _histPdfQ->fill(pdfi.scalePDF(), weight); // always in GeV? _histPdfXQ->fill(pdfi.x1(), pdfi.scalePDF(), weight); // always in GeV? _histPdfXQ->fill(pdfi.x2(), pdfi.scalePDF(), weight); // always in GeV? 
// const FinalState& cfs = apply(event, "CFS"); // for (const Particle& p : cfs.particles()) { // if (fabs(eta) < 2.5 && p.pT() > 10*GeV) { // _histPdfTrackptVsX->fill(pdfi.x1(), p.pT()/GeV, weight); // _histPdfTrackptVsX->fill(pdfi.x2(), p.pT()/GeV, weight); // _histPdfTrackptVsQ->fill(pdfi.scalePDF(), p.pT()/GeV, weight); // } // } } /// Finalize void finalize() { scale(_histPdfX, 1/sumOfWeights()); scale(_histPdfXmin, 1/sumOfWeights()); scale(_histPdfXmax, 1/sumOfWeights()); scale(_histPdfQ, 1/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _histPdfX, _histPdfXmin, _histPdfXmax, _histPdfQ; Histo2DPtr _histPdfXQ; // Profile1DPtr _histPdfTrackptVsX, _histPdfTrackptVsQ; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_PDFS); } diff --git a/analyses/pluginMC/MC_PHOTONINC.cc b/analyses/pluginMC/MC_PHOTONINC.cc --- a/analyses/pluginMC/MC_PHOTONINC.cc +++ b/analyses/pluginMC/MC_PHOTONINC.cc @@ -1,109 +1,109 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief MC validation analysis for single photon events class MC_PHOTONINC : public Analysis { public: /// Default constructor MC_PHOTONINC() : Analysis("MC_PHOTONINC") { } /// @name Analysis methods //@{ /// Book histograms void init() { // General FS - FinalState fs(-5.0, 5.0); + FinalState fs((Cuts::etaIn(-5.0, 5.0))); declare(fs, "FS"); // Get leading photon - LeadingParticlesFinalState photonfs(FinalState(-1.0, 1.0, 30.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 30.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS for isolation excludes the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "JetFS"); book(_h_photon_pT ,"photon_pT", logspace(50, 30.0, 0.5*(sqrtS()>0.?sqrtS():14000.))); book(_h_photon_pT_lin 
,"photon_pT_lin", 50, 0.0, 70.0); book(_h_photon_y ,"photon_y", 50, -5.0, 5.0); } /// Do the analysis void analyze(const Event& e) { // Get the photon const Particles photons = apply(e, "LeadingPhoton").particles(); if (photons.size() != 1) { vetoEvent; } const FourMomentum photon = photons.front().momentum(); // Get all charged particles const FinalState& fs = apply(e, "JetFS"); if (fs.empty()) { vetoEvent; } // Passed cuts, so get the weight const double weight = 1.0; // Isolate photon by ensuring that a 0.4 cone around it contains less than 7% of the photon's energy const double egamma = photon.E(); double econe = 0.0; for (const Particle& p : fs.particles()) { if (deltaR(photon, p.momentum()) < 0.4) { econe += p.E(); // Veto as soon as E_cone gets larger if (econe/egamma > 0.07) { vetoEvent; } } } _h_photon_pT->fill(photon.pT(),weight); _h_photon_pT_lin->fill(photon.pT(),weight); _h_photon_y->fill(photon.rapidity(),weight); } // Finalize void finalize() { scale(_h_photon_pT, crossSectionPerEvent()); scale(_h_photon_pT_lin, crossSectionPerEvent()); scale(_h_photon_y, crossSectionPerEvent()); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_photon_pT; Histo1DPtr _h_photon_pT_lin; Histo1DPtr _h_photon_y; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_PHOTONINC); } diff --git a/analyses/pluginMC/MC_PHOTONJETS.cc b/analyses/pluginMC/MC_PHOTONJETS.cc --- a/analyses/pluginMC/MC_PHOTONJETS.cc +++ b/analyses/pluginMC/MC_PHOTONJETS.cc @@ -1,122 +1,122 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetAnalysis.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief MC validation analysis for photon + jets events class MC_PHOTONJETS : public MC_JetAnalysis { public: /// Default constructor MC_PHOTONJETS() : MC_JetAnalysis("MC_PHOTONJETS", 4, "Jets") { } /// @name Analysis methods //@{ /// Book histograms void init() { // 
General FS - FinalState fs(-5.0, 5.0); + FinalState fs((Cuts::etaIn(-5.0, 5.0))); declare(fs, "FS"); // Get leading photon - LeadingParticlesFinalState photonfs(FinalState(-1.0, 1.0, 30.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 30.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS for jets excludes the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "JetFS"); FastJets jetpro(vfs, FastJets::ANTIKT, 0.4); declare(jetpro, "Jets"); book(_h_photon_jet1_deta ,"photon_jet1_deta", 50, -5.0, 5.0); book(_h_photon_jet1_dphi ,"photon_jet1_dphi", 20, 0.0, M_PI); book(_h_photon_jet1_dR ,"photon_jet1_dR", 25, 0.5, 7.0); MC_JetAnalysis::init(); } /// Do the analysis void analyze(const Event& e) { // Get the photon /// @todo share IsolatedPhoton projection between all MC_*PHOTON* analyses const Particles photons = apply(e, "LeadingPhoton").particles(); if (photons.size() != 1) { vetoEvent; } const FourMomentum photon = photons.front().momentum(); // Get all charged particles const FinalState& fs = apply(e, "JetFS"); if (fs.empty()) { vetoEvent; } // Passed cuts, so get the weight const double weight = 1.0; // Isolate photon by ensuring that a 0.4 cone around it contains less than 7% of the photon's energy const double egamma = photon.E(); double econe = 0.0; for (const Particle& p : fs.particles()) { if (deltaR(photon, p.momentum()) < 0.4) { econe += p.E(); // Veto as soon as E_cone gets larger if (econe/egamma > 0.07) { vetoEvent; } } } const Jets& jets = apply(e, "Jets").jetsByPt(_jetptcut); if (jets.size()>0) { _h_photon_jet1_deta->fill(photon.eta()-jets[0].eta(), weight); _h_photon_jet1_dphi->fill(mapAngle0ToPi(photon.phi()-jets[0].phi()), weight); _h_photon_jet1_dR->fill(deltaR(photon, jets[0].momentum()), weight); } MC_JetAnalysis::analyze(e); } // Finalize void finalize() { scale(_h_photon_jet1_deta, crossSectionPerEvent()); 
scale(_h_photon_jet1_dphi, crossSectionPerEvent()); scale(_h_photon_jet1_dR, crossSectionPerEvent()); MC_JetAnalysis::finalize(); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_photon_jet1_deta; Histo1DPtr _h_photon_jet1_dphi; Histo1DPtr _h_photon_jet1_dR; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_PHOTONJETS); } diff --git a/analyses/pluginMC/MC_PHOTONKTSPLITTINGS.cc b/analyses/pluginMC/MC_PHOTONKTSPLITTINGS.cc --- a/analyses/pluginMC/MC_PHOTONKTSPLITTINGS.cc +++ b/analyses/pluginMC/MC_PHOTONKTSPLITTINGS.cc @@ -1,93 +1,93 @@ // -*- C++ -*- #include "Rivet/Analyses/MC_JetSplittings.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief MC validation analysis for photon + jets events class MC_PHOTONKTSPLITTINGS : public MC_JetSplittings { public: /// Default constructor MC_PHOTONKTSPLITTINGS() : MC_JetSplittings("MC_PHOTONKTSPLITTINGS", 4, "Jets") { } /// @name Analysis methods //@{ /// Book histograms void init() { // General FS - FinalState fs(-5.0, 5.0); + FinalState fs((Cuts::etaIn(-5.0, 5.0))); declare(fs, "FS"); // Get leading photon - LeadingParticlesFinalState photonfs(FinalState(-1.0, 1.0, 30.0*GeV)); + LeadingParticlesFinalState photonfs(FinalState((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 30.0*GeV))); photonfs.addParticleId(PID::PHOTON); declare(photonfs, "LeadingPhoton"); // FS for jets excludes the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); declare(vfs, "JetFS"); FastJets jetpro(vfs, FastJets::KT, 0.6); declare(jetpro, "Jets"); MC_JetSplittings::init(); } /// Do the analysis void analyze(const Event& e) { // Get the photon const Particles photons = apply(e, "LeadingPhoton").particles(); if (photons.size() != 1) { vetoEvent; } const FourMomentum photon = photons.front().momentum(); // Get all charged particles const FinalState& fs = apply(e, "JetFS"); if 
(fs.empty()) { vetoEvent; } // Isolate photon by ensuring that a 0.4 cone around it contains less than 7% of the photon's energy const double egamma = photon.E(); double econe = 0.0; for (const Particle& p : fs.particles()) { if (deltaR(photon, p.momentum()) < 0.4) { econe += p.E(); // Veto as soon as E_cone gets larger if (econe/egamma > 0.07) { vetoEvent; } } } MC_JetSplittings::analyze(e); } // Finalize void finalize() { MC_JetSplittings::finalize(); } //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_PHOTONKTSPLITTINGS); } diff --git a/analyses/pluginMC/MC_SUSY.cc b/analyses/pluginMC/MC_SUSY.cc --- a/analyses/pluginMC/MC_SUSY.cc +++ b/analyses/pluginMC/MC_SUSY.cc @@ -1,318 +1,318 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { /// @brief MC validation analysis for SUSY events /// @author Andy Buckley class MC_SUSY : public Analysis { public: /// Constructor MC_SUSY() : Analysis("MC_SUSY") { } /// @name Analysis methods //@{ // Book histograms void init() { // Basic final state - const FinalState fs(-4.0, 4.0, 10*GeV); + const FinalState fs((Cuts::etaIn(-4.0, 4.0) && Cuts::pT >= 10*GeV)); // Tracks and jets declare(ChargedFinalState(fs), "Tracks"); declare(FastJets(fs, FastJets::ANTIKT, 0.7), "Jets"); IdentifiedFinalState photonfs(fs); photonfs.acceptId(PID::PHOTON); declare(photonfs, "AllPhotons"); IdentifiedFinalState efs(fs); efs.acceptIdPair(PID::ELECTRON); declare(efs, "Electrons"); IdentifiedFinalState mufs(fs); mufs.acceptIdPair(PID::MUON); declare(mufs, "Muons"); MissingMomentum missing(fs); declare(missing, "MET"); LeadingParticlesFinalState lpfs(fs); lpfs.addParticleIdPair(PID::ELECTRON); lpfs.addParticleIdPair(PID::MUON); 
declare(lpfs, "LeadingParticles"); book(_hist_n_trk ,"n-trk", 50, 0.5, 300.5); book(_hist_phi_trk ,"phi-trk", 50, -PI, PI); book(_hist_eta_trk ,"eta-trk", 50, -4, 4); book(_hist_pt_trk ,"pt-trk", 100, 0.0, 1500); book(_hist_n_jet ,"n-jet", 21, -0.5, 20.5); book(_hist_phi_jet ,"phi-jet", 50, -PI, PI); book(_hist_eta_jet ,"eta-jet", 50, -4, 4); book(_hist_pt_jet ,"pt-jet", 100, 0.0, 1500); book(_hist_n_e ,"n-e", 11, -0.5, 10.5); book(_hist_phi_e ,"phi-e", 50, -PI, PI); book(_hist_eta_e ,"eta-e", 50, -4, 4); book(_hist_pt_e ,"pt-e", 100, 0.0, 500); book(_hist_n_mu ,"n-mu", 11, -0.5, 10.5); book(_hist_phi_mu ,"phi-mu", 50, -PI, PI); book(_hist_eta_mu ,"eta-mu", 50, -4, 4); book(_hist_pt_mu ,"pt-mu", 100, 0.0, 500); book(_hist_n_gamma ,"n-gamma", 11, -0.5, 10.5); book(_hist_phi_gamma ,"phi-gamma", 50, -PI, PI); book(_hist_eta_gamma ,"eta-gamma", 50, -4, 4); book(_hist_pt_gamma ,"pt-gamma", 100, 0.0, 500); book(_hist_n_gammaiso ,"n-gamma-iso", 11, -0.5, 10.5); book(_hist_phi_gammaiso ,"phi-gamma-iso", 50, -PI, PI); book(_hist_eta_gammaiso ,"eta-gamma-iso", 50, -4, 4); book(_hist_pt_gammaiso ,"pt-gamma-iso", 100, 0.0, 500); book(_hist_met ,"Etmiss", 100, 0.0, 1500); book(_hist_mll_ossf_ee ,"mll-ossf-ee", 50, 0.0, 500); book(_hist_mll_ossf_mumu ,"mll-ossf-mumu", 50, 0.0, 500); book(_hist_mll_osof_emu ,"mll-osof-emu", 50, 0.0, 500); book(_hist_mll_all_ossf_ee ,"mll-all-ossf-ee", 50, 0.0, 500); book(_hist_mll_all_ossf_mumu ,"mll-all-ossf-mumu", 50, 0.0, 500); book(_hist_mll_all_osof_emu ,"mll-all-osof-emu", 50, 0.0, 500); book(_hist_mll_2_ossf_ee ,"mll-2-ossf-ee", 50, 0.0, 500); book(_hist_mll_2_ossf_mumu ,"mll-2-ossf-mumu", 50, 0.0, 500); book(_hist_mll_2_osof_emu ,"mll-2-osof-emu", 50, 0.0, 500); /// @todo LSP eta, pT, phi, mass: no reliable cross-scenario LSP PID but /// maybe plot for all of chi^0_1, gravitino, sneutrino, gluino, ... or /// identify the LSP as any PID::isSUSY (?) particle with status = 1? 
} // Do the analysis void analyze(const Event& evt) { const FinalState& tracks = apply(evt, "Tracks"); if (tracks.particles().empty()) { MSG_DEBUG("Failed multiplicity cut"); vetoEvent; } // Fill track histos _hist_n_trk->fill(tracks.size()); for (const Particle& t : tracks.particles()) { const FourMomentum& p = t.momentum(); _hist_phi_trk->fill(mapAngleMPiToPi(p.phi())); _hist_eta_trk->fill(p.eta()); _hist_pt_trk->fill(p.pT()/GeV); } // Get jets and fill jet histos const FastJets& jetpro = apply(evt, "Jets"); const Jets jets = jetpro.jetsByPt(); MSG_DEBUG("Jet multiplicity = " << jets.size()); _hist_n_jet->fill(jets.size()); for (const Jet& j : jets) { const FourMomentum& pj = j.momentum(); _hist_phi_jet->fill(mapAngleMPiToPi(pj.phi())); _hist_eta_jet->fill(pj.eta()); _hist_pt_jet->fill(pj.pT()/GeV); } /// @todo Resum photons around electrons // Fill final state electron/positron histos const FinalState& efs = apply(evt, "Electrons"); _hist_n_e->fill(efs.size()); vector epluses, eminuses; for (const Particle& e : efs.particles()) { const FourMomentum& p = e.momentum(); _hist_phi_e->fill(mapAngleMPiToPi(p.phi())); _hist_eta_e->fill(p.eta()); _hist_pt_e->fill(p.pT()/GeV); // Add sufficiently hard leptons to collections for m_ll histo if (p.pT()/GeV > 20) { if (PID::charge3(e.pid()) > 0) epluses += p; else eminuses += p; } } /// @todo Resum photons around muons // Fill final state muon/antimuon histos const FinalState& mufs = apply(evt, "Muons"); _hist_n_mu->fill(mufs.size()); vector mupluses, muminuses; for (const Particle& mu : mufs.particles()) { const FourMomentum& p = mu.momentum(); _hist_phi_mu->fill(mapAngleMPiToPi(p.phi())); _hist_eta_mu->fill(p.eta()); _hist_pt_mu->fill(p.pT()/GeV); // Add sufficiently hard leptons to collections for m_ll histo if (p.pT()/GeV > 20) { if (PID::charge3(mu.pid()) > 0) mupluses += p; else muminuses += p; } } // Fill final state non-isolated photon histos const FinalState& allphotonfs = apply(evt, "AllPhotons"); 
_hist_n_gamma->fill(allphotonfs.size()); Particles isolatedphotons; for (const Particle& ph : allphotonfs.particles()) { const FourMomentum& p = ph.momentum(); _hist_phi_gamma->fill(mapAngleMPiToPi(p.phi())); _hist_eta_gamma->fill(p.eta()); _hist_pt_gamma->fill(p.pT()/GeV); // Select isolated photons bool isolated = true; for (const Jet& j : jets) { if (deltaR(j.momentum(), p) < 0.2) { isolated = false; break; } } if (isolated) isolatedphotons += ph; } // Fill final state isolated photon histos _hist_n_gammaiso->fill(isolatedphotons.size()); for (const Particle& ph_iso : isolatedphotons) { const FourMomentum& p = ph_iso.momentum(); _hist_phi_gammaiso->fill(mapAngleMPiToPi(p.phi())); _hist_eta_gammaiso->fill(p.eta()); _hist_pt_gammaiso->fill(p.pT()/GeV); } // Calculate and fill missing Et histos const MissingMomentum& met = apply(evt, "MET"); _hist_met->fill(met.vectorEt().mod()/GeV); // Choose highest-pT leptons of each sign and flavour for dilepton mass edges const FinalState& lpfs = apply(evt, "LeadingParticles"); bool eplus_ok(false), eminus_ok(false), muplus_ok(false), muminus_ok(false); FourMomentum peplus, peminus, pmuplus, pmuminus; for (const Particle& p : lpfs.particles()) { // Only use leptons above 20 GeV if (p.pT()/GeV < 20) continue; // Identify the PID const PdgId pid = p.pid(); if (pid == PID::ELECTRON) { eminus_ok = true; peminus = p.momentum(); } else if (pid == PID::POSITRON) { eplus_ok = true; peplus = p.momentum(); } else if (pid == PID::MUON) { muminus_ok = true; pmuminus = p.momentum(); } else if (pid == PID::ANTIMUON) { muplus_ok = true; pmuplus = p.momentum(); } else { throw Error("Unexpected particle type in leading particles FS!"); } } // m_ee if (eminus_ok && eplus_ok) { const double m_ee = FourMomentum(peplus + peminus).mass(); _hist_mll_ossf_ee->fill(m_ee/GeV); if (epluses.size() == 1 && eminuses.size() == 1) _hist_mll_2_ossf_ee->fill(m_ee/GeV); } // m_mumu if (muminus_ok && muplus_ok) { const double m_mumu = FourMomentum(pmuplus + 
pmuminus).mass(); _hist_mll_ossf_mumu->fill(m_mumu/GeV); if (mupluses.size() == 1 && muminuses.size() == 1) _hist_mll_2_ossf_mumu->fill(m_mumu/GeV); } // m_emu (both configurations) if (eminus_ok && muplus_ok) { const double m_emu = FourMomentum(pmuplus + peminus).mass(); _hist_mll_osof_emu->fill(m_emu/GeV); if (mupluses.size() == 1 && eminuses.size() == 1) _hist_mll_2_osof_emu->fill(m_emu/GeV); } if (muminus_ok && eplus_ok) { const double m_mue = FourMomentum(peplus + pmuminus).mass(); _hist_mll_osof_emu->fill(m_mue/GeV); if (epluses.size() == 1 && muminuses.size() == 1) _hist_mll_2_osof_emu->fill(m_mue/GeV); } // m_ll plots using *all* electrons, positrons, muons and antimuons // m_ee for (const FourMomentum& peplus : epluses) { for (const FourMomentum& peminus : eminuses) { const double m_ee = FourMomentum(peplus + peminus).mass(); _hist_mll_all_ossf_ee->fill(m_ee/GeV); } } // m_mumu for (const FourMomentum& pmuplus : mupluses) { for (const FourMomentum& pmuminus : muminuses) { const double m_mumu = FourMomentum(pmuplus + pmuminus).mass(); _hist_mll_all_ossf_mumu->fill(m_mumu/GeV); } } // m_emu (both configurations) for (const FourMomentum& pmuplus : mupluses) { for (const FourMomentum& peminus : eminuses) { const double m_emu = FourMomentum(pmuplus + peminus).mass(); _hist_mll_all_osof_emu->fill(m_emu/GeV); } } for (const FourMomentum& peplus : epluses) { for (const FourMomentum& pmuminus : muminuses) { const double m_mue = FourMomentum(peplus + pmuminus).mass(); _hist_mll_all_osof_emu->fill(m_mue/GeV); } } } void finalize() { /// @todo Normalisations } //@} private: Histo1DPtr _hist_n_trk, _hist_phi_trk, _hist_eta_trk, _hist_pt_trk; Histo1DPtr _hist_n_jet, _hist_phi_jet, _hist_eta_jet, _hist_pt_jet; Histo1DPtr _hist_n_e, _hist_phi_e, _hist_eta_e, _hist_pt_e; Histo1DPtr _hist_n_mu, _hist_phi_mu, _hist_eta_mu, _hist_pt_mu; Histo1DPtr _hist_n_gamma, _hist_phi_gamma, _hist_eta_gamma, _hist_pt_gamma; Histo1DPtr _hist_n_gammaiso, _hist_phi_gammaiso, 
_hist_eta_gammaiso, _hist_pt_gammaiso; Histo1DPtr _hist_met; Histo1DPtr _hist_mll_2_ossf_ee, _hist_mll_2_ossf_mumu, _hist_mll_2_osof_emu; Histo1DPtr _hist_mll_ossf_ee, _hist_mll_ossf_mumu, _hist_mll_osof_emu; Histo1DPtr _hist_mll_all_ossf_ee, _hist_mll_all_ossf_mumu, _hist_mll_all_osof_emu; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_SUSY); } diff --git a/analyses/pluginMC/MC_TTBAR.cc b/analyses/pluginMC/MC_TTBAR.cc --- a/analyses/pluginMC/MC_TTBAR.cc +++ b/analyses/pluginMC/MC_TTBAR.cc @@ -1,300 +1,300 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/ChargedLeptons.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/AnalysisLoader.hh" namespace Rivet { class MC_TTBAR : public Analysis { public: /// Minimal constructor MC_TTBAR() : Analysis("MC_TTBAR") { } /// @name Analysis methods //@{ /// Set up projections and book histograms void init() { // A FinalState is used to select particles within |eta| < 4.2 and with pT // > 30 GeV, out of which the ChargedLeptons projection picks only the // electrons and muons, to be accessed later as "LFS". - ChargedLeptons lfs(FinalState(-4.2, 4.2, 30*GeV)); + ChargedLeptons lfs(FinalState((Cuts::etaIn(-4.2, 4.2) && Cuts::pT >= 30*GeV))); declare(lfs, "LFS"); // A second FinalState is used to select all particles in |eta| < 4.2, // with no pT cut. This is used to construct jets and measure missing // transverse energy. 
- VetoedFinalState fs(FinalState(-4.2, 4.2, 0*GeV)); + VetoedFinalState fs(FinalState((Cuts::etaIn(-4.2, 4.2)))); fs.addVetoOnThisFinalState(lfs); declare(FastJets(fs, FastJets::ANTIKT, 0.6), "Jets"); declare(MissingMomentum(fs), "MissingET"); // Booking of histograms book(_h_njets ,"jet_mult", 11, -0.5, 10.5); // book(_h_jet_1_pT ,"jet_1_pT", logspace(50, 20.0, 500.0)); book(_h_jet_2_pT ,"jet_2_pT", logspace(50, 20.0, 400.0)); book(_h_jet_3_pT ,"jet_3_pT", logspace(50, 20.0, 300.0)); book(_h_jet_4_pT ,"jet_4_pT", logspace(50, 20.0, 200.0)); book(_h_jet_HT ,"jet_HT", logspace(50, 100.0, 2000.0)); // book(_h_bjet_1_pT ,"jetb_1_pT", logspace(50, 20.0, 400.0)); book(_h_bjet_2_pT ,"jetb_2_pT", logspace(50, 20.0, 300.0)); // book(_h_ljet_1_pT ,"jetl_1_pT", logspace(50, 20.0, 400.0)); book(_h_ljet_2_pT ,"jetl_2_pT", logspace(50, 20.0, 300.0)); // book(_h_W_mass ,"W_mass", 75, 30, 180); book(_h_t_mass ,"t_mass", 150, 130, 430); book(_h_t_mass_W_cut ,"t_mass_W_cut", 150, 130, 430); // book(_h_jetb_1_jetb_2_dR ,"jetb_1_jetb_2_dR", 20, 0.0, 7.0); book(_h_jetb_1_jetb_2_deta ,"jetb_1_jetb_2_deta", 20, 0.0, 7.0); book(_h_jetb_1_jetb_2_dphi ,"jetb_1_jetb_2_dphi", 20, 0.0, M_PI); book(_h_jetb_1_jetl_1_dR ,"jetb_1_jetl_1_dR", 20, 0.0, 7.0); book(_h_jetb_1_jetl_1_deta ,"jetb_1_jetl_1_deta", 20, 0.0, 7.0); book(_h_jetb_1_jetl_1_dphi ,"jetb_1_jetl_1_dphi", 20, 0.0, M_PI); book(_h_jetl_1_jetl_2_dR ,"jetl_1_jetl_2_dR", 20, 0.0, 7.0); book(_h_jetl_1_jetl_2_deta ,"jetl_1_jetl_2_deta", 20, 0.0, 7.0); book(_h_jetl_1_jetl_2_dphi ,"jetl_1_jetl_2_dphi", 20, 0.0, M_PI); book(_h_jetb_1_W_dR ,"jetb_1_W_dR", 20, 0.0, 7.0); book(_h_jetb_1_W_deta ,"jetb_1_W_deta", 20, 0.0, 7.0); book(_h_jetb_1_W_dphi ,"jetb_1_W_dphi", 20, 0.0, M_PI); book(_h_jetb_1_l_dR ,"jetb_1_l_dR", 20, 0.0, 7.0); book(_h_jetb_1_l_deta ,"jetb_1_l_deta", 20, 0.0, 7.0); book(_h_jetb_1_l_dphi ,"jetb_1_l_dphi", 20, 0.0, M_PI); book(_h_jetb_1_l_mass ,"jetb_1_l_mass", 40, 0.0, 500.0); } void analyze(const Event& event) { const double 
weight = 1.0; // Use the "LFS" projection to require at least one hard charged // lepton. This is an experimental signature for the leptonically decaying // W. This helps to reduce pure QCD backgrounds. const ChargedLeptons& lfs = apply(event, "LFS"); MSG_DEBUG("Charged lepton multiplicity = " << lfs.chargedLeptons().size()); for (const Particle& lepton : lfs.chargedLeptons()) { MSG_DEBUG("Lepton pT = " << lepton.pT()); } if (lfs.chargedLeptons().empty()) { MSG_DEBUG("Event failed lepton multiplicity cut"); vetoEvent; } // Use a missing ET cut to bias toward events with a hard neutrino from // the leptonically decaying W. This helps to reduce pure QCD backgrounds. const MissingMomentum& met = apply(event, "MissingET"); MSG_DEBUG("Vector ET = " << met.vectorEt().mod() << " GeV"); if (met.vectorEt().mod() < 30*GeV) { MSG_DEBUG("Event failed missing ET cut"); vetoEvent; } // Use the "Jets" projection to check that there are at least 4 jets of // any pT. Getting the jets sorted by pT ensures that the first jet is the // hardest, and so on. We apply no pT cut here only because we want to // plot all jet pTs to help optimise our jet pT cut. const FastJets& jetpro = apply(event, "Jets"); const Jets alljets = jetpro.jetsByPt(); if (alljets.size() < 4) { MSG_DEBUG("Event failed jet multiplicity cut"); vetoEvent; } // Update passed-cuts counter and fill all-jets histograms _h_jet_1_pT->fill(alljets[0].pT()/GeV, weight); _h_jet_2_pT->fill(alljets[1].pT()/GeV, weight); _h_jet_3_pT->fill(alljets[2].pT()/GeV, weight); _h_jet_4_pT->fill(alljets[3].pT()/GeV, weight); // Insist that the hardest 4 jets pass pT hardness cuts. If we don't find // at least 4 such jets, we abandon this event. 
const Jets jets = jetpro.jetsByPt(30*GeV); _h_njets->fill(jets.size(), weight); double ht = 0.0; for (const Jet& j : jets) { ht += j.pT(); } _h_jet_HT->fill(ht/GeV, weight); if (jets.size() < 4 || jets[0].pT() < 60*GeV || jets[1].pT() < 50*GeV || jets[3].pT() < 30*GeV) { MSG_DEBUG("Event failed jet cuts"); vetoEvent; } // Sort the jets into b-jets and light jets. We expect one hard b-jet from // each top decay, so our 4 hardest jets should include two b-jets. The // Jet::bTagged() method is equivalent to perfect experimental // b-tagging, in a generator-independent way. Jets bjets, ljets; for (const Jet& jet : jets) { // // Don't count jets that overlap with the hard leptons bool isolated = true; for (const Particle& lepton : lfs.chargedLeptons()) { if (deltaR(jet.momentum(), lepton.momentum()) < 0.3) { isolated = false; break; } } if (!isolated) { MSG_DEBUG("Jet failed lepton isolation cut"); break; } if (jet.bTagged()) { bjets.push_back(jet); } else { ljets.push_back(jet); } } MSG_DEBUG("Number of b-jets = " << bjets.size()); MSG_DEBUG("Number of l-jets = " << ljets.size()); if (bjets.size() != 2) { MSG_DEBUG("Event failed post-lepton-isolation b-tagging cut"); vetoEvent; } if (ljets.size() < 2) { MSG_DEBUG("Event failed since not enough light jets remaining after lepton-isolation"); vetoEvent; } // Plot the pTs of the identified jets. _h_bjet_1_pT->fill(bjets[0].pT(), weight); _h_bjet_2_pT->fill(bjets[1].pT(), weight); _h_ljet_1_pT->fill(ljets[0].pT(), weight); _h_ljet_2_pT->fill(ljets[1].pT(), weight); // Construct the hadronically decaying W momentum 4-vector from pairs of // non-b-tagged jets. The pair which best matches the W mass is used. We start // with an always terrible 4-vector estimate which should always be "beaten" by // a real jet pair. 
FourMomentum W(10*(sqrtS()>0.?sqrtS():14000.), 0, 0, 0); for (size_t i = 0; i < ljets.size()-1; ++i) { for (size_t j = i + 1; j < ljets.size(); ++j) { const FourMomentum Wcand = ljets[i].momentum() + ljets[j].momentum(); MSG_TRACE(i << "," << j << ": candidate W mass = " << Wcand.mass()/GeV << " GeV, vs. incumbent candidate with " << W.mass()/GeV << " GeV"); if (fabs(Wcand.mass() - 80.4*GeV) < fabs(W.mass() - 80.4*GeV)) { W = Wcand; } } } MSG_DEBUG("Candidate W mass = " << W.mass() << " GeV"); // There are two b-jets with which this can be combined to make the // hadronically decaying top, one of which is correct and the other is // not... but we have no way to identify which is which, so we construct // both possible top momenta and fill the histograms with both. const FourMomentum t1 = W + bjets[0].momentum(); const FourMomentum t2 = W + bjets[1].momentum(); _h_W_mass->fill(W.mass(), weight); _h_t_mass->fill(t1.mass(), weight); _h_t_mass->fill(t2.mass(), weight); // Placing a cut on the well-known W mass helps to reduce backgrounds if (inRange(W.mass()/GeV, 75.0, 85.0)) { MSG_DEBUG("W found with mass " << W.mass()/GeV << " GeV"); _h_t_mass_W_cut->fill(t1.mass(), weight); _h_t_mass_W_cut->fill(t2.mass(), weight); _h_jetb_1_jetb_2_dR->fill(deltaR(bjets[0].momentum(), bjets[1].momentum()),weight); _h_jetb_1_jetb_2_deta->fill(fabs(bjets[0].eta()-bjets[1].eta()),weight); _h_jetb_1_jetb_2_dphi->fill(deltaPhi(bjets[0].momentum(),bjets[1].momentum()),weight); _h_jetb_1_jetl_1_dR->fill(deltaR(bjets[0].momentum(), ljets[0].momentum()),weight); _h_jetb_1_jetl_1_deta->fill(fabs(bjets[0].eta()-ljets[0].eta()),weight); _h_jetb_1_jetl_1_dphi->fill(deltaPhi(bjets[0].momentum(),ljets[0].momentum()),weight); _h_jetl_1_jetl_2_dR->fill(deltaR(ljets[0].momentum(), ljets[1].momentum()),weight); _h_jetl_1_jetl_2_deta->fill(fabs(ljets[0].eta()-ljets[1].eta()),weight); _h_jetl_1_jetl_2_dphi->fill(deltaPhi(ljets[0].momentum(),ljets[1].momentum()),weight); 
_h_jetb_1_W_dR->fill(deltaR(bjets[0].momentum(), W),weight); _h_jetb_1_W_deta->fill(fabs(bjets[0].eta()-W.eta()),weight); _h_jetb_1_W_dphi->fill(deltaPhi(bjets[0].momentum(),W),weight); FourMomentum l=lfs.chargedLeptons()[0].momentum(); _h_jetb_1_l_dR->fill(deltaR(bjets[0].momentum(), l),weight); _h_jetb_1_l_deta->fill(fabs(bjets[0].eta()-l.eta()),weight); _h_jetb_1_l_dphi->fill(deltaPhi(bjets[0].momentum(),l),weight); _h_jetb_1_l_mass->fill(FourMomentum(bjets[0].momentum()+l).mass(), weight); } } void finalize() { normalize(_h_njets); normalize(_h_jet_1_pT); normalize(_h_jet_2_pT); normalize(_h_jet_3_pT); normalize(_h_jet_4_pT); normalize(_h_jet_HT); normalize(_h_bjet_1_pT); normalize(_h_bjet_2_pT); normalize(_h_ljet_1_pT); normalize(_h_ljet_2_pT); normalize(_h_W_mass); normalize(_h_t_mass); normalize(_h_t_mass_W_cut); normalize(_h_jetb_1_jetb_2_dR); normalize(_h_jetb_1_jetb_2_deta); normalize(_h_jetb_1_jetb_2_dphi); normalize(_h_jetb_1_jetl_1_dR); normalize(_h_jetb_1_jetl_1_deta); normalize(_h_jetb_1_jetl_1_dphi); normalize(_h_jetl_1_jetl_2_dR); normalize(_h_jetl_1_jetl_2_deta); normalize(_h_jetl_1_jetl_2_dphi); normalize(_h_jetb_1_W_dR); normalize(_h_jetb_1_W_deta); normalize(_h_jetb_1_W_dphi); normalize(_h_jetb_1_l_dR); normalize(_h_jetb_1_l_deta); normalize(_h_jetb_1_l_dphi); normalize(_h_jetb_1_l_mass); } //@} private: // @name Histogram data members //@{ Histo1DPtr _h_njets; Histo1DPtr _h_jet_1_pT, _h_jet_2_pT, _h_jet_3_pT, _h_jet_4_pT; Histo1DPtr _h_jet_HT; Histo1DPtr _h_bjet_1_pT, _h_bjet_2_pT; Histo1DPtr _h_ljet_1_pT, _h_ljet_2_pT; Histo1DPtr _h_W_mass; Histo1DPtr _h_t_mass, _h_t_mass_W_cut; Histo1DPtr _h_jetb_1_jetb_2_dR, _h_jetb_1_jetb_2_deta, _h_jetb_1_jetb_2_dphi; Histo1DPtr _h_jetb_1_jetl_1_dR, _h_jetb_1_jetl_1_deta, _h_jetb_1_jetl_1_dphi; Histo1DPtr _h_jetl_1_jetl_2_dR, _h_jetl_1_jetl_2_deta, _h_jetl_1_jetl_2_dphi; Histo1DPtr _h_jetb_1_W_dR, _h_jetb_1_W_deta, _h_jetb_1_W_dphi; Histo1DPtr _h_jetb_1_l_dR, _h_jetb_1_l_deta, 
_h_jetb_1_l_dphi,_h_jetb_1_l_mass; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_TTBAR); } diff --git a/analyses/pluginPetra/TASSO_1990_S2148048.cc b/analyses/pluginPetra/TASSO_1990_S2148048.cc --- a/analyses/pluginPetra/TASSO_1990_S2148048.cc +++ b/analyses/pluginPetra/TASSO_1990_S2148048.cc @@ -1,136 +1,136 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class TASSO_1990_S2148048 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(TASSO_1990_S2148048); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - const ChargedFinalState cfs(-DBL_MAX, DBL_MAX, 0.1/GeV); + const ChargedFinalState cfs(Cuts::pT >= 0.1/GeV); declare(cfs, "CFS"); // Thrust and sphericity declare(Thrust(cfs), "Thrust"); declare(Sphericity(cfs), "Sphericity"); // Histos int offset = 0; switch (int(sqrtS()/GeV)) { case 14: offset = 0; break; case 22: offset = 1; break; case 35: offset = 2; break; case 44: offset = 3; break; } //book(_h_xp , 2, 1, 1+offset); book(_h_sphericity , 6, 1, 1+offset); book(_h_aplanarity , 7, 1, 1+offset); book(_h_thrust , 8, 1, 1+offset); } /// Perform the per-event analysis void analyze(const Event& event) { const ChargedFinalState& cfs = apply(event, "CFS"); //// Get beams and average beam momentum //const ParticlePair& beams = apply(event, "Beams").beams(); //const double meanBeamMom = ( beams.first.p3().mod() + //beams.second.p3().mod() ) / 2.0; // TASSO hadronic event selection TODO: move this into a trigger definition // See page 2 in publication // Condition 1) --- require at least 5 (4) 'good' tracks int nch = cfs.particles().size(); if ( (int(sqrtS()/GeV) > 27 && nch < 5) || (int(sqrtS()/GeV) <= 27 && nch < 4 ) ) { MSG_DEBUG("Failed # good tracks cut: " << nch); vetoEvent; } // Condition 2) --- // Condition 5) --- scalar 
momentum (not pT!!!) sum >= 0.265*s double momsum = 0.0; for (const Particle& p : cfs.particles()) { const double mom = p.p3().mod(); momsum += mom; } if (momsum <=0.265 * sqrtS()/GeV) { MSG_DEBUG("Failed pTsum cut: " << momsum << " < " << 0.265 * sqrtS()/GeV); vetoEvent; } // Raise counter for events that pass trigger conditions //_sumWPassed += 1.0; const Thrust& thrust = apply(event, "Thrust"); //const Vector3 & thrustAxis = thrust.thrustAxis (); //double theta = thrustAxis.theta(); //if ( fabs(cos(theta)) >= 0.8 ) { //MSG_DEBUG("Failed thrust angle cut: " << fabs(cos(theta))); //vetoEvent; //} const Sphericity& sphericity = apply(event, "Sphericity"); //// Fill histograms in order of appearance in paper //for (const Particle& p : cfs.particles()) { //// Get momentum and energy of each particle. //const Vector3 mom3 = p.p3(); //// Scaled momenta. //const double mom = mom3.mod(); //const double scaledMom = mom/meanBeamMom; //_h_xp->fill(scaledMom); //} // _h_sphericity->fill(sphericity.sphericity()); _h_aplanarity->fill(sphericity.aplanarity()); _h_thrust->fill(thrust.thrust()); } /// Normalise histograms etc., after the run void finalize() { //scale(_h_xp, _sumWPassed/(crossSection()*sumOfWeights())); normalize(_h_sphericity); normalize(_h_aplanarity); normalize(_h_thrust ); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_xp, _h_sphericity, _h_aplanarity, _h_thrust; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(TASSO_1990_S2148048); } diff --git a/analyses/pluginRHIC/STAR_2006_S6500200.cc b/analyses/pluginRHIC/STAR_2006_S6500200.cc --- a/analyses/pluginRHIC/STAR_2006_S6500200.cc +++ b/analyses/pluginRHIC/STAR_2006_S6500200.cc @@ -1,108 +1,108 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" namespace Rivet { /// @brief STAR identified hadron spectra in pp at 200 GeV class STAR_2006_S6500200 : public Analysis { public: /// Constructor 
STAR_2006_S6500200() : Analysis("STAR_2006_S6500200") { } /// Book projections and histograms void init() { - ChargedFinalState bbc1(-5.0,-3.3, 0.0*GeV); // beam-beam-counter trigger - ChargedFinalState bbc2( 3.3, 5.0, 0.0*GeV); // beam-beam-counter trigger + ChargedFinalState bbc1(Cuts::etaIn(-5.0,-3.3)); // beam-beam-counter trigger + ChargedFinalState bbc2(Cuts::etaIn( 3.3, 5.0)); // beam-beam-counter trigger declare(bbc1, "BBC1"); declare(bbc2, "BBC2"); IdentifiedFinalState pionfs(Cuts::abseta < 2.5 && Cuts::pT > 0.3*GeV); IdentifiedFinalState protonfs(Cuts::abseta < 2.5 && Cuts::pT > 0.4*GeV); pionfs.acceptIdPair(PID::PIPLUS); protonfs.acceptIdPair(PID::PROTON); declare(pionfs, "PionFS"); declare(protonfs, "ProtonFS"); book(_h_pT_piplus ,1, 1, 1); // full range pion binning book(_h_pT_piminus ,1, 2, 1); // full range pion binning book(_tmp_pT_piplus ,"TMP/pT_piplus", refData(2, 3, 1)); // pi histo compatible with more restricted proton binning book(_tmp_pT_piminus ,"TMP/pT_piminus", refData(2, 4, 1)); // pi histo compatible with more restricted proton binning book(_h_pT_proton ,1, 3, 1); book(_h_pT_antiproton ,1, 4, 1); book(_s_piminus_piplus, 2, 1, 1); book(_s_antipr_pr , 2, 2, 1); book(_s_pr_piplus , 2, 3, 1); book(_s_antipr_piminus, 2, 4, 1); book(_sumWeightSelected, "sumWeightSelected"); } /// Do the analysis void analyze(const Event& event) { const ChargedFinalState& bbc1 = apply(event, "BBC1"); const ChargedFinalState& bbc2 = apply(event, "BBC2"); if (bbc1.size() < 1 || bbc2.size() < 1) { MSG_DEBUG("Failed beam-beam-counter trigger"); vetoEvent; } const IdentifiedFinalState& pionfs = apply(event, "PionFS"); for (const Particle& p : pionfs.particles()) { if (p.absrap() < 0.5) { /// @todo Use a binned counter to avoid this bin width cancellation hack const double pT = p.pT() / GeV; ((p.pid() > 0) ? _h_pT_piplus : _h_pT_piminus)->fill(pT, 1.0/pT); ((p.pid() > 0) ? 
_tmp_pT_piplus : _tmp_pT_piminus)->fill(pT, 1.0/pT); } } const IdentifiedFinalState& protonfs = apply(event, "ProtonFS"); for (const Particle& p : protonfs.particles()) { if (p.absrap() < 0.5) { /// @todo Use a binned counter to avoid this bin width cancellation hack const double pT = p.pT() / GeV; ((p.pid() > 0) ? _h_pT_proton : _h_pT_antiproton)->fill(pT, 1.0/pT); } } _sumWeightSelected->fill(); } /// Finalize void finalize() { divide(_h_pT_piminus, _h_pT_piplus, _s_piminus_piplus); divide(_h_pT_antiproton, _h_pT_proton, _s_antipr_pr); divide(_h_pT_proton, _tmp_pT_piplus, _s_pr_piplus); divide(_h_pT_antiproton, _tmp_pT_piminus, _s_antipr_piminus); const YODA::Scatter1D factor = (1/(2*M_PI)) / *_sumWeightSelected; scale(_h_pT_piplus, factor); scale(_h_pT_piminus, factor); scale(_h_pT_proton, factor); scale(_h_pT_antiproton, factor); } private: CounterPtr _sumWeightSelected; Histo1DPtr _h_pT_piplus, _h_pT_piminus, _h_pT_proton, _h_pT_antiproton; Histo1DPtr _tmp_pT_piplus, _tmp_pT_piminus; Scatter2DPtr _s_piminus_piplus, _s_antipr_pr, _s_pr_piplus, _s_antipr_piminus; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(STAR_2006_S6500200); } diff --git a/analyses/pluginRHIC/STAR_2006_S6870392.cc b/analyses/pluginRHIC/STAR_2006_S6870392.cc --- a/analyses/pluginRHIC/STAR_2006_S6870392.cc +++ b/analyses/pluginRHIC/STAR_2006_S6870392.cc @@ -1,86 +1,86 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief STAR inclusive jet cross-section in pp at 200 GeV class STAR_2006_S6870392 : public Analysis { public: /// Constructor STAR_2006_S6870392() : Analysis("STAR_2006_S6870392") { } /// @name Analysis methods //@{ /// Book projections and histograms void init() { - FinalState fs(-2.0, 2.0); + FinalState fs((Cuts::etaIn(-2.0, 2.0))); declare(fs, "FS"); declare(FastJets(fs, FastJets::CDFMIDPOINT, 0.4, JetAlg::Muons::ALL, JetAlg::Invisibles::NONE, nullptr, 0.5), 
"MidpointJets"); book(_h_jet_pT_MB ,1, 1, 1); book(_h_jet_pT_HT ,2, 1, 1); } /// Do the analysis void analyze(const Event& event) { // Skip if the event is empty const FinalState& fs = apply(event, "FS"); if (fs.empty()) { MSG_DEBUG("Skipping event " << numEvents() << " because no final state found "); vetoEvent; } // Find jets const FastJets& jetpro = apply(event, "MidpointJets"); const Jets& jets = jetpro.jetsByPt(); if (!jets.empty()) { const Jet& j1 = jets.front(); if (inRange(fabs(j1.eta()), 0.2, 0.8)) { for (const Jet& j : jets) { const FourMomentum pj = j.momentum(); _h_jet_pT_MB->fill(pj.pT()); _h_jet_pT_HT->fill(pj.pT()); } } } } /// Finalize void finalize() { double normalisation = crossSection()/picobarn/sumOfWeights()/(2*0.6*2*M_PI); scale(_h_jet_pT_MB, normalisation); scale(_h_jet_pT_HT, normalisation); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_jet_pT_MB; Histo1DPtr _h_jet_pT_HT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(STAR_2006_S6870392); } diff --git a/analyses/pluginRHIC/STAR_2008_S7869363.cc b/analyses/pluginRHIC/STAR_2008_S7869363.cc --- a/analyses/pluginRHIC/STAR_2008_S7869363.cc +++ b/analyses/pluginRHIC/STAR_2008_S7869363.cc @@ -1,173 +1,173 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/LossyFinalState.hh" namespace Rivet { /// @todo Replace with SmearedParticles class STARRandomFilter { public: STARRandomFilter() { } // Return true to throw away a particle bool operator()(const Particle& p) { /// @todo Use a better RNG? size_t idx = int(floor(p.pT()/MeV/50)); if (idx > 11) idx = 11; return (rand()/static_cast(RAND_MAX) > _trkeff[idx]); } CmpState compare(const STARRandomFilter& other) const { return CmpState::GT; // @todo really? } private: const static double _trkeff[12]; }; // Here we have the track reconstruction efficiencies for tracks with pT from 0 to 600 MeV // in steps of 50 MeV. 
The efficiency is assumed to be 0.88 for pT >= 600 MeV const double STARRandomFilter::_trkeff[12] = {0,0,0.38,0.72,0.78,0.81,0.82,0.84,0.85,0.86,0.87,0.88}; class STAR_2008_S7869363 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor STAR_2008_S7869363() : Analysis("STAR_2008_S7869363") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - const ChargedFinalState cfs(-0.5, 0.5, 0.2*GeV); + const ChargedFinalState cfs((Cuts::etaIn(-0.5, 0.5) && Cuts::pT >= 0.2*GeV)); const LossyFinalState lfs(cfs, STARRandomFilter()); declare(lfs, "FS"); book(_h_dNch ,1, 1, 1); book(_h_dpT_Pi ,2, 1, 1); book(_h_dpT_Piplus ,2, 1, 2); book(_h_dpT_Kaon ,2, 1, 3); book(_h_dpT_Kaonplus ,2, 1, 4); book(_h_dpT_AntiProton ,2, 1, 5); book(_h_dpT_Proton ,2, 1, 6); // book(nCutsPassed, "nCutsPassed"); // book(nPi, "nPi"); // book(nPiPlus, "nPiPlus"); // book(nKaon, "nKaon"); // book(nKaonPlus, "nKaonPlus"); // book(nProton, "nProton"); // book(nAntiProton, "nAntiProton"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& charged = apply(event, "FS"); // Vertex reconstruction efficiencies as a function of charged multiplicity. // For events with more than 23 reconstructed tracks the efficiency is 100%. 
double vtxeffs[24] = { 0.000000,0.512667,0.739365,0.847131,0.906946,0.940922,0.959328,0.96997, 0.975838,0.984432,0.988311,0.990327,0.990758,0.995767,0.99412,0.992271, 0.996631,0.994802,0.99635,0.997384,0.998986,0.996441,0.994513,1.000000 }; double vtxeff = 1.0; if (charged.particles().size() < 24) { vtxeff = vtxeffs[charged.particles().size()]; } const double weight = vtxeff; for (const Particle& p : charged.particles()) { double pT = p.pT()/GeV; double y = p.rapidity(); if (fabs(y) < 0.1) { // nCutsPassed->fill(weight); const PdgId id = p.pid(); switch (id) { case -211: _h_dpT_Pi->fill(pT, weight/(TWOPI*pT*0.2)); // nPi->fill(weight); break; case 211: _h_dpT_Piplus->fill(pT, weight/(TWOPI*pT*0.2)); // nPiPlus->fill(weight); break; case -321: _h_dpT_Kaon->fill(pT, weight/(TWOPI*pT*0.2)); // nKaon->fill(weight); break; case 321: _h_dpT_Kaonplus->fill(pT, weight/(TWOPI*pT*0.2)); // nKaonPlus->fill(weight); break; case -2212: _h_dpT_AntiProton->fill(pT, weight/(TWOPI*pT*0.2)); // nAntiProton->fill(weight); break; case 2212: _h_dpT_Proton->fill(pT, weight/(TWOPI*pT*0.2)); // nProton->fill(weight); break; } } else { continue; } } _h_dNch->fill(charged.particles().size(), weight); } /// Normalise histograms etc., after the run void finalize() { //double nTot = nPi + nPiPlus + nKaon + nKaonPlus + nProton + nAntiProton; normalize(_h_dNch); /// @todo Norm to data! 
normalize(_h_dpT_Pi , 0.389825 ); normalize(_h_dpT_Piplus , 0.396025 ); normalize(_h_dpT_Kaon , 0.03897 ); normalize(_h_dpT_Kaonplus , 0.04046 ); normalize(_h_dpT_AntiProton, 0.0187255); normalize(_h_dpT_Proton , 0.016511 ); } private: Histo1DPtr _h_dNch; Histo1DPtr _h_dpT_Pi, _h_dpT_Piplus; Histo1DPtr _h_dpT_Kaon, _h_dpT_Kaonplus; Histo1DPtr _h_dpT_AntiProton, _h_dpT_Proton; Profile1DPtr _h_pT_vs_Nch; //CounterPtr nCutsPassed, nPi, nPiPlus, nKaon, nKaonPlus, nProton, nAntiProton; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(STAR_2008_S7869363); } diff --git a/analyses/pluginRHIC/STAR_2008_S7993412.cc b/analyses/pluginRHIC/STAR_2008_S7993412.cc --- a/analyses/pluginRHIC/STAR_2008_S7993412.cc +++ b/analyses/pluginRHIC/STAR_2008_S7993412.cc @@ -1,79 +1,79 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// @brief STAR di-hadron correlations in d-Au at 200 GeV class STAR_2008_S7993412 : public Analysis { public: STAR_2008_S7993412() : Analysis("STAR_2008_S7993412") { } /// @name Analysis methods //@{ /// Book projections and histograms void init() { - ChargedFinalState fs(-1.0, 1.0, 1.0*GeV); + ChargedFinalState fs((Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 1.0*GeV)); declare(fs, "FS"); book(_h_Y_jet_trigger ,1, 1, 1); book(_h_Y_jet_associated ,2, 1, 1); } /// Do the analysis void analyze(const Event& event) { // Skip if the event is empty const FinalState& fs = apply(event, "FS"); if (fs.empty()) { MSG_DEBUG("Skipping event " << numEvents() << " because no final state found "); vetoEvent; } for (const Particle& tp : fs.particles()) { const double triggerpT = tp.pT(); if (triggerpT >= 2.0 && triggerpT < 5.0) { int n_associated = 0; for (const Particle& ap : fs.particles()) { if (!inRange(ap.pT()/GeV, 1.5, triggerpT)) continue; if (deltaPhi(tp.phi(), ap.phi()) > 1) continue; if (fabs(tp.eta() - ap.eta()) > 1.75) continue; n_associated += 1; } //const double dPhidEta = 2 * 2*1.75; 
//_h_Y_jet_trigger->fill(triggerpT, n_associated/dPhidEta); _h_Y_jet_trigger->fill(triggerpT, n_associated); } } } /// Finalize void finalize() { } //@} private: /// @name Histograms //@{ Profile1DPtr _h_Y_jet_trigger; Profile1DPtr _h_Y_jet_associated; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(STAR_2008_S7993412); } diff --git a/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc b/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc --- a/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc +++ b/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc @@ -1,165 +1,167 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/NeutralFinalState.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "fastjet/SISConePlugin.hh" namespace Rivet { /// @brief STAR underlying event /// @author Hendrik Hoeth class STAR_2009_UE_HELEN : public Analysis { public: /// Constructor STAR_2009_UE_HELEN() : Analysis("STAR_2009_UE_HELEN") { } /// @name Analysis methods //@{ void init() { // Charged final state, |eta|<1, pT>0.2GeV - const ChargedFinalState cfs(-1.0, 1.0, 0.2*GeV); + const Cut c = Cuts::etaIn(-1.0, 1.0) && Cuts::pT >= 0.2*GeV; + + const ChargedFinalState cfs(c); declare(cfs, "CFS"); // Neutral final state, |eta|<1, ET>0.2GeV (needed for the jets) - const NeutralFinalState nfs(-1.0, 1.0, 0.2*GeV); + const NeutralFinalState nfs(c); declare(nfs, "NFS"); // STAR can't see neutrons and K^0_L VetoedFinalState vfs(nfs); vfs.vetoNeutrinos(); vfs.addVetoPairId(PID::K0L); vfs.addVetoPairId(PID::NEUTRON); declare(vfs, "VFS"); // Jets are reconstructed from charged and neutral particles, // and the cuts are different (pT vs. ET), so we need to merge them. 
const MergedFinalState jfs(cfs, vfs); declare(jfs, "JFS"); // SISCone, R = 0.7, overlap_threshold = 0.75 declare(FastJets(jfs, FastJets::SISCONE, 0.7), "AllJets"); // Book histograms book(_hist_pmaxnchg , 1, 1, 1); book(_hist_pminnchg , 2, 1, 1); book(_hist_anchg , 3, 1, 1); } // Do the analysis void analyze(const Event& e) { const FinalState& cfs = apply(e, "CFS"); if (cfs.particles().size() < 1) { MSG_DEBUG("Failed multiplicity cut"); vetoEvent; } const Jets& alljets = apply(e, "AllJets").jetsByPt(); MSG_DEBUG("Total jet multiplicity = " << alljets.size()); // The jet acceptance region is |eta|<(1-R)=0.3 (with R = jet radius) // Jets also must have a neutral energy fraction of < 0.7 Jets jets; for (const Jet & jet : alljets) { if (jet.neutralEnergy()/jet.totalEnergy() < 0.7 && jet.abseta() < 0.3) jets.push_back(jet); } // This analysis requires a di-jet like event. // WARNING: There is more data in preparation, some of which // does _not_ have this constraint! if (jets.size() != 2) { MSG_DEBUG("Failed jet multiplicity cut"); vetoEvent; } // The di-jet constraints in this analysis are: // - 2 and only 2 jets in the acceptance region // - delta(Phi) between the jets is > 150 degrees // - Pt_awayjet/Pt_towards_jet > 0.7 if (deltaPhi(jets[0].phi(), jets[1].phi()) <= 5*PI/6 || jets[1].pT()/jets[0].pT() <= 0.7) { MSG_DEBUG("Failed di-jet criteria"); vetoEvent; } // Now lets start ... const double jetphi = jets[0].phi(); const double jetpT = jets[0].pT(); size_t numTrans1(0), numTrans2(0), numAway(0); // Calculate all the charged stuff for (const Particle& p : cfs.particles()) { const double dPhi = deltaPhi(p.phi(), jetphi); const double pT = p.pT(); const double phi = p.phi(); double rotatedphi = phi - jetphi; while (rotatedphi < 0) rotatedphi += 2*PI; // @TODO: WARNING: The following lines are a hack to correct // for the STAR tracking efficiency. Once we have the // final numbers (corrected to hadron level), we need // to remove this!!!! 
if (1.0*rand()/static_cast(RAND_MAX) > 0.87834-exp(-1.48994-0.788432*pT)) { continue; } // -------- end of efficiency hack ------- if (dPhi < PI/3.0) { // toward } else if (dPhi < 2*PI/3.0) { if (rotatedphi <= PI) { ++numTrans1; } else { ++numTrans2; } } else { ++numAway; } } // end charged particle loop // Fill the histograms _hist_pmaxnchg->fill(jetpT, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3)); _hist_pminnchg->fill(jetpT, (numTrans1fill(jetpT, numAway/(PI*0.7*0.7)); // jet area = pi*R^2 } void finalize() { // } //@} private: Profile1DPtr _hist_pmaxnchg; Profile1DPtr _hist_pminnchg; Profile1DPtr _hist_anchg; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(STAR_2009_UE_HELEN); } diff --git a/analyses/pluginSPS/UA1_1990_S2044935.cc b/analyses/pluginSPS/UA1_1990_S2044935.cc --- a/analyses/pluginSPS/UA1_1990_S2044935.cc +++ b/analyses/pluginSPS/UA1_1990_S2044935.cc @@ -1,176 +1,176 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// @brief UA1 minbias track multiplicities, \f$ p_\perp \f$ and \f$ E_\perp \f$ class UA1_1990_S2044935 : public Analysis { public: /// Constructor UA1_1990_S2044935() : Analysis("UA1_1990_S2044935") { } /// @name Analysis methods //@{ /// Book projections and histograms void init() { - declare(ChargedFinalState(-5.5, 5.5), "TriggerFS"); - declare(ChargedFinalState(-2.5, 2.5), "TrackFS"); - const FinalState trkcalofs(-2.5, 2.5); + declare(ChargedFinalState((Cuts::etaIn(-5.5, 5.5))), "TriggerFS"); + declare(ChargedFinalState((Cuts::etaIn(-2.5, 2.5))), "TrackFS"); + const FinalState trkcalofs((Cuts::etaIn(-2.5, 2.5))); declare(MissingMomentum(trkcalofs), "MET25"); - const FinalState calofs(-6.0, 6.0); + const FinalState calofs((Cuts::etaIn(-6.0, 6.0))); declare(MissingMomentum(calofs), "MET60"); if (fuzzyEquals(sqrtS()/GeV, 63)) { book(_hist_Pt ,8,1,1); } else if 
(fuzzyEquals(sqrtS()/GeV, 200)) { book(_hist_Nch ,1,1,1); book(_hist_Esigd3p ,2,1,1); book(_hist_Pt ,6,1,1); book(_hist_Et ,9,1,1); book(_hist_Etavg ,12,1,1); } else if (fuzzyEquals(sqrtS()/GeV, 500)) { book(_hist_Nch ,1,1,2); book(_hist_Esigd3p ,2,1,2); book(_hist_Et ,10,1,1); book(_hist_Etavg ,12,1,2); } else if (fuzzyEquals(sqrtS()/GeV, 900)) { book(_hist_Nch ,1,1,3); book(_hist_Esigd3p ,2,1,3); book(_hist_Pt ,7,1,1); book(_hist_Et ,11,1,1); book(_hist_Etavg ,12,1,3); book(_hist_Esigd3p08 ,3,1,1); book(_hist_Esigd3p40 ,4,1,1); book(_hist_Esigd3p80 ,5,1,1); } book(_sumwTrig, "TMP/sumwTrig"); // book(_sumwTrig08, "TMP/sumwTrig08"); // book(_sumwTrig40, "TMP/sumwTrig40"); // book(_sumwTrig80, "TMP/sumwTrig80"); } void analyze(const Event& event) { // Trigger const FinalState& trigfs = apply(event, "TriggerFS"); unsigned int n_minus(0), n_plus(0); for (const Particle& p : trigfs.particles()) { const double eta = p.eta(); if (inRange(eta, -5.5, -1.5)) n_minus++; else if (inRange(eta, 1.5, 5.5)) n_plus++; } MSG_DEBUG("Trigger -: " << n_minus << ", Trigger +: " << n_plus); if (n_plus == 0 || n_minus == 0) vetoEvent; _sumwTrig->fill(); // Use good central detector tracks const FinalState& cfs = apply(event, "TrackFS"); const double Et25 = apply(event, "MET25").scalarEt(); const double Et60 = apply(event, "MET60").scalarEt(); const unsigned int nch = cfs.size(); // Event level histos if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) { _hist_Nch->fill(nch); _hist_Et->fill(Et60/GeV); _hist_Etavg->fill(nch, Et25/GeV); } // Particle/track level histos const double deta = 2 * 5.0; const double dphi = TWOPI; const double dnch_deta = nch/deta; for (const Particle& p : cfs.particles()) { const double pt = p.pT(); const double scaled_weight = 1.0/(deta*dphi*pt/GeV); if (!fuzzyEquals(sqrtS()/GeV, 500, 1E-3)) { _hist_Pt->fill(nch, pt/GeV); } if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) { _hist_Esigd3p->fill(pt/GeV, scaled_weight); } // Also fill for specific dn/deta ranges at 900 GeV if 
(fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { if (inRange(dnch_deta, 0.8, 4.0)) { //_sumwTrig08 ->fill(); _hist_Esigd3p08->fill(pt/GeV, scaled_weight); } else if (inRange(dnch_deta, 4.0, 8.0)) { //_sumwTrig40 ->fill(); _hist_Esigd3p40->fill(pt/GeV, scaled_weight); } else { //MSG_WARNING(dnch_deta); if (dnch_deta > 8.0) { //_sumwTrig80 ->fill(); _hist_Esigd3p80->fill(pt/GeV, scaled_weight); } } } } } void finalize() { if (_sumwTrig->val() <= 0) { MSG_WARNING("No events passed the trigger!"); return; } const double xsec = crossSectionPerEvent(); if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) { scale(_hist_Nch, 2*xsec/millibarn); ///< Factor of 2 for Nch bin widths? scale(_hist_Esigd3p, xsec/millibarn); scale(_hist_Et, xsec/millibarn); } if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { // NB. Ref data is normalised to a fixed value not reproducible from MC. const double scale08 = (_hist_Esigd3p08->bin(0).area() > 0) ? 0.933e5/_hist_Esigd3p08->bin(0).height() : 0; scale(_hist_Esigd3p08, scale08); const double scale40 = (_hist_Esigd3p40->bin(0).area() > 0) ? 1.369e5/_hist_Esigd3p40->bin(0).height() : 0; scale(_hist_Esigd3p40, scale40); const double scale80 = (_hist_Esigd3p80->bin(0).area() > 0) ? 
1.657e5/_hist_Esigd3p80->bin(0).height() : 0; scale(_hist_Esigd3p80, scale80); } } //@} private: /// @name Weight counters //@{ CounterPtr _sumwTrig; //, _sumwTrig08, _sumwTrig40, _sumwTrig80; //@} /// @name Histogram collections //@{ Histo1DPtr _hist_Nch; Histo1DPtr _hist_Esigd3p; Histo1DPtr _hist_Esigd3p08; Histo1DPtr _hist_Esigd3p40; Histo1DPtr _hist_Esigd3p80; Profile1DPtr _hist_Pt; Profile1DPtr _hist_Etavg; Histo1DPtr _hist_Et; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(UA1_1990_S2044935); } diff --git a/analyses/pluginSPS/UA5_1982_S875503.cc b/analyses/pluginSPS/UA5_1982_S875503.cc --- a/analyses/pluginSPS/UA5_1982_S875503.cc +++ b/analyses/pluginSPS/UA5_1982_S875503.cc @@ -1,93 +1,93 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerUA5.hh" namespace Rivet { /// @brief UA5 multiplicity and \f$ \eta \f$ distributions class UA5_1982_S875503 : public Analysis { public: /// Default constructor UA5_1982_S875503() : Analysis("UA5_1982_S875503") { } /// @name Analysis methods //@{ /// Set up projections and book histos void init() { declare(TriggerUA5(), "Trigger"); - declare(ChargedFinalState(-3.5, 3.5), "CFS"); + declare(ChargedFinalState((Cuts::etaIn(-3.5, 3.5))), "CFS"); // Book histos based on pp or ppbar beams if (beamIds().first == beamIds().second) { book(_hist_nch ,2,1,1); book(_hist_eta ,3,1,1); } else { book(_hist_nch ,2,1,2); book(_hist_eta ,4,1,1); } book(_sumWTrig, "sumW"); } void analyze(const Event& event) { // Trigger const TriggerUA5& trigger = apply(event, "Trigger"); if (!trigger.nsdDecision()) vetoEvent; _sumWTrig->fill(); // Get tracks const ChargedFinalState& cfs = apply(event, "CFS"); // Fill mean charged multiplicity histos _hist_nch->fill(_hist_nch->bin(0).xMid(), cfs.size()); // Iterate over all tracks and fill eta histograms for (const Particle& p : cfs.particles()) { const double eta = p.abseta(); _hist_eta->fill(eta); } } void 
finalize() { /// @todo Why the factor of 2 on Nch for ppbar? if (beamIds().first == beamIds().second) { scale(_hist_nch, 1.0 / *_sumWTrig); } else { scale(_hist_nch, 0.5 / *_sumWTrig); } scale(_hist_eta, 0.5 / *_sumWTrig); } //@} private: /// @name Counters //@{ CounterPtr _sumWTrig; //@} /// @name Histogram collections //@{ Histo1DPtr _hist_nch; Histo1DPtr _hist_eta; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(UA5_1982_S875503); } diff --git a/analyses/pluginSPS/UA5_1986_S1583476.cc b/analyses/pluginSPS/UA5_1986_S1583476.cc --- a/analyses/pluginSPS/UA5_1986_S1583476.cc +++ b/analyses/pluginSPS/UA5_1986_S1583476.cc @@ -1,123 +1,123 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/TriggerUA5.hh" namespace Rivet { /// @brief UA5 \f$ \eta \f$ distributions at 200 and 900 GeV class UA5_1986_S1583476 : public Analysis { public: /// Constructor UA5_1986_S1583476() : Analysis("UA5_1986_S1583476") { } /// @name Analysis methods //@{ /// Set up projections and histograms void init() { declare(TriggerUA5(), "Trigger"); declare(Beam(), "Beams"); - declare(ChargedFinalState(-5.0, 5.0), "CFS50"); + declare(ChargedFinalState((Cuts::etaIn(-5.0, 5.0))), "CFS50"); // Histograms if (fuzzyEquals(sqrtS()/GeV, 200.0, 1E-4)) { book(_hist_eta_nsd ,1,1,1); book(_hist_eta_inelastic ,1,1,2); _hists_eta_nsd.resize(6); for (int i = 1; i <= 6; ++i) { _sumWn.push_back({}); book(_sumWn.back(), "TMP/sumWn"+to_str(i)); book(_hists_eta_nsd[i-1],2,1,i); } } else if (fuzzyEquals(sqrtS()/GeV, 900.0, 1E-4)) { book(_hist_eta_nsd ,1,1,3); book(_hist_eta_inelastic ,1,1,4); _hists_eta_nsd.resize(9); for (int i = 1; i <= 9; ++i) { _sumWn.push_back({}); book(_sumWn.back(), "TMP/sumWn"+to_str(i)); book(_hists_eta_nsd[i-1],3,1,i); } } book(_sumWTrig, "sumWtrig"); book(_sumWTrigNSD, "sumWtrigNSD"); } /// Fill eta histograms (in Nch bins) void analyze(const Event& event) { // 
Trigger const TriggerUA5& trigger = apply(event, "Trigger"); if (!trigger.sdDecision()) vetoEvent; const bool isNSD = trigger.nsdDecision(); // Get the index corresponding to the max Nch range histo/sum(w) vector index const ChargedFinalState& cfs50 = apply(event, "CFS50"); const int numP = cfs50.size(); const int ni = (int)floor(static_cast(numP-2)/10.0); const int num_idx = min(ni, (int)_sumWn.size()-1); MSG_TRACE("Multiplicity index: " << numP << " charged particles -> #" << num_idx); // Update weights _sumWTrig->fill(); if (isNSD) { _sumWTrigNSD->fill(); if (num_idx >= 0) _sumWn[num_idx]->fill(); } // Fill histos for (const Particle& p : cfs50.particles()) { const double eta = p.abseta(); _hist_eta_inelastic->fill(eta); if (isNSD) { _hist_eta_nsd->fill(eta); if (num_idx >= 0) _hists_eta_nsd[num_idx]->fill(eta); } } } /// Scale histos void finalize() { MSG_DEBUG("sumW_NSD,inel = " << _sumWTrigNSD->val() << ", " << _sumWTrig->val()); scale(_hist_eta_nsd, 0.5 / *_sumWTrigNSD); scale(_hist_eta_inelastic, 0.5 / *_sumWTrig); // for (size_t i = 0; i < _hists_eta_nsd.size(); ++i) { MSG_DEBUG("sumW[n] = " << _sumWn[i]->val()); scale(_hists_eta_nsd[i], 0.5 / *_sumWn[i]); } } private: /// @name Weight counters //@{ CounterPtr _sumWTrig; CounterPtr _sumWTrigNSD; vector _sumWn; //@} /// @name Histograms //@{ Histo1DPtr _hist_eta_nsd; Histo1DPtr _hist_eta_inelastic; vector _hists_eta_nsd; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(UA5_1986_S1583476); } diff --git a/analyses/pluginSPS/UA5_1987_S1640666.cc b/analyses/pluginSPS/UA5_1987_S1640666.cc --- a/analyses/pluginSPS/UA5_1987_S1640666.cc +++ b/analyses/pluginSPS/UA5_1987_S1640666.cc @@ -1,72 +1,72 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/TriggerUA5.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class UA5_1987_S1640666 : public Analysis { public: /// Constructor UA5_1987_S1640666() : Analysis("UA5_1987_S1640666") { } /// Book histograms and 
initialise projections before the run void init() { declare(TriggerUA5(), "Trigger"); - declare(ChargedFinalState(-5.0, 5.0), "CFS"); + declare(ChargedFinalState((Cuts::etaIn(-5.0, 5.0))), "CFS"); book(_hist_mean_nch ,1, 1, 1); book(_hist_nch ,3, 1, 1); book(_sumWPassed, "SumW"); } /// Perform the per-event analysis void analyze(const Event& event) { // Trigger const TriggerUA5& trigger = apply(event, "Trigger"); if (!trigger.nsdDecision()) vetoEvent; _sumWPassed->fill(); // Count final state particles in several eta regions const int Nch = apply(event, "CFS").size(); // Fill histograms _hist_nch->fill(Nch); _hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), Nch); } /// Normalise histograms etc., after the run void finalize() { scale(_hist_nch, 1.0 / *_sumWPassed); scale(_hist_mean_nch, 1.0 / *_sumWPassed); } private: CounterPtr _sumWPassed; Histo1DPtr _hist_mean_nch; Histo1DPtr _hist_nch; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(UA5_1987_S1640666); } diff --git a/analyses/pluginSPS/UA5_1988_S1867512.cc b/analyses/pluginSPS/UA5_1988_S1867512.cc --- a/analyses/pluginSPS/UA5_1988_S1867512.cc +++ b/analyses/pluginSPS/UA5_1988_S1867512.cc @@ -1,195 +1,195 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/TriggerUA5.hh" namespace Rivet { namespace { /// @brief Helper function to fill correlation points into scatter plot Point2D correlation_helper(double x, double xerr, const vector & nf, const vector & nb, CounterPtr sumWPassed) { return Point2D(x, correlation(nf, nb), xerr, correlation_err(nf, nb)/sqrt(sumWPassed->val())); } } /// @brief UA5 charged particle correlations at 200, 546 and 900 GeV class UA5_1988_S1867512 : public Analysis { public: UA5_1988_S1867512() : Analysis("UA5_1988_S1867512") { } /// @name Analysis methods //@{ void init() { // Projections declare(TriggerUA5(), "Trigger"); // Symmetric eta interval - 
declare(ChargedFinalState(-0.5, 0.5), "CFS05"); + declare(ChargedFinalState((Cuts::etaIn(-0.5, 0.5))), "CFS05"); // Asymmetric intervals first // Forward eta intervals - declare(ChargedFinalState(0.0, 1.0), "CFS10F"); - declare(ChargedFinalState(0.5, 1.5), "CFS15F"); - declare(ChargedFinalState(1.0, 2.0), "CFS20F"); - declare(ChargedFinalState(1.5, 2.5), "CFS25F"); - declare(ChargedFinalState(2.0, 3.0), "CFS30F"); - declare(ChargedFinalState(2.5, 3.5), "CFS35F"); - declare(ChargedFinalState(3.0, 4.0), "CFS40F"); + declare(ChargedFinalState((Cuts::etaIn(0.0, 1.0))), "CFS10F"); + declare(ChargedFinalState((Cuts::etaIn(0.5, 1.5))), "CFS15F"); + declare(ChargedFinalState((Cuts::etaIn(1.0, 2.0))), "CFS20F"); + declare(ChargedFinalState((Cuts::etaIn(1.5, 2.5))), "CFS25F"); + declare(ChargedFinalState((Cuts::etaIn(2.0, 3.0))), "CFS30F"); + declare(ChargedFinalState((Cuts::etaIn(2.5, 3.5))), "CFS35F"); + declare(ChargedFinalState((Cuts::etaIn(3.0, 4.0))), "CFS40F"); // Backward eta intervals - declare(ChargedFinalState(-1.0, 0.0), "CFS10B"); - declare(ChargedFinalState(-1.5, -0.5), "CFS15B"); - declare(ChargedFinalState(-2.0, -1.0), "CFS20B"); - declare(ChargedFinalState(-2.5, -1.5), "CFS25B"); - declare(ChargedFinalState(-3.0, -2.0), "CFS30B"); - declare(ChargedFinalState(-3.5, -2.5), "CFS35B"); - declare(ChargedFinalState(-4.0, -3.0), "CFS40B"); + declare(ChargedFinalState((Cuts::etaIn(-1.0, 0.0))), "CFS10B"); + declare(ChargedFinalState((Cuts::etaIn(-1.5, -0.5))), "CFS15B"); + declare(ChargedFinalState((Cuts::etaIn(-2.0, -1.0))), "CFS20B"); + declare(ChargedFinalState((Cuts::etaIn(-2.5, -1.5))), "CFS25B"); + declare(ChargedFinalState((Cuts::etaIn(-3.0, -2.0))), "CFS30B"); + declare(ChargedFinalState((Cuts::etaIn(-3.5, -2.5))), "CFS35B"); + declare(ChargedFinalState((Cuts::etaIn(-4.0, -3.0))), "CFS40B"); // Histogram booking, we have sqrt(s) = 200, 546 and 900 GeV // TODO use Scatter2D to be able to output errors if (fuzzyEquals(sqrtS()/GeV, 200.0, 1E-4)) { 
book(_hist_correl, 2, 1, 1); book(_hist_correl_asym, 3, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 546.0, 1E-4)) { book(_hist_correl, 2, 1, 2); book(_hist_correl_asym, 3, 1, 2); } else if (fuzzyEquals(sqrtS()/GeV, 900.0, 1E-4)) { book(_hist_correl, 2, 1, 3); book(_hist_correl_asym, 3, 1, 3); } book(_sumWPassed, "sumW"); } void analyze(const Event& event) { // Trigger const bool trigger = apply(event, "Trigger").nsdDecision(); if (!trigger) vetoEvent; _sumWPassed->fill(); // Count forward/backward particles n_10f.push_back(apply(event, "CFS10F").size()); n_15f.push_back(apply(event, "CFS15F").size()); n_20f.push_back(apply(event, "CFS20F").size()); n_25f.push_back(apply(event, "CFS25F").size()); n_30f.push_back(apply(event, "CFS30F").size()); n_35f.push_back(apply(event, "CFS35F").size()); n_40f.push_back(apply(event, "CFS40F").size()); // n_10b.push_back(apply(event, "CFS10B").size()); n_15b.push_back(apply(event, "CFS15B").size()); n_20b.push_back(apply(event, "CFS20B").size()); n_25b.push_back(apply(event, "CFS25B").size()); n_30b.push_back(apply(event, "CFS30B").size()); n_35b.push_back(apply(event, "CFS35B").size()); n_40b.push_back(apply(event, "CFS40B").size()); // n_05 .push_back(apply(event, "CFS05").size()); } void finalize() { // The correlation strength is defined in formulas // 4.1 and 4.2 // Fill histos, gap width histo comes first // * Set the errors as Delta b / sqrt(sumWPassed) with // Delta b being the absolute uncertainty of b according to // Gaussian error-propagation (linear limit) and assuming // Poissonian uncertainties for the number of particles in // the eta-intervals // // Define vectors to be able to fill Scatter2Ds vector points; // Fill the y-value vector points.push_back(correlation_helper(0, 0.5, n_10f, n_10b, _sumWPassed)); points.push_back(correlation_helper(1, 0.5, n_15f, n_15b, _sumWPassed)); points.push_back(correlation_helper(2, 0.5, n_20f, n_20b, _sumWPassed)); points.push_back(correlation_helper(3, 0.5, n_25f, n_25b, 
_sumWPassed)); points.push_back(correlation_helper(4, 0.5, n_30f, n_30b, _sumWPassed)); points.push_back(correlation_helper(5, 0.5, n_35f, n_35b, _sumWPassed)); points.push_back(correlation_helper(6, 0.5, n_40f, n_40b, _sumWPassed)); // Fill the DPS _hist_correl->addPoints(points); // Fill gap-center histo (Fig 15) // // The first bin contains the c_str strengths of // the gap size histo that has ane eta gap of two // // Now do the other histo -- clear already defined vectors first points.clear(); points.push_back(correlation_helper(0, 0.25, n_20f, n_20b, _sumWPassed)); points.push_back(correlation_helper(0.5, 0.25, n_25f, n_15b, _sumWPassed)); points.push_back(correlation_helper(1, 0.25, n_30f, n_10b, _sumWPassed)); points.push_back(correlation_helper(1.5, 0.25, n_35f, n_05 , _sumWPassed)); points.push_back(correlation_helper(2, 0.25, n_40f, n_10f, _sumWPassed)); // Fill in correlation strength for assymetric intervals, // see Tab. 5 // Fill the DPS _hist_correl_asym->addPoints(points); } //@} private: /// @name Counters //@{ CounterPtr _sumWPassed; //@} /// @name Vectors for storing the number of particles in the different eta intervals per event. /// @todo Is there a better way? 
//@{ std::vector n_10f; std::vector n_15f; std::vector n_20f; std::vector n_25f; std::vector n_30f; std::vector n_35f; std::vector n_40f; // std::vector n_10b; std::vector n_15b; std::vector n_20b; std::vector n_25b; std::vector n_30b; std::vector n_35b; std::vector n_40b; // std::vector n_05; //@} /// @name Histograms //@{ // Symmetric eta intervals Scatter2DPtr _hist_correl; // For asymmetric eta intervals Scatter2DPtr _hist_correl_asym; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(UA5_1988_S1867512); } diff --git a/analyses/pluginSPS/UA5_1989_S1926373.cc b/analyses/pluginSPS/UA5_1989_S1926373.cc --- a/analyses/pluginSPS/UA5_1989_S1926373.cc +++ b/analyses/pluginSPS/UA5_1989_S1926373.cc @@ -1,110 +1,110 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerUA5.hh" namespace Rivet { /// @brief UA5 min bias charged multiplicities in central \f$ \eta \f$ ranges class UA5_1989_S1926373 : public Analysis { public: /// Constructor UA5_1989_S1926373() : Analysis("UA5_1989_S1926373") { } /// @name Analysis methods //@{ /// Book histograms and projections void init() { declare(TriggerUA5(), "Trigger"); - declare(ChargedFinalState(-0.5, 0.5), "CFS05"); - declare(ChargedFinalState(-1.5, 1.5), "CFS15"); - declare(ChargedFinalState(-3.0, 3.0), "CFS30"); - declare(ChargedFinalState(-5.0, 5.0), "CFS50"); + declare(ChargedFinalState((Cuts::etaIn(-0.5, 0.5))), "CFS05"); + declare(ChargedFinalState((Cuts::etaIn(-1.5, 1.5))), "CFS15"); + declare(ChargedFinalState((Cuts::etaIn(-3.0, 3.0))), "CFS30"); + declare(ChargedFinalState((Cuts::etaIn(-5.0, 5.0))), "CFS50"); // NB. 
_hist_nch and _hist_ncheta50 use the same data but different binning if (fuzzyEquals(sqrtS()/GeV, 200, 1E-3)) { book(_hist_nch ,1, 1, 1); book(_hist_nch_eta05 ,3, 1, 1); book(_hist_nch_eta15 ,4, 1, 1); book(_hist_nch_eta30 ,5, 1, 1); book(_hist_nch_eta50 ,6, 1, 1); book(_hist_mean_nch ,11, 1, 1); } else if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { book(_hist_nch ,2, 1, 1); book(_hist_nch_eta05 ,7, 1, 1); book(_hist_nch_eta15 ,8, 1, 1); book(_hist_nch_eta30 ,9, 1, 1); book(_hist_nch_eta50 ,10, 1, 1); book(_hist_mean_nch ,12, 1, 1); } book(_sumWPassed, "SumW"); /// @todo Moments of distributions } /// Do the analysis void analyze(const Event& event) { // Trigger const TriggerUA5& trigger = apply(event, "Trigger"); if (!trigger.nsdDecision()) vetoEvent; _sumWPassed->fill(); // Count final state particles in several eta regions const int numP05 = apply(event, "CFS05").size(); const int numP15 = apply(event, "CFS15").size(); const int numP30 = apply(event, "CFS30").size(); const int numP50 = apply(event, "CFS50").size(); // Fill histograms _hist_nch->fill(numP50); _hist_nch_eta05->fill(numP05); _hist_nch_eta15->fill(numP15); _hist_nch_eta30->fill(numP30); _hist_nch_eta50->fill(numP50); _hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), numP50); } void finalize() { scale(_hist_nch, 1.0 / *_sumWPassed); scale(_hist_nch_eta05, 1.0 / *_sumWPassed); scale(_hist_nch_eta15, 1.0 / *_sumWPassed); scale(_hist_nch_eta30, 1.0 / *_sumWPassed); scale(_hist_nch_eta50, 1.0 / *_sumWPassed); scale(_hist_mean_nch, 1.0 / *_sumWPassed); } //@} private: /// @name Counters //@{ CounterPtr _sumWPassed; //@} /// @name Histograms //@{ Histo1DPtr _hist_nch; Histo1DPtr _hist_nch_eta05; Histo1DPtr _hist_nch_eta15; Histo1DPtr _hist_nch_eta30; Histo1DPtr _hist_nch_eta50; Histo1DPtr _hist_mean_nch; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(UA5_1989_S1926373); } diff --git a/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc b/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc --- 
a/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc +++ b/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc @@ -1,78 +1,78 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { class CMSTOTEM_2014_I1294140 : public Analysis { public: CMSTOTEM_2014_I1294140() : Analysis("CMSTOTEM_2014_I1294140") { } void init() { - ChargedFinalState cfs(-7.0, 7.0, 0.0*GeV); + ChargedFinalState cfs((Cuts::etaIn(-7.0, 7.0))); declare(cfs, "CFS"); book(_Nevt_after_cuts_or, "Nevt_or"); book(_Nevt_after_cuts_and, "Nevt_and"); book(_Nevt_after_cuts_xor, "Nevt_xor"); if (fuzzyEquals(sqrtS(), 8000*GeV, 1E-3)) { book(_h_dNch_dEta_OR ,1, 1, 1); book(_h_dNch_dEta_AND ,2, 1, 1); book(_h_dNch_dEta_XOR ,3, 1, 1); } } void analyze(const Event& event) { // Count forward and backward charged particles const ChargedFinalState& charged = apply(event, "CFS"); int count_plus = 0, count_minus = 0; for (const Particle& p : charged.particles()) { if (inRange(p.eta(), 5.3, 6.5)) count_plus++; if (inRange(p.eta(), -6.5, -5.3)) count_minus++; } // Cut combinations const bool cutsor = (count_plus > 0 || count_minus > 0); const bool cutsand = (count_plus > 0 && count_minus > 0); const bool cutsxor = ( (count_plus > 0 && count_minus == 0) || (count_plus == 0 && count_minus > 0) ); // Increment counters and fill histos if (cutsor) _Nevt_after_cuts_or ->fill(); if (cutsand) _Nevt_after_cuts_and ->fill(); if (cutsxor) _Nevt_after_cuts_xor ->fill(); for (const Particle& p : charged.particles()) { if (cutsor) _h_dNch_dEta_OR ->fill(p.abseta()); if (cutsand) _h_dNch_dEta_AND->fill(p.abseta()); if (cutsxor) _h_dNch_dEta_XOR->fill(p.abseta()); } } void finalize() { scale(_h_dNch_dEta_OR, 0.5 / *_Nevt_after_cuts_or); scale(_h_dNch_dEta_AND, 0.5 / *_Nevt_after_cuts_and); scale(_h_dNch_dEta_XOR, 0.5 / *_Nevt_after_cuts_xor); } private: Histo1DPtr _h_dNch_dEta_OR, _h_dNch_dEta_AND, _h_dNch_dEta_XOR; CounterPtr _Nevt_after_cuts_or, 
_Nevt_after_cuts_and, _Nevt_after_cuts_xor; }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMSTOTEM_2014_I1294140); } diff --git a/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc b/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc --- a/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc +++ b/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc @@ -1,62 +1,62 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class TOTEM_2012_I1115294 : public Analysis { public: TOTEM_2012_I1115294() : Analysis("TOTEM_2012_I1115294") { } public: void init() { - ChargedFinalState cfsm(-6.50, -5.35, 40.*MeV); - ChargedFinalState cfsp( 5.35, 6.50, 40.*MeV); + ChargedFinalState cfsm((Cuts::etaIn(-6.50, -5.35) && Cuts::pT >= 40.*MeV)); + ChargedFinalState cfsp((Cuts::etaIn( 5.35, 6.50) && Cuts::pT >= 40.*MeV)); declare(cfsm, "CFSM"); declare(cfsp, "CFSP"); book(_h_eta ,1, 1, 1); book(_sumofweights, "sumofweights"); } void analyze(const Event& event) { const ChargedFinalState cfsm = apply(event, "CFSM"); const ChargedFinalState cfsp = apply(event, "CFSP"); if (cfsm.size() == 0 && cfsp.size() == 0) vetoEvent; _sumofweights->fill(); for (const Particle& p : cfsm.particles() + cfsp.particles()) { _h_eta->fill(p.abseta()); } } void finalize() { scale(_h_eta, 0.5 / *_sumofweights); } private: CounterPtr _sumofweights; Histo1DPtr _h_eta; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(TOTEM_2012_I1115294); } diff --git a/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc b/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc --- a/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc +++ b/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc @@ -1,58 +1,58 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class TOTEM_2014_I1328627 : public Analysis { public: TOTEM_2014_I1328627() : Analysis("TOTEM_2014_I1328627") { } void init() { - ChargedFinalState cfsm(-7.0, -6.0, 0.0*GeV); - ChargedFinalState cfsp( 3.7, 
4.8, 0.0*GeV); + ChargedFinalState cfsm(Cuts::etaIn(-7.0, -6.0)); + ChargedFinalState cfsp(Cuts::etaIn( 3.7, 4.8)); declare(cfsm, "CFSM"); declare(cfsp, "CFSP"); book(_h_eta ,1, 1, 1); book(_sumofweights, "sumofweights"); } void analyze(const Event& event) { const ChargedFinalState cfsm = apply(event, "CFSM"); const ChargedFinalState cfsp = apply(event, "CFSP"); if (cfsm.size() == 0 && cfsp.size() == 0) vetoEvent; _sumofweights->fill(); for (const Particle& p : cfsm.particles() + cfsp.particles()) { _h_eta->fill(p.abseta()); } } void finalize() { scale(_h_eta, 1./ *_sumofweights); } private: CounterPtr _sumofweights; Histo1DPtr _h_eta; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(TOTEM_2014_I1328627); } diff --git a/include/Rivet/Projections/ChargedFinalState.hh b/include/Rivet/Projections/ChargedFinalState.hh --- a/include/Rivet/Projections/ChargedFinalState.hh +++ b/include/Rivet/Projections/ChargedFinalState.hh @@ -1,44 +1,41 @@ // -*- C++ -*- #ifndef RIVET_ChargedFinalState_HH #define RIVET_ChargedFinalState_HH #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Project only charged final state particles. class ChargedFinalState : public FinalState { public: /// @name Constructors //@{ /// Construction from another FinalState ChargedFinalState(const FinalState& fsp); /// Construction using Cuts object ChargedFinalState(const Cut& c=Cuts::open()); - /// Single eta-range constructor. - ChargedFinalState(double mineta, double maxeta, double minpt=0*GeV); - /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(ChargedFinalState); //@} /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. 
CmpState compare(const Projection& p) const; }; } #endif diff --git a/include/Rivet/Projections/ConstLossyFinalState.hh b/include/Rivet/Projections/ConstLossyFinalState.hh --- a/include/Rivet/Projections/ConstLossyFinalState.hh +++ b/include/Rivet/Projections/ConstLossyFinalState.hh @@ -1,77 +1,74 @@ // -*- C++ -*- #ifndef RIVET_ConstLossyFinalState_HH #define RIVET_ConstLossyFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LossyFinalState.hh" namespace Rivet { /// Functor used to implement constant random lossiness. class ConstRandomFilter { public: ConstRandomFilter(double lossFraction) : _lossFraction(lossFraction) { assert(_lossFraction >= 0); } // If operator() returns true, particle is deleted ("lost") bool operator()(const Particle&) { return rand01() < _lossFraction; } CmpState compare(const ConstRandomFilter& other) const { return cmp(_lossFraction, other._lossFraction); } private: double _lossFraction; }; /// @brief Randomly lose a constant fraction of particles. class ConstLossyFinalState : public LossyFinalState { public: /// @name Constructors //@{ /// Constructor from a FinalState. ConstLossyFinalState(const FinalState& fsp, double lossfraction) : LossyFinalState(fsp, ConstRandomFilter(lossfraction)) { setName("ConstLossyFinalState"); } /// Stand-alone constructor. Initialises the base FinalState projection. - ConstLossyFinalState(double lossfraction, - double mineta = -DBL_MAX, - double maxeta = DBL_MAX, - double minpt = 0.0) - : LossyFinalState(ConstRandomFilter(lossfraction), mineta, maxeta, minpt) + ConstLossyFinalState(double lossfraction, const Cut& c=Cuts::open()) + : LossyFinalState(ConstRandomFilter(lossfraction), c) { setName("ConstLossyFinalState"); } /// Clone on the heap. 
DEFAULT_RIVET_PROJ_CLONE(ConstLossyFinalState); //@} }; } #endif diff --git a/include/Rivet/Projections/FinalState.hh b/include/Rivet/Projections/FinalState.hh --- a/include/Rivet/Projections/FinalState.hh +++ b/include/Rivet/Projections/FinalState.hh @@ -1,56 +1,52 @@ // -*- C++ -*- #ifndef RIVET_FinalState_HH #define RIVET_FinalState_HH #include "Rivet/Projections/ParticleFinder.hh" namespace Rivet { /// @brief Project out all final-state particles in an event. /// Probably the most important projection in Rivet! class FinalState : public ParticleFinder { public: /// @name Standard constructors etc. //@{ /// Construction using Cuts object FinalState(const Cut& c=Cuts::open()); /// Construction using another FinalState and a Cuts object FinalState(const FinalState& fsp, const Cut& c); - /// Old constructor with numeric cut arguments, retained for compatibility - //DEPRECATED("Use the versions with Cut arguments") - FinalState(double mineta, double maxeta, double minpt=0.0*GeV); - /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(FinalState); //@} /// Apply the projection to the event. virtual void project(const Event& e); /// Compare projections. virtual CmpState compare(const Projection& p) const; /// Decide if a particle is to be accepted or not. /// @todo Rename to _accept or acceptFinal? 
virtual bool accept(const Particle& p) const; private: // Hide lossy copy constructors for all classes derived from FinalState template FinalState(const T& rhs); template FinalState const& operator=(T const& rhs); }; } #endif diff --git a/include/Rivet/Projections/HadronicFinalState.hh b/include/Rivet/Projections/HadronicFinalState.hh --- a/include/Rivet/Projections/HadronicFinalState.hh +++ b/include/Rivet/Projections/HadronicFinalState.hh @@ -1,51 +1,49 @@ // -*- C++ -*- #ifndef RIVET_HadronicFinalState_HH #define RIVET_HadronicFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Project only hadronic final state particles. class HadronicFinalState : public FinalState { public: /// Constructor: the supplied FinalState projection is assumed to live through the run. HadronicFinalState(const FinalState& fsp) { setName("HadronicFinalState"); declare(fsp, "FS"); } - HadronicFinalState(double mineta = -DBL_MAX, - double maxeta = DBL_MAX, - double minpt = 0.0*GeV) + HadronicFinalState(const Cut& c=Cuts::open()) { setName("HadronicFinalState"); - declare(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(c), "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(HadronicFinalState); protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. 
CmpState compare(const Projection& p) const; }; } #endif diff --git a/include/Rivet/Projections/LossyFinalState.hh b/include/Rivet/Projections/LossyFinalState.hh --- a/include/Rivet/Projections/LossyFinalState.hh +++ b/include/Rivet/Projections/LossyFinalState.hh @@ -1,81 +1,78 @@ // -*- C++ -*- #ifndef RIVET_LossyFinalState_HH #define RIVET_LossyFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Templated FS projection which can lose some of the supplied particles. template class LossyFinalState : public FinalState { public: /// @name Constructors //@{ /// Constructor from FinalState. LossyFinalState(const FinalState& fsp, FILTER filter) : _filter(filter) { setName("LossyFinalState"); declare(fsp, "FS"); } /// Stand-alone constructor. Initialises the base FinalState projection. - LossyFinalState(FILTER filter, - double mineta = -DBL_MAX, - double maxeta = DBL_MAX, - double minpt = 0.0) + LossyFinalState(FILTER filter, const Cut& c=Cuts::open()) : _filter(filter) { setName("LossyFinalState"); - declare(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(c), "FS"); } /// Virtual destructor, to allow subclassing virtual ~LossyFinalState() { } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(LossyFinalState); //@} /// Apply the projection on the supplied event. void project(const Event& e) { const FinalState& fs = applyProjection(e, "FS"); getLog() << Log::DEBUG << "Pre-loss number of FS particles = " << fs.particles().size() << '\n'; _theParticles.clear(); std::remove_copy_if(fs.particles().begin(), fs.particles().end(), std::back_inserter(_theParticles), _filter); getLog() << Log::DEBUG << "Filtered number of FS particles = " << _theParticles.size() << '\n'; } /// Compare projections. 
CmpState compare(const Projection& p) const { const LossyFinalState& other = pcast< LossyFinalState >(p); const CmpState fscmp = mkNamedPCmp(other, "FS"); if (fscmp != CmpState::EQ) return fscmp; return _filter.compare(other._filter); } protected: /// Filtering object: must support operator(const Particle&) and compare(const Filter&) FILTER _filter; }; } #endif diff --git a/include/Rivet/Projections/NeutralFinalState.hh b/include/Rivet/Projections/NeutralFinalState.hh --- a/include/Rivet/Projections/NeutralFinalState.hh +++ b/include/Rivet/Projections/NeutralFinalState.hh @@ -1,65 +1,57 @@ // -*- C++ -*- #ifndef RIVET_NeutralFinalState_HH #define RIVET_NeutralFinalState_HH #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Project only neutral final state particles. class NeutralFinalState : public FinalState { public: /// @name Constructors //@{ /// Construction from another FinalState NeutralFinalState(const FinalState& fsp, double etmin=0*GeV) : _Etmin(etmin) { setName("NeutralFinalState"); declare(fsp, "FS"); } /// Construction using Cuts object NeutralFinalState(const Cut& c=Cuts::open()) : _Etmin(0.0*GeV) { setName("NeutralFinalState"); declare(FinalState(c), "FS"); } - /// Construction from explicit eta range and min ET cut values - NeutralFinalState(double mineta, double maxeta, double etmin=0*GeV) - : _Etmin(etmin) - { - setName("NeutralFinalState"); - declare(FinalState(mineta, maxeta, 0.0*GeV), "FS"); - } - /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(NeutralFinalState); //@} /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; protected: /// The minimum allowed transverse energy. 
/// @todo Remove in favour of a Cut double _Etmin; }; } #endif diff --git a/include/Rivet/Projections/NonHadronicFinalState.hh b/include/Rivet/Projections/NonHadronicFinalState.hh --- a/include/Rivet/Projections/NonHadronicFinalState.hh +++ b/include/Rivet/Projections/NonHadronicFinalState.hh @@ -1,50 +1,48 @@ // -*- C++ -*- #ifndef RIVET_NonHadronicFinalState_HH #define RIVET_NonHadronicFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Project only hadronic final state particles. class NonHadronicFinalState : public FinalState { public: /// Constructor: the supplied FinalState projection is assumed to live through the run. NonHadronicFinalState(FinalState& fsp) { setName("NonHadronicFinalState"); declare(fsp, "FS"); } - NonHadronicFinalState(double mineta = -DBL_MAX, - double maxeta = DBL_MAX, - double minpt = 0.0*GeV) + NonHadronicFinalState(const Cut& c=Cuts::open()) { setName("NonHadronicFinalState"); - declare(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(c), "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(NonHadronicFinalState); /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. 
CmpState compare(const Projection& p) const; }; } #endif diff --git a/include/Rivet/Projections/TriggerCDFRun0Run1.hh b/include/Rivet/Projections/TriggerCDFRun0Run1.hh --- a/include/Rivet/Projections/TriggerCDFRun0Run1.hh +++ b/include/Rivet/Projections/TriggerCDFRun0Run1.hh @@ -1,55 +1,55 @@ // -*- C++ -*- #ifndef RIVET_TriggerCDFRun0Run1_HH #define RIVET_TriggerCDFRun0Run1_HH #include "Rivet/Projection.hh" #include "Rivet/Event.hh" #include "Rivet/Particle.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief Access to the min bias triggers used by CDF in Run 0 and Run 1 class TriggerCDFRun0Run1 : public Projection { public: /// Default constructor. TriggerCDFRun0Run1() { setName("TriggerCDFRun0Run1"); - declare(ChargedFinalState(-5.9, 5.9), "CFS"); + declare(ChargedFinalState(Cuts::etaIn(-5.9, 5.9)), "CFS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(TriggerCDFRun0Run1); /// The trigger result bool minBiasDecision() const { return _decision_mb; } /// Project on to the Event void project(const Event& evt); protected: /// Compare with other projections. virtual CmpState compare(const Projection&) const { return CmpState::EQ; } private: /// The min bias trigger decision bool _decision_mb; }; } #endif diff --git a/include/Rivet/Projections/TriggerCDFRun2.hh b/include/Rivet/Projections/TriggerCDFRun2.hh --- a/include/Rivet/Projections/TriggerCDFRun2.hh +++ b/include/Rivet/Projections/TriggerCDFRun2.hh @@ -1,55 +1,55 @@ // -*- C++ -*- #ifndef RIVET_TriggerCDFRun2_HH #define RIVET_TriggerCDFRun2_HH #include "Rivet/Projection.hh" #include "Rivet/Event.hh" #include "Rivet/Particle.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief Access to the min bias triggers used by CDF in Run 0 and Run 1 class TriggerCDFRun2 : public Projection { public: /// Default constructor. 
TriggerCDFRun2() { setName("TriggerCDFRun2"); - declare(ChargedFinalState(-4.7, 4.7), "CFS"); + declare(ChargedFinalState(Cuts::etaIn(-4.7, 4.7)), "CFS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(TriggerCDFRun2); /// The trigger result bool minBiasDecision() const { return _decision_mb; } /// Project on to the Event void project(const Event& evt); protected: /// Compare with other projections. virtual CmpState compare(const Projection&) const { return CmpState::EQ; } private: /// The min bias trigger decision bool _decision_mb; }; } #endif diff --git a/include/Rivet/Projections/VetoedFinalState.hh b/include/Rivet/Projections/VetoedFinalState.hh --- a/include/Rivet/Projections/VetoedFinalState.hh +++ b/include/Rivet/Projections/VetoedFinalState.hh @@ -1,181 +1,181 @@ // -*- C++ -*- #ifndef RIVET_VetoedFinalState_HH #define RIVET_VetoedFinalState_HH #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief FS modifier to exclude classes of particles from the final state. class VetoedFinalState : public FinalState { public: /// Typedef for a pair of back-to-back cuts. typedef pair BinaryCut; /// Typedef for a vetoing entry. typedef map VetoDetails; /// Typedef for a veto on a composite particle mass. typedef multimap CompositeVeto; /// @name Constructors //@{ /// Default constructor. - VetoedFinalState() { + VetoedFinalState(const Cut& c=Cuts::open()) { setName("VetoedFinalState"); - declare(FinalState(), "FS"); + declare(FinalState(c), "FS"); } /// Constructor with specific FinalState. VetoedFinalState(const FinalState& fsp) { setName("VetoedFinalState"); declare(fsp, "FS"); } /// You can add a map of ID plus a pair containing \f$ p_{Tmin} \f$ and /// \f$ p_{Tmax} \f$ - these define the range of particles to be vetoed. 
VetoedFinalState(const VetoDetails& vetocodes) : _vetoCodes(vetocodes) { setName("VetoedFinalState"); declare(FinalState(), "FS"); } /// You can add a map of ID plus a pair containing \f$ p_{Tmin} \f$ and /// \f$ p_{Tmax} \f$ - these define the range of particles to be vetoed. /// This version also supplies a specific FinalState to be used. VetoedFinalState(const FinalState& fsp, const VetoDetails& vetocodes) : _vetoCodes(vetocodes) { setName("VetoedFinalState"); declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(VetoedFinalState); //@} /// Get the list of particle IDs and \f$ p_T \f$ ranges to veto. const VetoDetails& vetoDetails() const { return _vetoCodes; } /// Add a particle ID and \f$ p_T \f$ range to veto. Particles with \f$ p_T \f$ /// IN the given range will be rejected. VetoedFinalState& addVetoDetail(const long id, const double ptmin, const double ptmax) { BinaryCut ptrange(ptmin, ptmax); _vetoCodes.insert(make_pair(id, ptrange)); return *this; } /// Add a particle/antiparticle pair to veto in a given \f$ p_T \f$ range. Given a single ID, both /// the particle and its conjugate antiparticle will be rejected if their \f$ p_T \f$ is IN the given range. VetoedFinalState& addVetoPairDetail(const long id, const double ptmin, const double ptmax) { addVetoDetail(id, ptmin, ptmax); addVetoDetail(-id, ptmin, ptmax); return *this; } /// Add a particle/antiparticle pair to veto. Given a single ID, both the particle and its corresponding /// antiparticle (for all \f$ p_T \f$ values) will be vetoed. VetoedFinalState& addVetoPairId(const long id) { addVetoId(id); addVetoId(-id); return *this; } /// Add a particle ID to veto (all \f$ p_T \f$ range will be vetoed). 
VetoedFinalState& addVetoId(const long id) { BinaryCut ptrange(0.0, std::numeric_limits::max()); _vetoCodes.insert(make_pair(id, ptrange)); return *this; } /// Veto all neutrinos (convenience method) VetoedFinalState& vetoNeutrinos() { addVetoPairId(PID::NU_E); addVetoPairId(PID::NU_MU); addVetoPairId(PID::NU_TAU); return *this; } /// Add a veto on composite masses within a given width. /// The composite mass is composed of nProducts decay products /// @ todo might we want to specify a range of pdg ids for the decay products? VetoedFinalState& addCompositeMassVeto(const double &mass, const double &width, int nProducts=2){ double halfWidth = 0.5*width; BinaryCut massRange(mass - halfWidth, mass + halfWidth); _compositeVetoes.insert(make_pair(nProducts, massRange)); _nCompositeDecays.insert(nProducts); return *this; } /// Veto the decay products of particle with pdg id /// @todo Need HepMC to sort themselves out and keep vector bosons from /// the hard vtx in the event record before this will work reliably for all pdg ids VetoedFinalState& addDecayProductsVeto(const long id) { _parentVetoes.insert(id); return *this; } /// Set the list of particle IDs and \f$ p_T \f$ ranges to veto. VetoedFinalState& setVetoDetails(const VetoDetails& ids) { _vetoCodes = ids; return *this; } /// Clear the list of particle IDs and ranges to veto. VetoedFinalState& reset() { _vetoCodes.clear(); return *this; } /// Veto particles from a supplied final state VetoedFinalState& addVetoOnThisFinalState(const ParticleFinder& fs) { const string name = "FS_" + to_str(_vetofsnames.size()); declare(fs, name); _vetofsnames.insert(name); return *this; } /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; private: /// The final-state particles. 
VetoDetails _vetoCodes; /// Composite particle masses to veto CompositeVeto _compositeVetoes; set _nCompositeDecays; typedef set ParentVetos; /// Set of decaying particle IDs to veto ParentVetos _parentVetoes; /// Set of finalstate to be vetoed set _vetofsnames; }; } #endif diff --git a/include/Rivet/Projections/VisibleFinalState.hh b/include/Rivet/Projections/VisibleFinalState.hh --- a/include/Rivet/Projections/VisibleFinalState.hh +++ b/include/Rivet/Projections/VisibleFinalState.hh @@ -1,55 +1,53 @@ // -*- C++ -*- #ifndef RIVET_VisibleFinalState_HH #define RIVET_VisibleFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Final state modifier excluding particles which are not experimentally visible class VisibleFinalState : public FinalState { public: /// @name Constructors //@{ /// Constructor with min and max pseudorapidity \f$ \eta \f$ and min \f$ p_T \f$ (in GeV). - VisibleFinalState(double mineta = -DBL_MAX, - double maxeta = DBL_MAX, - double minpt = 0.0*GeV) + VisibleFinalState(const Cut& c=Cuts::open()) { setName("VisibleFinalState"); - declare(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(c), "FS"); } /// Constructor with specific FinalState. VisibleFinalState(const FinalState& fsp) { setName("VisibleFinalState"); declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(VisibleFinalState); //@} /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. 
CmpState compare(const Projection& p) const; }; } #endif diff --git a/src/Projections/ChargedFinalState.cc b/src/Projections/ChargedFinalState.cc --- a/src/Projections/ChargedFinalState.cc +++ b/src/Projections/ChargedFinalState.cc @@ -1,48 +1,43 @@ // -*- C++ -*- #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { ChargedFinalState::ChargedFinalState(const FinalState& fsp) { setName("ChargedFinalState"); declare(fsp, "FS"); } ChargedFinalState::ChargedFinalState(const Cut& c) { setName("ChargedFinalState"); declare(FinalState(c), "FS"); } - ChargedFinalState::ChargedFinalState(double mineta, double maxeta, double minpt) { - setName("ChargedFinalState"); - declare(FinalState(mineta, maxeta, minpt), "FS"); - } - CmpState ChargedFinalState::compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); } } namespace { inline bool chargedParticleFilter(const Rivet::Particle& p) { return Rivet::PID::charge3(p.pid()) == 0; } } namespace Rivet { void ChargedFinalState::project(const Event& e) { const FinalState& fs = applyProjection(e, "FS"); _theParticles.clear(); std::remove_copy_if(fs.particles().begin(), fs.particles().end(), std::back_inserter(_theParticles), chargedParticleFilter); MSG_DEBUG("Number of charged final-state particles = " << _theParticles.size()); if (getLog().isActive(Log::TRACE)) { for (vector::iterator p = _theParticles.begin(); p != _theParticles.end(); ++p) { MSG_TRACE("Selected: " << p->pid() << ", charge = " << PID::charge3(p->pid())/3.0); } } } } diff --git a/src/Projections/FinalState.cc b/src/Projections/FinalState.cc --- a/src/Projections/FinalState.cc +++ b/src/Projections/FinalState.cc @@ -1,101 +1,81 @@ // -*- C++ -*- #include "Rivet/Projections/FinalState.hh" namespace Rivet { FinalState::FinalState(const Cut& c) : ParticleFinder(c) { setName("FinalState"); const bool isopen = (c == Cuts::open()); MSG_TRACE("Check for open FS conditions: " << std::boolalpha << isopen); if (!isopen) declare(FinalState(), "OpenFS"); } 
FinalState::FinalState(const FinalState& fsp, const Cut& c) : ParticleFinder(c) { setName("FinalState"); MSG_TRACE("Registering base FSP as 'PrevFS'"); declare(fsp, "PrevFS"); } - /// @deprecated, keep for backwards compatibility for now. - FinalState::FinalState(double mineta, double maxeta, double minpt) { - setName("FinalState"); - const bool openpt = isZero(minpt); - const bool openeta = (mineta <= -DBL_MAX && maxeta >= DBL_MAX); - MSG_TRACE("Check for open FS conditions:" << std::boolalpha << " eta=" << openeta << ", pt=" << openpt); - if (openpt && openeta) { - _cuts = Cuts::open(); - } else { - declare(FinalState(), "OpenFS"); - if (openeta) - _cuts = (Cuts::pT >= minpt); - else if ( openpt ) - _cuts = Cuts::etaIn(mineta, maxeta); - else - _cuts = (Cuts::etaIn(mineta, maxeta) && Cuts::pT >= minpt); - } - } - - CmpState FinalState::compare(const Projection& p) const { const FinalState& other = dynamic_cast(p); // First check if there is a PrevFS and it it matches if (hasProjection("PrevFS") != other.hasProjection("PrevFS")) return CmpState::UNDEF; if (hasProjection("PrevFS")) { const PCmp prevcmp = mkPCmp(other, "PrevFS"); if (prevcmp != CmpState::EQ) return prevcmp; } // Then check the extra cuts const bool cutcmp = _cuts == other._cuts; MSG_TRACE(_cuts << " VS " << other._cuts << " -> EQ == " << std::boolalpha << cutcmp); if (!cutcmp) return CmpState::UNDEF; // Checks all passed: these FSes are equivalent return CmpState::EQ; } void FinalState::project(const Event& e) { _theParticles.clear(); // Handle "open FS" special case, which should not/cannot recurse if (_cuts == Cuts::OPEN) { MSG_TRACE("Open FS processing: should only see this once per event (" << e.genEvent()->event_number() << ")"); for (const GenParticle* p : Rivet::particles(e.genEvent())) { if (p->status() == 1) { MSG_TRACE("FS GV = " << p->production_vertex()); _theParticles.push_back(Particle(*p)); } } return; } // Base the calculation on PrevFS if available, otherwise OpenFS /// @todo In 
general, we'd like to calculate a restrictive FS based on the most restricted superset FS. const Particles& allstable = applyProjection(e, (hasProjection("PrevFS") ? "PrevFS" : "OpenFS")).particles(); for (const Particle& p : allstable) { const bool passed = accept(p); MSG_TRACE("Choosing: ID = " << p.pid() << ", pT = " << p.pT()/GeV << " GeV" << ", eta = " << p.eta() << ": result = " << std::boolalpha << passed); if (passed) _theParticles.push_back(p); } MSG_TRACE("Number of final-state particles = " << _theParticles.size()); } /// Decide if a particle is to be accepted or not. bool FinalState::accept(const Particle& p) const { // Not having status == 1 should never happen! assert(p.genParticle() == NULL || p.genParticle()->status() == 1); return _cuts->accept(p); } } diff --git a/src/Projections/TriggerUA5.cc b/src/Projections/TriggerUA5.cc --- a/src/Projections/TriggerUA5.cc +++ b/src/Projections/TriggerUA5.cc @@ -1,52 +1,52 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerUA5.hh" namespace Rivet { TriggerUA5::TriggerUA5() { setName("TriggerUA5"); declare(Beam(), "Beam"); - declare(ChargedFinalState(-5.6, 5.6), "CFS"); + declare(ChargedFinalState(Cuts::etaIn(-5.6, 5.6)), "CFS"); } void TriggerUA5::project(const Event& evt) { _n_plus = 0; _n_minus = 0; // Start with the assumption that the trigger fails _decision_sd = false; _decision_nsd_1 = false; _decision_nsd_2 = false; // Triggers can be different for pp and ppbar running const Beam& b = applyProjection(evt, "Beam"); _samebeams = (b.beams().first.pid() == b.beams().second.pid()); // Count hodoscope hits const ChargedFinalState& cfs = applyProjection(evt, "CFS"); for (const Particle& p : cfs.particles()) { if (inRange(p.eta(), -5.6, -2.0)) _n_minus++; else if (inRange(p.eta(), 2.0, 5.6)) _n_plus++; } MSG_DEBUG("Trigger -: " << _n_minus << ", 
Trigger +: " << _n_plus); // Common SD/NSD trigger requirement: must activate at least one hodoscope if (_n_minus == 0 && _n_plus == 0) return; _decision_sd = true; // Extra NSD trigger requirements if (_n_minus == 0 || _n_plus == 0) return; _decision_nsd_1 = true; if (_n_minus < 2 || _n_plus < 2) return; _decision_nsd_2 = true; } }