diff --git a/analyses/pluginALICE/ALICE_2012_I1116147.cc b/analyses/pluginALICE/ALICE_2012_I1116147.cc
--- a/analyses/pluginALICE/ALICE_2012_I1116147.cc
+++ b/analyses/pluginALICE/ALICE_2012_I1116147.cc
@@ -1,86 +1,86 @@
 //-*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/UnstableFinalState.hh"

 namespace Rivet {

   class ALICE_2012_I1116147 : public Analysis {
   public:

     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2012_I1116147);

     /// Initialise projections and histograms
     void init() {

       const UnstableFinalState ufs(Cuts::absrap < RAPMAX);
-      addProjection(ufs, "UFS");
+      declare(ufs, "UFS");

       // Check if cm energy is 7 TeV or 0.9 TeV
       if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) _cm_energy_case = 1;
       else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) _cm_energy_case = 2;
       if (_cm_energy_case == 0)
         throw UserError("Center of mass energy of the given input is neither 900 nor 7000 GeV.");

       // Book histos
       if (_cm_energy_case == 1) {
         book(_h_pi0, 2,1,1);
       } else {
         book(_h_pi0, 1,1,1);
         book(_h_eta, 3,1,1);
         book(_h_etaToPion, 4,1,1);
       }

       // Temporary plots with the binning of _h_etaToPion to construct the eta/pi0 ratio
       book(_temp_h_pion, "TMP/h_pion", refData(4,1,1));
       book(_temp_h_eta , "TMP/h_eta", refData(4,1,1));
     }

     /// Per-event analysis
     void analyze(const Event& event) {

       const FinalState& ufs = apply<UnstableFinalState>(event, "UFS");
       for (const Particle& p : ufs.particles()) {

         const double normfactor = TWOPI*p.pT()/GeV*2*RAPMAX;
         if (p.pid() == 111) {
           // Neutral pion; ALICE corrects for pi0 feed-down from K_0_s and Lambda
           if (p.hasAncestor(310) || p.hasAncestor(3122) || p.hasAncestor(-3122)) continue; //< K_0_s, Lambda, Anti-Lambda
           _h_pi0->fill(p.pT()/GeV, 1.0/normfactor);
           _temp_h_pion->fill(p.pT()/GeV);
         } else if (p.pid() == 221 && _cm_energy_case == 2) {
           // eta meson (only for 7 TeV)
           _h_eta->fill(p.pT()/GeV, 1.0/normfactor);
           _temp_h_eta->fill(p.pT()/GeV);
         }
       }
     }

     /// Normalize histos and construct ratio
     void finalize() {

       scale(_h_pi0, crossSection()/microbarn/sumOfWeights());
       if (_cm_energy_case == 2) {
         divide(_temp_h_eta, _temp_h_pion, _h_etaToPion);
         scale(_h_eta, crossSection()/microbarn/sumOfWeights());
       }
     }

   private:

     const double RAPMAX = 0.8;
     int _cm_energy_case = 0;

     Histo1DPtr _h_pi0, _h_eta;
     Histo1DPtr _temp_h_pion, _temp_h_eta;
     Scatter2DPtr _h_etaToPion;

   };

   DECLARE_RIVET_PLUGIN(ALICE_2012_I1116147);

 }


diff --git a/analyses/pluginALICE/ALICE_2017_I1512110.cc b/analyses/pluginALICE/ALICE_2017_I1512110.cc
--- a/analyses/pluginALICE/ALICE_2017_I1512110.cc
+++ b/analyses/pluginALICE/ALICE_2017_I1512110.cc
@@ -1,87 +1,87 @@
 //-*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/UnstableFinalState.hh"

 namespace Rivet {

   class ALICE_2017_I1512110 : public Analysis {
   public:

     /// Constructor
     ALICE_2017_I1512110()
       : Analysis("ALICE_2017_I1512110"),
         _rapmax(0.8)
     { }

     void init() {

       const UnstableFinalState ufs(Cuts::absrap < _rapmax);
-      addProjection(ufs, "UFS");
+      declare(ufs, "UFS");

       book(_h_pi0, 3,1,1);
       book(_h_eta, 4,1,1);
       book(_h_etaToPion, 5,1,1);

       // temporary plots with the binning of _h_etaToPion
       // to construct the eta/pi0 ratio in the end
       book(_temp_h_pion, "TMP/h_pion",refData(5,1,1));
       book(_temp_h_eta , "TMP/h_eta", refData(5,1,1));
     }

     void analyze(const Event& event) {

       const UnstableFinalState& ufs = applyProjection<UnstableFinalState>(event, "UFS");

       for (const Particle& p : ufs.particles()) {

         if (p.pid() == 111) {
           // neutral pion; ALICE corrects for pi0 feed-down
           if ( !(p.hasAncestor(310)  || p.hasAncestor(130)  ||  // K0_s, K0_l
                  p.hasAncestor(321)  || p.hasAncestor(-321) ||  // K+,K-
                  p.hasAncestor(3122) ||
p.hasAncestor(-3122) || // Lambda, Anti-Lambda p.hasAncestor(3212) || p.hasAncestor(-3212) || // Sigma0 p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigmas p.hasAncestor(3112) || p.hasAncestor(-3112) || // Sigmas p.hasAncestor(3322) || p.hasAncestor(-3322) || // Cascades p.hasAncestor(3312) || p.hasAncestor(-3312) )) // Cascades { _h_pi0->fill(p.pT()/GeV, 1.0 /(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_pion->fill(p.pT()/GeV); } } else if (p.pid() == 221){ // eta meson _h_eta->fill(p.pT()/GeV, 1.0 /(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_eta->fill(p.pT()/GeV); } } } void finalize() { scale(_h_pi0, crossSection()/picobarn/sumOfWeights()); scale(_h_eta, crossSection()/picobarn/sumOfWeights()); divide(_temp_h_eta, _temp_h_pion, _h_etaToPion); } private: double _rapmax; Histo1DPtr _h_pi0; Histo1DPtr _h_eta; Histo1DPtr _temp_h_pion; Histo1DPtr _temp_h_eta; Scatter2DPtr _h_etaToPion; }; DECLARE_RIVET_PLUGIN(ALICE_2017_I1512110); } diff --git a/analyses/pluginALICE/ALICE_2017_I1620477.cc b/analyses/pluginALICE/ALICE_2017_I1620477.cc --- a/analyses/pluginALICE/ALICE_2017_I1620477.cc +++ b/analyses/pluginALICE/ALICE_2017_I1620477.cc @@ -1,89 +1,89 @@ //-*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Tools/ParticleUtils.hh" namespace Rivet { class ALICE_2017_I1620477 : public Analysis { public: /// Constructor ALICE_2017_I1620477() : Analysis("ALICE_2017_I1620477"), _rapmax(0.8) { } void init() { const UnstableFinalState ufs(Cuts::absrap < _rapmax); - addProjection(ufs, "UFS"); + declare(ufs, "UFS"); book(_h_pi0,1,1,1); book(_h_eta,2,1,1); book(_h_etaToPion,8,1,1); // temporary plots with the binning of _h_etaToPion // to construct the eta/pi0 ratio in the end book(_temp_h_pion,"TMP/h_pion",refData(8,1,1)); book(_temp_h_eta, "TMP/h_eta", refData(8,1,1)); } void analyze(const Event& event) { const UnstableFinalState& ufs = applyProjection(event, "UFS"); for(auto p: ufs.particles()) { if (p.pid() == 111) { // neutral pion; ALICE corrects for pi0 feed-down if ( !(p.hasAncestor(310) || p.hasAncestor(130) || // K0_s, K0_l p.hasAncestor(321) || p.hasAncestor(-321) || // K+,K- p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda, Anti-Lambda p.hasAncestor(3212) || p.hasAncestor(-3212) || // Sigma0 p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigmas p.hasAncestor(3112) || p.hasAncestor(-3112) || // Sigmas p.hasAncestor(3322) || p.hasAncestor(-3322) || // Cascades p.hasAncestor(3312) || p.hasAncestor(-3312) )) // Cascades { _h_pi0->fill(p.pT()/GeV, 1. /(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_pion->fill(p.pT()/GeV); } } else if (p.pid() == 221) { // eta meson _h_eta->fill(p.pT()/GeV, 1. 
/(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_eta->fill(p.pT()/GeV); } } } void finalize() { scale(_h_pi0, crossSection()/picobarn/sumOfWeights()); scale(_h_eta, crossSection()/picobarn/sumOfWeights()); divide(_temp_h_eta, _temp_h_pion, _h_etaToPion); } private: double _rapmax; Histo1DPtr _h_pi0; Histo1DPtr _h_eta; Histo1DPtr _temp_h_pion; Histo1DPtr _temp_h_eta; Scatter2DPtr _h_etaToPion; }; DECLARE_RIVET_PLUGIN(ALICE_2017_I1620477); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc @@ -1,371 +1,371 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/InvMassFinalState.hh" namespace Rivet { /// Generic Z candidate struct Zstate : public ParticlePair { Zstate() { } Zstate(ParticlePair _particlepair) : ParticlePair(_particlepair) { } FourMomentum mom() const { return first.momentum() + second.momentum(); } operator FourMomentum() const { return mom(); } static bool cmppT(const Zstate& lx, const Zstate& rx) { return lx.mom().pT() < rx.mom().pT(); } }; /// @name ZZ analysis class ATLAS_2012_I1203852 : public Analysis { public: /// Default constructor ATLAS_2012_I1203852() : Analysis("ATLAS_2012_I1203852") { } void init() { // NB Missing ET is not required to be neutrinos FinalState fs(-5.0, 5.0, 0.0*GeV); // Final states to form Z bosons vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON)); vids.push_back(make_pair(PID::MUON, PID::ANTIMUON)); IdentifiedFinalState Photon(fs); Photon.acceptIdPair(PID::PHOTON); IdentifiedFinalState bare_EL(fs); bare_EL.acceptIdPair(PID::ELECTRON); IdentifiedFinalState bare_MU(fs); bare_MU.acceptIdPair(PID::MUON); // Selection 1: ZZ-> llll selection Cut etaranges_lep = Cuts::abseta < 3.16 && Cuts::pT > 7*GeV; DressedLeptons electron_sel4l(Photon, bare_EL, 0.1, etaranges_lep); declare(electron_sel4l, "ELECTRON_sel4l"); DressedLeptons muon_sel4l(Photon, bare_MU, 0.1, etaranges_lep); declare(muon_sel4l, "MUON_sel4l"); // Selection 2: ZZ-> llnunu selection Cut etaranges_lep2 = Cuts::abseta < 2.5 && Cuts::pT > 10*GeV; DressedLeptons electron_sel2l2nu(Photon, bare_EL, 0.1, etaranges_lep2); declare(electron_sel2l2nu, "ELECTRON_sel2l2nu"); DressedLeptons muon_sel2l2nu(Photon, bare_MU, 0.1, etaranges_lep2); declare(muon_sel2l2nu, "MUON_sel2l2nu"); /// Get all neutrinos. These will not be used to form jets. 
IdentifiedFinalState neutrino_fs(Cuts::abseta < 4.5); neutrino_fs.acceptNeutrinos(); declare(neutrino_fs, "NEUTRINO_FS"); // Calculate missing ET from the visible final state, not by requiring neutrinos - addProjection(MissingMomentum(Cuts::abseta < 4.5), "MISSING"); + declare(MissingMomentum(Cuts::abseta < 4.5), "MISSING"); VetoedFinalState jetinput; jetinput.addVetoOnThisFinalState(bare_MU); jetinput.addVetoOnThisFinalState(neutrino_fs); FastJets jetpro(fs, FastJets::ANTIKT, 0.4); declare(jetpro, "jet"); // Both ZZ on-shell histos book(_h_ZZ_xsect ,1, 1, 1); book(_h_ZZ_ZpT ,3, 1, 1); book(_h_ZZ_phill ,5, 1, 1); book(_h_ZZ_mZZ ,7, 1, 1); // One Z off-shell (ZZstar) histos book(_h_ZZs_xsect ,1, 1, 2); // ZZ -> llnunu histos book(_h_ZZnunu_xsect ,1, 1, 3); book(_h_ZZnunu_ZpT ,4, 1, 1); book(_h_ZZnunu_phill ,6, 1, 1); book(_h_ZZnunu_mZZ ,8, 1, 1); } /// Do the analysis void analyze(const Event& e) { //////////////////////////////////////////////////////////////////// // preselection of leptons for ZZ-> llll final state //////////////////////////////////////////////////////////////////// Particles leptons_sel4l; const vector& mu_sel4l = apply(e, "MUON_sel4l").dressedLeptons(); const vector& el_sel4l = apply(e, "ELECTRON_sel4l").dressedLeptons(); vector leptonsFS_sel4l; leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), mu_sel4l.begin(), mu_sel4l.end() ); leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), el_sel4l.begin(), el_sel4l.end() ); //////////////////////////////////////////////////////////////////// // OVERLAP removal dR(l,l)>0.2 //////////////////////////////////////////////////////////////////// for ( const DressedLepton& l1 : leptonsFS_sel4l) { bool isolated = true; for (DressedLepton& l2 : leptonsFS_sel4l) { const double dR = deltaR(l1, l2); if (dR < 0.2 && l1 != l2) { isolated = false; break; } } if (isolated) leptons_sel4l.push_back(l1); } ////////////////////////////////////////////////////////////////// // Exactly two opposite charged leptons ////////////////////////////////////////////////////////////////// // calculate total 'flavour' charge double totalcharge = 0; for (Particle& l : leptons_sel4l) totalcharge += l.pid(); // Analyze 4 lepton events if (leptons_sel4l.size() == 4 && totalcharge == 0 ) { Zstate Z1, Z2; // Identifies Z states from 4 lepton pairs identifyZstates(Z1, Z2,leptons_sel4l); //////////////////////////////////////////////////////////////////////////// // Z MASS WINDOW // -ZZ: for both Z: 6620 GeV /////////////////////////////////////////////////////////////////////////// Zstate leadPtZ = std::max(Z1, Z2, Zstate::cmppT); double mZ1 = Z1.mom().mass(); double mZ2 = Z2.mom().mass(); double ZpT = leadPtZ.mom().pT(); double phill = fabs(deltaPhi(leadPtZ.first, leadPtZ.second)); if (phill > M_PI) phill = 2*M_PI-phill; double mZZ = (Z1.mom() + Z2.mom()).mass(); if (mZ1 > 20*GeV && mZ2 > 20*GeV) { // ZZ* selection if (inRange(mZ1, 66*GeV, 116*GeV) || inRange(mZ2, 66*GeV, 116*GeV)) { _h_ZZs_xsect -> fill(sqrtS()*GeV); } // ZZ selection if (inRange(mZ1, 66*GeV, 116*GeV) && inRange(mZ2, 66*GeV, 116*GeV)) { _h_ZZ_xsect -> fill(sqrtS()*GeV); _h_ZZ_ZpT -> fill(ZpT); _h_ZZ_phill -> fill(phill); _h_ZZ_mZZ -> fill(mZZ); } } } //////////////////////////////////////////////////////////////////// /// preselection of leptons for ZZ-> llnunu final state //////////////////////////////////////////////////////////////////// Particles leptons_sel2l2nu; // output const vector& mu_sel2l2nu = apply(e, "MUON_sel2l2nu").dressedLeptons(); const vector& el_sel2l2nu = apply(e, 
"ELECTRON_sel2l2nu").dressedLeptons(); vector leptonsFS_sel2l2nu; leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), mu_sel2l2nu.begin(), mu_sel2l2nu.end() ); leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), el_sel2l2nu.begin(), el_sel2l2nu.end() ); // Lepton preselection for ZZ-> llnunu if ((mu_sel2l2nu.empty() || el_sel2l2nu.empty()) // cannot have opposite flavour && (leptonsFS_sel2l2nu.size() == 2) // exactly two leptons && (leptonsFS_sel2l2nu[0].charge() * leptonsFS_sel2l2nu[1].charge() < 1 ) // opposite charge && (deltaR(leptonsFS_sel2l2nu[0], leptonsFS_sel2l2nu[1]) > 0.3) // overlap removal && (leptonsFS_sel2l2nu[0].pT() > 20*GeV && leptonsFS_sel2l2nu[1].pT() > 20*GeV)) { // trigger requirement leptons_sel2l2nu.insert(leptons_sel2l2nu.end(), leptonsFS_sel2l2nu.begin(), leptonsFS_sel2l2nu.end()); } if (leptons_sel2l2nu.empty()) vetoEvent; // no further analysis, fine to veto Particles leptons_sel2l2nu_jetveto; for (const DressedLepton& l : mu_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton()); for (const DressedLepton& l : el_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton()); double ptll = (leptons_sel2l2nu[0].momentum() + leptons_sel2l2nu[1].momentum()).pT(); // Find Z1-> ll FinalState fs2(-3.2, 3.2); InvMassFinalState imfs(fs2, vids, 20*GeV, sqrtS()); imfs.calc(leptons_sel2l2nu); if (imfs.particlePairs().size() != 1) vetoEvent; const ParticlePair& Z1constituents = imfs.particlePairs()[0]; FourMomentum Z1 = Z1constituents.first.momentum() + Z1constituents.second.momentum(); // Z to neutrinos candidate from missing ET const MissingMomentum & missmom = applyProjection(e, "MISSING"); const FourMomentum Z2 = missmom.missingMomentum(ZMASS); double met_Znunu = missmom.missingEt(); //Z2.pT(); // mTZZ const double mT2_1st_term = add_quad(ZMASS, ptll) + add_quad(ZMASS, met_Znunu); const double mT2_2nd_term = Z1.px() + Z2.px(); const double mT2_3rd_term = Z1.py() + Z2.py(); const double mTZZ = sqrt(sqr(mT2_1st_term) - sqr(mT2_2nd_term) - sqr(mT2_3rd_term)); if (!inRange(Z2.mass(), 66*GeV, 116*GeV)) vetoEvent; if (!inRange(Z1.mass(), 76*GeV, 106*GeV)) vetoEvent; ///////////////////////////////////////////////////////////// // AXIAL MET < 75 GeV //////////////////////////////////////////////////////////// double dPhiZ1Z2 = fabs(deltaPhi(Z1, Z2)); if (dPhiZ1Z2 > M_PI) dPhiZ1Z2 = 2*M_PI - dPhiZ1Z2; const double axialEtmiss = -Z2.pT()*cos(dPhiZ1Z2); if (axialEtmiss < 75*GeV) vetoEvent; const double ZpT = Z1.pT(); double phill = fabs(deltaPhi(Z1constituents.first, Z1constituents.second)); if (phill > M_PI) phill = 2*M_PI - phill; //////////////////////////////////////////////////////////////////////////// // JETS // -"j": found by "jetpro" projection && pT() > 25 GeV && |eta| < 4.5 // -"goodjets": "j" && dR(electron/muon,jet) > 0.3 // // JETVETO: veto all events with at least one good jet /////////////////////////////////////////////////////////////////////////// vector good_jets; for (const Jet& j : apply(e, "jet").jetsByPt(25)) { if (j.abseta() > 4.5) continue; bool isLepton = 0; for (const Particle& l : leptons_sel2l2nu_jetveto) { const double dR = deltaR(l.momentum(), j.momentum()); if (dR < 0.3) { isLepton = true; break; } } if (!isLepton) good_jets.push_back(j); } size_t n_sel_jets = good_jets.size(); if (n_sel_jets != 0) vetoEvent; ///////////////////////////////////////////////////////////// // Fractional MET and lepton pair difference: "RatioMet"< 0.4 //////////////////////////////////////////////////////////// double ratioMet = fabs(Z2.pT() - 
Z1.pT()) / Z1.pT(); if (ratioMet > 0.4 ) vetoEvent; // End of ZZllnunu selection: now fill histograms _h_ZZnunu_xsect->fill(sqrtS()/GeV); _h_ZZnunu_ZpT ->fill(ZpT); _h_ZZnunu_phill->fill(phill); _h_ZZnunu_mZZ ->fill(mTZZ); } /// Finalize void finalize() { const double norm = crossSection()/sumOfWeights()/femtobarn; scale(_h_ZZ_xsect, norm); normalize(_h_ZZ_ZpT); normalize(_h_ZZ_phill); normalize(_h_ZZ_mZZ); scale(_h_ZZs_xsect, norm); scale(_h_ZZnunu_xsect, norm); normalize(_h_ZZnunu_ZpT); normalize(_h_ZZnunu_phill); normalize(_h_ZZnunu_mZZ); } private: void identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l); Histo1DPtr _h_ZZ_xsect, _h_ZZ_ZpT, _h_ZZ_phill, _h_ZZ_mZZ; Histo1DPtr _h_ZZs_xsect; Histo1DPtr _h_ZZnunu_xsect, _h_ZZnunu_ZpT, _h_ZZnunu_phill, _h_ZZnunu_mZZ; vector< pair > vids; const double ZMASS = 91.1876; // GeV }; /// 4l to ZZ assignment -- algorithm void ATLAS_2012_I1203852::identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l) { ///////////////////////////////////////////////////////////////////////////// /// ZZ->4l pairing /// - Exactly two same flavour opposite charged leptons /// - Ambiguities in pairing are resolved by choosing the combination /// that results in the smaller value of the sum |mll - mZ| for the two pairs ///////////////////////////////////////////////////////////////////////////// Particles part_pos_el, part_neg_el, part_pos_mu, part_neg_mu; for (const Particle& l : leptons_sel4l) { if (l.abspid() == PID::ELECTRON) { if (l.pid() < 0) part_neg_el.push_back(l); if (l.pid() > 0) part_pos_el.push_back(l); } else if (l.abspid() == PID::MUON) { if (l.pid() < 0) part_neg_mu.push_back(l); if (l.pid() > 0) part_pos_mu.push_back(l); } } // ee/mm channel if ( part_neg_el.size() == 2 || part_neg_mu.size() == 2) { Zstate Zcand_1, Zcand_2, Zcand_3, Zcand_4; if (part_neg_el.size() == 2) { // ee Zcand_1 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[0] ) ); Zcand_2 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[1] ) ); Zcand_3 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[0] ) ); Zcand_4 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[1] ) ); } else { // mumu Zcand_1 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[0] ) ); Zcand_2 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[1] ) ); Zcand_3 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[0] ) ); Zcand_4 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[1] ) ); } // We can have the following pairs: (Z1 + Z4) || (Z2 + Z3) double minValue_1, minValue_2; minValue_1 = fabs( Zcand_1.mom().mass() - ZMASS ) + fabs( Zcand_4.mom().mass() - ZMASS); minValue_2 = fabs( Zcand_2.mom().mass() - ZMASS ) + fabs( Zcand_3.mom().mass() - ZMASS); if (minValue_1 < minValue_2 ) { Z1 = Zcand_1; Z2 = Zcand_4; } else { Z1 = Zcand_2; Z2 = Zcand_3; } // emu channel } else if (part_neg_mu.size() == 1 && part_neg_el.size() == 1) { Z1 = Zstate ( ParticlePair (part_neg_mu[0], part_pos_mu[0] ) ); Z2 = Zstate ( ParticlePair (part_neg_el[0], part_pos_el[0] ) ); } } // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1203852); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1310835.cc b/analyses/pluginATLAS/ATLAS_2014_I1310835.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1310835.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1310835.cc @@ -1,267 +1,267 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" 
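// Editor's note: a minimal, illustrative sketch (not part of this patch) of the projection
// idiom this changeset migrates the analyses to: declare(...) in init() replaces the
// deprecated addProjection(...), and apply<Proj>(event, "NAME") retrieves the registered
// projection in analyze(). The class name, projection label and histogram path below are
// placeholders, and the block is kept disabled via #if 0.
#if 0
class EXAMPLE_PROJECTION_IDIOM : public Analysis {
public:
  DEFAULT_RIVET_ANALYSIS_CTOR(EXAMPLE_PROJECTION_IDIOM);
  void init() {
    declare(FinalState(Cuts::abseta < 2.5 && Cuts::pT > 0.5*GeV), "FS");  // was: addProjection(fs, "FS")
    book(_h_pt, "example_pt", 50, 0.0, 100.0);                            // inline booking, no ref data
  }
  void analyze(const Event& event) {
    const FinalState& fs = apply<FinalState>(event, "FS");                // was: applyProjection<FinalState>(...)
    for (const Particle& p : fs.particles()) _h_pt->fill(p.pT()/GeV);
  }
  void finalize() { scale(_h_pt, crossSection()/microbarn/sumOfWeights()); }
private:
  Histo1DPtr _h_pt;
};
DECLARE_RIVET_PLUGIN(EXAMPLE_PROJECTION_IDIOM);
#endif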
namespace Rivet { /// @brief H(125)->ZZ->4l at 8 TeV class ATLAS_2014_I1310835 : public Analysis { public: /// Default constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2014_I1310835); void init() { const FinalState fs(Cuts::abseta < 5.0); PromptFinalState photons(Cuts::abspid == PID::PHOTON); PromptFinalState bare_el(Cuts::abspid == PID::ELECTRON); PromptFinalState bare_mu(Cuts::abspid == PID::MUON); // Selection: lepton selection Cut etaranges_el = Cuts::abseta < 2.47 && Cuts::pT > 7*GeV; DressedLeptons electron_sel4l(photons, bare_el, 0.1, etaranges_el, false); - addProjection(electron_sel4l, "electrons"); + declare(electron_sel4l, "electrons"); Cut etaranges_mu = Cuts::abseta < 2.7 && Cuts::pT > 6*GeV; DressedLeptons muon_sel4l(photons, bare_mu, 0.1, etaranges_mu, false); - addProjection(muon_sel4l, "muons"); + declare(muon_sel4l, "muons"); FastJets jetpro(fs, FastJets::ANTIKT, 0.4, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); - addProjection(jetpro, "jet"); + declare(jetpro, "jet"); // Book histos book(_h_pt , 1, 1, 1); book(_h_rapidity , 2, 1, 1); book(_h_m34 , 3, 1, 1); book(_h_costheta , 4, 1, 1); book(_h_njets , 5, 1, 1); book(_h_leadingjetpt, 6, 1, 1); } /// Do the analysis void analyze(const Event& e) { //////////////////////////////////////////////////////////////////// // preselection of leptons for ZZ-> llll final state //////////////////////////////////////////////////////////////////// const vector& mu_sel4l = applyProjection(e, "muons").dressedLeptons(); const vector& el_sel4l = applyProjection(e, "electrons").dressedLeptons(); vector leptonsFS_sel4l; leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), mu_sel4l.begin(), mu_sel4l.end() ); leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), el_sel4l.begin(), el_sel4l.end() ); ///////////////////////////////////////////////////////////////////////////// /// H->ZZ->4l pairing ///////////////////////////////////////////////////////////////////////////// size_t el_p = 0; size_t el_n = 0; size_t mu_p = 0; size_t mu_n = 0; for (const Particle& l : leptonsFS_sel4l) { if (l.abspid() == PID::ELECTRON) { if (l.pid() < 0) ++el_n; if (l.pid() > 0) ++el_p; } else if (l.abspid() == PID::MUON) { if (l.pid() < 0) ++mu_n; if (l.pid() > 0) ++mu_p; } } bool pass_sfos = ( (el_p >=2 && el_n >=2) || (mu_p >=2 && mu_n >=2) || (el_p >=1 && el_n >=1 && mu_p >=1 && mu_n >=1) ); if (!pass_sfos) vetoEvent; Zstate Z1, Z2, Zcand; size_t n_parts = leptonsFS_sel4l.size(); size_t l1_index = 0; size_t l2_index = 0; // determine Z1 first double min_mass_diff = -1; for (size_t i = 0; i < n_parts; ++i) { for (size_t j = 0; j < n_parts; ++j) { if (i >= j) continue; if (leptonsFS_sel4l[i].pid() != -1*leptonsFS_sel4l[j].pid()) continue; //only pair SFOS leptons Zcand = Zstate( ParticlePair(leptonsFS_sel4l[i], leptonsFS_sel4l[j]) ); double mass_diff = fabs( Zcand.mom().mass() - 91.1876 ); if (min_mass_diff == -1 || mass_diff < min_mass_diff) { min_mass_diff = mass_diff; Z1 = Zcand; l1_index = i; l2_index = j; } } } //determine Z2 second min_mass_diff = -1; for (size_t i = 0; i < n_parts; ++i) { if (i == l1_index || i == l2_index) continue; for (size_t j = 0; j < n_parts; ++j) { if (j == l1_index || j == l2_index || i >= j) continue; if (leptonsFS_sel4l[i].pid() != -1*leptonsFS_sel4l[j].pid()) continue; // only pair SFOS leptons Zcand = Zstate( ParticlePair(leptonsFS_sel4l[i], leptonsFS_sel4l[j]) ); double mass_diff = fabs( Zcand.mom().mass() - 91.1876 ); if (min_mass_diff == -1 || mass_diff < min_mass_diff) { min_mass_diff = mass_diff; Z2 = Zcand; } } } Particles 
leptons_sel4l; leptons_sel4l.push_back(Z1.first); leptons_sel4l.push_back(Z1.second); leptons_sel4l.push_back(Z2.first); leptons_sel4l.push_back(Z2.second); //////////////////////////////////////////////////////////////////////////// // Kinematic Requirements /////////////////////////////////////////////////////////////////////////// //leading lepton pT requirement std::vector lepton_pt; for (const Particle& i : leptons_sel4l) lepton_pt.push_back(i.pT() / GeV); std::sort(lepton_pt.begin(), lepton_pt.end(), [](const double pT1, const double pT2) { return pT1 > pT2; }); if (!(lepton_pt[0] > 20*GeV && lepton_pt[1] > 15*GeV && lepton_pt[2] > 10*GeV)) vetoEvent; //invariant mass requirements if (!(inRange(Z1.mom().mass(), 50*GeV, 106*GeV) && inRange(Z2.mom().mass(), 12*GeV, 115*GeV))) vetoEvent; //lepton separation requirements for (unsigned int i = 0; i < 4; ++i) { for (unsigned int j = 0; j < 4; ++j) { if (i >= j) continue; double dR = deltaR(leptons_sel4l[i], leptons_sel4l[j]); bool sameflavor = leptons_sel4l[i].abspid() == leptons_sel4l[j].abspid(); if ( sameflavor && dR < 0.1) vetoEvent; if (!sameflavor && dR < 0.2) vetoEvent; } } // J/Psi veto requirement for (unsigned int i = 0; i < 4; ++i) { for (unsigned int j = 0; j < 4; ++j) { if (i >= j) continue; if ( leptons_sel4l[i].pid() != -1*leptons_sel4l[j].pid() ) continue; if ((leptons_sel4l[i].momentum() + leptons_sel4l[j].momentum()).mass() <= 5*GeV) vetoEvent; } } // 4-lepton invariant mass requirement double m4l = (Z1.mom() + Z2.mom()).mass(); if (!(inRange(m4l, 118*GeV, 129*GeV))) vetoEvent; //////////////////////////////////////////////////////////////////////////// // Higgs observables /////////////////////////////////////////////////////////////////////////// FourMomentum Higgs = Z1.mom() + Z2.mom(); double H4l_pt = Higgs.pt()/GeV; double H4l_rapidity = Higgs.absrap(); LorentzTransform HRF_boost; //HRF_boost.mkFrameTransformFromBeta(Higgs.boostVector()); HRF_boost.setBetaVec(- Higgs.boostVector()); FourMomentum Z1_in_HRF = HRF_boost.transform( Z1.mom() ); double H4l_costheta = fabs(cos( Z1_in_HRF.theta())); double H4l_m34 = Z2.mom().mass()/GeV; //////////////////////////////////////////////////////////////////////////// // Jet observables /////////////////////////////////////////////////////////////////////////// Jets jets; for (const Jet& jet : applyProjection(e, "jet").jetsByPt(Cuts::pT > 30*GeV && Cuts::absrap < 4.4)) { bool overlaps = false; for (const Particle& lep : leptonsFS_sel4l) { if (lep.abspid() != PID::ELECTRON) continue; const double dR = deltaR(lep, jet); if (dR < 0.2) { overlaps = true; break; } } if (!overlaps) jets += jet; } size_t n_jets = jets.size(); if (n_jets > 3) n_jets = 3; std::vector jet_pt; for (const Jet& i : jets) jet_pt.push_back(i.pT()/GeV); double leading_jet_pt = n_jets? 
jet_pt[0] : 0.; //////////////////////////////////////////////////////////////////////////// // End of H->ZZ->llll selection: now fill histograms //////////////////////////////////////////////////////////////////////////// _h_pt->fill(H4l_pt); _h_rapidity->fill(H4l_rapidity); _h_costheta->fill(H4l_costheta); _h_m34->fill(H4l_m34); _h_njets->fill(n_jets + 1); _h_leadingjetpt->fill(leading_jet_pt); } /// Generic Z candidate struct Zstate : public ParticlePair { Zstate() { } Zstate(ParticlePair _particlepair) : ParticlePair(_particlepair) { } FourMomentum mom() const { return first.momentum() + second.momentum(); } operator FourMomentum() const { return mom(); } }; /// Finalize void finalize() { const double norm = crossSection()/sumOfWeights()/femtobarn; std::cout << "xsec: " << crossSection() << '\n'; std::cout << "sumw: " << sumOfWeights() << '\n'; std::cout << "femb: " << femtobarn << '\n'; std::cout << "norm: " << norm << '\n'; scale(_h_pt, norm); scale(_h_rapidity, norm); scale(_h_costheta, norm); scale(_h_m34, norm); scale(_h_njets, norm); scale(_h_leadingjetpt, norm); } private: Histo1DPtr _h_pt, _h_rapidity, _h_costheta; Histo1DPtr _h_m34, _h_njets, _h_leadingjetpt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1310835); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1393758.cc b/analyses/pluginATLAS/ATLAS_2015_I1393758.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1393758.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1393758.cc @@ -1,141 +1,141 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2015_I1393758 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1393758); public: void init() { - addProjection(FastJets(FinalState(), FastJets::ANTIKT, 0.4), "Jets"); + declare(FastJets(FinalState(), FastJets::ANTIKT, 0.4), "Jets"); book(forward_kappa3 ,1, 1, 1); book(forward_kappa5 ,2, 1, 1); book(forward_kappa7 ,3, 1, 1); book(central_kappa3 ,4, 1, 1); book(central_kappa5 ,5, 1, 1); book(central_kappa7 ,6, 1, 1); book(forwardRMS_kappa3, "d07-x01-y01", true); book(forwardRMS_kappa5, "d08-x01-y01", true); book(forwardRMS_kappa7, "d09-x01-y01", true); book(centralRMS_kappa3, "d10-x01-y01", true); book(centralRMS_kappa5, "d11-x01-y01", true); book(centralRMS_kappa7, "d12-x01-y01", true); } /// Perform the per-event analysis void analyze(const Event& event) { Jets m_goodJets = applyProjection(event, "Jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.1); if (m_goodJets.size() < 2) vetoEvent; if (m_goodJets[0].pT() < 50*GeV) vetoEvent; if (m_goodJets[1].pT() < 50*GeV) vetoEvent; if (fabs(1.0 - m_goodJets[0].pT()/m_goodJets[1].pT()) > 0.5) vetoEvent; bool check = m_goodJets[0].abseta() < m_goodJets[1].abseta(); int pos_f = int(check); int pos_c = int(!check); double kappa3_f = CalculateJetCharge(m_goodJets[pos_f], 0.3, 0.5, 1.8); double kappa5_f = CalculateJetCharge(m_goodJets[pos_f], 0.5, 0.5, 1.2); double kappa7_f = CalculateJetCharge(m_goodJets[pos_f], 0.7, 0.5, 0.9); double pT_f = m_goodJets[pos_f].pT(); double kappa3_c = CalculateJetCharge(m_goodJets[pos_c], 0.3, 0.5, 1.8); double kappa5_c = CalculateJetCharge(m_goodJets[pos_c], 0.5, 0.5, 1.2); double kappa7_c = CalculateJetCharge(m_goodJets[pos_c], 0.7, 0.5, 0.9); double pT_c = m_goodJets[pos_c].pT(); forward_kappa3->fill(pT_f, kappa3_f); forward_kappa5->fill(pT_f, kappa5_f); forward_kappa7->fill(pT_f, kappa7_f); central_kappa3->fill(pT_c, kappa3_c); central_kappa5->fill(pT_c, kappa5_c); 
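      // Editor's note on the profile fills in this block: CalculateJetCharge (defined below)
      // computes the pT-weighted jet charge
      //   Q_kappa = (1 / pT_jet^kappa) * sum_i q_i * pT_i^kappa   (constituents with pT_i > pTcut),
      // clamped to the range (-Qmax, Qmax). Each Profile1D therefore accumulates the mean jet
      // charge versus jet pT; finalize() then copies each bin's standard deviation into the
      // corresponding *RMS_* scatter point, with sqrt(sumW2)/(2*sumW) as its uncertainty
      // (the factor of two follows the standard-error note cited there).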
central_kappa7->fill(pT_c, kappa7_c); } double CalculateJetCharge(Jet& jet, double kappa=0.5, double pTcut=0.5, double Qmax=1.2) { double PTkap = pow(jet.momentum().pT(),kappa); double jetcharge = 0.; for (const Particle& p : jet.particles()) { if (p.pT() < pTcut) continue; if (p.threeCharge()) jetcharge += (p.threeCharge()/3.)*pow(p.pT(),kappa)/PTkap; } //Overflow and underflow if (jetcharge > Qmax) jetcharge = Qmax*0.9999; if (jetcharge < -Qmax) jetcharge = -Qmax*0.9999; return jetcharge; } /// Normalise histograms etc., after the run void finalize() { if (numEvents() > 2) { for (unsigned int i = 0; i < forward_kappa3->numBins(); ++i) { double stdv_fkappa3 = forward_kappa3->bin(i).numEntries() > 1? forward_kappa3->bin(i).stdDev() : 0.0; //See Eq. 3 for the factor of two: https://web.eecs.umich.edu/~fessler/papers/files/tr/stderr.pdf double yerr_fkappa3 = safediv(sqrt(forward_kappa3->bin(i).sumW2()), 2.*forward_kappa3->bin(i).sumW()); forwardRMS_kappa3->point(i).setY(stdv_fkappa3, yerr_fkappa3); double stdv_fkappa5 = forward_kappa5->bin(i).numEntries() > 1? forward_kappa5->bin(i).stdDev() : 0.0; double yerr_fkappa5 = safediv(sqrt(forward_kappa5->bin(i).sumW2()), 2.*forward_kappa5->bin(i).sumW()); forwardRMS_kappa5->point(i).setY(stdv_fkappa5, yerr_fkappa5); double stdv_fkappa7 = forward_kappa7->bin(i).numEntries() > 1? forward_kappa7->bin(i).stdDev() : 0.0; double yerr_fkappa7 = safediv(sqrt(forward_kappa7->bin(i).sumW2()), 2.*forward_kappa7->bin(i).sumW()); forwardRMS_kappa7->point(i).setY(stdv_fkappa7, yerr_fkappa7); double stdv_ckappa3 = central_kappa3->bin(i).numEntries() > 1? central_kappa3->bin(i).stdDev() : 0.0; double yerr_ckappa3 = safediv(sqrt(central_kappa3->bin(i).sumW2()), 2.*central_kappa3->bin(i).sumW()); centralRMS_kappa3->point(i).setY(stdv_ckappa3, yerr_ckappa3); double stdv_ckappa5 = central_kappa5->bin(i).numEntries() > 1? central_kappa5->bin(i).stdDev() : 0.0; double yerr_ckappa5 = safediv(sqrt(central_kappa5->bin(i).sumW2()), 2.*central_kappa5->bin(i).sumW()); centralRMS_kappa5->point(i).setY(stdv_ckappa5, yerr_ckappa5); double stdv_ckappa7 = central_kappa7->bin(i).numEntries() > 1? 
central_kappa7->bin(i).stdDev() : 0.0; double yerr_ckappa7 = safediv(sqrt(central_kappa7->bin(i).sumW2()), 2.*central_kappa7->bin(i).sumW()); centralRMS_kappa7->point(i).setY(stdv_ckappa7, yerr_ckappa7); } } } private: Profile1DPtr forward_kappa3; Profile1DPtr forward_kappa5; Profile1DPtr forward_kappa7; Profile1DPtr central_kappa3; Profile1DPtr central_kappa5; Profile1DPtr central_kappa7; Scatter2DPtr forwardRMS_kappa3; Scatter2DPtr forwardRMS_kappa5; Scatter2DPtr forwardRMS_kappa7; Scatter2DPtr centralRMS_kappa3; Scatter2DPtr centralRMS_kappa5; Scatter2DPtr centralRMS_kappa7; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2015_I1393758); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1419070.cc b/analyses/pluginATLAS/ATLAS_2016_I1419070.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1419070.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1419070.cc @@ -1,139 +1,139 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2016_I1419070 : public Analysis { public: /// Constructor ATLAS_2016_I1419070() : Analysis("ATLAS_2016_I1419070") { } public: void init() { - addProjection(FastJets(FinalState(), FastJets::ANTIKT, 0.4), "Jets"); + declare(FastJets(FinalState(), FastJets::ANTIKT, 0.4), "Jets"); book(forward_500MeV ,1, 1, 1); book(forward_2GeV ,2, 1, 1); book(forward_5GeV ,3, 1, 1); book(central_500MeV ,4, 1, 1); book(central_2GeV ,5, 1, 1); book(central_5GeV ,6, 1, 1); book(diff_500MeV, "d07-x01-y01", true); book(diff_2GeV , "d08-x01-y01", true); book(diff_5GeV , "d09-x01-y01", true); book(sum_500MeV, "d10-x01-y01", true); book(sum_2GeV , "d11-x01-y01", true); book(sum_5GeV , "d12-x01-y01", true); } /// Perform the per-event analysis void analyze(const Event& event) { Jets m_goodJets = applyProjection(event, "Jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.1); if (m_goodJets.size() < 2) vetoEvent; if (m_goodJets[0].pT() < 50*GeV) vetoEvent; if (m_goodJets[1].pT() < 50*GeV) vetoEvent; if (fabs(1.0 - m_goodJets[0].pT()/m_goodJets[1].pT()) > 0.5) vetoEvent; bool check = m_goodJets[0].abseta() < m_goodJets[1].abseta(); int pos_f = int(check); int pos_c = int(!check); double pt500MeV_f = CalculateNCharge(m_goodJets[pos_f], 0.5); double pt2GeV_f = CalculateNCharge(m_goodJets[pos_f], 2.0); double pt5GeV_f = CalculateNCharge(m_goodJets[pos_f], 5.0); double pT_f = m_goodJets[pos_f].pT(); double pt500MeV_c = CalculateNCharge(m_goodJets[pos_c], 0.5); double pt2GeV_c = CalculateNCharge(m_goodJets[pos_c], 2.0); double pt5GeV_c = CalculateNCharge(m_goodJets[pos_c], 5.0); double pT_c = m_goodJets[pos_c].pT(); forward_500MeV->fill(pT_f, pt500MeV_f); forward_2GeV->fill( pT_f, pt2GeV_f); forward_5GeV->fill( pT_f, pt5GeV_f); central_500MeV->fill(pT_c, pt500MeV_c); central_2GeV->fill( pT_c, pt2GeV_c); central_5GeV->fill( pT_c, pt5GeV_c); } double CalculateNCharge(Jet& jet, double pTcut=0.5) { unsigned int ncharge = 0; for (const Particle& p : jet.particles()) { if (p.pT() < pTcut) continue; if (p.threeCharge()) ++ncharge; } if (ncharge > 60) ncharge = 60; return double(ncharge); } /// Normalise histograms etc., after the run void finalize() { if (numEvents() > 2) { for (unsigned int i = 0; i < forward_500MeV->numBins(); ++i) { ProfileBin1D bsum = central_500MeV->bin(i) + forward_500MeV->bin(i); ProfileBin1D bsum2 = central_2GeV->bin(i) + forward_2GeV->bin(i); ProfileBin1D bsum5 = central_5GeV->bin(i) + forward_5GeV->bin(i); ProfileBin1D bdiff = central_500MeV->bin(i) - forward_500MeV->bin(i); ProfileBin1D 
bdiff2 = central_2GeV->bin(i) - forward_2GeV->bin(i); ProfileBin1D bdiff5 = central_5GeV->bin(i) - forward_5GeV->bin(i); double ydiff = central_500MeV->bin(i).numEntries()? central_500MeV->bin(i).mean() : 0.0; double ydiff2 = central_2GeV->bin(i).numEntries()? central_2GeV->bin(i).mean() : 0.0; double ydiff5 = central_5GeV->bin(i).numEntries()? central_5GeV->bin(i).mean() : 0.0; ydiff -= forward_500MeV->bin(i).numEntries()? forward_500MeV->bin(i).mean() : 0.0; ydiff2 -= forward_2GeV->bin(i).numEntries()? forward_2GeV->bin(i).mean() : 0.0; ydiff5 -= forward_5GeV->bin(i).numEntries()? forward_5GeV->bin(i).mean() : 0.0; double yerr = bsum.numEntries() > 1.0 ? bsum.stdErr() : 0.0; double yerr2 = bsum2.numEntries() > 1.0 ? bsum2.stdErr() : 0.0; double yerr5 = bsum5.numEntries() > 1.0 ? bsum5.stdErr() : 0.0; diff_500MeV->point(i).setY(ydiff, yerr); diff_2GeV->point(i).setY(ydiff2, yerr2); diff_5GeV->point(i).setY(ydiff5, yerr5); sum_500MeV->point(i).setY(bsum.numEntries()? bsum.mean() : 0.0, yerr); sum_2GeV->point(i).setY(bsum2.numEntries()? bsum2.mean() : 0.0, yerr2); sum_5GeV->point(i).setY(bsum5.numEntries()? bsum5.mean() : 0.0, yerr5); } } } private: Profile1DPtr forward_500MeV; Profile1DPtr forward_2GeV; Profile1DPtr forward_5GeV; Profile1DPtr central_500MeV; Profile1DPtr central_2GeV; Profile1DPtr central_5GeV; Scatter2DPtr sum_500MeV; Scatter2DPtr sum_2GeV; Scatter2DPtr sum_5GeV; Scatter2DPtr diff_500MeV; Scatter2DPtr diff_2GeV; Scatter2DPtr diff_5GeV; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1419070); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1426515.cc b/analyses/pluginATLAS/ATLAS_2016_I1426515.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1426515.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1426515.cc @@ -1,268 +1,268 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" namespace Rivet { /// WW production at 8 TeV class ATLAS_2016_I1426515 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1426515); /// Book histograms and initialise projections before the run void init() { const FinalState fs(Cuts::abseta < 4.5); // Project photons for dressing IdentifiedFinalState photon_id(fs); photon_id.acceptIdPair(PID::PHOTON); // Project dressed electrons with pT > 15 GeV and |eta| < 2.47 PromptFinalState el_bare(FinalState(Cuts::abspid == PID::ELECTRON)); Cut cuts = (Cuts::abseta < 2.47) && ( (Cuts::abseta <= 1.37) || (Cuts::abseta >= 1.52) ) && (Cuts::pT > 10*GeV); DressedLeptons el_dressed_FS(photon_id, el_bare, 0.1, cuts, true); declare(el_dressed_FS, "EL_DRESSED_FS"); // Project dressed muons with pT > 15 GeV and |eta| < 2.5 PromptFinalState mu_bare(FinalState(Cuts::abspid == PID::MUON)); DressedLeptons mu_dressed_FS(photon_id, mu_bare, 0.1, Cuts::abseta < 2.4 && Cuts::pT > 15*GeV, true); declare(mu_dressed_FS, "MU_DRESSED_FS"); Cut cuts_WW = (Cuts::abseta < 2.5) && (Cuts::pT > 20*GeV); IdentifiedFinalState lep_id(fs); lep_id.acceptIdPair(PID::MUON); lep_id.acceptIdPair(PID::ELECTRON); PromptFinalState lep_bare(lep_id); DressedLeptons leptons(photon_id, lep_bare, 0.1, cuts_WW, true); declare(leptons,"leptons"); declare(FinalState(Cuts::abspid == PID::TAU || Cuts::abspid == PID::NU_TAU), "tau_id"); // Get MET 
from generic invisibles VetoedFinalState ivfs(fs); ivfs.addVetoOnThisFinalState(VisibleFinalState(fs)); - addProjection(ivfs, "InvisibleFS"); + declare(ivfs, "InvisibleFS"); // Project jets FastJets jets(fs, FastJets::ANTIKT, 0.4, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); - addProjection(jets, "jets"); + declare(jets, "jets"); // Integrated cross sections // d01 ee/mm fiducial integrated cross sections book(_hist_mm_fid_intxsec, 1, 1, 1); book(_hist_ee_fid_intxsec, 1, 1, 2); // d02 emme fiducial integrated cross sections book(_hist_emme_fid_intxsec, 2, 1, 1); // d10 emme fiducial differential cross section (leading lepton ptlead + ptlead normalized) book(_hist_emme_fid_ptlead, 10, 1, 1); book(_hist_emme_fid_ptleadnorm, 10, 1, 2); // d11 emme fiducial differential cross section (dilepton-system ptll + ptll normalized) book(_hist_emme_fid_ptll, 11, 1, 1); book(_hist_emme_fid_ptllnorm, 11, 1, 2); // d12 emme fiducial differential cross section (dilepton-system mll + mll normalized) book(_hist_emme_fid_mll, 12, 1, 1); book(_hist_emme_fid_mllnorm, 12, 1, 2); // d13 emme fiducial differential cross section (dilepton-system delta_phi_ll + dphill normalized) book(_hist_emme_fid_dphill, 13, 1, 1); book(_hist_emme_fid_dphillnorm, 13, 1, 2); // d14 emme fiducial differential cross section (absolute rapidity of dilepton-system y_ll + y_ll normalized) book(_hist_emme_fid_yll, 14, 1, 1); book(_hist_emme_fid_yllnorm, 14, 1, 2); // d15 emme fiducial differential cross section (absolute costheta* of dilepton-system costhetastar_ll + costhetastar_ll normalized) book(_hist_emme_fid_costhetastarll, 15, 1, 1); book(_hist_emme_fid_costhetastarllnorm, 15, 1, 2); } /// Perform the per-event analysis void analyze(const Event& event) { // Find leptons const FinalState& ifs = apply(event, "InvisibleFS"); const vector& leptons = apply(event, "leptons").dressedLeptons(); const vector& good_mu = apply(event, "MU_DRESSED_FS").dressedLeptons(); const vector& good_el = apply(event, "EL_DRESSED_FS").dressedLeptons(); const Jets& jets = applyProjection(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 4.5); // Taus are excluded from the fiducial cross section const Particles taus = applyProjection(event, "tau_id").particlesByPt(Cuts::pT>12.*GeV && Cuts::abseta < 3.0); if (!taus.empty()) vetoEvent; // Remove events that do not contain 2 good leptons (either muons or electrons) if (leptons.size() != 2 && (good_el.size() != 1 || good_mu.size() != 1)) vetoEvent; // Split into channels int channel = -1; // 1=mm, 2=ee; 3=emu; 4=mue if (good_mu.size() == 2) channel = 1; //mm else if (good_el.size() == 2) channel = 2; //ee else if (good_mu.size() == 1 && good_el.size() == 1 && good_el[0].pT() > good_mu[0].pT()) channel = 3; //emu else if (good_mu.size() == 1 && good_el.size() == 1 && good_el[0].pT() < good_mu[0].pT()) channel = 4; //mue if (channel == -1) vetoEvent; // Assign leptons const DressedLepton *lep1, *lep2; if (channel == 1) { //mm if (good_mu[0].pT() > good_mu[1].pT()) { lep1 = &good_mu[0]; lep2 = &good_mu[1]; } else { lep1 = &good_mu[1]; lep2 = &good_mu[0]; } } else if (channel == 2) { //ee if (good_el[0].pT() > good_el[1].pT()) { lep1 = &good_el[0]; lep2 = &good_el[1]; } else { lep1 = &good_el[1]; lep2 = &good_el[0]; } } else if (channel == 3) { //emu lep1 = &good_el[0]; lep2 = &good_mu[0]; } else { // if (channel == 4) { //mue lep1 = &good_mu[0]; lep2 = &good_el[0]; } // Cut on leptons if (lep1->pT() < 25*GeV || lep2->pT() < 20*GeV) vetoEvent; // Select jets isolated from electrons const Jets 
jets_selected = filter_select(jets, [&](const Jet& j){ return all(good_el, deltaRGtr(j, 0.3)); }); // Define variables const FourMomentum met = sum(ifs.particles(), FourMomentum()); const FourMomentum dilep = lep1->momentum() + lep2->momentum(); const double ptll = dilep.pT()/GeV; const double Mll = dilep.mass()/GeV; const double Yll = dilep.absrap(); const double DPhill = fabs(deltaPhi(*lep1, *lep2)); const double costhetastar = fabs(tanh((lep1->eta() - lep2->eta()) / 2)); // Calculate dphi to MET double DPhi_met = fabs(deltaPhi((*lep1), met)); if (fabs(deltaPhi( (*lep2), met)) < DPhi_met) DPhi_met = fabs(deltaPhi((*lep2), met)); if (DPhi_met > M_PI/2) DPhi_met = 1.; else DPhi_met = fabs(sin(DPhi_met)); // Apply selections // mll lower cut (reject quarkonia) if ((channel == 1 || channel == 2) && Mll < 15.) vetoEvent; else if (Mll < 10.) vetoEvent; // Z veto (reject Z -- only dilepton channels) if ((channel == 1 || channel == 2) && abs(Mll - 91.1876) < 15.) vetoEvent; // Met rel cut if ((channel == 1 || channel == 2) && met.pT()*DPhi_met < 45*GeV) vetoEvent; else if (met.pT()*DPhi_met < 15*GeV) vetoEvent; // MET (pt-MET) cut if ((channel == 1 || channel == 2) && met.pT() <= 45*GeV) vetoEvent; // begin MET cut else if (met.pT() <= 20*GeV) vetoEvent; // Require 0 jets if (!jets_selected.empty()) vetoEvent; // Fill histograms if (channel == 1) { _hist_mm_fid_intxsec->fill(1.0); } else if (channel == 2) { _hist_ee_fid_intxsec->fill(1.0); } else if (channel == 3 || channel == 4) { _hist_emme_fid_intxsec->fill(1.0); _hist_emme_fid_ptlead->fill(lep1->pT()/GeV); _hist_emme_fid_ptleadnorm->fill(lep1->pT()/GeV); _hist_emme_fid_ptll->fill(ptll); _hist_emme_fid_ptllnorm->fill(ptll); _hist_emme_fid_mll->fill(Mll); _hist_emme_fid_mllnorm->fill(Mll); _hist_emme_fid_dphill->fill(DPhill); _hist_emme_fid_dphillnorm->fill(DPhill); _hist_emme_fid_yll->fill(Yll); _hist_emme_fid_yllnorm->fill(Yll); _hist_emme_fid_costhetastarll->fill(costhetastar); _hist_emme_fid_costhetastarllnorm->fill(costhetastar); } } /// Normalise histograms etc., after the run void finalize() { const double sf(crossSection()/femtobarn/sumOfWeights()); scale({_hist_mm_fid_intxsec, _hist_ee_fid_intxsec, _hist_emme_fid_intxsec}, sf); scale({_hist_emme_fid_ptlead, _hist_emme_fid_ptll, _hist_emme_fid_mll, _hist_emme_fid_dphill, _hist_emme_fid_yll, _hist_emme_fid_costhetastarll}, sf); normalize({_hist_emme_fid_ptleadnorm, _hist_emme_fid_ptllnorm, _hist_emme_fid_mllnorm, _hist_emme_fid_dphillnorm, _hist_emme_fid_yllnorm, _hist_emme_fid_costhetastarllnorm}); } private: /// @name Histograms //@{ // d01 ee/mm fiducial integrated cross sections Histo1DPtr _hist_mm_fid_intxsec, _hist_ee_fid_intxsec; // d02 emme fiducial integrated cross sections Histo1DPtr _hist_emme_fid_intxsec; // d10 emme fiducial differential cross section (leading lepton ptlead + ptlead normalized) Histo1DPtr _hist_emme_fid_ptlead, _hist_emme_fid_ptleadnorm; // d11 emme fiducial differential cross section (dilepton-system ptll + ptll normalized) Histo1DPtr _hist_emme_fid_ptll, _hist_emme_fid_ptllnorm; // d12 emme fiducial differential cross section (dilepton-system mll + mll normalized) Histo1DPtr _hist_emme_fid_mll, _hist_emme_fid_mllnorm; // d13 emme fiducial differential cross section (dilepton-system delta_phi_ll + dphill normalized) Histo1DPtr _hist_emme_fid_dphill, _hist_emme_fid_dphillnorm; // d14 emme fiducial differential cross section (absolute rapidity of dilepton-system y_ll + y_ll normalized) Histo1DPtr _hist_emme_fid_yll, _hist_emme_fid_yllnorm; // d15 emme 
fiducial differential cross section (absolute costheta* of dilepton-system costhetastar_ll + costhetastar_ll normalized) Histo1DPtr _hist_emme_fid_costhetastarll, _hist_emme_fid_costhetastarllnorm; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1426515); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1426523.cc b/analyses/pluginATLAS/ATLAS_2016_I1426523.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1426523.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1426523.cc @@ -1,426 +1,426 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief Measurement of the WZ production cross section at 8 TeV class ATLAS_2016_I1426523 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1426523); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Lepton cuts Cut FS_Zlept = Cuts::abseta < 2.5 && Cuts::pT > 15*GeV; const FinalState fs; Cut fs_z = Cuts::abseta < 2.5 && Cuts::pT > 15*GeV; Cut fs_j = Cuts::abseta < 4.5 && Cuts::pT > 25*GeV; // Get photons to dress leptons PromptFinalState photons(Cuts::abspid == PID::PHOTON); // Electrons and muons in Fiducial PS PromptFinalState leptons(fs_z && (Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON)); leptons.acceptTauDecays(false); DressedLeptons dressedleptons(photons, leptons, 0.1, FS_Zlept, true); - addProjection(dressedleptons, "DressedLeptons"); + declare(dressedleptons, "DressedLeptons"); // Electrons and muons in Total PS PromptFinalState leptons_total(Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON); leptons_total.acceptTauDecays(false); DressedLeptons dressedleptonsTotal(photons, leptons_total, 0.1, Cuts::open(), true); - addProjection(dressedleptonsTotal, "DressedLeptonsTotal"); + declare(dressedleptonsTotal, "DressedLeptonsTotal"); // Neutrinos IdentifiedFinalState nu_id; nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(false); declare(neutrinos, "Neutrinos"); MSG_WARNING("\033[91;1mLIMITED VALIDITY - check info file for details!\033[m"); // Jets VetoedFinalState veto; veto.addVetoOnThisFinalState(dressedleptons); FastJets jets(veto, FastJets::ANTIKT, 0.4); declare(jets, "Jets"); // Book histograms book(_h["eee"] , 1, 1, 1); book(_h["mee"] , 1, 1, 2); book(_h["emm"] , 1, 1, 3); book(_h["mmm"] , 1, 1, 4); book(_h["fid"] , 1, 1, 5); book(_h["eee_Plus"] , 2, 1, 1); book(_h["mee_Plus"] , 2, 1, 2); book(_h["emm_Plus"] , 2, 1, 3); book(_h["mmm_Plus"] , 2, 1, 4); book(_h["fid_Plus"] , 2, 1, 5); book(_h["eee_Minus"] , 3, 1, 1); book(_h["mee_Minus"] , 3, 1, 2); book(_h["emm_Minus"] , 3, 1, 3); book(_h["mmm_Minus"] , 3, 1, 4); book(_h["fid_Minus"] , 3, 1, 5); book(_h["total"] , 5, 1, 1); book(_h["Njets"] , 27, 1, 1); book(_h["Njets_norm"], 41, 1, 1); bookHandler("ZpT", 12); bookHandler("ZpT_Plus", 13); bookHandler("ZpT_Minus", 14); bookHandler("WpT", 15); bookHandler("WpT_Plus", 16); bookHandler("WpT_Minus", 17); bookHandler("mTWZ", 18); bookHandler("mTWZ_Plus", 19); bookHandler("mTWZ_Minus", 20); bookHandler("pTv", 21); bookHandler("pTv_Plus", 22); bookHandler("pTv_Minus", 23); bookHandler("Deltay", 24); bookHandler("Deltay_Plus", 25); bookHandler("Deltay_Minus", 26); bookHandler("mjj", 28); 
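      // Editor's note on the bookHandler calls in this block: for each tag, bookHandler
      // (defined just below) books the public Scatter2D under the (ID,1,1) axis code and a
      // shadow Histo1D under (ID,1,2) using the (ID,1,1) reference binning. The histogram is
      // what gets filled during the run; finalize() converts it bin by bin into scatter points
      // via makeScatterWithoutDividingByBinwidth() and then removes the shadow histogram.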
bookHandler("Deltayjj", 29); bookHandler("ZpT_norm", 30); bookHandler("ZpT_Plus_norm", 31); bookHandler("ZpT_Minus_norm", 32); bookHandler("WpT_norm", 33); bookHandler("mTWZ_norm", 34); bookHandler("pTv_norm", 35); bookHandler("pTv_Plus_norm", 36); bookHandler("pTv_Minus_norm", 37); bookHandler("Deltay_norm", 38); bookHandler("Deltay_Minus_norm", 39); bookHandler("Deltay_Plus_norm", 40); bookHandler("mjj_norm", 42); bookHandler("Deltayjj_norm", 43); } void bookHandler(const string& tag, size_t ID) { book(_s[tag], ID, 1, 1); const string code1 = makeAxisCode(ID, 1, 1); const string code2 = makeAxisCode(ID, 1, 2); book(_h[tag], code2, refData(code1)); } /// Perform the per-event analysis void analyze(const Event& event) { const vector& dressedleptons = apply(event, "DressedLeptons").dressedLeptons(); const vector& dressedleptonsTotal = apply(event, "DressedLeptonsTotal").dressedLeptons(); const Particles& neutrinos = apply(event, "Neutrinos").particlesByPt(); Jets jets = apply(event, "Jets").jetsByPt( (Cuts::abseta < 4.5) && (Cuts::pT > 25*GeV) ); if ((dressedleptonsTotal.size()<3) || (neutrinos.size()<1)) vetoEvent; //---Total PS: assign leptons to W and Z bosons using Resonant shape algorithm // NB: This resonant shape algorithm assumes the Standard Model and can therefore // NOT be used for reinterpretation in terms of new-physics models. int i, j, k; double MassZ01 = 0., MassZ02 = 0., MassZ12 = 0.; double MassW0 = 0., MassW1 = 0., MassW2 = 0.; double WeightZ1, WeightZ2, WeightZ3; double WeightW1, WeightW2, WeightW3; double M1, M2, M3; double WeightTotal1, WeightTotal2, WeightTotal3; //try Z pair of leptons 01 if ( (dressedleptonsTotal[0].pid()==-(dressedleptonsTotal[1].pid())) && (dressedleptonsTotal[2].abspid()==neutrinos[0].abspid()-1)){ MassZ01 = (dressedleptonsTotal[0].momentum()+dressedleptonsTotal[1].momentum()).mass(); MassW2 = (dressedleptonsTotal[2].momentum()+neutrinos[0].momentum()).mass(); } //try Z pair of leptons 02 if ( (dressedleptonsTotal[0].pid()==-(dressedleptonsTotal[2].pid())) && (dressedleptonsTotal[1].abspid()==neutrinos[0].abspid()-1)){ MassZ02 = (dressedleptonsTotal[0].momentum()+dressedleptonsTotal[2].momentum()).mass(); MassW1 = (dressedleptonsTotal[1].momentum()+neutrinos[0].momentum()).mass(); } //try Z pair of leptons 12 if ( (dressedleptonsTotal[1].pid()==-(dressedleptonsTotal[2].pid())) && (dressedleptonsTotal[0].abspid()==neutrinos[0].abspid()-1)){ MassZ12 = (dressedleptonsTotal[1].momentum()+dressedleptonsTotal[2].momentum()).mass(); MassW0 = (dressedleptonsTotal[0].momentum()+neutrinos[0].momentum()).mass(); } WeightZ1 = 1/(pow(MassZ01*MassZ01 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW1 = 1/(pow(MassW2*MassW2 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal1 = WeightZ1*WeightW1; M1 = -1*WeightTotal1; WeightZ2 = 1/(pow(MassZ02*MassZ02- MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW2 = 1/(pow(MassW1*MassW1- MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal2 = WeightZ2*WeightW2; M2 = -1*WeightTotal2; WeightZ3 = 1/(pow(MassZ12*MassZ12 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW3 = 1/(pow(MassW0*MassW0 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal3 = WeightZ3*WeightW3; M3 = -1*WeightTotal3; if( (M1 < M2 && M1 < M3) || (MassZ01 != 0 && MassW2 != 0 && MassZ02 == 0 && MassZ12 == 0) ){ i = 0; j = 1; k = 2; } if( (M2 < M1 && M2 < M3) || (MassZ02 != 0 && MassW1 != 0 && MassZ01 == 0 && MassZ12 == 0) ){ i = 0; j = 2; k = 1; } if( (M3 < M1 && M3 < M2) || (MassZ12 != 0 && MassW0 != 0 && MassZ01 == 0 && 
MassZ02 == 0) ){ i = 1; j = 2; k = 0; } FourMomentum ZbosonTotal = dressedleptonsTotal[i].momentum()+dressedleptonsTotal[j].momentum(); if ( (ZbosonTotal.mass() >= 66*GeV) && (ZbosonTotal.mass() <= 116*GeV) ) _h["total"]->fill(8000); //---end Total PS //---Fiducial PS: assign leptons to W and Z bosons using Resonant shape algorithm if (dressedleptons.size() < 3 || neutrinos.size() < 1) vetoEvent; int EventType = -1; int Nel = 0, Nmu = 0; for (const DressedLepton& l : dressedleptons) { if (l.abspid() == 11) ++Nel; if (l.abspid() == 13) ++Nmu; } if ( Nel == 3 && Nmu==0 ) EventType = 3; if ( Nel == 2 && Nmu==1 ) EventType = 2; if ( Nel == 1 && Nmu==2 ) EventType = 1; if ( Nel == 0 && Nmu==3 ) EventType = 0; int EventCharge = -dressedleptons[0].charge()*dressedleptons[1].charge()*dressedleptons[2].charge(); MassZ01 = 0; MassZ02 = 0; MassZ12 = 0; MassW0 = 0; MassW1 = 0; MassW2 = 0; //try Z pair of leptons 01 if ( (dressedleptons[0].pid()==-(dressedleptons[1].pid())) && (dressedleptons[2].abspid()==neutrinos[0].abspid()-1)){ MassZ01 = (dressedleptons[0].momentum()+dressedleptons[1].momentum()).mass(); MassW2 = (dressedleptons[2].momentum()+neutrinos[0].momentum()).mass(); } //try Z pair of leptons 02 if ( (dressedleptons[0].pid()==-(dressedleptons[2].pid())) && (dressedleptons[1].abspid()==neutrinos[0].abspid()-1)){ MassZ02 = (dressedleptons[0].momentum()+dressedleptons[2].momentum()).mass(); MassW1 = (dressedleptons[1].momentum()+neutrinos[0].momentum()).mass(); } //try Z pair of leptons 12 if ( (dressedleptons[1].pid()==-(dressedleptons[2].pid())) && (dressedleptons[0].abspid()==neutrinos[0].abspid()-1)){ MassZ12 = (dressedleptons[1].momentum()+dressedleptons[2].momentum()).mass(); MassW0 = (dressedleptons[0].momentum()+neutrinos[0].momentum()).mass(); } WeightZ1 = 1/(pow(MassZ01*MassZ01 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW1 = 1/(pow(MassW2*MassW2 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal1 = WeightZ1*WeightW1; M1 = -1*WeightTotal1; WeightZ2 = 1/(pow(MassZ02*MassZ02- MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW2 = 1/(pow(MassW1*MassW1- MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal2 = WeightZ2*WeightW2; M2 = -1*WeightTotal2; WeightZ3 = 1/(pow(MassZ12*MassZ12 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW3 = 1/(pow(MassW0*MassW0 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal3 = WeightZ3*WeightW3; M3 = -1*WeightTotal3; if( (M1 < M2 && M1 < M3) || (MassZ01 != 0 && MassW2 != 0 && MassZ02 == 0 && MassZ12 == 0) ){ i = 0; j = 1; k = 2; } if( (M2 < M1 && M2 < M3) || (MassZ02 != 0 && MassW1 != 0 && MassZ01 == 0 && MassZ12 == 0) ){ i = 0; j = 2; k = 1; } if( (M3 < M1 && M3 < M2) || (MassZ12 != 0 && MassW0 != 0 && MassZ01 == 0 && MassZ02 == 0) ){ i = 1; j = 2; k = 0; } FourMomentum Zlepton1 = dressedleptons[i].momentum(); FourMomentum Zlepton2 = dressedleptons[j].momentum(); FourMomentum Wlepton = dressedleptons[k].momentum(); FourMomentum Zboson = dressedleptons[i].momentum()+dressedleptons[j].momentum(); FourMomentum Wboson = dressedleptons[k].momentum()+neutrinos[0].momentum(); double Wboson_mT = sqrt( 2 * Wlepton.pT() * neutrinos[0].pt() * (1 - cos(deltaPhi(Wlepton, neutrinos[0]))) )/GeV; if (fabs(Zboson.mass()-MZ_PDG)>=10.) vetoEvent; if (Wboson_mT<=30.) vetoEvent; if (Wlepton.pT()<=20.) 
vetoEvent; if (deltaR(Zlepton1,Zlepton2) < 0.2) vetoEvent; if (deltaR(Zlepton1,Wlepton) < 0.3) vetoEvent; if (deltaR(Zlepton2,Wlepton) < 0.3) vetoEvent; double WZ_pt = Zlepton1.pt() + Zlepton2.pt() + Wlepton.pt() + neutrinos[0].pt(); double WZ_px = Zlepton1.px() + Zlepton2.px() + Wlepton.px() + neutrinos[0].px(); double WZ_py = Zlepton1.py() + Zlepton2.py() + Wlepton.py() + neutrinos[0].py(); double mTWZ = sqrt( pow(WZ_pt, 2) - ( pow(WZ_px, 2) + pow(WZ_py,2) ) )/GeV; double AbsDeltay = fabs(Zboson.rapidity()-Wlepton.rapidity()); if (EventType == 3) _h["eee"]->fill(8000.); if (EventType == 2) _h["mee"]->fill(8000.); if (EventType == 1) _h["emm"]->fill(8000.); if (EventType == 0) _h["mmm"]->fill(8000.); _h["fid"]->fill(8000.); if (EventCharge == 1) { if (EventType == 3) _h["eee_Plus"]->fill(8000.); if (EventType == 2) _h["mee_Plus"]->fill(8000.); if (EventType == 1) _h["emm_Plus"]->fill(8000.); if (EventType == 0) _h["mmm_Plus"]->fill(8000.); _h["fid_Plus"]->fill(8000.); _h["Deltay_Plus"]->fill(AbsDeltay); _h["Deltay_Plus_norm"]->fill(AbsDeltay); fillWithOverflow("ZpT_Plus", Zboson.pT()/GeV, 220); fillWithOverflow("WpT_Plus", Wboson.pT()/GeV, 220); fillWithOverflow("mTWZ_Plus", mTWZ, 600); fillWithOverflow("pTv_Plus", neutrinos[0].pt(), 90); fillWithOverflow("ZpT_Plus_norm", Zboson.pT()/GeV, 220); fillWithOverflow("pTv_Plus_norm", neutrinos[0].pt()/GeV, 90); } else { if (EventType == 3) _h["eee_Minus"]->fill(8000.); if (EventType == 2) _h["mee_Minus"]->fill(8000.); if (EventType == 1) _h["emm_Minus"]->fill(8000.); if (EventType == 0) _h["mmm_Minus"]->fill(8000.); _h["fid_Minus"]->fill(8000.); _h["Deltay_Minus"]->fill(AbsDeltay); _h["Deltay_Minus_norm"]->fill(AbsDeltay); fillWithOverflow("ZpT_Minus", Zboson.pT()/GeV, 220); fillWithOverflow("WpT_Minus", Wboson.pT()/GeV, 220); fillWithOverflow("mTWZ_Minus", mTWZ, 600); fillWithOverflow("pTv_Minus", neutrinos[0].pt()/GeV, 90); fillWithOverflow("ZpT_Minus_norm", Zboson.pT()/GeV, 220); fillWithOverflow("pTv_Minus_norm", neutrinos[0].pt()/GeV, 90); } fillWithOverflow("ZpT", Zboson.pT()/GeV, 220); fillWithOverflow("WpT", Wboson.pT()/GeV, 220); fillWithOverflow("mTWZ", mTWZ, 600); fillWithOverflow("pTv", neutrinos[0].pt()/GeV, 90); _h["Deltay"]->fill(AbsDeltay); fillWithOverflow("Njets", jets.size(), 5); fillWithOverflow("Njets_norm", jets.size(), 5); fillWithOverflow("ZpT_norm", Zboson.pT()/GeV, 220); fillWithOverflow("WpT_norm", Wboson.pT()/GeV, 220); fillWithOverflow("mTWZ_norm", mTWZ, 600); fillWithOverflow("pTv_norm", neutrinos[0].pt()/GeV, 90); _h["Deltay_norm"]->fill(AbsDeltay); if (jets.size()>1) { double mjj = (jets[0].momentum()+jets[1].momentum()).mass()/GeV; fillWithOverflow("mjj", mjj, 800); fillWithOverflow("mjj_norm", mjj, 800); double DeltaYjj = fabs(jets[0].rapidity()-jets[1].rapidity()); fillWithOverflow("Deltayjj", DeltaYjj, 5); fillWithOverflow("Deltayjj_norm", DeltaYjj, 5); } } void fillWithOverflow(const string& tag, const double value, const double overflow){ if (value < overflow) _h[tag]->fill(value); else _h[tag]->fill(overflow - 0.45); } /// Normalise histograms etc., after the run void finalize() { const double xs_pb(crossSection() / picobarn); const double xs_fb(crossSection() / femtobarn); const double sumw(sumOfWeights()); MSG_INFO("Cross-Section/pb: " << xs_pb ); MSG_INFO("Cross-Section/fb: " << xs_fb ); MSG_INFO("Sum of weights : " << sumw ); MSG_INFO("nEvents : " << numEvents()); const double sf_pb(xs_pb / sumw); const double sf_fb(xs_fb / sumw); MSG_INFO("sf_pb : " << sf_pb); MSG_INFO("sf_fb : " << sf_fb); float 
totalBR = 4*0.1086*0.033658; // W and Z leptonic branching fractions
for (map<string, Histo1DPtr>::iterator it = _h.begin(); it != _h.end(); ++it) { if (it->first.find("total") != string::npos) scale(it->second, sf_pb/totalBR); else if (it->first.find("norm") != string::npos) normalize(it->second); else if (it->first.find("fid") != string::npos) scale(it->second, sf_fb/4.); else if (it->first.find("Njets") != string::npos) scale(it->second, sf_fb/4.); else if (it->first.find("ZpT") != string::npos) scale(it->second, sf_fb/4.); else if (it->first.find("WpT") != string::npos) scale(it->second, sf_fb/4.); else if (it->first.find("mTWZ") != string::npos) scale(it->second, sf_fb/4.); else if (it->first.find("pTv") != string::npos) scale(it->second, sf_fb/4.); else if (it->first.find("Deltay") != string::npos) scale(it->second, sf_fb/4.); else if (it->first.find("mjj") != string::npos) scale(it->second, sf_fb/4.); else scale(it->second, sf_fb); }
for (map<string, Scatter2DPtr>::iterator it = _s.begin(); it != _s.end(); ++it) { makeScatterWithoutDividingByBinwidth(it->first); removeAnalysisObject(_h[it->first]); } }
void makeScatterWithoutDividingByBinwidth(const string& tag) { vector<Point2D> points;
//size_t nBins = _dummy->numBins();
for (const HistoBin1D &bin : _h[tag]->bins()) { double x = bin.midpoint(); double y = bin.sumW(); double ex = bin.xWidth()/2; double ey = sqrt(bin.sumW2()); points.push_back(Point2D(x, y, ex, ey)); } _s[tag]->addPoints(points); } //@}
private:
/// @name Histograms
//@{
map<string, Histo1DPtr> _h; map<string, Scatter2DPtr> _s;
//@}
double MZ_PDG = 91.1876; double MW_PDG = 83.385; double GammaZ_PDG = 2.4952; double GammaW_PDG = 2.085; };
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2016_I1426523); }
diff --git a/analyses/pluginATLAS/ATLAS_2016_I1457605.cc b/analyses/pluginATLAS/ATLAS_2016_I1457605.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1457605.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1457605.cc @@ -1,130 +1,130 @@ // -*- C++ -*-
#include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Inclusive isolated prompt photon analysis with 2012 LHC data
class ATLAS_2016_I1457605 : public Analysis { public: /// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1457605); /// Book histograms and initialise projections before the run
void init() { FinalState fs; - addProjection(fs, "FS"); + declare(fs, "FS"); // Consider the final state jets for the energy density calculation
FastJets fj(fs, FastJets::KT, 0.5); fj.useJetArea(new fastjet::AreaDefinition(fastjet::VoronoiAreaSpec())); - addProjection(fj, "KtJetsD05"); + declare(fj, "KtJetsD05"); // Consider the leading pt photon with |eta| < 2.37 and pT > 25 GeV
LeadingParticlesFinalState photonfs(PromptFinalState(FinalState(Cuts::abseta < 2.37 && Cuts::pT > 25*GeV))); photonfs.addParticleId(PID::PHOTON); - addProjection(photonfs, "LeadingPhoton"); + declare(photonfs, "LeadingPhoton"); // Book the dsigma/dEt (in eta bins) histograms
for (size_t i = 0; i < _eta_bins.size() - 1; ++i) { if (fuzzyEquals(_eta_bins[i], 1.37)) continue; // skip this bin
int offset = i > 2 ?
0 : 1; book(_h_Et_photon[i] ,i + offset, 1, 1); } } /// Return eta bin for either dsigma/dET histogram (area_eta=false) or energy density correction (area_eta=true)
size_t _getEtaBin(double eta_w, bool area_eta) const { const double eta = fabs(eta_w); if (!area_eta) { return binIndex(eta, _eta_bins); } else { return binIndex(eta, _eta_bins_areaoffset); } } /// Perform the per-event analysis
void analyze(const Event& event) { // Retrieve leading photon
Particles photons = applyProjection<LeadingParticlesFinalState>(event, "LeadingPhoton").particles(); if (photons.size() < 1) vetoEvent; const Particle& leadingPhoton = photons[0]; // Veto events with photon in ECAL crack
if (inRange(leadingPhoton.abseta(), 1.37, 1.56)) vetoEvent; // Compute isolation energy in cone of radius 0.4 around photon (all particles)
FourMomentum mom_in_EtCone; Particles fs = applyProjection<FinalState>(event, "FS").particles(); for (const Particle& p : fs) { // Check if it's outside the cone of 0.4
if (deltaR(leadingPhoton, p) >= 0.4) continue; // Except muons or neutrinos
if (PID::isNeutrino(p.abspid()) || p.abspid() == PID::MUON) continue; // Increment isolation energy
mom_in_EtCone += p.momentum(); } // Remove the photon energy from the isolation
mom_in_EtCone -= leadingPhoton.momentum(); // Get the area-filtered jet inputs for computing median energy density, etc.
vector<double> ptDensity; vector< vector<double> > ptDensities(_eta_bins_areaoffset.size()-1); const FastJets& fast_jets = applyProjection<FastJets>(event, "KtJetsD05"); const auto clust_seq_area = fast_jets.clusterSeqArea(); for (const Jet& jet : fast_jets.jets()) { const double area = clust_seq_area->area(jet); if (area > 1e-3 && jet.abseta() < _eta_bins_areaoffset.back()) ptDensities.at( _getEtaBin(jet.abseta(), true) ) += jet.pT()/area; } // Compute the median energy density, etc.
for (size_t b = 0; b < _eta_bins_areaoffset.size()-1; ++b) { const int njets = ptDensities[b].size(); ptDensity += (njets > 0) ? median(ptDensities[b]) : 0.0; } // Compute the isolation energy correction (cone area * energy density)
const double etCone_area = PI * sqr(0.4); const double correction = ptDensity[_getEtaBin(leadingPhoton.abseta(), true)] * etCone_area; // Apply isolation cut on area-corrected value
// cut is Etiso < 4.8GeV + 4.2E-03 * Et_gamma.
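// To make the cut on the next line concrete (illustrative numbers, not taken from the
// measurement): a photon with Et_gamma = 100 GeV is kept only if its area-corrected
// isolation, Etiso_corr = Et(cone 0.4) - Et(photon) - rho_median * pi * 0.4^2, stays
// below 4.8 GeV + 0.0042 * 100 GeV = 5.22 GeV, where rho_median is the median kT-jet
// pT density in the photon's |eta| band computed just above.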
if (mom_in_EtCone.Et() - correction > 4.8*GeV + 0.0042*leadingPhoton.Et()) vetoEvent; // Fill histograms const size_t eta_bin = _getEtaBin(leadingPhoton.abseta(), false); _h_Et_photon[eta_bin]->fill(leadingPhoton.Et()); } /// Normalise histograms etc., after the run void finalize() { double sf = crossSection() / (picobarn * sumOfWeights()); for (size_t i = 0; i < _eta_bins.size()-1; ++i) { if (fuzzyEquals(_eta_bins[i], 1.37)) continue; scale(_h_Et_photon[i], sf); } } private: Histo1DPtr _h_Et_photon[5]; const vector _eta_bins = {0.00, 0.60, 1.37, 1.56, 1.81, 2.37 }; const vector _eta_bins_areaoffset = {0.0, 1.5, 3.0}; }; DECLARE_RIVET_PLUGIN(ATLAS_2016_I1457605); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1468168.cc b/analyses/pluginATLAS/ATLAS_2016_I1468168.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1468168.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1468168.cc @@ -1,81 +1,81 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" namespace Rivet { class ATLAS_2016_I1468168 : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1468168); void init() { // Eta ranges Cut eta_full = Cuts::abseta < 5.0 && Cuts::pT >= 1.0*MeV; // Lepton cuts Cut lep_cuts = Cuts::abseta < 2.5 && Cuts::pT >= 25.0*GeV; // All final state particles FinalState fs(eta_full); // Get photons to dress leptons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); // Projection to find the electrons IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); electrons.acceptTauDecays(true); DressedLeptons dressedelectrons(photons, electrons, 0.1, lep_cuts, true); - addProjection(dressedelectrons, "DressedElectrons"); + declare(dressedelectrons, "DressedElectrons"); // Projection to find the muons IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); muons.acceptTauDecays(true); DressedLeptons dressedmuons(photons, muons, 0.1, lep_cuts, true); - addProjection(dressedmuons, "DressedMuons"); + declare(dressedmuons, "DressedMuons"); /// @todo Make this a counter or Scatter1D? book(_hist ,"Passed_events", 1, 0, 1); } void analyze(const Event& event) { // Get the selected objects, using the projections. 
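// The DressedLeptons projections used here add prompt photons within DeltaR < 0.1 to each
// prompt bare lepton (including leptons from tau decays, as configured above) before the
// pT > 25 GeV, |eta| < 2.5 cuts are applied. A minimal standalone sketch of that dressing
// step (illustrative only -- the projection performs this internally):
//   FourMomentum dressed = lep.momentum();
//   for (const Particle& ph : photons)
//     if (deltaR(lep, ph) < 0.1) dressed += ph.momentum();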
const size_t num_es = applyProjection<DressedLeptons>(event, "DressedElectrons").dressedLeptons().size(); const size_t num_mus = applyProjection<DressedLeptons>(event, "DressedMuons").dressedLeptons().size(); // Evaluate basic event selection
const bool pass_emu = num_es == 1 && num_mus == 1; if (!pass_emu) vetoEvent; // Fill histogram to measure the event acceptance
_hist->fill(0.5); } void finalize() { // Normalize to cross-section
const double sf(crossSection() / sumOfWeights()); scale(_hist, sf); } private: Histo1DPtr _hist; };
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2016_I1468168); }
diff --git a/analyses/pluginATLAS/ATLAS_2016_I1469071.cc b/analyses/pluginATLAS/ATLAS_2016_I1469071.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1469071.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1469071.cc @@ -1,320 +1,320 @@ // -*- C++ -*-
#include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @brief Measurement of the WZ production cross section at 13 TeV
class ATLAS_2016_I1469071 : public Analysis { public: /// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1469071); /// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() { // Lepton cuts
Cut FS_Zlept = Cuts::abseta < 2.5 && Cuts::pT > 15*GeV; FinalState fs; Cut fs_z = Cuts::abseta < 2.5 && Cuts::pT > 15*GeV; Cut fs_j = Cuts::abseta < 4.5 && Cuts::pT > 25*GeV; // Get photons to dress leptons
PromptFinalState photons(Cuts::abspid == PID::PHOTON); // Electrons and muons in Fiducial PS
PromptFinalState leptons(FinalState(fs_z && (Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON))); leptons.acceptTauDecays(false); DressedLeptons dressedleptons(photons, leptons, 0.1, FS_Zlept, true); - addProjection(dressedleptons, "DressedLeptons"); + declare(dressedleptons, "DressedLeptons"); // Electrons and muons in Total PS
PromptFinalState leptons_total(Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON); leptons_total.acceptTauDecays(false); DressedLeptons dressedleptonsTotal(photons, leptons_total, 0.1, Cuts::open(), true); - addProjection(dressedleptonsTotal, "DressedLeptonsTotal"); + declare(dressedleptonsTotal, "DressedLeptonsTotal");
// Prompt neutrinos (yikes!)
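// Aside on the "resonant shape" algorithm used in analyze() below: every way of assigning a
// same-flavour, opposite-sign lepton pair to the Z (with the remaining lepton plus the
// neutrino forming the W) is scored by a product of Breit-Wigner-like weights, and the
// combination with the largest product wins. A standalone sketch of the weight
// (masses and widths in GeV; illustrative only):
//   double resWeight(double m, double M, double Gamma) {
//     return 1.0 / ( pow(m*m - M*M, 2) + pow(M*Gamma, 2) );
//   }
//   // score = resWeight(mZcand, MZ_PDG, GammaZ_PDG) * resWeight(mWcand, MW_PDG, GammaW_PDG)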
IdentifiedFinalState nu_id; nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(false); declare(neutrinos, "Neutrinos"); MSG_WARNING("\033[91;1mLIMITED VALIDITY - check info file for details!\033[m"); // Jets VetoedFinalState veto; veto.addVetoOnThisFinalState(dressedleptons); FastJets jets(veto, FastJets::ANTIKT, 0.4); declare(jets, "Jets"); // Book histograms book(_h_eee , 1, 1, 1); book(_h_mee , 1, 1, 2); book(_h_emm , 1, 1, 3); book(_h_mmm , 1, 1, 4); book(_h_fid , 1, 1, 5); book(_h_eee_Plus , 2, 1, 1); book(_h_mee_Plus , 2, 1, 2); book(_h_emm_Plus , 2, 1, 3); book(_h_mmm_Plus , 2, 1, 4); book(_h_fid_Plus , 2, 1, 5); book(_h_eee_Minus, 3, 1, 1); book(_h_mee_Minus, 3, 1, 2); book(_h_emm_Minus, 3, 1, 3); book(_h_mmm_Minus, 3, 1, 4); book(_h_fid_Minus, 3, 1, 5); book(_h_total , 6, 1, 1); book(_h_Njets , 8, 1, 1); } void analyze(const Event& event) { const vector& dressedleptons = apply(event, "DressedLeptons").dressedLeptons(); const vector& dressedleptonsTotal = apply(event, "DressedLeptonsTotal").dressedLeptons(); const Particles& neutrinos = apply(event, "Neutrinos").particlesByPt(); Jets jets = apply(event, "Jets").jetsByPt( (Cuts::abseta < 4.5) && (Cuts::pT > 25*GeV) ); if (dressedleptonsTotal.size() < 3 || neutrinos.size() < 1) vetoEvent; //---Total PS: assign leptons to W and Z bosons using Resonant shape algorithm // NB: This resonant shape algorithm assumes the Standard Model and can therefore // NOT be used for any kind of reinterpretation in terms of new-physics models.. int i, j, k; double MassZ01 = 0., MassZ02 = 0., MassZ12 = 0.; double MassW0 = 0., MassW1 = 0., MassW2 = 0.; double WeightZ1, WeightZ2, WeightZ3; double WeightW1, WeightW2, WeightW3; double M1, M2, M3; double WeightTotal1, WeightTotal2, WeightTotal3; //try Z pair of leptons 01 if ( (dressedleptonsTotal[0].pid() ==-(dressedleptonsTotal[1].pid())) && (dressedleptonsTotal[2].abspid()==neutrinos[0].abspid()-1)){ MassZ01 = (dressedleptonsTotal[0].momentum()+dressedleptonsTotal[1].momentum()).mass(); MassW2 = (dressedleptonsTotal[2].momentum()+neutrinos[0].momentum()).mass(); } //try Z pair of leptons 02 if ( (dressedleptonsTotal[0].pid()==-(dressedleptonsTotal[2].pid())) && (dressedleptonsTotal[1].abspid()==neutrinos[0].abspid()-1)){ MassZ02 = (dressedleptonsTotal[0].momentum()+dressedleptonsTotal[2].momentum()).mass(); MassW1 = (dressedleptonsTotal[1].momentum()+neutrinos[0].momentum()).mass(); } //try Z pair of leptons 12 if ( (dressedleptonsTotal[1].pid()==-(dressedleptonsTotal[2].pid())) && (dressedleptonsTotal[0].abspid()==neutrinos[0].abspid()-1)){ MassZ12 = (dressedleptonsTotal[1].momentum()+dressedleptonsTotal[2].momentum()).mass(); MassW0 = (dressedleptonsTotal[0].momentum()+neutrinos[0].momentum()).mass(); } WeightZ1 = 1/(pow(MassZ01*MassZ01 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW1 = 1/(pow(MassW2*MassW2 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal1 = WeightZ1*WeightW1; M1 = -1*WeightTotal1; WeightZ2 = 1/(pow(MassZ02*MassZ02- MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW2 = 1/(pow(MassW1*MassW1- MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal2 = WeightZ2*WeightW2; M2 = -1*WeightTotal2; WeightZ3 = 1/(pow(MassZ12*MassZ12 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW3 = 1/(pow(MassW0*MassW0 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal3 = WeightZ3*WeightW3; M3 = -1*WeightTotal3; if( (M1 < M2 && M1 < M3) || (MassZ01 != 0 && MassW2 != 0 && MassZ02 == 0 && MassZ12 == 0) ){ i = 0; j = 1; k = 2; } if((M2 < M1 
&& M2 < M3) || (MassZ02 != 0 && MassW1 != 0 && MassZ01 == 0 && MassZ12 == 0) ){ i = 0; j = 2; k = 1; } if((M3 < M1 && M3 < M2) || (MassZ12 != 0 && MassW0 != 0 && MassZ01 == 0 && MassZ02 == 0) ){ i = 1; j = 2; k = 0; } FourMomentum ZbosonTotal = dressedleptonsTotal[i].momentum()+dressedleptonsTotal[j].momentum(); if ( ZbosonTotal.mass() >= 66*GeV && ZbosonTotal.mass() <= 116*GeV ) _h_total->fill(13000); //---end Total PS //---Fiducial PS: assign leptons to W and Z bosons using Resonant shape algorithm if (dressedleptons.size() < 3) vetoEvent; int EventType = -1; int Nel = 0, Nmu = 0; for (const DressedLepton& l : dressedleptons) { if (l.abspid() == 11) ++Nel; if (l.abspid() == 13) ++Nmu; } if ( (Nel == 3) && (Nmu==0) ) EventType = 3; if ( (Nel == 2) && (Nmu==1) ) EventType = 2; if ( (Nel == 1) && (Nmu==2) ) EventType = 1; if ( (Nel == 0) && (Nmu==3) ) EventType = 0; int EventCharge = -dressedleptons[0].charge() * dressedleptons[1].charge() * dressedleptons[2].charge(); MassZ01 = 0; MassZ02 = 0; MassZ12 = 0; MassW0 = 0; MassW1 = 0; MassW2 = 0; // try Z pair of leptons 01 if (dressedleptons[0].pid() == -dressedleptons[1].pid()) { MassZ01 = (dressedleptons[0].momentum() + dressedleptons[1].momentum()).mass(); MassW2 = (dressedleptons[2].momentum() + neutrinos[0].momentum()).mass(); } // try Z pair of leptons 02 if (dressedleptons[0].pid() == -dressedleptons[2].pid()) { MassZ02 = (dressedleptons[0].momentum() + dressedleptons[2].momentum()).mass(); MassW1 = (dressedleptons[1].momentum() + neutrinos[0].momentum()).mass(); } // try Z pair of leptons 12 if (dressedleptons[1].pid() == -dressedleptons[2].pid()) { MassZ12 = (dressedleptons[1].momentum() + dressedleptons[2].momentum()).mass(); MassW0 = (dressedleptons[0].momentum() + neutrinos[0].momentum()).mass(); } WeightZ1 = 1/(pow(MassZ01*MassZ01 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW1 = 1/(pow(MassW2*MassW2 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal1 = WeightZ1*WeightW1; M1 = -1*WeightTotal1; WeightZ2 = 1/(pow(MassZ02*MassZ02- MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW2 = 1/(pow(MassW1*MassW1- MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal2 = WeightZ2*WeightW2; M2 = -1*WeightTotal2; WeightZ3 = 1/(pow(MassZ12*MassZ12 - MZ_PDG*MZ_PDG,2) + pow(MZ_PDG*GammaZ_PDG,2)); WeightW3 = 1/(pow(MassW0*MassW0 - MW_PDG*MW_PDG,2) + pow(MW_PDG*GammaW_PDG,2)); WeightTotal3 = WeightZ3*WeightW3; M3 = -1*WeightTotal3; if( (M1 < M2 && M1 < M3) || (MassZ01 != 0 && MassW2 != 0 && MassZ02 == 0 && MassZ12 == 0) ) { i = 0; j = 1; k = 2; } if((M2 < M1 && M2 < M3) || (MassZ02 != 0 && MassW1 != 0 && MassZ01 == 0 && MassZ12 == 0) ) { i = 0; j = 2; k = 1; } if((M3 < M1 && M3 < M2) || (MassZ12 != 0 && MassW0 != 0 && MassZ01 == 0 && MassZ02 == 0) ) { i = 1; j = 2; k = 0; } FourMomentum Zlepton1 = dressedleptons[i].momentum(); FourMomentum Zlepton2 = dressedleptons[j].momentum(); FourMomentum Wlepton = dressedleptons[k].momentum(); FourMomentum Zboson = dressedleptons[i].momentum()+dressedleptons[j].momentum(); double Wboson_mT = sqrt( 2 * Wlepton.pT() * neutrinos[0].pt() * (1 - cos(deltaPhi(Wlepton, neutrinos[0]))) ); if (fabs(Zboson.mass()/GeV - MZ_PDG) >= 10.) 
vetoEvent; if (Wboson_mT <= 30*GeV) vetoEvent; if (Wlepton.pT() <= 20*GeV) vetoEvent; if (deltaR(Zlepton1, Zlepton2) < 0.2) vetoEvent; if (deltaR(Zlepton1, Wlepton) < 0.3) vetoEvent; if (deltaR(Zlepton2, Wlepton) < 0.3) vetoEvent; if (EventType == 3) _h_eee->fill(13000.); if (EventType == 2) _h_mee->fill(13000.); if (EventType == 1) _h_emm->fill(13000.); if (EventType == 0) _h_mmm->fill(13000.); _h_fid->fill(13000); if (EventCharge == 1) { if (EventType == 3) _h_eee_Plus->fill(13000.); if (EventType == 2) _h_mee_Plus->fill(13000.); if (EventType == 1) _h_emm_Plus->fill(13000.); if (EventType == 0) _h_mmm_Plus->fill(13000.); _h_fid_Plus->fill(13000); } else { if (EventType == 3) _h_eee_Minus->fill(13000.); if (EventType == 2) _h_mee_Minus->fill(13000.); if (EventType == 1) _h_emm_Minus->fill(13000.); if (EventType == 0) _h_mmm_Minus->fill(13000.); _h_fid_Minus->fill(13000); } if (jets.size() < 4) _h_Njets->fill(jets.size()); else _h_Njets->fill(4); } void finalize() { // Print summary info const double xs_pb(crossSection() / picobarn); const double xs_fb(crossSection() / femtobarn); const double sumw(sumOfWeights()); const double sf_pb(xs_pb / sumw); const double sf_fb(xs_fb / sumw); const float totalBR= 4*0.1086*0.033658; // W and Z leptonic branching fractions scale(_h_fid, sf_fb/4.); scale(_h_eee, sf_fb); scale(_h_mee, sf_fb); scale(_h_emm, sf_fb); scale(_h_mmm, sf_fb); scale(_h_fid_Plus, sf_fb/4.); scale(_h_eee_Plus, sf_fb); scale(_h_mee_Plus, sf_fb); scale(_h_emm_Plus, sf_fb); scale(_h_mmm_Plus, sf_fb); scale(_h_fid_Minus, sf_fb/4.); scale(_h_eee_Minus, sf_fb); scale(_h_mee_Minus, sf_fb); scale(_h_emm_Minus, sf_fb); scale(_h_mmm_Minus, sf_fb); scale(_h_Njets, sf_fb/4.); scale(_h_total, sf_pb/totalBR); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_eee; Histo1DPtr _h_mee; Histo1DPtr _h_emm; Histo1DPtr _h_mmm; Histo1DPtr _h_fid; Histo1DPtr _h_eee_Plus; Histo1DPtr _h_mee_Plus; Histo1DPtr _h_emm_Plus; Histo1DPtr _h_mmm_Plus; Histo1DPtr _h_fid_Plus; Histo1DPtr _h_eee_Minus; Histo1DPtr _h_mee_Minus; Histo1DPtr _h_emm_Minus; Histo1DPtr _h_mmm_Minus; Histo1DPtr _h_fid_Minus; Histo1DPtr _h_total; Histo1DPtr _h_Njets; //@} double MZ_PDG = 91.1876; double MW_PDG = 83.385; double GammaZ_PDG = 2.4952; double GammaW_PDG = 2.085; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1469071); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1479760.cc b/analyses/pluginATLAS/ATLAS_2016_I1479760.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1479760.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1479760.cc @@ -1,120 +1,120 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Hard double-parton scattering in four-jet events at 7 TeV class ATLAS_2016_I1479760 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1479760); /// Book histograms and initialise projections before the run void init() { /// Declare AntiKt 0.6 jets without muons and neutrinos FastJets fastJets(FinalState(), FastJets::ANTIKT, 0.6); fastJets.useInvisibles(JetAlg::Invisibles::NONE); fastJets.useMuons(JetAlg::Muons::NONE); - addProjection(fastJets, "AntiKt6Jets"); + declare(fastJets, "AntiKt6Jets"); book(_hists["deltaPt34"] , 1, 1, 1); book(_hists["deltaPhi34"] , 2, 1, 1); book(_hists["deltaPt12"] , 3, 1, 1); book(_hists["deltaPt13"] , 4, 1, 1); book(_hists["deltaPt23"] , 5, 1, 1); book(_hists["deltaPt14"] , 6, 1, 1); book(_hists["deltaPt24"] , 7, 1, 1); book(_hists["deltaPhi12"] , 8, 1, 1); 
book(_hists["deltaPhi13"] , 9, 1, 1); book(_hists["deltaPhi23"] , 10, 1, 1); book(_hists["deltaPhi14"] , 11, 1, 1); book(_hists["deltaPhi24"] , 12, 1, 1); book(_hists["deltaY12"] , 13, 1, 1); book(_hists["deltaY34"] , 14, 1, 1); book(_hists["deltaY13"] , 15, 1, 1); book(_hists["deltaY23"] , 16, 1, 1); book(_hists["deltaY14"] , 17, 1, 1); book(_hists["deltaY24"] , 18, 1, 1); book(_hists["deltaPhiPlanes12"], 19, 1, 1); book(_hists["deltaPhiPlanes13"], 20, 1, 1); book(_hists["deltaPhiPlanes14"], 21, 1, 1); } /// Calculate the DeltaPt variable double calcDeltaPt(const Jet& j1, const Jet& j2) { return (j1.momentum() + j2.momentum()).pT() / (j1.pT() + j2.pT()); } /// Calculate the DeltaPhi variable between event planes double calcDeltaPhiPlanes(const Jet& j1, const Jet& j2, const Jet& j3, const Jet& j4) { const FourMomentum sumVec1 = j1.momentum() + j2.momentum(); const FourMomentum sumVec2 = j3.momentum() + j4.momentum(); return deltaPhi(sumVec1, sumVec2); } /// Perform the per-event analysis void analyze(const Event& event) { // Retrieve all anti-kt R=0.6 jets with pT above 20 GeV and eta < 4.4 const Jets jets = applyProjection(event, "AntiKt6Jets").jetsByPt(Cuts::pT >= 20*GeV && Cuts::abseta <= 4.4); // Require at least 4 jets, with the leading jet pT above 42.5 GeV if (jets.size() < 4) vetoEvent; if (jets[0].pT() < 42.5*GeV) vetoEvent; /// Fill histograms _hists["deltaPt12"]->fill( calcDeltaPt( jets[0], jets[1] )); _hists["deltaPt34"]->fill( calcDeltaPt( jets[2], jets[3] )); _hists["deltaPt13"]->fill( calcDeltaPt( jets[0], jets[2] )); _hists["deltaPt23"]->fill( calcDeltaPt( jets[1], jets[2] )); _hists["deltaPt14"]->fill( calcDeltaPt( jets[0], jets[3] )); _hists["deltaPt24"]->fill( calcDeltaPt( jets[1], jets[3] )); // _hists["deltaPhi12"]->fill( deltaPhi( jets[0],jets[1] )); _hists["deltaPhi34"]->fill( deltaPhi( jets[2],jets[3] )); _hists["deltaPhi13"]->fill( deltaPhi( jets[0],jets[2] )); _hists["deltaPhi23"]->fill( deltaPhi( jets[1],jets[2] )); _hists["deltaPhi14"]->fill( deltaPhi( jets[0],jets[3] )); _hists["deltaPhi24"]->fill( deltaPhi( jets[1],jets[3] )); // _hists["deltaY12"]->fill( deltaRap( jets[0], jets[1] )); _hists["deltaY34"]->fill( deltaRap( jets[2], jets[3] )); _hists["deltaY13"]->fill( deltaRap( jets[0], jets[2] )); _hists["deltaY23"]->fill( deltaRap( jets[1], jets[2] )); _hists["deltaY14"]->fill( deltaRap( jets[0], jets[3] )); _hists["deltaY24"]->fill( deltaRap( jets[1], jets[3] )); // _hists["deltaPhiPlanes12"]->fill( calcDeltaPhiPlanes(jets[0], jets[1], jets[2], jets[3] )); _hists["deltaPhiPlanes13"]->fill( calcDeltaPhiPlanes(jets[0], jets[2], jets[1], jets[3] )); _hists["deltaPhiPlanes14"]->fill( calcDeltaPhiPlanes(jets[0], jets[3], jets[1], jets[2] )); } /// Post-run processing void finalize() { for (auto& key_hist : _hists) normalize(key_hist.second); } //@} /// Histograms map _hists; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1479760); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1495243.cc b/analyses/pluginATLAS/ATLAS_2017_I1495243.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1495243.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1495243.cc @@ -1,221 +1,221 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief $t\bar{t}$ + jets at 13 TeV class ATLAS_2017_I1495243 : public Analysis { public: 
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1495243); void init() { Cut eta_full = Cuts::abseta < 5.0 && Cuts::pT > 1.0*MeV; Cut eta_lep = Cuts::abseta < 2.5; // Collect final state particles FinalState FS(eta_full); // Get photons to dress leptons IdentifiedFinalState photons(FS); photons.acceptIdPair(PID::PHOTON); // Projection to find the electrons IdentifiedFinalState el_id(FS); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); electrons.acceptTauDecays(false); DressedLeptons dressedelectrons(photons, electrons, 0.1, Cuts::abseta< 2.5 && Cuts::pT > 25.0*GeV, true); - addProjection(dressedelectrons, "electrons"); + declare(dressedelectrons, "electrons"); DressedLeptons fulldressedelectrons(photons, electrons, 0.1, eta_full, true); // Projection to find the muons IdentifiedFinalState mu_id(FS); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); muons.acceptTauDecays(false); DressedLeptons dressedmuons(photons, muons, 0.1, Cuts::abseta < 2.5 && Cuts::pT > 25.0*GeV, true); - addProjection(dressedmuons, "muons"); + declare(dressedmuons, "muons"); DressedLeptons fulldressedmuons(photons, muons, 0.1, eta_full, true); // Projection to find neutrinos to exclude from jets IdentifiedFinalState nu_id; nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(false); // Jet clustering VetoedFinalState vfs; vfs.addVetoOnThisFinalState(fulldressedelectrons); vfs.addVetoOnThisFinalState(fulldressedmuons); vfs.addVetoOnThisFinalState(neutrinos); FastJets jets(vfs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); - addProjection(jets, "jets"); + declare(jets, "jets"); // Book Histograms book(_h["bjet_pt"] , 5,1,1); book(_h["2bjet_pt"], 6,1,1); book(_h["ljet_pt"] , 7,1,1); for (size_t i = 0; i < 4; ++i) { book(_h["njet" + to_str(i)], i+1, 1, 1); book(_h["Q0" + to_str(i)], "_Q0" + to_str(i+ 7), refData((i>1?"d":"d0") + to_str(i+ 8) + "-x01-y01")); book(_h["MQ0" + to_str(i)], "_MQ0" + to_str(i+12), refData("d" + to_str(i+12) + "-x01-y01")); book(_h["Qsum" + to_str(i)], "_Qsum" + to_str(i+16), refData("d" + to_str(i+16) + "-x01-y01")); book(_h["MQsum" + to_str(i)], "_MQsum" + to_str(i+20), refData("d" + to_str(i+20) + "-x01-y01")); book(_s["gapFracQ0" + to_str(i)], 8+i, 1 ,1, true); book(_s["gapFracMQ0" + to_str(i)], 12+i, 1, 1, true); book(_s["gapFracQsum" + to_str(i)], 16+i, 1, 1, true); book(_s["gapFracMQsum" + to_str(i)], 20+i, 1, 1, true); } } void analyze(const Event& event) { // Get the selected objects, using the projections. 
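// The lepton selection below removes dressed leptons that overlap with a selected jet:
// filter_discard(..., [&](const DressedLepton& l){ return any(all_jets, deltaRLess(l, 0.4)); })
// keeps a lepton only if no jet lies within DeltaR < 0.4 of it. Written out explicitly
// (equivalent sketch, illustrative only):
//   bool overlapsJet = false;
//   for (const Jet& j : all_jets)
//     if (deltaR(l, j) < 0.4) { overlapsJet = true; break; }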
Jets all_jets = apply<FastJets>(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.5); const vector<DressedLepton> electrons = filter_discard(apply<DressedLeptons>(event, "electrons").dressedLeptons(), [&](const DressedLepton &e) { return any(all_jets, deltaRLess(e, 0.4)); }); const vector<DressedLepton> muons = filter_discard(apply<DressedLeptons>(event, "muons").dressedLeptons(), [&](const DressedLepton &m) { return any(all_jets, deltaRLess(m, 0.4)); }); if (electrons.size() != 1 || muons.size() != 1) vetoEvent; if (electrons[0].charge() == muons[0].charge()) vetoEvent; Jets bjets, extrajets; for (Jet j : all_jets) { size_t b_tagged = j.bTags(Cuts::pT > 5*GeV).size(); if (bjets.size() < 2 && b_tagged) bjets += j; else extrajets += j; } if (bjets.size() < 2) vetoEvent; double bjetpt = bjets[0].pt(); if (bjetpt > 250*GeV) bjetpt = 275*GeV; _h["bjet_pt"]->fill(bjetpt); double b2jetpt = bjets[1].pt(); if (b2jetpt > 150*GeV) b2jetpt = 175*GeV; _h["2bjet_pt"]->fill(b2jetpt); if (extrajets.size()) { double ljetpt = extrajets[0].pt(); if (ljetpt > 250*GeV) ljetpt = 275*GeV; _h["ljet_pt"]->fill(ljetpt); } double Memubb = (electrons[0].momentum() + muons[0].momentum() + bjets[0].momentum() + bjets[1].momentum()).mass(); vector<double> leadpt = { 0., 0., 0., 0. }, ptsum = { 0., 0., 0., 0. }; vector<size_t> njetcount = { 0, 0, 0, 0 }; for (size_t i = 0; i < extrajets.size(); ++i) { double absrap = extrajets[i].absrap(), pt = extrajets[i].pT(); if (pt > 25*GeV) ++njetcount[0]; if (pt > 40*GeV) ++njetcount[1]; if (pt > 60*GeV) ++njetcount[2]; if (pt > 80*GeV) ++njetcount[3]; if (absrap < 0.8 && pt > leadpt[0]) leadpt[0] = pt; else if (absrap > 0.8 && absrap < 1.5 && pt > leadpt[1]) leadpt[1] = pt; else if (absrap > 1.5 && absrap < 2.1 && pt > leadpt[2]) leadpt[2] = pt; if (absrap < 2.1 && pt > leadpt[3]) leadpt[3] = pt; if (absrap < 0.8) ptsum[0] += pt; else if (absrap > 0.8 && absrap < 1.5) ptsum[1] += pt; else if (absrap > 1.5 && absrap < 2.1) ptsum[2] += pt; if (absrap < 2.1) ptsum[3] += pt; } for (size_t i = 0; i < 4; ++i) { size_t cutoff = i? 3 : 4; if (njetcount[i] > cutoff) njetcount[i] = cutoff; _h["njet" + to_str(i)]->fill(njetcount[i]); if (leadpt[i] > 305*GeV) leadpt[i] = 305*GeV; _h["Q0" + to_str(i)]->fill(leadpt[i]); if (ptsum[i] > 505*GeV) ptsum[i] = 505*GeV; _h["Qsum" + to_str(i)]->fill(ptsum[i]); } for (size_t i = 0; i < 4; ++i) { if (i == 0 && !(Memubb < 300*GeV)) continue; if (i == 1 && !(Memubb > 300*GeV && Memubb < 425*GeV)) continue; if (i == 2 && !(Memubb > 425*GeV && Memubb < 600*GeV)) continue; if (i == 3 && !(Memubb > 600*GeV)) continue; _h["MQ0" + to_str(i)]->fill(leadpt[3]); _h["MQsum" + to_str(i)]->fill(ptsum[3]); } } void constructGapFraction(Scatter2DPtr out, Histo1DPtr in) { bool hasWeights = in->effNumEntries() != in->numEntries(); double denW = in->sumW(); double denW2 = in->sumW2(); size_t nEnd = out->numPoints(); for (size_t i = 0; i < nEnd; ++i) { double numW = in->sumW(), numW2 = in->sumW2(); for (size_t j = i; j < nEnd; ++j) { numW -= in->bin(j).sumW(); numW2 -= in->bin(j).sumW2(); } double yval = safediv(numW, denW); double yerr = sqrt(safediv(yval * (1 - yval), denW)); if (hasWeights) { // use F.
James's approximation for weighted events
yerr = sqrt( safediv((1 - 2 * yval) * numW2 + yval * yval * denW2, denW * denW) ); } out->point(i).setY(yval, yerr); } } void finalize() { // Build gap fraction plots
for (size_t i = 0; i < 4; ++i) { constructGapFraction(_s["gapFracQ0" + to_str(i)], _h["Q0" + to_str(i)]); constructGapFraction(_s["gapFracMQ0" + to_str(i)], _h["MQ0" + to_str(i)]); constructGapFraction(_s["gapFracQsum" + to_str(i)], _h["Qsum" + to_str(i)]); constructGapFraction(_s["gapFracMQsum" + to_str(i)], _h["MQsum" + to_str(i)]); } // Normalize to cross-section
for (map<string, Histo1DPtr>::iterator hit = _h.begin(); hit != _h.end(); ++hit) { if (hit->first.find("jet") != string::npos) normalize(hit->second); } } private: /// @name Histogram maps
map<string, Histo1DPtr> _h; map<string, Scatter2DPtr> _s; };
// Declare the class as a hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2017_I1495243); }
diff --git a/analyses/pluginATLAS/ATLAS_2017_I1609253.cc b/analyses/pluginATLAS/ATLAS_2017_I1609253.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1609253.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1609253.cc @@ -1,133 +1,133 @@ // -*- C++ -*-
#include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Multijet transverse energy-energy correlations (TEEC) at 8 TeV
class ATLAS_2017_I1609253 : public Analysis { public: /// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1609253); /// Initialization, called once before running
void init() { // Projections
const FastJets jets(FinalState(), FastJets::ANTIKT, 0.4, JetAlg::Muons::ALL, JetAlg::Invisibles::ALL); - addProjection(jets, "Jets"); + declare(jets, "Jets"); // Book histograms
book(_hist_EEC1 , 1, 1, 1); book(_hist_AEEC1, 2, 1, 1); book(_hist_EEC2 , 3, 1, 1); book(_hist_AEEC2, 4, 1, 1); book(_hist_EEC3 , 5, 1, 1); book(_hist_AEEC3, 6, 1, 1); book(_hist_EEC4 , 7, 1, 1); book(_hist_AEEC4, 8, 1, 1); book(_hist_EEC5 , 9, 1, 1); book(_hist_AEEC5, 10, 1, 1); book(_hist_EEC6 , 11, 1, 1); book(_hist_AEEC6, 12, 1, 1); } void analyze(const Event& event) { const Jets& jets = applyProjection<FastJets>(event, "Jets").jetsByPt(Cuts::abseta < 2.5 && Cuts::pT > 100*GeV); if (jets.size() < 2) vetoEvent; double sumPt12 = jets[0].pt() + jets[1].pt(); if (sumPt12 < 800*GeV) vetoEvent; double sumEt = 0.; for (const Jet& j : jets) sumEt += j.Et(); for (const Jet& j1 : jets) { double et1 = j1.Et(); for (const Jet& j2 : jets) { double et2 = j2.Et(); double etWeight = et1*et2/(sumEt*sumEt); double dPhi = deltaPhi(j1, j2); double cosPhi = cos(dPhi); if (cos(dPhi) == 1.0) cosPhi = 0.9999; if (sumPt12 > 800*GeV && sumPt12 <= 850*GeV) _hist_EEC1->fill(cosPhi, etWeight); if (sumPt12 > 850*GeV && sumPt12 <= 900*GeV) _hist_EEC2->fill(cosPhi, etWeight); if (sumPt12 > 900*GeV && sumPt12 <= 1000*GeV) _hist_EEC3->fill(cosPhi, etWeight); if (sumPt12 > 1000*GeV && sumPt12 <= 1100*GeV) _hist_EEC4->fill(cosPhi, etWeight); if (sumPt12 > 1100*GeV && sumPt12 <= 1400*GeV) _hist_EEC5->fill(cosPhi, etWeight); if (sumPt12 > 1400*GeV) _hist_EEC6->fill(cosPhi, etWeight); } } } void finalize() { normalize(_hist_EEC1); normalize(_hist_EEC2); normalize(_hist_EEC3); normalize(_hist_EEC4); normalize(_hist_EEC5); normalize(_hist_EEC6); vector<Point2D> points1, points2, points3, points4, points5, points6; size_t nBins = _hist_EEC1->numBins(); for (size_t k = 0; k < nBins/2; ++k) { double x = _hist_EEC1->bin(k).midpoint(); double ex = _hist_EEC1->bin(k).xWidth()/2; double y1 = _hist_EEC1->bin(k).height() - _hist_EEC1->bin(nBins-(k+1)).height(); double ey1 = sqrt(
pow(_hist_EEC1->bin(k).heightErr(),2) + pow(_hist_EEC1->bin(nBins-(k+1)).heightErr(),2) ); points1.push_back(Point2D(x,y1,ex,ey1)); double y2 = _hist_EEC2->bin(k).height() - _hist_EEC2->bin(nBins-(k+1)).height(); double ey2 = sqrt( pow(_hist_EEC2->bin(k).heightErr(),2) + pow(_hist_EEC2->bin(nBins-(k+1)).heightErr(),2) ); points2.push_back(Point2D(x,y2,ex,ey2)); double y3 = _hist_EEC3->bin(k).height() - _hist_EEC3->bin(nBins-(k+1)).height(); double ey3 = sqrt( pow(_hist_EEC3->bin(k).heightErr(),2) + pow(_hist_EEC3->bin(nBins-(k+1)).heightErr(),2) ); points3.push_back(Point2D(x,y3,ex,ey3)); double y4 = _hist_EEC4->bin(k).height() - _hist_EEC4->bin(nBins-(k+1)).height(); double ey4 = sqrt( pow(_hist_EEC4->bin(k).heightErr(),2) + pow(_hist_EEC4->bin(nBins-(k+1)).heightErr(),2) ); points4.push_back(Point2D(x,y4,ex,ey4)); double y5 = _hist_EEC5->bin(k).height() - _hist_EEC5->bin(nBins-(k+1)).height(); double ey5 = sqrt( pow(_hist_EEC5->bin(k).heightErr(),2) + pow(_hist_EEC5->bin(nBins-(k+1)).heightErr(),2) ); points5.push_back(Point2D(x,y5,ex,ey5)); double y6 = _hist_EEC6->bin(k).height() - _hist_EEC6->bin(nBins-(k+1)).height(); double ey6 = sqrt( pow(_hist_EEC6->bin(k).heightErr(),2) + pow(_hist_EEC6->bin(nBins-(k+1)).heightErr(),2) ); points6.push_back(Point2D(x,y6,ex,ey6)); } _hist_AEEC1->addPoints(points1); _hist_AEEC2->addPoints(points2); _hist_AEEC3->addPoints(points3); _hist_AEEC4->addPoints(points4); _hist_AEEC5->addPoints(points5); _hist_AEEC6->addPoints(points6); } private: Histo1DPtr _hist_EEC1, _hist_EEC2, _hist_EEC3, _hist_EEC4, _hist_EEC5, _hist_EEC6; Scatter2DPtr _hist_AEEC1, _hist_AEEC2, _hist_AEEC3, _hist_AEEC4, _hist_AEEC5, _hist_AEEC6; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609253); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1614149.cc b/analyses/pluginATLAS/ATLAS_2017_I1614149.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1614149.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1614149.cc @@ -1,351 +1,351 @@ // -*- C++ -* #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "fastjet/tools/Filter.hh" // substructure includes included in fjcontrib-1.021 (http://fastjet.hepforge.org/contrib/) #include "Rivet/Tools/Nsubjettiness/Njettiness.hh" #include "Rivet/Tools/Nsubjettiness/Nsubjettiness.hh" #include "Rivet/Tools/Nsubjettiness/NjettinessPlugin.hh" namespace Rivet { class ATLAS_2017_I1614149 : public Analysis { public: /// Constructor ///@brief: Resolved and boosted ttbar l+jets cross sections at 13 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1614149); void init() { // Eta ranges Cut eta_full = (Cuts::abseta < 5.0); Cut lep_cuts = (Cuts::abseta < 2.5) && (Cuts::pT > 25*GeV); // All final state particles FinalState fs(eta_full); IdentifiedFinalState all_photons(fs); all_photons.acceptIdPair(PID::PHOTON); // Get photons to dress leptons IdentifiedFinalState ph_id(fs); ph_id.acceptIdPair(PID::PHOTON); // Projection to find the electrons IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState photons(ph_id); photons.acceptTauDecays(true); - addProjection(photons, "photons"); + declare(photons, "photons"); PromptFinalState electrons(el_id); electrons.acceptTauDecays(true); DressedLeptons dressedelectrons(photons, 
electrons, 0.1, lep_cuts); - addProjection(dressedelectrons, "elecs"); + declare(dressedelectrons, "elecs"); DressedLeptons ewdressedelectrons(all_photons, electrons, 0.1, eta_full); // Projection to find the muons IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); muons.acceptTauDecays(true); DressedLeptons dressedmuons(photons, muons, 0.1, lep_cuts); - addProjection(dressedmuons, "muons"); + declare(dressedmuons, "muons"); DressedLeptons ewdressedmuons(all_photons, muons, 0.1, eta_full); // Projection to find MET declare(MissingMomentum(fs), "MET"); // remove prompt neutrinos from jet clustering IdentifiedFinalState nu_id(fs); nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(true); // Jet clustering. VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(ewdressedelectrons); vfs.addVetoOnThisFinalState(ewdressedmuons); vfs.addVetoOnThisFinalState(neutrinos); FastJets jets(vfs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); - addProjection(jets, "jets"); + declare(jets, "jets"); // Addition of the large-R jets VetoedFinalState vfs1(fs); vfs1.addVetoOnThisFinalState(neutrinos); FastJets fjets(vfs1, FastJets::ANTIKT, 1.); fjets.useInvisibles(JetAlg::Invisibles::NONE); fjets.useMuons(JetAlg::Muons::NONE); - addProjection(fjets, "fjets"); + declare(fjets, "fjets"); bookHists("top_pt_res", 15); bookHists("top_absrap_res", 17); bookHists("ttbar_pt_res", 19); bookHists("ttbar_absrap_res", 21); bookHists("ttbar_m_res", 23); bookHists("top_pt_boost", 25); bookHists("top_absrap_boost", 27); } void analyze(const Event& event) { // Get the selected objects, using the projections. vector electrons = apply(event, "elecs").dressedLeptons(); vector muons = apply(event, "muons").dressedLeptons(); const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.5); const PseudoJets& all_fjets = apply(event, "fjets").pseudoJetsByPt(); // get MET const Vector3 met = apply(event, "MET").vectorMPT(); Jets bjets, lightjets; for (const Jet& jet : jets) { bool b_tagged = jet.bTags(Cuts::pT > 5*GeV).size(); if ( b_tagged && bjets.size() < 2) bjets +=jet; else lightjets += jet; } // Implementing large-R jets definition // trim the jets PseudoJets trimmed_fatJets; float Rfilt = 0.2; float pt_fraction_min = 0.05; fastjet::Filter trimmer(fastjet::JetDefinition(fastjet::kt_algorithm, Rfilt), fastjet::SelectorPtFractionMin(pt_fraction_min)); for (PseudoJet pjet : all_fjets) trimmed_fatJets += trimmer(pjet); trimmed_fatJets = fastjet::sorted_by_pt(trimmed_fatJets); PseudoJets trimmed_jets; for (unsigned int i = 0; i < trimmed_fatJets.size(); ++i) { FourMomentum tj_mom = momentum(trimmed_fatJets[i]); if (tj_mom.pt() <= 300*GeV) continue; if (tj_mom.abseta() >= 2.0) continue; trimmed_jets.push_back(trimmed_fatJets[i]); } bool single_electron = (electrons.size() == 1) && (muons.empty()); bool single_muon = (muons.size() == 1) && (electrons.empty()); DressedLepton *lepton = NULL; if (single_electron) lepton = &electrons[0]; else if (single_muon) lepton = &muons[0]; if (!single_electron && !single_muon) vetoEvent; bool pass_resolved = true; bool num_b_tagged_jets = (bjets.size() == 2); if (!num_b_tagged_jets) pass_resolved = false; if (jets.size() < 4) pass_resolved = false; bool pass_boosted = true; int fatJetIndex = -1; bool passTopTag = false; bool passDphi = false; bool passAddJet = false; bool goodLepJet = false; bool lepbtag = false; bool hadbtag=false; vector lepJetIndex; vector jet_farFromHadTopJetCandidate; if 
(met.mod() < 20*GeV) pass_boosted = false; if (pass_boosted) { double transmass = _mT(lepton->momentum(), met); if (transmass + met.mod() < 60*GeV) pass_boosted = false; } if (pass_boosted) { if (trimmed_jets.size() >= 1) { for (unsigned int j = 0; j 100*GeV && momentum(trimmed_jets.at(j)).pt() > 300*GeV && momentum(trimmed_jets.at(j)).pt() < 1500*GeV && fabs(momentum(trimmed_jets.at(j)).eta()) < 2.) { passTopTag = true; fatJetIndex = j; break; } } } } if(!passTopTag && fatJetIndex == -1) pass_boosted = false; if (pass_boosted) { double dPhi_fatjet = deltaPhi(lepton->phi(), momentum(trimmed_jets.at(fatJetIndex)).phi()); double dPhi_fatjet_lep_cut = 1.0; //2.3 if (dPhi_fatjet > dPhi_fatjet_lep_cut ) { passDphi = true; } } if (!passDphi) pass_boosted = false; if (bjets.empty()) pass_boosted = false; if (pass_boosted) { for (unsigned int sj = 0; sj < jets.size(); ++sj) { double dR = deltaR(jets.at(sj).momentum(), momentum(trimmed_jets.at(fatJetIndex))); if(dR > 1.5) { passAddJet = true; jet_farFromHadTopJetCandidate.push_back(sj); } } } if (!passAddJet) pass_boosted = false; if (pass_boosted) { for (int ltj : jet_farFromHadTopJetCandidate) { double dR_jet_lep = deltaR(jets.at(ltj).momentum(), lepton->momentum()); double dR_jet_lep_cut = 2.0;//1.5 if (dR_jet_lep < dR_jet_lep_cut) { lepJetIndex.push_back(ltj); goodLepJet = true; } } } if(!goodLepJet) pass_boosted = false; if (pass_boosted) { for (int lepj : lepJetIndex) { lepbtag = jets.at(lepj).bTags(Cuts::pT > 5*GeV).size(); if (lepbtag) break; } } double dR_fatBjet_cut = 1.0; if (pass_boosted) { for (const Jet& bjet : bjets) { hadbtag |= deltaR(momentum(trimmed_jets.at(fatJetIndex)), bjet) < dR_fatBjet_cut; } } if (!(lepbtag || hadbtag)) pass_boosted = false; FourMomentum pbjet1; //Momentum of bjet1 FourMomentum pbjet2; //Momentum of bjet int Wj1index = -1, Wj2index = -1; if (pass_resolved) { if ( deltaR(bjets[0], *lepton) <= deltaR(bjets[1], *lepton) ) { pbjet1 = bjets[0].momentum(); pbjet2 = bjets[1].momentum(); } else { pbjet1 = bjets[1].momentum(); pbjet2 = bjets[0].momentum(); } double bestWmass = 1000.0*TeV; double mWPDG = 80.399*GeV; for (unsigned int i = 0; i < (lightjets.size() - 1); ++i) { for (unsigned int j = i + 1; j < lightjets.size(); ++j) { double wmass = (lightjets[i].momentum() + lightjets[j].momentum()).mass(); if (fabs(wmass - mWPDG) < fabs(bestWmass - mWPDG)) { bestWmass = wmass; Wj1index = i; Wj2index = j; } } } FourMomentum pjet1 = lightjets[Wj1index].momentum(); FourMomentum pjet2 = lightjets[Wj2index].momentum(); // compute hadronic W boson FourMomentum pWhadron = pjet1 + pjet2; double pz = computeneutrinoz(lepton->momentum(), met); FourMomentum ppseudoneutrino( sqrt(sqr(met.x()) + sqr(met.y()) + sqr(pz)), met.x(), met.y(), pz); //compute leptonic, hadronic, combined pseudo-top FourMomentum ppseudotoplepton = lepton->momentum() + ppseudoneutrino + pbjet1; FourMomentum ppseudotophadron = pbjet2 + pWhadron; FourMomentum pttbar = ppseudotoplepton + ppseudotophadron; fillHists("top_pt_res", ppseudotophadron.pt()/GeV); fillHists("top_absrap_res", ppseudotophadron.absrap()); fillHists("ttbar_pt_res", pttbar.pt()/GeV); fillHists("ttbar_absrap_res", pttbar.absrap()); fillHists("ttbar_m_res", pttbar.mass()/GeV); } if (pass_boosted) {// Boosted selection double hadtop_pt= momentum(trimmed_jets.at(fatJetIndex)).pt() / GeV; double hadtop_absrap= momentum(trimmed_jets.at(fatJetIndex)).absrap(); fillHists("top_pt_boost", hadtop_pt); fillHists("top_absrap_boost", hadtop_absrap); } } void finalize() { // Normalize to cross-section 
const double sf = (crossSection() / sumOfWeights()); for (HistoMap::value_type& hist : _h) { scale(hist.second, sf); if (hist.first.find("_norm") != string::npos) normalize(hist.second); } } void bookHists(std::string name, unsigned int index) { book(_h[name], index, 1 ,1); book(_h[name + "_norm"], index + 1, 1 ,1); } void fillHists(std::string name, double value) { _h[name]->fill(value); _h[name + "_norm"]->fill(value); } double _mT(const FourMomentum &l, const Vector3 &met) const { return sqrt(2.0 * l.pT() * met.mod() * (1 - cos(deltaPhi(l, met))) ); } double tau32(const fastjet::PseudoJet &jet, double jet_rad) const { double alpha = 1.0; Nsubjettiness::NormalizedCutoffMeasure normalized_measure(alpha, jet_rad, 1000000); // WTA definition // Nsubjettiness::OnePass_WTA_KT_Axes wta_kt_axes; // as in JetSubStructure recommendations Nsubjettiness::KT_Axes kt_axes; /// NsubjettinessRatio uses the results from Nsubjettiness to calculate the ratio /// tau_N/tau_M, where N and M are specified by the user. The ratio of different tau values /// is often used in analyses, so this class is helpful to streamline code. Nsubjettiness::NsubjettinessRatio tau32_kt(3, 2, kt_axes, normalized_measure); double tau32 = tau32_kt.result(jet); return tau32; } double computeneutrinoz(const FourMomentum& lepton, const Vector3 &met) const { //computing z component of neutrino momentum given lepton and met double pzneutrino; double m_W = 80.399; // in GeV, given in the paper double k = (( sqr( m_W ) - sqr( lepton.mass() ) ) / 2 ) + (lepton.px() * met.x() + lepton.py() * met.y()); double a = sqr ( lepton.E() )- sqr ( lepton.pz() ); double b = -2*k*lepton.pz(); double c = sqr( lepton.E() ) * sqr( met.mod() ) - sqr( k ); double discriminant = sqr(b) - 4 * a * c; double quad[2] = { (- b - sqrt(discriminant)) / (2 * a), (- b + sqrt(discriminant)) / (2 * a) }; //two possible quadratic solns if (discriminant < 0) pzneutrino = - b / (2 * a); //if the discriminant is negative else { //if the discriminant is greater than or equal to zero, take the soln with smallest absolute value double absquad[2]; for (int n=0; n<2; ++n) absquad[n] = fabs(quad[n]); if (absquad[0] < absquad[1]) pzneutrino = quad[0]; else pzneutrino = quad[1]; } return pzneutrino; } private: /// @name Objects that are used by the event selection decisions typedef map HistoMap; HistoMap _h; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1614149); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1624693.cc b/analyses/pluginATLAS/ATLAS_2017_I1624693.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1624693.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1624693.cc @@ -1,435 +1,435 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" /// @todo Include more projections as required, e.g. ChargedFinalState, FastJets, ZFinder... 
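// The analysis below (ATLAS_2017_I1624693) studies chains of ordered hadrons. Its basic
// pair variable, computed in qq(), is
//   Q = sqrt( max(0, m(h1,h2)^2 - 4*m_pi^2) ),
// with both charged hadrons assigned the pion mass (pim = 0.1396 GeV); like-sign pairs
// (match == true) enter the Delta(Q) spectrum with the opposite sign to unlike-sign pairs.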
namespace Rivet { class ATLAS_2017_I1624693 : public Analysis { public: /// Constructor /// @brief Study of ordered hadron chains at 7 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1624693); /// @name Analysis methods //@{ struct usedX { int locMin; int locMax; std::vector > chains; // Constructor usedX(int min, int max, int ic, float mass) { locMin=min; locMax=max; chains.clear(); chains.push_back(std::pair(ic,mass)); } // Constructor usedX(int min, int max) { locMin=min; locMax=max; chains.clear(); } void add(int jc, float mass) { if (chains.size()) { std::vector >::iterator it=chains.begin(); while ( it!=chains.end() && mass>(*it).second ) ++it; chains.insert(it,std::pair(jc,mass)); } else { chains.push_back(std::pair(jc,mass)); } } }; /// Book histograms and initialise projections before the run void init() { /// @todo Initialise and register projections here ChargedFinalState cfs(-2.5, 2.5, 0.1*GeV); - addProjection(cfs,"CFS"); + declare(cfs,"CFS"); // pion mass; pim = 0.1396; /// @todo Book histograms here, e.g.: book(_DeltaQ , 1, 1, 1); book(_Delta3h, 2, 1, 1); book(_dalitz , 3, 1, 1); // auxiliary book(_h_nch, "_nch", 200, -0.5, 199.5); } /// Perform the per-event analysis void analyze(const Event& event) { //const double weight = event.weight(); bool match =false; /// @todo Do the event by event analysis here const ChargedFinalState& had = applyProjection(event, "CFS"); ParticleVector hs=had.particles(); int nch = hs.size(); if (nch < 3) return; _h_nch->fill(1.*nch,1.); for (unsigned int i=0; i < hs.size() - 1; ++i) { for (unsigned int j=i+1; j < hs.size(); ++j) { double q12 = qq(hs[i],hs[j],match); if (match) _DeltaQ->fill(q12,-1.); else _DeltaQ->fill(q12,1.); } } // chain selection std::vector wchain; std::vector< std::vector > rchains; std::vector< std::vector > mchains; wchain.clear(); rchains.clear(); mchains.clear(); for (unsigned int ip1 = 0; ip1< hs.size(); ++ip1 ) { wchain.push_back(1.); std::vector cc(1,ip1); std::vector mc; double qlmin=10000.; int ilmin=-1; for (unsigned ip2 = 0; ip2 < hs.size(); ++ip2) { if (ip2==ip1) continue; double ql = qq(hs[ip1],hs[ip2],match); if (!match) continue; // looking for closest like-sign match if (ql ilmin && rchains[ilmin][1]==ip1) { // std::cout <<"exclusive match:"<< std::endl; wchain.back()=0.5; wchain[ilmin]=0.5; } double m3min=10000.; int ixmin=-1; for (unsigned ip2 = 0; ip2< hs.size(); ++ip2) { if (ip2==ip1 || int(ip2)==ilmin ) continue; double qx = qq(hs[ip1],hs[ip2],match); if (match) continue; double qxl = qq(hs[ip2],hs[ilmin],match); double m3 = sqrt(9*pim*pim+qxl*qxl+qlmin*qlmin+qx*qx); if (m3 assoc(hs.size(),0.); // cache for association rate std::vector accept(rchains.size(), false); // loop over chains and accept lowest masses while watching the association rate int inext = 0; while ( inext>-1 ) { inext = -1; float cMin = 100000.; // find non-accepted chain with lowest Q_ls; dissolve chains if association count over 2 for (unsigned int ic=0; ic < rchains.size(); ++ic) { if (rchains[ic].size() < 2) continue; if (accept[ic]) continue; if (mchains[ic][0] < cMin) { cMin = mchains[ic][0]; inext=ic; } } if (inext>-1 ) { unsigned int cloc0 = rchains[inext][0]; unsigned int cloc1 = rchains[inext][1]; if ( (assoc[cloc0] + 1. <= 2.) && (assoc[cloc1] + 1. <= 2.) 
) { // chain can be accepted accept[inext]=true; assoc[cloc0]+=1.; assoc[cloc1]+=1.; if (wchain[inext]==0.5) { // accept the identical chain, too for (unsigned int ic=0; ic1 ) { // association count filled up, discard chain accept[inext]=true; wchain[inext]=0.; } else { // dissolve chain and find new association unsigned int i1 = rchains[inext][0]; float mMn = 1000000.; int ipn = -1; for (unsigned int i2=0; i2 1.) continue; if (m > 0. && m = 0) { rchains[inext][1]=ipn; mchains[inext][0]=mMn; // resolve chain weight : by default, it is 1. wchain[inext]=1.; // check exclusivity of pairing for (unsigned int ico=0; ico0.) continue; float q12 = qq(hs[ipn],hs[ij],match); double m3 = sqrt(9*pim*pim+q02*q02+mMn*mMn+q12*q12); if (m3>0. && m3 =0) { rchains[inext].push_back(ipnn); rchains[inext][2]=ipnn; mchains[inext][1]=mMnn; } else {accept[inext]=true; wchain[inext]=0.;} } else { // chain not recovered wchain[inext]=0.; accept[inext]=true; } } } } // end loop over chains // cleanup: association rate for unlike-sign pairs // third member verification std::vector accept3(rchains.size(),false); // watch unlike-sign combinations used std::vector used; // loop over chains and accept lowest masses while watching the association rate inext = 0; while ( inext>-1 ) { inext = -1; float cMin = 100000.; // find non-accepted chain with lowest mass; dissolve chains if association count over 3 for (unsigned int ic=0; ic < rchains.size(); ++ic) { if (rchains[ic].size() < 3 || !wchain[ic] || !accept[ic]) continue; if (accept3[ic]) continue; if (mchains[ic][1]-1 ) { unsigned int cloc0 = rchains[inext][0]; unsigned int cloc1 = rchains[inext][1]; unsigned int cloc2 = rchains[inext][2]; // map use of unlike sign pairs int iu0 = -1; float w0=0.; for (unsigned int iu=0; iu 0) for (unsigned int iw=0; iw0) for (unsigned int iw=0; iw 0.) continue; if (assoc[i3] > 3-wchain[inext]) continue; // check pair association w0=0.; w1=0.; for (unsigned int iu=0; iu 0) for (unsigned int iw=0; iw0) for (unsigned int iw=0; iw2. || w1+wchain[inext]>2.) continue; float q12 = qq(hs[i2],hs[i3],match); float q01 = qq(hs[i1],hs[i2],match); float m = sqrt(9*pim*pim+q02*q02+q01*q01+q12*q12); if (m>0. 
&& m =0) { rchains[inext].push_back(ipn); rchains[inext][2]=iploc; mchains[inext][1]=mMn; } else { // chain not recovered wchain[inext]=0.; } } } } // end loop over chains // end 3rd member optimization for (unsigned int ip=0; ip < wchain.size(); ++ip) { if (!wchain[ip]) continue; if (rchains[ip].size() < 3) continue; float m3min = mchains[ip][1]; if (m3min > 0.59) continue; // dalitz plot std::pair dd = dalitz3(hs[rchains[ip][0]], hs[rchains[ip][1]], hs[rchains[ip][2]]); _dalitz->fill(dd.first,dd.second,1.*wchain[ip]); // Delta(Q) spectra float qlmin = mchains[ip][0]; float qxmin = qq(hs[rchains[ip][0]], hs[rchains[ip][2]], match); float xlmin = qq(hs[rchains[ip][1]], hs[rchains[ip][2]], match); _Delta3h->fill(qxmin, 0.5*wchain[ip]); _Delta3h->fill(xlmin, 0.5*wchain[ip]); _Delta3h->fill(qlmin, -1.*wchain[ip]); } } /// Normalise histograms etc., after the run void finalize() { // normalize by the number of charged particles // counter automatic division by bin size double norm = 0.01 / (_h_nch->xMean()*_h_nch->numEntries()); _dalitz->scaleW(norm); _DeltaQ->scaleW(norm); _Delta3h->scaleW(norm); } //@} double qq(const Particle& gp1, const Particle& gp2, bool& match) { match = gp1.charge() * gp2.charge() > 0; FourMomentum p1, p2; p1.setPM(gp1.px(), gp1.py(), gp1.pz(), pim); p2.setPM(gp2.px(), gp2.py(), gp2.pz(), pim); return sqrt(fmax(0., (p1 + p2).mass2() - 4*pim*pim)); } std::pair dalitz3(const Particle& gp1, const Particle& gp2, const Particle& gp3) const { float p1= gp1.pt(); float p2= gp2.pt(); float p3= gp3.pt(); float th1 = gp1.theta(); float th2 = gp2.theta(); float th3 = gp3.theta(); float ph1 = gp1.phi(); float ph2 = gp2.phi(); float ph3 = gp3.phi(); float e1 = sqrt(p1*p1+pim*pim); float e2 = sqrt(p2*p2+pim*pim); float e3 = sqrt(p3*p3+pim*pim); float p1x = p1*cos(ph1)*sin(th1); float p1y = p1*sin(ph1)*sin(th1); float p1z = p1*cos(th1); float p2x = p2*cos(ph2)*sin(th2); float p2y = p2*sin(ph2)*sin(th2); float p2z = p2*cos(th2); float p3x = p3*cos(ph3)*sin(th3); float p3y = p3*sin(ph3)*sin(th3); float p3z = p3*cos(th3); float px = p1x+p2x+p3x; float py = p1y+p2y+p3y; float pz = p1z+p2z+p3z; float ap = sqrt(px*px+py*py+pz*pz); float e=e1+e2+e3; float beta = ap/e; float gamma = 1./sqrt(1-beta*beta); float p1l = (p1x*px+p1y*py+p1z*pz)/ap; float p2l = (p2x*px+p2y*py+p2z*pz)/ap; float p3l = (p3x*px+p3y*py+p3z*pz)/ap; float e1_boost = gamma*e1-gamma*beta*p1l; float e2_boost = gamma*e2-gamma*beta*p2l; float e3_boost = gamma*e3-gamma*beta*p3l; float Q = sqrt(e*e-ap*ap)-3*pim; return std::pair(sqrt(3.)*(e1_boost-e2_boost)/Q , 3*(e3_boost-pim)/Q-1.); } private: // Data members like post-cuts event weight counters go here float pim; private: /// @name Histograms Histo1DPtr _DeltaQ; Histo1DPtr _Delta3h; Histo1DPtr _h_nch; Histo2DPtr _dalitz; //@} }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1624693); } diff --git a/analyses/pluginCMS/CMS_2012_PAS_FSQ_12_020.cc b/analyses/pluginCMS/CMS_2012_PAS_FSQ_12_020.cc --- a/analyses/pluginCMS/CMS_2012_PAS_FSQ_12_020.cc +++ b/analyses/pluginCMS/CMS_2012_PAS_FSQ_12_020.cc @@ -1,108 +1,108 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// @brief CMS underlying event in leading track events at 7 TeV /// @author Paolo Gunnellini (DESY) /// /// CMS measurement of the underlying event in "leading track" events. 
class CMS_2012_PAS_FSQ_12_020 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2012_PAS_FSQ_12_020); /// Book histograms and initialise projections before the run void init() { const ChargedFinalState cfs(Cuts::abseta < 0.8 && Cuts::pT > 0.5*GeV); - addProjection(cfs, "Tracks"); + declare(cfs, "Tracks"); book(_NchgPDFden1 ,7,1,1); book(_NchgPMNden1 ,6,1,1); book(_NchgPMXden1 ,5,1,1); book(_PTsumPDFden1,10,1,1); book(_PTsumPMNden1,9,1,1); book(_PTsumPMXden1,8,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { // Require at least one track in the event with pT >= 0.5 GeV const FinalState& cfs = applyProjection(event, "Tracks"); if (cfs.empty()) vetoEvent; const Particles trks = cfs.particlesByPt(); // Identify leading track and its phi and pT const Particle p_lead = trks[0]; const double philead = p_lead.momentum().phi(); const double ptlead = p_lead.momentum().pT(); // Loop over particles and build transverse side variables double NchgP1 = 0, NchgP2 = 0, PTsumP1 = 0, PTsumP2 = 0; for (const Particle& p : trks) { // Region definition -- if not in transverse region, ignore const double dphi = mapAngle0To2Pi(p.phi() - philead); if (!inRange(dphi, PI/3, 2*PI/3) && !inRange(dphi, 4*PI/3, 5*PI/3)) continue; // Transverse region 1 if (inRange(dphi, PI/3, 2*PI/3)) { NchgP1 += 1; PTsumP1 += p.pT(); } // Transverse region 2 else if (inRange(dphi, 4*PI/3, 5*PI/3)) { NchgP2 += 1; PTsumP2 += p.pT(); } } // Calculate total variables // const double NchgPtot = (NchgP1 + NchgP2)/2; const double NchgPmax = max(NchgP1,NchgP2); const double NchgPmin = min(NchgP1,NchgP2); // const double PTsumPtot = (PTsumP1 + PTsumP2)/2; const double PTsumPmax = max(PTsumP1,PTsumP2); const double PTsumPmin = min(PTsumP1,PTsumP2); // const double PTsumPMXden = PTsumPmax/AREA; const double PTsumPMNden = PTsumPmin/AREA; const double NchgPMXden = NchgPmax/AREA; const double NchgPMNden = NchgPmin/AREA; // const double NchgPDFden = NchgPMXden - NchgPMNden; const double PTsumPDFden = PTsumPMXden - PTsumPMNden; // Fill histograms const double weight = 1.0; _NchgPMXden1->fill(ptlead/GeV, NchgPmax/AREA, weight); _NchgPMNden1->fill(ptlead/GeV, NchgPmin/AREA, weight); _NchgPDFden1->fill(ptlead/GeV, NchgPDFden, weight); _PTsumPMXden1->fill(ptlead/GeV, PTsumPmax/AREA, weight); _PTsumPMNden1->fill(ptlead/GeV, PTsumPmin/AREA, weight); _PTsumPDFden1->fill(ptlead/GeV, PTsumPDFden, weight); } /// eta-phi area of the transverse region constexpr static double AREA = 2*0.8 * M_PI/3; /// Histograms Profile1DPtr _NchgPden1, _NchgPMXden1, _NchgPMNden1, _NchgPDFden1, _PTsumPden1, _PTsumPMXden1, _PTsumPMNden1, _PTsumPDFden1; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_PAS_FSQ_12_020); } diff --git a/analyses/pluginCMS/CMS_2014_I1266056.cc b/analyses/pluginCMS/CMS_2014_I1266056.cc --- a/analyses/pluginCMS/CMS_2014_I1266056.cc +++ b/analyses/pluginCMS/CMS_2014_I1266056.cc @@ -1,141 +1,141 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Measurement of gamma + jets + X triple differential cross-sections /// /// @author David Grellscheid class CMS_2014_I1266056 : public Analysis { public: // Constructor CMS_2014_I1266056() : Analysis("CMS_2014_I1266056") { } // Book histograms and initialise projections before the run void init() { // Final state FinalState fs(-3, 
3); - addProjection(fs, "FS"); + declare(fs, "FS"); // Leading photon LeadingParticlesFinalState photonfs(FinalState(-2.5, 2.5, 40.0*GeV)); photonfs.addParticleId(PID::PHOTON); - addProjection(photonfs, "LeadingPhoton"); + declare(photonfs, "LeadingPhoton"); // FS excluding the leading photon VetoedFinalState vfs(fs); vfs.addVetoOnThisFinalState(photonfs); - addProjection(vfs, "JetFS"); + declare(vfs, "JetFS"); // Jets FastJets jetpro(vfs, FastJets::ANTIKT, 0.5); //jetpro.useInvisibles(); - addProjection(jetpro, "Jets"); + declare(jetpro, "Jets"); book(_h_phverycentral_jetcentral, 1, 1, 1); book(_h_phcentral_jetcentral , 2, 1, 1); book(_h_phforward_jetcentral , 3, 1, 1); book(_h_phveryforward_jetcentral, 4, 1, 1); book(_h_phverycentral_jetforward, 1, 1, 2); book(_h_phcentral_jetforward , 2, 1, 2); book(_h_phforward_jetforward , 3, 1, 2); book(_h_phveryforward_jetforward, 4, 1, 2); } // Perform the per-event analysis void analyze(const Event& event) { // Get the photon const FinalState& photonfs = applyProjection(event, "LeadingPhoton"); if (photonfs.particles().empty()) vetoEvent; const FourMomentum photon = photonfs.particles().front().momentum(); // Get the jet Jets jets = applyProjection(event, "Jets").jetsByPt(30.0*GeV); if (jets.empty()) vetoEvent; FourMomentum leadingJet; for ( const Jet & j : jets ) { leadingJet = j.momentum(); // keep the first separated jet if (deltaR(photon, leadingJet) > 0.5) break; } if (deltaR(photon, leadingJet) < 0.5) vetoEvent; // Veto if leading jet is outside plotted rapidity regions if (leadingJet.abseta() > 2.5) vetoEvent; // TODO: photon isolation 'IsoGamma' needed? // Fill histos const double abs_jet_eta = leadingJet.abseta(); const double photon_pt = photon.pT()/GeV; const double abs_photon_eta = photon.abseta(); if (abs_jet_eta < 1.5) { if (abs_photon_eta < 0.9) _h_phverycentral_jetcentral->fill(photon_pt); else if (abs_photon_eta < 1.44) _h_phcentral_jetcentral->fill( photon_pt); else if (abs_photon_eta < 1.57) {} else if (abs_photon_eta < 2.1) _h_phforward_jetcentral->fill( photon_pt); else if (abs_photon_eta < 2.5) _h_phveryforward_jetcentral->fill(photon_pt); } else if (abs_jet_eta < 2.5) { if (abs_photon_eta < 0.9) _h_phverycentral_jetforward->fill(photon_pt); else if (abs_photon_eta < 1.44) _h_phcentral_jetforward->fill( photon_pt); else if (abs_photon_eta < 1.57) {} else if (abs_photon_eta < 2.1) _h_phforward_jetforward->fill( photon_pt); else if (abs_photon_eta < 2.5) _h_phveryforward_jetforward->fill(photon_pt); } } /// Normalise histograms etc., after the run void finalize() { const double scale_jetcentral = crossSection()/sumOfWeights(); // *3 (jet eta < 1.5) scale(_h_phverycentral_jetcentral, scale_jetcentral); // * 1.8 (photon eta < 0.9) scale(_h_phcentral_jetcentral , scale_jetcentral); // * 1.08 (0.9 .. 1.44) scale(_h_phforward_jetcentral , scale_jetcentral); // * 1.06 (1.57 .. 2.1) scale(_h_phveryforward_jetcentral, scale_jetcentral); // * 0.8 (2.1 .. 2.5) const double scale_jetforward = crossSection()/sumOfWeights(); // *2 (1.5 < eta < 2.5) scale(_h_phverycentral_jetforward, scale_jetforward); // .. as above .. scale(_h_phcentral_jetforward , scale_jetforward); // .. as above .. scale(_h_phforward_jetforward , scale_jetforward); // .. as above .. scale(_h_phveryforward_jetforward, scale_jetforward); // .. as above .. 
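// Editor's note (not part of the original analysis): each histogram is scaled by the
// generator cross-section divided by the sum of event weights, turning raw counts into
// cross-sections per photon-pT bin for the given photon/jet rapidity region. The commented
// factors above record the |eta| widths of those regions (e.g. 3 for |eta_jet| < 1.5,
// 1.8 for |eta_gamma| < 0.9, 1.08 for 0.9-1.44), in case a per-unit-eta density were wanted.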
} private: Histo1DPtr _h_phverycentral_jetcentral; Histo1DPtr _h_phcentral_jetcentral ; Histo1DPtr _h_phforward_jetcentral ; Histo1DPtr _h_phveryforward_jetcentral; Histo1DPtr _h_phverycentral_jetforward; Histo1DPtr _h_phcentral_jetforward ; Histo1DPtr _h_phforward_jetforward ; Histo1DPtr _h_phveryforward_jetforward; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2014_I1266056); } diff --git a/analyses/pluginCMS/CMS_2015_I1397174.cc b/analyses/pluginCMS/CMS_2015_I1397174.cc --- a/analyses/pluginCMS/CMS_2015_I1397174.cc +++ b/analyses/pluginCMS/CMS_2015_I1397174.cc @@ -1,386 +1,386 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Fully leptonic partonic ttbar analysis class CMS_2015_I1397174 : public Analysis { public: /// Minimal constructor CMS_2015_I1397174() : Analysis("CMS_2015_I1397174") { } /// @name Analysis methods //@{ /// Set up projections and book histograms void init() { // Parton level top quarks - addProjection(PartonicTops(PartonicTops::DecayMode::E_MU, false), "PartonTops"); + declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "PartonTops"); // Find jets not related to the top/W decays VetoedFinalState vfs; vfs.addDecayProductsVeto(PID::WPLUSBOSON); vfs.addDecayProductsVeto(PID::WMINUSBOSON); FastJets fj(vfs, FastJets::ANTIKT, 0.5, JetAlg::Muons::ALL, JetAlg::Invisibles::ALL); - addProjection(fj, "Jets"); + declare(fj, "Jets"); // Book histograms book(_hVis_nJet30_abs , 1, 1, 1); book(_hVis_nJet30 , 2, 1, 1); book(_hVis_nJet60_abs , 3, 1, 1); book(_hVis_nJet60 , 4, 1, 1); book(_hVis_nJet100_abs , 5, 1, 1); book(_hVis_nJet100 , 6, 1, 1); book(_hVis_addJet1Pt_abs , 7, 1, 1); book(_hVis_addJet1Pt , 8, 1, 1); book(_hVis_addJet1Eta_abs , 9, 1, 1); book(_hVis_addJet1Eta ,10, 1, 1); book(_hVis_addJet2Pt_abs ,11, 1, 1); book(_hVis_addJet2Pt ,12, 1, 1); book(_hVis_addJet2Eta_abs ,13, 1, 1); book(_hVis_addJet2Eta ,14, 1, 1); book(_hVis_addJJMass_abs ,15, 1, 1); book(_hVis_addJJMass ,16, 1, 1); book(_hVis_addJJDR_abs ,17, 1, 1); book(_hVis_addJJDR ,18, 1, 1); book(_hVis_addJJHT_abs ,19, 1, 1); book(_hVis_addJJHT ,20, 1, 1); book(_hFull_addJet1Pt_abs ,21, 1, 1); book(_hFull_addJet1Pt ,22, 1, 1); book(_hFull_addJet1Eta_abs ,23, 1, 1); book(_hFull_addJet1Eta ,24, 1, 1); book(_hFull_addJet2Pt_abs ,25, 1, 1); book(_hFull_addJet2Pt ,26, 1, 1); book(_hFull_addJet2Eta_abs ,27, 1, 1); book(_hFull_addJet2Eta ,28, 1, 1); book(_hFull_addJJMass_abs ,29, 1, 1); book(_hFull_addJJMass ,30, 1, 1); book(_hFull_addJJDR_abs ,31, 1, 1); book(_hFull_addJJDR ,32, 1, 1); book(_hFull_addJJHT_abs ,33, 1, 1); book(_hFull_addJJHT ,34, 1, 1); book(_hVis_addBJet1Pt_abs ,35, 1, 1); book(_hVis_addBJet1Pt ,36, 1, 1); book(_hVis_addBJet1Eta_abs ,37, 1, 1); book(_hVis_addBJet1Eta ,38, 1, 1); book(_hVis_addBJet2Pt_abs ,39, 1, 1); book(_hVis_addBJet2Pt ,40, 1, 1); book(_hVis_addBJet2Eta_abs ,41, 1, 1); book(_hVis_addBJet2Eta ,42, 1, 1); book(_hVis_addBBMass_abs ,43, 1, 1); book(_hVis_addBBMass ,44, 1, 1); book(_hVis_addBBDR_abs ,45, 1, 1); book(_hVis_addBBDR ,46, 1, 1); book(_hFull_addBJet1Pt_abs ,47, 1, 1); book(_hFull_addBJet1Pt ,48, 1, 1); book(_hFull_addBJet1Eta_abs ,49, 1, 1); book(_hFull_addBJet1Eta ,50, 1, 1); book(_hFull_addBJet2Pt_abs ,51, 1, 1); book(_hFull_addBJet2Pt ,52, 1, 1); book(_hFull_addBJet2Eta_abs ,53, 1, 1); book(_hFull_addBJet2Eta ,54, 1, 1); book(_hFull_addBBMass_abs ,55, 1, 1); book(_hFull_addBBMass ,56, 1, 1); book(_hFull_addBBDR_abs 
,57, 1, 1); book(_hFull_addBBDR ,58, 1, 1); book(_h_gap_addJet1Pt ,59, 1, 1); book(_h_gap_addJet1Pt_eta0 ,60, 1, 1); book(_h_gap_addJet1Pt_eta1 ,61, 1, 1); book(_h_gap_addJet1Pt_eta2 ,62, 1, 1); book(_h_gap_addJet2Pt ,63, 1, 1); book(_h_gap_addJet2Pt_eta0 ,64, 1, 1); book(_h_gap_addJet2Pt_eta1 ,65, 1, 1); book(_h_gap_addJet2Pt_eta2 ,66, 1, 1); book(_h_gap_addJetHT ,67, 1, 1); book(_h_gap_addJetHT_eta0 ,68, 1, 1); book(_h_gap_addJetHT_eta1 ,69, 1, 1); book(_h_gap_addJetHT_eta2 ,70, 1, 1); } void analyze(const Event& event) { // The objects used in the PAPER 12-041 are defined as follows (see p.16 for details): // // * Leptons : from the W boson decays after FSR // * Jets : anti-kT R=0.5 to all stable particles // exclude W->enu, munu, taunu // * B jet : B-Ghost matched // * B from top : B hadron from top->b decay // // Visible phase space definition: // // * Leptons : pT > 20, |eta| < 2.4 // * B jets from top : pT > 30, |eta| < 2.4 // Additional jets : pT > 20, |eta| < 2.4 // * // Full phase space definition: // // * Correction to dilepton BR from W boson BR // * No cut on top decay products // * Additional jets : pT > 20, |eta| < 2.4 // Do the analysis only for the ttbar full leptonic channel, removing tau decays const Particles partontops = apply(event, "PartonTops").particlesByPt(); if (partontops.size() != 2) vetoEvent; const Particle& t1 = partontops[0]; const Particle& t2 = partontops[1]; // Apply acceptance cuts on top-decay leptons (existence should be guaranteed) const auto isPromptChLepton = [](const Particle& p){return isChargedLepton(p) && !fromDecay(p);}; const Particle lep1 = t1.allDescendants(lastParticleWith(isPromptChLepton)).front(); const Particle lep2 = t2.allDescendants(lastParticleWith(isPromptChLepton)).front(); if (lep1.pT() < 1e-9*GeV || lep2.pT() < 1e-9*GeV) vetoEvent; // sanity check? 
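// Editor's aside (a sketch, not part of the original analysis): the extraction above assumes
// each E_MU partonic top has a prompt charged lepton among its descendants. A slightly more
// defensive variant could guard against an empty candidate list, e.g. as a local lambda that
// reuses the isPromptChLepton selector defined above (hypothetical, illustrative only):
//
//   const auto promptLeptonFromTop = [&](const Particle& top) {
//     const Particles cands = top.allDescendants(lastParticleWith(isPromptChLepton));
//     return cands.empty() ? Particle() : cands.front();
//   };
//   if (promptLeptonFromTop(t1).pT() < 1e-9*GeV) vetoEvent;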
const Jets jets = apply(event, "Jets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.4); int nJet30 = 0, nJet60 = 0, nJet100 = 0; Jets topBJets, addJets, addBJets, addJets_eta0, addJets_eta1, addJets_eta2; for (const Jet& jet : jets) { if (jet.pT() > 30*GeV) nJet30 += 1; if (jet.pT() > 60*GeV) nJet60 += 1; if (jet.pT() > 100*GeV) nJet100 += 1; const bool isBtagged = jet.bTagged(); const bool isBFromTop = any(jet.bTags(), hasParticleAncestorWith(Cuts::abspid == PID::TQUARK)); if (isBFromTop) { if (jet.pT() > 30*GeV) topBJets.push_back(jet); } else { addJets.push_back(jet); if (isBtagged) addBJets.push_back(jet); if (jet.abseta() < 0.8 ) addJets_eta0.push_back(jet); else if (jet.abseta() < 1.5 ) addJets_eta1.push_back(jet); else if (jet.abseta() < 2.4 ) addJets_eta2.push_back(jet); } } const bool isVisiblePS = topBJets.size() >= 2 && lep1.pT() > 20*GeV && lep1.abseta() < 2.4 && lep2.pT() > 20*GeV && lep2.abseta() < 2.4; MSG_DEBUG(isVisiblePS << ": #b(top) = " << topBJets.size() << "; l1 = " << lep1.pT() << ", " << lep1.abseta() << "; l2 = " << lep2.pT() << ", " << lep2.abseta()); const double weight = 1.0; if (isVisiblePS) { fillWithOF(_hVis_nJet30_abs, nJet30, weight); fillWithOF(_hVis_nJet30, nJet30, weight); fillWithOF(_hVis_nJet60_abs, nJet60, weight); fillWithOF(_hVis_nJet60, nJet60, weight); fillWithOF(_hVis_nJet100_abs, nJet100, weight); fillWithOF(_hVis_nJet100, nJet100, weight); fillGapFractions(addJets, _h_gap_addJet1Pt, _h_gap_addJet2Pt, _h_gap_addJetHT, weight); fillGapFractions(addJets_eta0, _h_gap_addJet1Pt_eta0, _h_gap_addJet2Pt_eta0, _h_gap_addJetHT_eta0, weight); fillGapFractions(addJets_eta1, _h_gap_addJet1Pt_eta1, _h_gap_addJet2Pt_eta1, _h_gap_addJetHT_eta1, weight); fillGapFractions(addJets_eta2, _h_gap_addJet1Pt_eta2, _h_gap_addJet2Pt_eta2, _h_gap_addJetHT_eta2, weight); } // Plots with two additional jets if (addJets.size() >= 1) { const double ht = sum(addJets, pT, 0.0); _hFull_addJJHT_abs->fill(ht/GeV, weight); _hFull_addJJHT ->fill(ht/GeV, weight); if (isVisiblePS) { _hVis_addJJHT_abs->fill(ht/GeV, weight); _hVis_addJJHT ->fill(ht/GeV, weight); } const Jet& j1 = addJets[0]; _hFull_addJet1Pt_abs ->fill(j1.pT()/GeV, weight); _hFull_addJet1Pt ->fill(j1.pT()/GeV, weight); _hFull_addJet1Eta_abs->fill(j1.abseta(), weight); _hFull_addJet1Eta ->fill(j1.abseta(), weight); if (isVisiblePS) { _hVis_addJet1Pt_abs ->fill(j1.pT()/GeV, weight); _hVis_addJet1Pt ->fill(j1.pT()/GeV, weight); _hVis_addJet1Eta_abs->fill(j1.abseta(), weight); _hVis_addJet1Eta ->fill(j1.abseta(), weight); } if (addJets.size() >= 2) { const Jet& j2 = addJets[1]; _hFull_addJet2Pt_abs ->fill(j2.pT()/GeV, weight); _hFull_addJet2Pt ->fill(j2.pT()/GeV, weight); _hFull_addJet2Eta_abs->fill(j2.abseta(), weight); _hFull_addJet2Eta ->fill(j2.abseta(), weight); if (isVisiblePS) { _hVis_addJet2Pt_abs ->fill(j2.pT()/GeV, weight); _hVis_addJet2Pt ->fill(j2.pT()/GeV, weight); _hVis_addJet2Eta_abs->fill(j2.abseta(), weight); _hVis_addJet2Eta ->fill(j2.abseta(), weight); } const double jjmass = (j1.mom() + j2.mom()).mass(); const double jjdR = deltaR(j1, j2); _hFull_addJJMass_abs->fill(jjmass/GeV, weight); _hFull_addJJMass ->fill(jjmass/GeV, weight); _hFull_addJJDR_abs ->fill(jjdR, weight); _hFull_addJJDR ->fill(jjdR, weight); if (isVisiblePS) { _hVis_addJJMass_abs->fill(jjmass/GeV, weight); _hVis_addJJMass ->fill(jjmass/GeV, weight); _hVis_addJJDR_abs ->fill(jjdR, weight); _hVis_addJJDR ->fill(jjdR, weight); } } } // Same set of plots if there are additional b-jets if (addBJets.size() >= 1) { const Jet& b1 = 
addBJets[0]; _hFull_addBJet1Pt_abs ->fill(b1.pT()/GeV, weight); _hFull_addBJet1Pt ->fill(b1.pT()/GeV, weight); _hFull_addBJet1Eta_abs->fill(b1.abseta(), weight); _hFull_addBJet1Eta ->fill(b1.abseta(), weight); if (isVisiblePS) { _hVis_addBJet1Pt_abs ->fill(b1.pT()/GeV, weight); _hVis_addBJet1Pt ->fill(b1.pT()/GeV, weight); _hVis_addBJet1Eta_abs->fill(b1.abseta(), weight); _hVis_addBJet1Eta ->fill(b1.abseta(), weight); } if (addBJets.size() >= 2) { const Jet& b2 = addBJets[1]; _hFull_addBJet2Pt_abs ->fill(b2.pT()/GeV, weight); _hFull_addBJet2Pt ->fill(b2.pT()/GeV, weight); _hFull_addBJet2Eta_abs->fill(b2.abseta(), weight); _hFull_addBJet2Eta ->fill(b2.abseta(), weight); if (isVisiblePS) { _hVis_addBJet2Pt_abs ->fill(b2.pT()/GeV, weight); _hVis_addBJet2Pt ->fill(b2.pT()/GeV, weight); _hVis_addBJet2Eta_abs->fill(b2.abseta(), weight); _hVis_addBJet2Eta ->fill(b2.abseta(), weight); } const double bbmass = (b1.mom() + b2.mom()).mass(); const double bbdR = deltaR(b1, b2); _hFull_addBBMass_abs->fill(bbmass/GeV, weight); _hFull_addBBMass ->fill(bbmass/GeV, weight); _hFull_addBBDR_abs ->fill(bbdR, weight); _hFull_addBBDR ->fill(bbdR, weight); if (isVisiblePS) { _hVis_addBBMass_abs->fill(bbmass/GeV, weight); _hVis_addBBMass ->fill(bbmass/GeV, weight); _hVis_addBBDR_abs ->fill(bbdR, weight); _hVis_addBBDR ->fill(bbdR, weight); } } } } void finalize() { const double ttbarXS = !std::isnan(crossSectionPerEvent()) ? crossSection() : 252.89*picobarn; if (std::isnan(crossSectionPerEvent())) MSG_INFO("No valid cross-section given, using NNLO (arXiv:1303.6254; sqrt(s)=8 TeV, m_t=172.5 GeV): " << ttbarXS/picobarn << " pb"); normalize({_hVis_nJet30,_hVis_nJet60, _hVis_nJet100, _hVis_addJet1Pt, _hVis_addJet1Eta, _hVis_addJet2Pt, _hVis_addJet2Eta, _hVis_addJJMass, _hVis_addJJDR, _hVis_addJJHT, _hFull_addJet1Pt, _hFull_addJet1Eta, _hFull_addJet2Pt, _hFull_addJet2Eta, _hFull_addJJMass, _hFull_addJJDR, _hFull_addJJHT, _hVis_addBJet1Pt, _hVis_addBJet1Eta, _hVis_addBJet2Pt, _hVis_addBJet2Eta, _hVis_addBBMass, _hVis_addBBDR, _hFull_addBJet1Pt, _hFull_addBJet1Eta, _hFull_addBJet2Pt, _hFull_addBJet2Eta, _hFull_addBBMass, _hFull_addBBDR}); const double xsPerWeight = ttbarXS/picobarn / sumOfWeights(); scale({_hVis_nJet30_abs, _hVis_nJet60_abs, _hVis_nJet100_abs, _hVis_addJet1Pt_abs, _hVis_addJet1Eta_abs, _hVis_addJet2Pt_abs, _hVis_addJet2Eta_abs, _hVis_addJJMass_abs, _hVis_addJJDR_abs, _hVis_addJJHT_abs, _hVis_addBJet1Pt_abs, _hVis_addBJet1Eta_abs, _hVis_addBJet2Pt_abs, _hVis_addBJet2Eta_abs, _hVis_addBBMass_abs, _hVis_addBBDR_abs}, xsPerWeight); const double sfull = xsPerWeight / 0.0454; //< correct for dilepton branching fraction scale({_hFull_addJet1Pt_abs, _hFull_addJet1Eta_abs, _hFull_addJet2Pt_abs, _hFull_addJet2Eta_abs, _hFull_addJJMass_abs, _hFull_addJJDR_abs, _hFull_addJJHT_abs, _hFull_addBJet1Pt_abs, _hFull_addBJet1Eta_abs, _hFull_addBJet2Pt_abs, _hFull_addBJet2Eta_abs, _hFull_addBBMass_abs, _hFull_addBBDR_abs}, sfull); } //@} void fillWithOF(Histo1DPtr h, double x, double w) { h->fill(std::min(x, h->xMax()-1e-9), w); } void fillGapFractions(const Jets& addJets, Profile1DPtr h_gap_addJet1Pt, Profile1DPtr h_gap_addJet2Pt, Profile1DPtr h_gap_addJetHT, double weight) { const double j1pt = (addJets.size() > 0) ? addJets[0].pT() : 0; for (size_t i = 0; i < h_gap_addJet1Pt->numBins(); ++i) { const double binCenter = h_gap_addJet1Pt->bin(i).xMid(); h_gap_addJet1Pt->fillBin(i, int(j1pt/GeV < binCenter), weight); } const double j2pt = (addJets.size() > 1) ? 
addJets[1].pT() : 0; for (size_t i = 0; i < h_gap_addJet2Pt->numBins(); ++i) { const double binCenter = h_gap_addJet2Pt->bin(i).xMid(); h_gap_addJet2Pt->fillBin(i, int(j2pt/GeV < binCenter), weight); } const double ht = sum(addJets, pT, 0.); for (size_t i = 0; i < h_gap_addJetHT->numBins(); ++i) { const double binCenter = h_gap_addJetHT->bin(i).xMid(); h_gap_addJetHT->fillBin(i, int(ht/GeV < binCenter) , weight); } } // @name Histogram data members //@{ Histo1DPtr _hVis_nJet30_abs, _hVis_nJet60_abs, _hVis_nJet100_abs; Histo1DPtr _hVis_addJet1Pt_abs, _hVis_addJet1Eta_abs, _hVis_addJet2Pt_abs, _hVis_addJet2Eta_abs; Histo1DPtr _hVis_addJJMass_abs, _hVis_addJJDR_abs, _hVis_addJJHT_abs; Histo1DPtr _hFull_addJet1Pt_abs, _hFull_addJet1Eta_abs, _hFull_addJet2Pt_abs, _hFull_addJet2Eta_abs; Histo1DPtr _hFull_addJJMass_abs, _hFull_addJJDR_abs, _hFull_addJJHT_abs; Histo1DPtr _hVis_addBJet1Pt_abs, _hVis_addBJet1Eta_abs, _hVis_addBJet2Pt_abs, _hVis_addBJet2Eta_abs; Histo1DPtr _hVis_addBBMass_abs, _hVis_addBBDR_abs; Histo1DPtr _hFull_addBJet1Pt_abs, _hFull_addBJet1Eta_abs, _hFull_addBJet2Pt_abs, _hFull_addBJet2Eta_abs; Histo1DPtr _hFull_addBBMass_abs, _hFull_addBBDR_abs; Histo1DPtr _hVis_nJet30, _hVis_nJet60, _hVis_nJet100; Histo1DPtr _hVis_addJet1Pt, _hVis_addJet1Eta, _hVis_addJet2Pt, _hVis_addJet2Eta; Histo1DPtr _hVis_addJJMass, _hVis_addJJDR, _hVis_addJJHT; Histo1DPtr _hFull_addJet1Pt, _hFull_addJet1Eta, _hFull_addJet2Pt, _hFull_addJet2Eta; Histo1DPtr _hFull_addJJMass, _hFull_addJJDR, _hFull_addJJHT; Histo1DPtr _hVis_addBJet1Pt, _hVis_addBJet1Eta, _hVis_addBJet2Pt, _hVis_addBJet2Eta; Histo1DPtr _hVis_addBBMass, _hVis_addBBDR; Histo1DPtr _hFull_addBJet1Pt, _hFull_addBJet1Eta, _hFull_addBJet2Pt, _hFull_addBJet2Eta; Histo1DPtr _hFull_addBBMass, _hFull_addBBDR; Profile1DPtr _h_gap_addJet1Pt, _h_gap_addJet1Pt_eta0, _h_gap_addJet1Pt_eta1, _h_gap_addJet1Pt_eta2; Profile1DPtr _h_gap_addJet2Pt, _h_gap_addJet2Pt_eta0, _h_gap_addJet2Pt_eta1, _h_gap_addJet2Pt_eta2; Profile1DPtr _h_gap_addJetHT, _h_gap_addJetHT_eta0, _h_gap_addJetHT_eta1, _h_gap_addJetHT_eta2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2015_I1397174); } diff --git a/analyses/pluginCMS/CMS_2016_I1413748.cc b/analyses/pluginCMS/CMS_2016_I1413748.cc --- a/analyses/pluginCMS/CMS_2016_I1413748.cc +++ b/analyses/pluginCMS/CMS_2016_I1413748.cc @@ -1,328 +1,328 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// CMS 8 TeV dilepton channel ttbar spin correlations and polarisation analysis class CMS_2016_I1413748 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1413748); /// Book histograms and initialise projections void init() { // Complete final state FinalState fs(-MAXDOUBLE, MAXDOUBLE, 0*GeV); // Projection for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); - addProjection(electrons, "Electrons"); + declare(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); - addProjection(dressed_electrons, "DressedElectrons"); + declare(dressed_electrons, "DressedElectrons"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); - 
addProjection(muons, "Muons"); + declare(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); - addProjection(dressed_muons, "DressedMuons"); + declare(dressed_muons, "DressedMuons"); // Parton-level top quarks declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); // Booking of histograms // This histogram is independent of the parton-level information, and is an addition to the original analysis. // It is compared to the same data as the parton-level delta_phi histogram d02-x01-y01. book(_h_dphidressedleptons, "d00-x01-y01", _bins_dphi); // The remaining histos use parton-level information book(_h_dphi, "d02-x01-y01", _bins_dphi); book(_h_cos_opening_angle, "d05-x01-y01", _bins_cos_opening_angle); book(_h_c1c2, "d08-x01-y01", _bins_c1c2); book(_h_lep_costheta, "d11-x01-y01", _bins_lep_costheta); book(_h_lep_costheta_CPV, "d14-x01-y01", _bins_lep_costheta_CPV); // 2D histos book(_h_dphi_var[0], "d20-x01-y01", _bins_dphi, _bins_tt_mass); book(_h_cos_opening_angle_var[0], "d26-x01-y01", _bins_cos_opening_angle, _bins_tt_mass); book(_h_c1c2_var[0], "d32-x01-y01", _bins_c1c2, _bins_tt_mass); book(_h_lep_costheta_var[0], "d38-x01-y01", _bins_lep_costheta, _bins_tt_mass); book(_h_lep_costheta_CPV_var[0], "d44-x01-y01", _bins_lep_costheta_CPV, _bins_tt_mass); book(_h_dphi_var[1], "d50-x01-y01", _bins_dphi, _bins_tt_pT); book(_h_cos_opening_angle_var[1], "d56-x01-y01", _bins_cos_opening_angle, _bins_tt_pT); book(_h_c1c2_var[1], "d62-x01-y01", _bins_c1c2, _bins_tt_pT); book(_h_lep_costheta_var[1], "d68-x01-y01", _bins_lep_costheta, _bins_tt_pT); book(_h_lep_costheta_CPV_var[1], "d74-x01-y01", _bins_lep_costheta_CPV, _bins_tt_pT); book(_h_dphi_var[2], "d80-x01-y01", _bins_dphi, _bins_tt_absrapidity); book(_h_cos_opening_angle_var[2], "d86-x01-y01", _bins_cos_opening_angle, _bins_tt_absrapidity); book(_h_c1c2_var[2], "d92-x01-y01", _bins_c1c2, _bins_tt_absrapidity); book(_h_lep_costheta_var[2], "d98-x01-y01", _bins_lep_costheta, _bins_tt_absrapidity); book(_h_lep_costheta_CPV_var[2], "d104-x01-y01", _bins_lep_costheta_CPV, _bins_tt_absrapidity); // Profile histos for asymmetries book(_h_dphi_profile[0], "d17-x01-y01", _bins_tt_mass); book(_h_cos_opening_angle_profile[0], "d23-x01-y01", _bins_tt_mass); book(_h_c1c2_profile[0], "d29-x01-y01", _bins_tt_mass); book(_h_lep_costheta_profile[0], "d35-x01-y01", _bins_tt_mass); book(_h_lep_costheta_CPV_profile[0], "d41-x01-y01", _bins_tt_mass); book(_h_dphi_profile[1], "d47-x01-y01", _bins_tt_pT); book(_h_cos_opening_angle_profile[1], "d53-x01-y01", _bins_tt_pT); book(_h_c1c2_profile[1], "d59-x01-y01", _bins_tt_pT); book(_h_lep_costheta_profile[1], "d65-x01-y01", _bins_tt_pT); book(_h_lep_costheta_CPV_profile[1], "d71-x01-y01", _bins_tt_pT); book(_h_dphi_profile[2], "d77-x01-y01", _bins_tt_absrapidity); book(_h_cos_opening_angle_profile[2], "d83-x01-y01", _bins_tt_absrapidity); book(_h_c1c2_profile[2], "d89-x01-y01", _bins_tt_absrapidity); book(_h_lep_costheta_profile[2], "d95-x01-y01", _bins_tt_absrapidity); book(_h_lep_costheta_CPV_profile[2], "d101-x01-y01", _bins_tt_absrapidity); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Use particle-level leptons for the first histogram const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); const vector dressedels = dressed_electrons.dressedLeptons(); const vector dressedmus = dressed_muons.dressedLeptons(); 
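// Editor's aside (a sketch, not part of the original analysis): "dressed" here means the bare
// e/mu four-momentum with nearby prompt-photon momenta added back. For the dR < 0.1 dressing
// cone used in init(), this is conceptually (not the DressedLeptons implementation itself):
//
//   FourMomentum dressed = lep.momentum();
//   for (const Particle& ph : photons)
//     if (deltaR(ph, lep) < 0.1) dressed += ph.momentum();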
const size_t ndressedel = dressedels.size(); const size_t ndressedmu = dressedmus.size(); // For the particle-level histogram, require exactly one electron and exactly one muon, to select // the ttbar->emu channel. Note this means ttbar->emu events with additional PromptFinalState // dilepton pairs from the shower are vetoed - for PYTHIA8, this affects ~0.5% of events, so the // effect is well below the level of sensitivity of the measured distribution. if ( ndressedel == 1 && ndressedmu == 1 ) { const int electrontouse = 0, muontouse = 0; // Opposite-charge leptons only if ( sameSign(dressedels[electrontouse],dressedmus[muontouse]) ) { MSG_INFO("Error, e and mu have same charge, skipping event"); } else { //Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = dressedels[electrontouse].charge() > 0 ? dressedels[electrontouse] : dressedmus[muontouse]; FourMomentum lepMinus = dressedels[electrontouse].charge() > 0 ? dressedmus[muontouse] : dressedels[electrontouse]; // Now calculate the variable double dphi_temp = deltaPhi(lepPlus,lepMinus); fillWithUFOF( _h_dphidressedleptons, dphi_temp, weight ); } } // The remaining variables use parton-level information. // Get the leptonically decaying tops const Particles& leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); Particles chargedleptons; unsigned int ntrueleptonictops = 0; bool oppositesign = false; if ( leptonicpartontops.size() == 2 ) { for (size_t k = 0; k < leptonicpartontops.size(); ++k) { // Get the lepton const Particle lepTop = leptonicpartontops[k]; const auto isPromptChargedLepton = [](const Particle& p){return (isChargedLepton(p) && isPrompt(p, false, false));}; Particles lepton_candidates = lepTop.allDescendants(firstParticleWith(isPromptChargedLepton), false); if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::DecayMode::E_MU top quark had no daughter lepton candidate, skipping event."); // In some cases there is no lepton from the W decay but only leptons from the decay of a radiated gamma. // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::DecayMode::E_MU (as of April 2017), and need to be rejected. // PartonicTops::DecayMode::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. /// @todo Should no longer be necessary -- remove bool istrueleptonictop = false; for (size_t i = 0; i < lepton_candidates.size(); ++i) { const Particle& lepton_candidate = lepton_candidates[i]; if ( lepton_candidate.hasParent(PID::PHOTON) ) { MSG_DEBUG("Found gamma parent, top: " << k+1 << " of " << leptonicpartontops.size() << " , lepton: " << i+1 << " of " << lepton_candidates.size()); continue; } if ( !istrueleptonictop && sameSign(lepTop,lepton_candidate) ) { chargedleptons.push_back(lepton_candidate); istrueleptonictop = true; } else MSG_WARNING("Found extra prompt charged lepton from top decay (and without gamma parent), ignoring it."); } if ( istrueleptonictop ) ++ntrueleptonictops; } } if ( ntrueleptonictops == 2 ) { oppositesign = !( sameSign(chargedleptons[0],chargedleptons[1]) ); if ( !oppositesign ) MSG_WARNING("error, same charge tops, skipping event."); } if ( ntrueleptonictops == 2 && oppositesign ) { // Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = chargedleptons[0].charge() > 0 ? chargedleptons[0] : chargedleptons[1]; FourMomentum lepMinus = chargedleptons[0].charge() > 0 ? 
chargedleptons[1] : chargedleptons[0]; const double dphi_temp = deltaPhi(lepPlus,lepMinus); // Get the four-momenta of the positively- and negatively-charged tops FourMomentum topPlus_p4 = leptonicpartontops[0].pdgId() > 0 ? leptonicpartontops[0] : leptonicpartontops[1]; FourMomentum topMinus_p4 = leptonicpartontops[0].pdgId() > 0 ? leptonicpartontops[1] : leptonicpartontops[0]; const FourMomentum ttbar_p4 = topPlus_p4 + topMinus_p4; const double tt_mass_temp = ttbar_p4.mass(); const double tt_absrapidity_temp = ttbar_p4.absrapidity(); const double tt_pT_temp = ttbar_p4.pT(); // Lorentz transformations to calculate the spin observables in the helicity basis // Transform everything to the ttbar CM frame LorentzTransform ttCM; ttCM.setBetaVec(-ttbar_p4.boostVector()); topPlus_p4 = ttCM.transform(topPlus_p4); topMinus_p4 = ttCM.transform(topMinus_p4); lepPlus = ttCM.transform(lepPlus); lepMinus = ttCM.transform(lepMinus); // Now boost the leptons to their parent top CM frames LorentzTransform topPlus, topMinus; topPlus.setBetaVec(-topPlus_p4.boostVector()); topMinus.setBetaVec(-topMinus_p4.boostVector()); lepPlus = topPlus.transform(lepPlus); lepMinus = topMinus.transform(lepMinus); const double lepPlus_costheta_temp = lepPlus.vector3().dot(topPlus_p4.vector3()) / (lepPlus.vector3().mod() * topPlus_p4.vector3().mod()); const double lepMinus_costheta_temp = lepMinus.vector3().dot(topMinus_p4.vector3()) / (lepMinus.vector3().mod() * topMinus_p4.vector3().mod()); const double c1c2_temp = lepPlus_costheta_temp * lepMinus_costheta_temp; const double cos_opening_angle_temp = lepPlus.vector3().dot(lepMinus.vector3()) / (lepPlus.vector3().mod() * lepMinus.vector3().mod()); // Fill parton-level histos fillWithUFOF( _h_dphi, dphi_temp, weight ); fillWithUFOF( _h_cos_opening_angle, cos_opening_angle_temp, weight ); fillWithUFOF( _h_c1c2, c1c2_temp, weight ); fillWithUFOF( _h_lep_costheta, lepPlus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta, lepMinus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta_CPV, lepPlus_costheta_temp, weight ); fillWithUFOF( _h_lep_costheta_CPV, -lepMinus_costheta_temp, weight ); // Now fill the same variables in the 2D and profile histos vs ttbar invariant mass, pT, and absolute rapidity for (int i_var = 0; i_var < 3; ++i_var) { double var; if ( i_var == 0 ) { var = tt_mass_temp; } else if ( i_var == 1 ) { var = tt_pT_temp; } else { var = tt_absrapidity_temp; } fillWithUFOF( _h_dphi_var[i_var], dphi_temp, var, weight ); fillWithUFOF( _h_cos_opening_angle_var[i_var], cos_opening_angle_temp, var, weight ); fillWithUFOF( _h_c1c2_var[i_var], c1c2_temp, var, weight ); fillWithUFOF( _h_lep_costheta_var[i_var], lepPlus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_var[i_var], lepMinus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_CPV_var[i_var], lepPlus_costheta_temp, var, weight ); fillWithUFOF( _h_lep_costheta_CPV_var[i_var], -lepMinus_costheta_temp, var, weight ); fillWithUFOF( _h_dphi_profile[i_var], dphi_temp, var, weight, (_h_dphi->xMax() + _h_dphi->xMin())/2. ); fillWithUFOF( _h_cos_opening_angle_profile[i_var], cos_opening_angle_temp, var, weight, (_h_cos_opening_angle->xMax() + _h_cos_opening_angle->xMin())/2. ); fillWithUFOF( _h_c1c2_profile[i_var], c1c2_temp, var, weight, (_h_c1c2->xMax() + _h_c1c2->xMin())/2. ); fillWithUFOF( _h_lep_costheta_profile[i_var], lepPlus_costheta_temp, var, weight, (_h_lep_costheta->xMax() + _h_lep_costheta->xMin())/2. 
); fillWithUFOF( _h_lep_costheta_profile[i_var], lepMinus_costheta_temp, var, weight, (_h_lep_costheta->xMax() + _h_lep_costheta->xMin())/2. ); fillWithUFOF( _h_lep_costheta_CPV_profile[i_var], lepPlus_costheta_temp, var, weight, (_h_lep_costheta_CPV->xMax() + _h_lep_costheta_CPV->xMin())/2. ); fillWithUFOF( _h_lep_costheta_CPV_profile[i_var], -lepMinus_costheta_temp, var, weight, (_h_lep_costheta_CPV->xMax() + _h_lep_costheta_CPV->xMin())/2. ); } } } /// Normalise histograms to unit area void finalize() { normalize(_h_dphidressedleptons); normalize(_h_dphi); normalize(_h_cos_opening_angle); normalize(_h_c1c2); normalize(_h_lep_costheta); normalize(_h_lep_costheta_CPV); for (int i_var = 0; i_var < 3; ++i_var) { normalize(_h_dphi_var[i_var]); normalize(_h_cos_opening_angle_var[i_var]); normalize(_h_c1c2_var[i_var]); normalize(_h_lep_costheta_var[i_var]); normalize(_h_lep_costheta_CPV_var[i_var]); } } private: Histo1DPtr _h_dphidressedleptons, _h_dphi, _h_lep_costheta, _h_lep_costheta_CPV, _h_c1c2, _h_cos_opening_angle; Histo2DPtr _h_dphi_var[3], _h_lep_costheta_var[3], _h_lep_costheta_CPV_var[3], _h_c1c2_var[3], _h_cos_opening_angle_var[3]; Profile1DPtr _h_dphi_profile[3], _h_lep_costheta_profile[3], _h_lep_costheta_CPV_profile[3], _h_c1c2_profile[3], _h_cos_opening_angle_profile[3]; const vector _bins_tt_mass = {300., 430., 530., 1200.}; const vector _bins_tt_pT = {0., 41., 92., 300.}; const vector _bins_tt_absrapidity = {0., 0.34, 0.75, 1.5}; const vector _bins_dphi = {0., 5.*M_PI/60., 10.*M_PI/60., 15.*M_PI/60., 20.*M_PI/60., 25.*M_PI/60., 30.*M_PI/60., 35.*M_PI/60., 40.*M_PI/60., 45.*M_PI/60., 50.*M_PI/60., 55.*M_PI/60., M_PI}; const vector _bins_lep_costheta = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; const vector _bins_lep_costheta_CPV = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; const vector _bins_c1c2 = {-1., -0.4, -10./60., 0., 10./60., 0.4, 1.}; const vector _bins_cos_opening_angle = {-1., -2./3., -1./3., 0., 1./3., 2./3., 1.}; void fillWithUFOF(Histo1DPtr h, double x, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), w); } void fillWithUFOF(Histo2DPtr h, double x, double y, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), std::max(std::min(y, h->yMax()-1e-9),h->yMin()+1e-9), w); } void fillWithUFOF(Profile1DPtr h, double x, double y, double w, double c) { h->fill(std::max(std::min(y, h->xMax()-1e-9),h->xMin()+1e-9), float(x > c) - float(x < c), w); } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1413748); } diff --git a/analyses/pluginCMS/CMS_2016_I1421646.cc b/analyses/pluginCMS/CMS_2016_I1421646.cc --- a/analyses/pluginCMS/CMS_2016_I1421646.cc +++ b/analyses/pluginCMS/CMS_2016_I1421646.cc @@ -1,62 +1,62 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// CMS azimuthal decorrelations at 8 TeV class CMS_2016_I1421646 : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1421646); /// Book projections and histograms void init() { FastJets akt(FinalState(), FastJets::ANTIKT, 0.7); - addProjection(akt, "antikT"); + declare(akt, "antikT"); {Histo1DPtr tmp; _h_deltaPhi.add( 200., 300., book(tmp, 1, 1, 1));} {Histo1DPtr tmp; _h_deltaPhi.add( 300., 400., book(tmp, 2, 1, 1));} {Histo1DPtr tmp; _h_deltaPhi.add( 400., 500., book(tmp, 3, 1, 1));} {Histo1DPtr tmp; _h_deltaPhi.add( 500., 700., book(tmp, 4, 1, 1));} {Histo1DPtr tmp; _h_deltaPhi.add( 700., 900., 
book(tmp, 5, 1, 1));} {Histo1DPtr tmp; _h_deltaPhi.add( 900., 1100., book(tmp, 6, 1, 1));} {Histo1DPtr tmp; _h_deltaPhi.add( 1100., 4000., book(tmp, 7, 1, 1));} } /// Per-event analysis void analyze(const Event & event) { const Jets& jets = apply(event, "antikT").jetsByPt(Cuts::absrap < 5.0 && Cuts::pT > 100*GeV); if (jets.size() < 2) vetoEvent; if (jets[0].pT() < 200*GeV) vetoEvent; if (jets[0].absrap() > 2.5 || jets[1].absrap() > 2.5) vetoEvent; const double dphi = deltaPhi(jets[0].phi(), jets[1].phi()); _h_deltaPhi.fill(jets[0].pT(), dphi, 1.0); } /// Scale histograms void finalize() { for (Histo1DPtr histo : _h_deltaPhi.histos()) normalize(histo); } private: BinnedHistogram _h_deltaPhi; }; // A hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1421646); } diff --git a/analyses/pluginCMS/CMS_2016_I1430892.cc b/analyses/pluginCMS/CMS_2016_I1430892.cc --- a/analyses/pluginCMS/CMS_2016_I1430892.cc +++ b/analyses/pluginCMS/CMS_2016_I1430892.cc @@ -1,259 +1,259 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// CMS 8 TeV dilepton channel ttbar charge asymmetry analysis class CMS_2016_I1430892 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1430892); /// Book histograms and initialise projections void init() { // Complete final state FinalState fs(-MAXDOUBLE, MAXDOUBLE, 0*GeV); // Projection for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); - addProjection(electrons, "Electrons"); + declare(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); - addProjection(dressed_electrons, "DressedElectrons"); + declare(dressed_electrons, "DressedElectrons"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); - addProjection(muons, "Muons"); + declare(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); - addProjection(dressed_muons, "DressedMuons"); + declare(dressed_muons, "DressedMuons"); // Parton-level top quarks declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), "LeptonicPartonTops"); // Booking of histograms // This histogram is independent of the parton-level information, and is an // addition to the original analysis. It is compared to the same data as // the parton-level delta_abseta histogram d05-x01-y01. 
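// Editor's aside (not part of the original analysis): the lepton-level observable booked below
// is delta|eta| = |eta(l+)| - |eta(l-)|, and its top-quark analogue is |y(t)| - |y(tbar)|.
// The charge asymmetries are extracted from the profile histograms via the sign trick in
// fillWithUFOF (filling float(x > c) - float(x < c) with c at the symmetric-range midpoint),
// whose per-bin mean is effectively
//   A_C = ( N(delta > 0) - N(delta < 0) ) / ( N(delta > 0) + N(delta < 0) ).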
book(_h_dabsetadressedleptons, "d00-x01-y01", _bins_dabseta); // The remaining histos use parton-level information book(_h_dabseta, "d05-x01-y01", _bins_dabseta); book(_h_dabsrapidity, "d02-x01-y01", _bins_dabsrapidity); // 2D histos book(_h_dabsrapidity_var[0], "d11-x01-y01", _bins_dabsrapidity, _bins_tt_mass); book(_h_dabseta_var[0], "d17-x01-y01", _bins_dabseta, _bins_tt_mass); book(_h_dabsrapidity_var[1], "d23-x01-y01", _bins_dabsrapidity, _bins_tt_pT); book(_h_dabseta_var[1], "d29-x01-y01", _bins_dabseta, _bins_tt_pT); book(_h_dabsrapidity_var[2], "d35-x01-y01", _bins_dabsrapidity, _bins_tt_absrapidity); book(_h_dabseta_var[2], "d41-x01-y01", _bins_dabseta, _bins_tt_absrapidity); // Profile histos for asymmetries book(_h_dabsrapidity_profile[0], "d08-x01-y01", _bins_tt_mass); book(_h_dabseta_profile[0], "d14-x01-y01", _bins_tt_mass); book(_h_dabsrapidity_profile[1], "d20-x01-y01", _bins_tt_pT); book(_h_dabseta_profile[1], "d26-x01-y01", _bins_tt_pT); book(_h_dabsrapidity_profile[2], "d32-x01-y01", _bins_tt_absrapidity); book(_h_dabseta_profile[2], "d38-x01-y01", _bins_tt_absrapidity); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Use particle-level leptons for the first histogram const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); const vector dressedels = dressed_electrons.dressedLeptons(); const vector dressedmus = dressed_muons.dressedLeptons(); const size_t ndressedel = dressedels.size(); const size_t ndressedmu = dressedmus.size(); // For the particle-level histogram, require exactly one electron and exactly one muon, to select the ttbar->emu channel. // Note this means ttbar->emu events with additional PromptFinalState dilepton pairs from the shower are vetoed - for PYTHIA8, // this affects ~0.5% of events, so the effect is well below the level of sensitivity of the measured distribution. if ( ndressedel == 1 && ndressedmu == 1 ) { const int electrontouse = 0, muontouse = 0; // Opposite-charge leptons only if ( sameSign(dressedels[electrontouse], dressedmus[muontouse]) ) { MSG_INFO("Error, e and mu have same charge, skipping event"); } else { // Get the four-momenta of the positively- and negatively-charged leptons FourMomentum lepPlus = dressedels[electrontouse].charge() > 0 ? dressedels[electrontouse] : dressedmus[muontouse]; FourMomentum lepMinus = dressedels[electrontouse].charge() > 0 ? dressedmus[muontouse] : dressedels[electrontouse]; // Now calculate the variable double dabseta_temp = lepPlus.abseta() - lepMinus.abseta(); fillWithUFOF( _h_dabsetadressedleptons, dabseta_temp, weight ); } } // The remaining variables use parton-level information. 
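// Editor's aside before moving to the parton-level part (not part of the original analysis):
// the fillWithUFOF helper used above, defined at the end of this class, clamps the fill value
// just inside the histogram range so that would-be under/overflow entries land in the first or
// last visible bin instead of being lost:
//
//   h->fill(std::max(std::min(x, h->xMax()-1e-9), h->xMin()+1e-9), w);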
// Get the leptonically decaying tops const Particles leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); Particles chargedleptons; unsigned int ntrueleptonictops = 0; bool oppositesign = false; if ( leptonicpartontops.size() == 2 ) { for (size_t k = 0; k < leptonicpartontops.size(); ++k) { // Get the lepton const Particle lepTop = leptonicpartontops[k]; const auto isPromptChargedLepton = [](const Particle& p){return (isChargedLepton(p) && isPrompt(p, false, false));}; Particles lepton_candidates = lepTop.allDescendants(firstParticleWith(isPromptChargedLepton), false); if ( lepton_candidates.size() < 1 ) MSG_WARNING("error, PartonicTops::DecayMode::E_MU top quark had no daughter lepton candidate, skipping event."); // In some cases there is no lepton from the W decay but only leptons from the decay of a radiated gamma. // These hadronic PartonicTops are currently being mistakenly selected by PartonicTops::DecayMode::E_MU (as of April 2017), and need to be rejected. // PartonicTops::DecayMode::E_MU is being fixed in Rivet, and when it is the veto below should do nothing. /// @todo Should no longer be necessary -- remove bool istrueleptonictop = false; for (size_t i = 0; i < lepton_candidates.size(); ++i) { const Particle& lepton_candidate = lepton_candidates[i]; if ( lepton_candidate.hasParent(PID::PHOTON) ) { MSG_DEBUG("Found gamma parent, top: " << k+1 << " of " << leptonicpartontops.size() << " , lepton: " << i+1 << " of " << lepton_candidates.size()); continue; } if ( !istrueleptonictop && sameSign(lepTop,lepton_candidate) ) { chargedleptons.push_back(lepton_candidate); istrueleptonictop = true; } else MSG_WARNING("Error, found extra prompt charged lepton from top decay (and without gamma parent), ignoring it."); } if ( istrueleptonictop ) ++ntrueleptonictops; } } if ( ntrueleptonictops == 2 ) { oppositesign = !( sameSign(chargedleptons[0],chargedleptons[1]) ); if ( !oppositesign ) MSG_WARNING("error, same charge tops, skipping event."); } if ( ntrueleptonictops == 2 && oppositesign ) { // Get the four-momenta of the positively- and negatively-charged leptons const FourMomentum lepPlus = chargedleptons[0].charge() > 0 ? chargedleptons[0] : chargedleptons[1]; const FourMomentum lepMinus = chargedleptons[0].charge() > 0 ? chargedleptons[1] : chargedleptons[0]; const double dabseta_temp = lepPlus.abseta() - lepMinus.abseta(); // Get the four-momenta of the positively- and negatively-charged tops const FourMomentum topPlus_p4 = leptonicpartontops[0].pdgId() > 0 ? leptonicpartontops[0] : leptonicpartontops[1]; const FourMomentum topMinus_p4 = leptonicpartontops[0].pdgId() > 0 ? 
leptonicpartontops[1] : leptonicpartontops[0]; const FourMomentum ttbar_p4 = topPlus_p4 + topMinus_p4; const double tt_mass_temp = ttbar_p4.mass(); const double tt_absrapidity_temp = ttbar_p4.absrapidity(); const double tt_pT_temp = ttbar_p4.pT(); const double dabsrapidity_temp = topPlus_p4.absrapidity() - topMinus_p4.absrapidity(); // Fill parton-level histos fillWithUFOF( _h_dabseta, dabseta_temp, weight ); fillWithUFOF( _h_dabsrapidity, dabsrapidity_temp, weight ); // Now fill the same variables in the 2D and profile histos vs ttbar invariant mass, pT, and absolute rapidity for (int i_var = 0; i_var < 3; ++i_var) { double var; if ( i_var == 0 ) { var = tt_mass_temp; } else if ( i_var == 1 ) { var = tt_pT_temp; } else { var = tt_absrapidity_temp; } fillWithUFOF( _h_dabsrapidity_var[i_var], dabsrapidity_temp, var, weight ); fillWithUFOF( _h_dabseta_var[i_var], dabseta_temp, var, weight ); fillWithUFOF( _h_dabsrapidity_profile[i_var], dabsrapidity_temp, var, weight, (_h_dabsrapidity->xMax() + _h_dabsrapidity->xMin())/2. ); fillWithUFOF( _h_dabseta_profile[i_var], dabseta_temp, var, weight, (_h_dabseta->xMax() + _h_dabseta->xMin())/2. ); } } } /// Normalise histograms to unit area void finalize() { normalize(_h_dabsetadressedleptons); normalize(_h_dabseta); normalize(_h_dabsrapidity); for (int i_var = 0; i_var < 3; ++i_var) { normalize(_h_dabsrapidity_var[i_var]); normalize(_h_dabseta_var[i_var]); } } private: Histo1DPtr _h_dabsetadressedleptons, _h_dabseta, _h_dabsrapidity; Histo2DPtr _h_dabseta_var[3], _h_dabsrapidity_var[3]; Profile1DPtr _h_dabseta_profile[3], _h_dabsrapidity_profile[3]; const vector _bins_tt_mass = {300., 430., 530., 1200.}; const vector _bins_tt_pT = {0., 41., 92., 300.}; const vector _bins_tt_absrapidity = {0., 0.34, 0.75, 1.5}; const vector _bins_dabseta = { -2., -68./60., -48./60., -32./60., -20./60., -8./60., 0., 8./60., 20./60., 32./60., 48./60., 68./60., 2.}; const vector _bins_dabsrapidity = {-2., -44./60., -20./60., 0., 20./60., 44./60., 2.}; void fillWithUFOF(Histo1DPtr h, double x, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), w); } void fillWithUFOF(Histo2DPtr h, double x, double y, double w) { h->fill(std::max(std::min(x, h->xMax()-1e-9),h->xMin()+1e-9), std::max(std::min(y, h->yMax()-1e-9),h->yMin()+1e-9), w); } void fillWithUFOF(Profile1DPtr h, double x, double y, double w, double c) { h->fill(std::max(std::min(y, h->xMax()-1e-9),h->xMin()+1e-9), float(x > c) - float(x < c), w); } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1430892); } diff --git a/analyses/pluginCMS/CMS_2016_I1473674.cc b/analyses/pluginCMS/CMS_2016_I1473674.cc --- a/analyses/pluginCMS/CMS_2016_I1473674.cc +++ b/analyses/pluginCMS/CMS_2016_I1473674.cc @@ -1,124 +1,124 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { class CMS_2016_I1473674 : public Analysis { public: // Minimal constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1473674); // Set up projections and book histograms void init() { // Complete final state FinalState fs; // Parton level top quarks declare(PartonicTops(PartonicTops::DecayMode::E_MU, false), 
"LeptonicPartonTops"); declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicPartonTops"); // Projections for dressed electrons and muons IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); // IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState electrons(el_id); - addProjection(electrons, "Electrons"); + declare(electrons, "Electrons"); DressedLeptons dressed_electrons(photons, electrons, 0.1); - addProjection(dressed_electrons, "DressedElectrons"); + declare(dressed_electrons, "DressedElectrons"); // IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState muons(mu_id); - addProjection(muons, "Muons"); + declare(muons, "Muons"); DressedLeptons dressed_muons(photons, muons, 0.1); - addProjection(dressed_muons, "DressedMuons"); + declare(dressed_muons, "DressedMuons"); // Projection for jets VetoedFinalState fs_jets(FinalState(-MAXDOUBLE, MAXDOUBLE, 0*GeV)); fs_jets.addVetoOnThisFinalState(dressed_muons); - addProjection(FastJets(fs_jets, FastJets::ANTIKT, 0.5), "Jets"); + declare(FastJets(fs_jets, FastJets::ANTIKT, 0.5), "Jets"); // Projections for MET - addProjection(MissingMomentum(), "MET"); + declare(MissingMomentum(), "MET"); // Booking of histograms book(_hist_met ,5, 1, 1); book(_hist_ht ,6, 1, 1); book(_hist_st ,7, 1, 1); book(_hist_wpt ,8, 1, 1); } /// Per-event analysis void analyze(const Event& event) { const double weight = 1.0; // Select ttbar -> lepton+jets at parton level, removing tau decays const Particles leptonicpartontops = apply(event, "LeptonicPartonTops").particlesByPt(); if (leptonicpartontops.size() != 1) vetoEvent; const Particles hadronicpartontops = apply(event, "HadronicPartonTops").particlesByPt(); if (hadronicpartontops.size() != 1) vetoEvent; // Select ttbar -> lepton+jets at particle level const DressedLeptons& dressed_electrons = applyProjection(event, "DressedElectrons"); const DressedLeptons& dressed_muons = applyProjection(event, "DressedMuons"); if (dressed_electrons.dressedLeptons().size() + dressed_muons.dressedLeptons().size() != 1) vetoEvent; const FourMomentum lepton = (dressed_electrons.dressedLeptons().empty() ? 
dressed_muons : dressed_electrons).dressedLeptons()[0]; // MET const MissingMomentum& met = applyProjection(event, "MET"); _hist_met->fill(met.visibleMomentum().pT()/GeV, weight); // HT and ST const FastJets& jetpro = applyProjection(event, "Jets"); const Jets jets = jetpro.jetsByPt(20*GeV); double ht = 0.0; for (const Jet& j : jets) { if (deltaR(j.momentum(), lepton) > 0.3) { ht += j.pT(); } } double st = ht + lepton.pT() + met.visibleMomentum().pT(); _hist_ht->fill(ht/GeV, weight); _hist_st->fill(st/GeV, weight); // WPT const FourMomentum w = lepton - met.visibleMomentum(); _hist_wpt->fill(w.pT()/GeV, weight); } /// Normalize histograms void finalize() { normalize(_hist_met); normalize(_hist_ht); normalize(_hist_st); normalize(_hist_wpt); } private: Histo1DPtr _hist_met, _hist_ht, _hist_st, _hist_wpt; }; DECLARE_RIVET_PLUGIN(CMS_2016_I1473674); } diff --git a/analyses/pluginCMS/CMS_2016_I1486238.cc b/analyses/pluginCMS/CMS_2016_I1486238.cc --- a/analyses/pluginCMS/CMS_2016_I1486238.cc +++ b/analyses/pluginCMS/CMS_2016_I1486238.cc @@ -1,124 +1,124 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/InitialQuarks.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Studies of 2 b-jet + 2 jet production in proton-proton collisions at 7 TeV class CMS_2016_I1486238 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1486238); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FastJets akt(FinalState(), FastJets::ANTIKT, 0.5); - addProjection(akt, "antikT"); + declare(akt, "antikT"); book(_h_Deltaphi_newway, 1,1,1); book(_h_deltaphiafterlight, 9,1,1); book(_h_SumPLight, 5,1,1); book(_h_LeadingBJetpt, 11,1,1); book(_h_SubleadingBJetpt, 15,1,1); book(_h_LeadingLightJetpt, 13,1,1); book(_h_SubleadingLightJetpt, 17,1,1); book(_h_LeadingBJeteta, 10,1,1); book(_h_SubleadingBJeteta, 14,1,1); book(_h_LeadingLightJeteta, 12,1,1); book(_h_SubleadingLightJeteta, 16,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { const Jets& jets = apply(event, "antikT").jetsByPt(Cuts::absrap < 4.7 && Cuts::pT > 20*GeV); if (jets.size() < 4) vetoEvent; // Initial quarks /// @note Quark-level tagging... Particles bquarks; for (const GenParticle* p : particles(event.genEvent())) { if (abs(p->pdg_id()) == PID::BQUARK) bquarks += Particle(p); } Jets bjets, ljets; for (const Jet& j : jets) { const bool btag = any(bquarks, deltaRLess(j, 0.3)); // for (const Particle& b : bquarks) if (deltaR(j, b) < 0.3) btag = true; (btag && j.abseta() < 2.4 ? bjets : ljets).push_back(j); } // Fill histograms const double weight = 1.0; if (bjets.size() >= 2 && ljets.size() >= 2) { _h_LeadingBJetpt->fill(bjets[0].pT()/GeV, weight); _h_SubleadingBJetpt->fill(bjets[1].pT()/GeV, weight); _h_LeadingLightJetpt->fill(ljets[0].pT()/GeV, weight); _h_SubleadingLightJetpt->fill(ljets[1].pT()/GeV, weight); // _h_LeadingBJeteta->fill(bjets[0].eta(), weight); _h_SubleadingBJeteta->fill(bjets[1].eta(), weight); _h_LeadingLightJeteta->fill(ljets[0].eta(), weight); _h_SubleadingLightJeteta->fill(ljets[1].eta(), weight); const double lightdphi = deltaPhi(ljets[0], ljets[1]); _h_deltaphiafterlight->fill(lightdphi, weight); const double vecsumlightjets = sqrt(sqr(ljets[0].px()+ljets[1].px()) + sqr(ljets[0].py()+ljets[1].py())); //< @todo Just (lj0+lj1).pT()? 
Or use add_quad const double term2 = vecsumlightjets/(sqrt(sqr(ljets[0].px()) + sqr(ljets[0].py())) + sqrt(sqr(ljets[1].px()) + sqr(ljets[1].py()))); //< @todo lj0.pT() + lj1.pT()? Or add_quad _h_SumPLight->fill(term2, weight); const double pxBsyst2 = bjets[0].px()+bjets[1].px(); // @todo (bj0+bj1).px() const double pyBsyst2 = bjets[0].py()+bjets[1].py(); // @todo (bj0+bj1).py() const double pxJetssyst2 = ljets[0].px()+ljets[1].px(); // @todo (lj0+lj1).px() const double pyJetssyst2 = ljets[0].py()+ljets[1].py(); // @todo (lj0+lj1).py() const double modulusB2 = sqrt(sqr(pxBsyst2)+sqr(pyBsyst2)); //< @todo add_quad const double modulusJets2 = sqrt(sqr(pxJetssyst2)+sqr(pyJetssyst2)); //< @todo add_quad const double cosphiBsyst2 = pxBsyst2/modulusB2; const double cosphiJetssyst2 = pxJetssyst2/modulusJets2; const double phiBsyst2 = ((pyBsyst2 > 0) ? 1 : -1) * acos(cosphiBsyst2); //< @todo sign(pyBsyst2) const double phiJetssyst2 = sign(pyJetssyst2) * acos(cosphiJetssyst2); const double Dphi2 = deltaPhi(phiBsyst2, phiJetssyst2); _h_Deltaphi_newway->fill(Dphi2,weight); } } /// Normalise histograms etc., after the run void finalize() { const double invlumi = crossSection()/picobarn/sumOfWeights(); normalize({_h_SumPLight, _h_deltaphiafterlight, _h_Deltaphi_newway}); scale({_h_LeadingLightJetpt, _h_SubleadingLightJetpt, _h_LeadingBJetpt, _h_SubleadingBJetpt}, invlumi); scale({_h_LeadingLightJeteta, _h_SubleadingLightJeteta, _h_LeadingBJeteta, _h_SubleadingBJeteta}, invlumi); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_deltaphiafterlight, _h_Deltaphi_newway, _h_SumPLight; Histo1DPtr _h_LeadingBJetpt, _h_SubleadingBJetpt, _h_LeadingLightJetpt, _h_SubleadingLightJetpt; Histo1DPtr _h_LeadingBJeteta, _h_SubleadingBJeteta, _h_LeadingLightJeteta, _h_SubleadingLightJeteta; }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1486238); } diff --git a/analyses/pluginCMS/CMS_2016_I1487277.cc b/analyses/pluginCMS/CMS_2016_I1487277.cc --- a/analyses/pluginCMS/CMS_2016_I1487277.cc +++ b/analyses/pluginCMS/CMS_2016_I1487277.cc @@ -1,68 +1,68 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Tools/BinnedHistogram.hh" namespace Rivet { // Inclusive jet pT class CMS_2016_I1487277 : public Analysis { public: // Constructor CMS_2016_I1487277() : Analysis("CMS_2016_I1487277") {} // Book histograms and initialize projections: void init() { const FinalState fs; // Initialize the projectors: - addProjection(FastJets(fs, FastJets::ANTIKT, 0.7),"Jets"); + declare(FastJets(fs, FastJets::ANTIKT, 0.7),"Jets"); // Book histograms: {Histo1DPtr tmp; _hist_sigma.add(0.0, 0.5, book(tmp, 1, 1, 1));} {Histo1DPtr tmp; _hist_sigma.add(0.5, 1.0, book(tmp, 2, 1, 1));} {Histo1DPtr tmp; _hist_sigma.add(1.0, 1.5, book(tmp, 3, 1, 1));} {Histo1DPtr tmp; _hist_sigma.add(1.5, 2.0, book(tmp, 4, 1, 1));} {Histo1DPtr tmp; _hist_sigma.add(2.0, 2.5, book(tmp, 5, 1, 1));} {Histo1DPtr tmp; _hist_sigma.add(2.5, 3.0, book(tmp, 6, 1, 1));} {Histo1DPtr tmp; _hist_sigma.add(3.2, 4.7, book(tmp, 7, 1, 1));} } // Analysis void analyze(const Event &event) { const FastJets &fj = applyProjection(event,"Jets"); const Jets& jets = fj.jets(Cuts::ptIn(18*GeV, 5000.0*GeV) && Cuts::absrap < 5.2); // Fill the relevant histograms: for(const Jet &j : jets) { _hist_sigma.fill(j.absrap(), j.pT()); } } // Finalize void finalize() { _hist_sigma.scale(crossSection()/sumOfWeights()/2.0, this); } private: BinnedHistogram _hist_sigma; Histo1DPtr 
_hist_ptbins_y1; Histo1DPtr _hist_ptbins_y2; Histo1DPtr _hist_ptbins_y3; Histo1DPtr _hist_ptbins_y4; Histo1DPtr _hist_ptbins_y5; Histo1DPtr _hist_ptbins_y6; Histo1DPtr _hist_ptbins_y7; }; // This global object acts as a hook for the plugin system. DECLARE_RIVET_PLUGIN(CMS_2016_I1487277); } diff --git a/analyses/pluginCMS/CMS_2016_I1491950.cc b/analyses/pluginCMS/CMS_2016_I1491950.cc --- a/analyses/pluginCMS/CMS_2016_I1491950.cc +++ b/analyses/pluginCMS/CMS_2016_I1491950.cc @@ -1,500 +1,500 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Tools/ParticleName.hh" #include "Rivet/Tools/ParticleIdUtils.hh" namespace Rivet { namespace { //< only visible in this compilation unit /// @brief Special dressed lepton finder /// /// Find dressed leptons by clustering all leptons and photons class SpecialDressedLeptons : public FinalState { public: /// The default constructor. May specify cuts SpecialDressedLeptons(const FinalState& fs, const Cut& cut) : FinalState(cut) { setName("SpecialDressedLeptons"); IdentifiedFinalState ifs(fs); ifs.acceptIdPair(PID::PHOTON); ifs.acceptIdPair(PID::ELECTRON); ifs.acceptIdPair(PID::MUON); - addProjection(ifs, "IFS"); - addProjection(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); + declare(ifs, "IFS"); + declare(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); } /// Clone on the heap. virtual unique_ptr clone() const { return unique_ptr(new SpecialDressedLeptons(*this)); } /// Retrieve the dressed leptons const vector& dressedLeptons() const { return _clusteredLeptons; } private: /// Container which stores the clustered lepton objects vector _clusteredLeptons; public: void project(const Event& e) { _theParticles.clear(); _clusteredLeptons.clear(); vector allClusteredLeptons; const Jets jets = applyProjection(e, "LeptonJets").jetsByPt(5.*GeV); for (const Jet& jet : jets) { Particle lepCand; for (const Particle& cand : jet.particles()) { const int absPdgId = abs(cand.pdgId()); if (absPdgId == PID::ELECTRON || absPdgId == PID::MUON) { if (cand.pt() > lepCand.pt()) lepCand = cand; } } //Central lepton must be the major component if ((lepCand.pt() < jet.pt()/2.) || (lepCand.pdgId() == 0)) continue; DressedLepton lepton = DressedLepton(lepCand); for (const Particle& cand : jet.particles()) { if (cand == lepCand) continue; if (cand.pid() != PID::PHOTON) continue; lepton.addPhoton(cand, true); } allClusteredLeptons.push_back(lepton); } for (const DressedLepton& lepton : allClusteredLeptons) { if (accept(lepton)) { _clusteredLeptons.push_back(lepton); _theParticles.push_back(lepton.constituentLepton()); _theParticles += lepton.constituentPhotons(); } } } }; } class CMS_2016_I1491950 : public Analysis { public: /// Constructor CMS_2016_I1491950() : Analysis("CMS_2016_I1491950") { } /// Book histograms and initialise projections before the run void init() { FinalState fs(Cuts::pT > 0. 
&& Cuts::abseta < 6.); PromptFinalState prompt_fs(fs); prompt_fs.acceptMuonDecays(true); prompt_fs.acceptTauDecays(true); // Projection for dressed electrons and muons Cut leptonCuts = Cuts::abseta < 2.5 and Cuts::pt > 30.*GeV; SpecialDressedLeptons dressedleptons(prompt_fs, leptonCuts); - addProjection(dressedleptons, "DressedLeptons"); + declare(dressedleptons, "DressedLeptons"); // Neutrinos IdentifiedFinalState neutrinos(prompt_fs); neutrinos.acceptNeutrinos(); - addProjection(neutrinos, "Neutrinos"); + declare(neutrinos, "Neutrinos"); // Projection for jets VetoedFinalState fsForJets(fs); fsForJets.addVetoOnThisFinalState(dressedleptons); fsForJets.addVetoOnThisFinalState(neutrinos); - addProjection(FastJets(fsForJets, FastJets::ANTIKT, 0.4, JetAlg::Muons::DECAY, JetAlg::Invisibles::DECAY), "Jets"); + declare(FastJets(fsForJets, FastJets::ANTIKT, 0.4, JetAlg::Muons::DECAY, JetAlg::Invisibles::DECAY), "Jets"); //book hists book(_hist_thadpt, "d01-x02-y01"); book(_hist_thady, "d03-x02-y01"); book(_hist_tleppt, "d05-x02-y01"); book(_hist_tlepy, "d07-x02-y01"); book(_hist_ttpt, "d09-x02-y01"); book(_hist_tty, "d13-x02-y01"); book(_hist_ttm, "d11-x02-y01"); book(_hist_njet, "d15-x02-y01"); book(_hist_njets_thadpt_1, "d17-x02-y01"); book(_hist_njets_thadpt_2, "d18-x02-y01"); book(_hist_njets_thadpt_3, "d19-x02-y01"); book(_hist_njets_thadpt_4, "d20-x02-y01"); book(_hist_njets_ttpt_1, "d22-x02-y01"); book(_hist_njets_ttpt_2, "d23-x02-y01"); book(_hist_njets_ttpt_3, "d24-x02-y01"); book(_hist_njets_ttpt_4, "d25-x02-y01"); book(_hist_thady_thadpt_1, "d27-x02-y01"); book(_hist_thady_thadpt_2, "d28-x02-y01"); book(_hist_thady_thadpt_3, "d29-x02-y01"); book(_hist_thady_thadpt_4, "d30-x02-y01"); book(_hist_ttm_tty_1, "d32-x02-y01"); book(_hist_ttm_tty_2, "d33-x02-y01"); book(_hist_ttm_tty_3, "d34-x02-y01"); book(_hist_ttm_tty_4, "d35-x02-y01"); book(_hist_ttpt_ttm_1, "d37-x02-y01"); book(_hist_ttpt_ttm_2, "d38-x02-y01"); book(_hist_ttpt_ttm_3, "d39-x02-y01"); book(_hist_ttpt_ttm_4, "d40-x02-y01"); book(_histnorm_thadpt, "d42-x02-y01"); book(_histnorm_thady, "d44-x02-y01"); book(_histnorm_tleppt, "d46-x02-y01"); book(_histnorm_tlepy, "d48-x02-y01"); book(_histnorm_ttpt, "d50-x02-y01"); book(_histnorm_tty, "d54-x02-y01"); book(_histnorm_ttm, "d52-x02-y01"); book(_histnorm_njet, "d56-x02-y01"); book(_histnorm_njets_thadpt_1, "d58-x02-y01"); book(_histnorm_njets_thadpt_2, "d59-x02-y01"); book(_histnorm_njets_thadpt_3, "d60-x02-y01"); book(_histnorm_njets_thadpt_4, "d61-x02-y01"); book(_histnorm_njets_ttpt_1, "d63-x02-y01"); book(_histnorm_njets_ttpt_2, "d64-x02-y01"); book(_histnorm_njets_ttpt_3, "d65-x02-y01"); book(_histnorm_njets_ttpt_4, "d66-x02-y01"); book(_histnorm_thady_thadpt_1, "d68-x02-y01"); book(_histnorm_thady_thadpt_2, "d69-x02-y01"); book(_histnorm_thady_thadpt_3, "d70-x02-y01"); book(_histnorm_thady_thadpt_4, "d71-x02-y01"); book(_histnorm_ttm_tty_1, "d73-x02-y01"); book(_histnorm_ttm_tty_2, "d74-x02-y01"); book(_histnorm_ttm_tty_3, "d75-x02-y01"); book(_histnorm_ttm_tty_4, "d76-x02-y01"); book(_histnorm_ttpt_ttm_1, "d78-x02-y01"); book(_histnorm_ttpt_ttm_2, "d79-x02-y01"); book(_histnorm_ttpt_ttm_3, "d80-x02-y01"); book(_histnorm_ttpt_ttm_4, "d81-x02-y01"); } /// Perform the per-event analysis void analyze(const Event& event) { // leptons const SpecialDressedLeptons& dressedleptons_proj = applyProjection(event, "DressedLeptons"); std::vector dressedLeptons = dressedleptons_proj.dressedLeptons(); if(dressedLeptons.size() != 1) return; // neutrinos const Particles neutrinos = 
applyProjection(event, "Neutrinos").particlesByPt(); _nusum = FourMomentum(0., 0., 0., 0.); for(const Particle& neutrino : neutrinos) { _nusum += neutrino.momentum(); } _wl = _nusum + dressedLeptons[0].momentum(); // jets Cut jet_cut = (Cuts::abseta < 2.5) and (Cuts::pT > 25.*GeV); const Jets jets = applyProjection(event, "Jets").jetsByPt(jet_cut); Jets allJets; for (const Jet& jet : jets) { allJets.push_back(jet); } Jets bJets; for (const Jet& jet : allJets) { if (jet.bTagged()) bJets.push_back(jet); } if(bJets.size() < 2 || allJets.size() < 4) return; //construct top quark proxies double Kmin = numeric_limits::max(); for(const Jet& itaj : allJets) { for(const Jet& itbj : allJets) { if (itaj.momentum() == itbj.momentum()) continue; FourMomentum wh(itaj.momentum() + itbj.momentum()); for(const Jet& ithbj : bJets) { if(itaj.momentum() == ithbj.momentum() || itbj.momentum() == ithbj.momentum()) continue; FourMomentum th(wh + ithbj.momentum()); for(const Jet& itlbj : bJets) { if(itaj.momentum() == itlbj.momentum() || itbj.momentum() == itlbj.momentum() || ithbj.momentum() == itlbj.momentum()) continue; FourMomentum tl(_wl + itlbj.momentum()); double K = pow(wh.mass() - 80.4, 2) + pow(th.mass() - 172.5, 2) + pow(tl.mass() - 172.5, 2); if(K < Kmin) { Kmin = K; _tl = tl; _th = th; _wh = wh; } } } } } _hist_thadpt->fill(_th.pt()); _hist_thady->fill(abs(_th.rapidity()) ); _hist_tleppt->fill(_tl.pt() ); _hist_tlepy->fill(abs(_tl.rapidity()) ); _histnorm_thadpt->fill(_th.pt()); _histnorm_thady->fill(abs(_th.rapidity()) ); _histnorm_tleppt->fill(_tl.pt() ); _histnorm_tlepy->fill(abs(_tl.rapidity()) ); FourMomentum tt(_tl+_th); _hist_ttpt->fill(tt.pt() ); _hist_tty->fill(abs(tt.rapidity()) ); _hist_ttm->fill(tt.mass() ); _hist_njet->fill(min(allJets.size()-4., 4.)); _histnorm_ttpt->fill(tt.pt() ); _histnorm_tty->fill(abs(tt.rapidity()) ); _histnorm_ttm->fill(tt.mass() ); _histnorm_njet->fill(min(allJets.size()-4., 4.)); if(allJets.size() == 4) { _hist_njets_thadpt_1->fill(_th.pt()); _hist_njets_ttpt_1->fill(tt.pt()); _histnorm_njets_thadpt_1->fill(_th.pt()); _histnorm_njets_ttpt_1->fill(tt.pt()); } else if(allJets.size() == 5) { _hist_njets_thadpt_2->fill(_th.pt()); _hist_njets_ttpt_2->fill(tt.pt()); _histnorm_njets_thadpt_2->fill(_th.pt()); _histnorm_njets_ttpt_2->fill(tt.pt()); } else if(allJets.size() == 6) { _hist_njets_thadpt_3->fill(_th.pt()); _hist_njets_ttpt_3->fill(tt.pt()); _histnorm_njets_thadpt_3->fill(_th.pt()); _histnorm_njets_ttpt_3->fill(tt.pt()); } else //>= 4 jets { _hist_njets_thadpt_4->fill(_th.pt()); _hist_njets_ttpt_4->fill(tt.pt()); _histnorm_njets_thadpt_4->fill(_th.pt()); _histnorm_njets_ttpt_4->fill(tt.pt()); } if(abs(_th.rapidity()) < 0.5) { _hist_thady_thadpt_1->fill(_th.pt()); _histnorm_thady_thadpt_1->fill(_th.pt()); } else if(abs(_th.rapidity()) < 1.0) { _hist_thady_thadpt_2->fill(_th.pt()); _histnorm_thady_thadpt_2->fill(_th.pt()); } else if(abs(_th.rapidity()) < 1.5) { _hist_thady_thadpt_3->fill(_th.pt()); _histnorm_thady_thadpt_3->fill(_th.pt()); } else if(abs(_th.rapidity()) < 2.5) { _hist_thady_thadpt_4->fill(_th.pt()); _histnorm_thady_thadpt_4->fill(_th.pt()); } if(tt.mass() >= 300. && tt.mass() < 450.) { _hist_ttm_tty_1->fill(abs(tt.rapidity())); _histnorm_ttm_tty_1->fill(abs(tt.rapidity())); } else if(tt.mass() >= 450. && tt.mass() < 625.) { _hist_ttm_tty_2->fill(abs(tt.rapidity())); _histnorm_ttm_tty_2->fill(abs(tt.rapidity())); } else if(tt.mass() >= 625. && tt.mass() < 850.) 
{ _hist_ttm_tty_3->fill(abs(tt.rapidity())); _histnorm_ttm_tty_3->fill(abs(tt.rapidity())); } else if(tt.mass() >= 850. && tt.mass() < 2000.) { _hist_ttm_tty_4->fill(abs(tt.rapidity())); _histnorm_ttm_tty_4->fill(abs(tt.rapidity())); } if(tt.pt() < 35.) { _hist_ttpt_ttm_1->fill(tt.mass()); _histnorm_ttpt_ttm_1->fill(tt.mass()); } else if(tt.pt() < 80.) { _hist_ttpt_ttm_2->fill(tt.mass()); _histnorm_ttpt_ttm_2->fill(tt.mass()); } else if(tt.pt() < 140.) { _hist_ttpt_ttm_3->fill(tt.mass()); _histnorm_ttpt_ttm_3->fill(tt.mass()); } else if(tt.pt() < 500.) { _hist_ttpt_ttm_4->fill(tt.mass()); _histnorm_ttpt_ttm_4->fill(tt.mass()); } } /// Normalise histograms etc., after the run void finalize() { scale(_hist_thadpt, crossSection()/sumOfWeights()); scale(_hist_thady, crossSection()/sumOfWeights()); scale(_hist_tleppt, crossSection()/sumOfWeights()); scale(_hist_tlepy, crossSection()/sumOfWeights()); scale(_hist_ttpt, crossSection()/sumOfWeights()); scale(_hist_tty, crossSection()/sumOfWeights()); scale(_hist_ttm, crossSection()/sumOfWeights()); scale(_hist_njet, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_1, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_2, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_3, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_4, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_1, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_2, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_3, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_4, crossSection()/sumOfWeights()); scale(_hist_thady_thadpt_1, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_2, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_3, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_4, crossSection()/sumOfWeights()/1.0); scale(_hist_ttm_tty_1, crossSection()/sumOfWeights()/150.); scale(_hist_ttm_tty_2, crossSection()/sumOfWeights()/175.); scale(_hist_ttm_tty_3, crossSection()/sumOfWeights()/225.); scale(_hist_ttm_tty_4, crossSection()/sumOfWeights()/1150.); scale(_hist_ttpt_ttm_1, crossSection()/sumOfWeights()/35.); scale(_hist_ttpt_ttm_2, crossSection()/sumOfWeights()/45.); scale(_hist_ttpt_ttm_3, crossSection()/sumOfWeights()/60.); scale(_hist_ttpt_ttm_4, crossSection()/sumOfWeights()/360.); scale(_histnorm_thadpt, 1./_histnorm_thadpt->sumW(false)); scale(_histnorm_thady, 1./_histnorm_thady->sumW(false)); scale(_histnorm_tleppt, 1./_histnorm_tleppt->sumW(false)); scale(_histnorm_tlepy, 1./_histnorm_tlepy->sumW(false)); scale(_histnorm_ttpt, 1./_histnorm_ttpt->sumW(false)); scale(_histnorm_tty, 1./_histnorm_tty->sumW(false)); scale(_histnorm_ttm, 1./_histnorm_ttm->sumW(false)); scale(_histnorm_njet, 1./_histnorm_njet->sumW(false)); double sum_njets_thadpt = _histnorm_njets_thadpt_1->sumW(false) + _histnorm_njets_thadpt_2->sumW(false) + _histnorm_njets_thadpt_3->sumW(false) + _histnorm_njets_thadpt_4->sumW(false); scale(_histnorm_njets_thadpt_1, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_2, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_3, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_4, 1./sum_njets_thadpt); double sum_njets_ttpt = _histnorm_njets_ttpt_1->sumW(false) + _histnorm_njets_ttpt_2->sumW(false) + _histnorm_njets_ttpt_3->sumW(false) + _histnorm_njets_ttpt_4->sumW(false); scale(_histnorm_njets_ttpt_1, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_2, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_3, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_4, 1./sum_njets_ttpt); double 
sum_thady_thadpt = _histnorm_thady_thadpt_1->sumW(false) + _histnorm_thady_thadpt_2->sumW(false) + _histnorm_thady_thadpt_3->sumW(false) + _histnorm_thady_thadpt_4->sumW(false); scale(_histnorm_thady_thadpt_1, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_2, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_3, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_4, 1./sum_thady_thadpt/1.0); double sum_ttm_tty = _histnorm_ttm_tty_1->sumW(false) + _histnorm_ttm_tty_2->sumW(false) + _histnorm_ttm_tty_3->sumW(false) + _histnorm_ttm_tty_4->sumW(false); scale(_histnorm_ttm_tty_1, 1./sum_ttm_tty/150.); scale(_histnorm_ttm_tty_2, 1./sum_ttm_tty/175.); scale(_histnorm_ttm_tty_3, 1./sum_ttm_tty/225.); scale(_histnorm_ttm_tty_4, 1./sum_ttm_tty/1150.); double sum_ttpt_ttm = _histnorm_ttpt_ttm_1->sumW(false) + _histnorm_ttpt_ttm_2->sumW(false) + _histnorm_ttpt_ttm_3->sumW(false) + _histnorm_ttpt_ttm_4->sumW(false); scale(_histnorm_ttpt_ttm_1, 1./sum_ttpt_ttm/35.); scale(_histnorm_ttpt_ttm_2, 1./sum_ttpt_ttm/45.); scale(_histnorm_ttpt_ttm_3, 1./sum_ttpt_ttm/60.); scale(_histnorm_ttpt_ttm_4, 1./sum_ttpt_ttm/360.); } private: FourMomentum _tl; FourMomentum _th; FourMomentum _wl; FourMomentum _wh; FourMomentum _nusum; Histo1DPtr _hist_thadpt; Histo1DPtr _hist_thady; Histo1DPtr _hist_tleppt; Histo1DPtr _hist_tlepy; Histo1DPtr _hist_ttpt; Histo1DPtr _hist_tty; Histo1DPtr _hist_ttm; Histo1DPtr _hist_njet; Histo1DPtr _hist_njets_thadpt_1; Histo1DPtr _hist_njets_thadpt_2; Histo1DPtr _hist_njets_thadpt_3; Histo1DPtr _hist_njets_thadpt_4; Histo1DPtr _hist_njets_ttpt_1; Histo1DPtr _hist_njets_ttpt_2; Histo1DPtr _hist_njets_ttpt_3; Histo1DPtr _hist_njets_ttpt_4; Histo1DPtr _hist_thady_thadpt_1; Histo1DPtr _hist_thady_thadpt_2; Histo1DPtr _hist_thady_thadpt_3; Histo1DPtr _hist_thady_thadpt_4; Histo1DPtr _hist_ttm_tty_1; Histo1DPtr _hist_ttm_tty_2; Histo1DPtr _hist_ttm_tty_3; Histo1DPtr _hist_ttm_tty_4; Histo1DPtr _hist_ttpt_ttm_1; Histo1DPtr _hist_ttpt_ttm_2; Histo1DPtr _hist_ttpt_ttm_3; Histo1DPtr _hist_ttpt_ttm_4; Histo1DPtr _histnorm_thadpt; Histo1DPtr _histnorm_thady; Histo1DPtr _histnorm_tleppt; Histo1DPtr _histnorm_tlepy; Histo1DPtr _histnorm_ttpt; Histo1DPtr _histnorm_tty; Histo1DPtr _histnorm_ttm; Histo1DPtr _histnorm_njet; Histo1DPtr _histnorm_njets_thadpt_1; Histo1DPtr _histnorm_njets_thadpt_2; Histo1DPtr _histnorm_njets_thadpt_3; Histo1DPtr _histnorm_njets_thadpt_4; Histo1DPtr _histnorm_njets_ttpt_1; Histo1DPtr _histnorm_njets_ttpt_2; Histo1DPtr _histnorm_njets_ttpt_3; Histo1DPtr _histnorm_njets_ttpt_4; Histo1DPtr _histnorm_thady_thadpt_1; Histo1DPtr _histnorm_thady_thadpt_2; Histo1DPtr _histnorm_thady_thadpt_3; Histo1DPtr _histnorm_thady_thadpt_4; Histo1DPtr _histnorm_ttm_tty_1; Histo1DPtr _histnorm_ttm_tty_2; Histo1DPtr _histnorm_ttm_tty_3; Histo1DPtr _histnorm_ttm_tty_4; Histo1DPtr _histnorm_ttpt_ttm_1; Histo1DPtr _histnorm_ttpt_ttm_2; Histo1DPtr _histnorm_ttpt_ttm_3; Histo1DPtr _histnorm_ttpt_ttm_4; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1491950); } diff --git a/analyses/pluginCMS/CMS_2016_I1491953.cc b/analyses/pluginCMS/CMS_2016_I1491953.cc --- a/analyses/pluginCMS/CMS_2016_I1491953.cc +++ b/analyses/pluginCMS/CMS_2016_I1491953.cc @@ -1,331 +1,331 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { /// @brief Differential cross sections for associated production of a W boson and jets at 
8 TeV class CMS_2016_I1491953 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1491953); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections FinalState fs; WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); - addProjection(wfinder_mu, "WFinder_mu"); + declare(wfinder_mu, "WFinder_mu"); // Define veto FS VetoedFinalState vfs; vfs.addVetoOnThisFinalState(wfinder_mu); vfs.addVetoPairId(PID::MUON); vfs.vetoNeutrinos(); FastJets fastjets(vfs, FastJets::ANTIKT, 0.5); - addProjection(fastjets, "Jets"); + declare(fastjets, "Jets"); book(_hist_Mult_exc ,1,1,1); book(_hist_inc_WJetMult ,2,1,1); book(_hist_addJetPt1j ,3,1,1); book(_hist_addJetPt2j ,4,1,1); book(_hist_addJetPt3j ,5,1,1); book(_hist_addJetPt4j ,6,1,1); book(_hist_addHt_1j ,7,1,1); book(_hist_addHt_2j ,8,1,1); book(_hist_addHt_3j ,9,1,1); book(_hist_addHt_4j ,10,1,1); book(_hist_diJetPt_2j ,11,1,1); book(_hist_diJetPt_3j ,12,1,1); book(_hist_diJetPt_4j ,13,1,1); book(_hist_dijetM_2j ,14,1,1); book(_hist_dijetM_3j ,15,1,1); book(_hist_dijetM_4j ,16,1,1); book(_hist_Jeteta1j ,17,1,1); book(_hist_Jeteta2j ,18,1,1); book(_hist_Jeteta3j ,19,1,1); book(_hist_Jeteta4j ,20,1,1); book(_hist_dyj1j2_2j ,21,1,1); book(_hist_dyj1j2_3j ,22,1,1); book(_hist_dyj1j2_4j ,23,1,1); book(_hist_dyj1j3_3j ,24,1,1); book(_hist_dyj2j3_3j ,25,1,1); book(_hist_dyjFjB_2j ,26,1,1); book(_hist_dyjFjB_3j ,27,1,1); book(_hist_dyjFjB_4j ,28,1,1); book(_hist_dphij1j2_2j ,29,1,1); book(_hist_dphijFjB_2j ,30,1,1); book(_hist_dRj1j2_2j ,31,1,1); book(_hist_dphij1mu_1j ,32,1,1); book(_hist_dphij2mu_2j ,33,1,1); book(_hist_dphij3mu_3j ,34,1,1); book(_hist_dphij4mu_4j ,35,1,1); book(_hist_MeanNJht_1j ,36,1,1); book(_hist_MeanNJht_2j ,37,1,1); book(_hist_MeanNJdyj1j2_2j ,38,1,1); book(_hist_MeanNJdyjFjB_2j ,39,1,1); } // Define function used for filiing inc Njets histo void _fill(Histo1DPtr& _histJetMult, vector& finaljet_list) { _histJetMult->fill(0); for (size_t i = 0 ; i < finaljet_list.size() ; ++i) { if (i == 7) break; _histJetMult->fill(i+1); // inclusive multiplicity } } /// Perform the per-event analysis void analyze(const Event& event) { const WFinder& wfinder_mu = apply(event, "WFinder_mu"); if (wfinder_mu.bosons().size() != 1) vetoEvent; //const FourMomentum& lepton0 = wfinder_mu.constituentLeptons()[0].momentum(); //const FourMomentum& neutrino = wfinder_mu.constituentNeutrinos()[0].momentum(); //double WmT = sqrt( 2 * lepton0.pT() * neutrino.pT() * (1 - cos(deltaPhi(lepton0, neutrino))) ); const FourMomentum& lepton0 = wfinder_mu.constituentLepton().momentum(); double WmT = wfinder_mu.mT(); if (WmT < 50.0*GeV) vetoEvent; if (lepton0.abseta() > 2.1 || lepton0.pT() < 25.0*GeV) vetoEvent; // Select final jets, ordered by decreasing pT vector finaljet_list; double HT = 0.0; const Jets jListAll = apply(event, "Jets").jetsByPt(30.0*GeV); for (const Jet& j : jListAll) { if (j.abseta() < 2.4 && j.pT() > 30.0*GeV && deltaR(lepton0, j) > 0.5) { finaljet_list.push_back(j.momentum()); HT += j.pT(); } } // Another jet list, sorted by increasing rapidity vector jListRap = finaljet_list; std::sort(jListRap.begin(), jListRap.end(), cmpMomByRap); // Multiplicity exc plot. 
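// Note: exclusive jet multiplicities above 7 are merged into the last bin (filled at 7), while the inclusive multiplicities are filled via the _fill() helper defined above.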
if (finaljet_list.size()<=7) { _hist_Mult_exc->fill(finaljet_list.size()); } else if (finaljet_list.size()>7){ _hist_Mult_exc->fill(7.); } // Multiplicity inc plot. _fill(_hist_inc_WJetMult, finaljet_list); if (finaljet_list.size()>=1) { _hist_addJetPt1j->fill(finaljet_list[0].pT()); _hist_Jeteta1j->fill(fabs(finaljet_list[0].eta())); _hist_addHt_1j->fill(HT); _hist_dphij1mu_1j->fill( deltaPhi(finaljet_list[0].phi(), lepton0.phi()) ); _hist_MeanNJht_1j->fill( HT, finaljet_list.size()); } if (finaljet_list.size()>=2) { _hist_addJetPt2j->fill(finaljet_list[1].pT()); _hist_Jeteta2j->fill(fabs(finaljet_list[1].eta())); _hist_addHt_2j->fill(HT); _hist_dyj1j2_2j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyjFjB_2j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dphij1j2_2j ->fill( deltaPhi(finaljet_list[0].phi(), finaljet_list[1].phi())); _hist_dphijFjB_2j ->fill( deltaPhi(jListRap[0].phi(), jListRap[jListRap.size()-1].phi()) ); _hist_dijetM_2j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_2j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dRj1j2_2j ->fill( deltaR(finaljet_list[0].rapidity(), finaljet_list[0].phi(), finaljet_list[1].rapidity(), finaljet_list[1].phi())); _hist_dphij2mu_2j ->fill( deltaPhi(finaljet_list[1].phi(), lepton0.phi()) ); _hist_MeanNJht_2j->fill( HT, finaljet_list.size()); _hist_MeanNJdyj1j2_2j->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity()), finaljet_list.size()); _hist_MeanNJdyjFjB_2j->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity()), finaljet_list.size()); } if (finaljet_list.size()>=3) { _hist_addJetPt3j->fill(finaljet_list[2].pT()); _hist_Jeteta3j->fill(fabs(finaljet_list[2].eta())); _hist_addHt_3j->fill(HT); _hist_dyj1j2_3j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyj1j3_3j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[2].rapidity())); _hist_dyj2j3_3j ->fill( fabs(finaljet_list[1].rapidity() - finaljet_list[2].rapidity())); _hist_dyjFjB_3j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dijetM_3j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_3j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dphij3mu_3j->fill( deltaPhi(finaljet_list[2].phi(), lepton0.phi()) ); } if (finaljet_list.size()>=4) { _hist_addJetPt4j->fill(finaljet_list[3].pT()); _hist_Jeteta4j->fill(fabs(finaljet_list[3].eta())); _hist_addHt_4j->fill(HT); _hist_dyj1j2_4j ->fill( fabs(finaljet_list[0].rapidity() - finaljet_list[1].rapidity())); _hist_dyjFjB_4j ->fill( fabs(jListRap[0].rapidity() - jListRap[jListRap.size()-1].rapidity())); _hist_dijetM_4j ->fill( (add(finaljet_list[0], finaljet_list[1])).mass()); _hist_diJetPt_4j ->fill( (add(finaljet_list[0], finaljet_list[1])).pT()); _hist_dphij4mu_4j->fill( deltaPhi(finaljet_list[3].phi(), lepton0.phi()) ); } } //void loop /// Normalise histograms etc., after the run void finalize() { const double crossec = !std::isnan(crossSectionPerEvent()) ? 
crossSection() : 36703*picobarn; if (std::isnan(crossSectionPerEvent())){ MSG_INFO("No valid cross-section given, using NNLO xsec calculated by FEWZ " << crossec/picobarn << " pb"); } scale(_hist_Mult_exc, crossec/picobarn/sumOfWeights()); scale(_hist_inc_WJetMult, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt1j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt2j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt3j, crossec/picobarn/sumOfWeights()); scale(_hist_addJetPt4j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta1j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta2j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta3j, crossec/picobarn/sumOfWeights()); scale(_hist_Jeteta4j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_1j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_2j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_3j, crossec/picobarn/sumOfWeights()); scale(_hist_addHt_4j, crossec/picobarn/sumOfWeights()); //------------------------------------- scale(_hist_dyj1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j2_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j2_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyjFjB_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj1j3_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dyj2j3_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphijFjB_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dRj1j2_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dijetM_4j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_2j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_3j, crossec/picobarn/sumOfWeights()); scale(_hist_diJetPt_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1mu_1j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij2mu_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij3mu_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij4mu_4j, crossec/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_inc_WJetMult; Histo1DPtr _hist_Mult_exc; Histo1DPtr _hist_addJetPt1j; Histo1DPtr _hist_addJetPt2j; Histo1DPtr _hist_addJetPt3j; Histo1DPtr _hist_addJetPt4j; Histo1DPtr _hist_Jeteta1j; Histo1DPtr _hist_Jeteta2j; Histo1DPtr _hist_Jeteta3j; Histo1DPtr _hist_Jeteta4j; Histo1DPtr _hist_addHt_1j; Histo1DPtr _hist_addHt_2j; Histo1DPtr _hist_addHt_3j; Histo1DPtr _hist_addHt_4j; //------------------------------------- Histo1DPtr _hist_dyj1j2_2j; Histo1DPtr _hist_dyj1j2_3j; Histo1DPtr _hist_dyj1j2_4j; Histo1DPtr _hist_dyjFjB_2j; Histo1DPtr _hist_dyjFjB_3j; Histo1DPtr _hist_dyjFjB_4j; Histo1DPtr _hist_dyj1j3_3j; Histo1DPtr _hist_dyj2j3_3j; Histo1DPtr _hist_dphij1j2_2j; Histo1DPtr _hist_dphijFjB_2j; Histo1DPtr _hist_dRj1j2_2j; Histo1DPtr _hist_dijetM_2j; Histo1DPtr _hist_dijetM_3j; Histo1DPtr _hist_dijetM_4j; Histo1DPtr _hist_diJetPt_2j; Histo1DPtr _hist_diJetPt_3j; Histo1DPtr _hist_diJetPt_4j; Histo1DPtr _hist_dphij1mu_1j; Histo1DPtr _hist_dphij2mu_2j; Histo1DPtr _hist_dphij3mu_3j; Histo1DPtr _hist_dphij4mu_4j; //------------------------------------- Profile1DPtr _hist_MeanNJht_1j; Profile1DPtr _hist_MeanNJht_2j; Profile1DPtr _hist_MeanNJdyj1j2_2j; Profile1DPtr _hist_MeanNJdyjFjB_2j; //@} }; // The hook for the plugin system 
DECLARE_RIVET_PLUGIN(CMS_2016_I1491953); } diff --git a/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc b/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc --- a/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc +++ b/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc @@ -1,183 +1,183 @@ #include "Rivet/Analysis.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Tools/ParticleName.hh" #include "Rivet/Tools/ParticleIdUtils.hh" namespace Rivet { namespace { //< only visible in this compilation unit /// @brief Special dressed lepton finder /// /// Find dressed leptons by clustering all leptons and photons class SpecialDressedLeptons : public FinalState { public: /// Constructor SpecialDressedLeptons(const FinalState& fs, const Cut& cut) : FinalState(cut) { setName("SpecialDressedLeptons"); IdentifiedFinalState ifs(fs); ifs.acceptIdPair(PID::PHOTON); ifs.acceptIdPair(PID::ELECTRON); ifs.acceptIdPair(PID::MUON); - addProjection(ifs, "IFS"); - addProjection(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); + declare(ifs, "IFS"); + declare(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); } /// Clone on the heap virtual unique_ptr clone() const { return unique_ptr(new SpecialDressedLeptons(*this)); } /// Retrieve the dressed leptons const vector& dressedLeptons() const { return _clusteredLeptons; } /// Perform the calculation void project(const Event& e) { _theParticles.clear(); _clusteredLeptons.clear(); vector allClusteredLeptons; const Jets jets = applyProjection(e, "LeptonJets").jetsByPt(5*GeV); for (const Jet& jet : jets) { Particle lepCand; for (const Particle& cand : jet.particles()) { const int absPdgId = cand.abspid(); if (absPdgId == PID::ELECTRON || absPdgId == PID::MUON) { if (cand.pt() > lepCand.pt()) lepCand = cand; } } // Central lepton must be the major component if ((lepCand.pt() < jet.pt()/2.) 
|| (lepCand.pdgId() == 0)) continue; DressedLepton lepton = DressedLepton(lepCand); for (const Particle& cand : jet.particles()) { if (cand == lepCand) continue; lepton.addPhoton(cand, true); } allClusteredLeptons.push_back(lepton); } for (const DressedLepton& lepton : allClusteredLeptons) { if (accept(lepton)) { _clusteredLeptons.push_back(lepton); _theParticles.push_back(lepton.constituentLepton()); _theParticles += lepton.constituentPhotons(); } } } private: /// Container which stores the clustered lepton objects vector _clusteredLeptons; }; } /// Jet multiplicity in lepton+jets ttbar at 8 TeV class CMS_2016_PAS_TOP_15_006 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_PAS_TOP_15_006); /// @name Analysis methods //@{ /// Set up projections and book histograms void init() { // Complete final state FinalState fs; Cut superLooseLeptonCuts = Cuts::pt > 5*GeV; SpecialDressedLeptons dressedleptons(fs, superLooseLeptonCuts); - addProjection(dressedleptons, "DressedLeptons"); + declare(dressedleptons, "DressedLeptons"); // Projection for jets VetoedFinalState fsForJets(fs); fsForJets.addVetoOnThisFinalState(dressedleptons); - addProjection(FastJets(fsForJets, FastJets::ANTIKT, 0.5), "Jets"); + declare(FastJets(fsForJets, FastJets::ANTIKT, 0.5), "Jets"); // Booking of histograms book(_normedElectronMuonHisto, "normedElectronMuonHisto", 7, 3.5, 10.5, "Normalized differential cross section in lepton+jets channel", "Jet multiplicity", "Normed units"); book(_absXSElectronMuonHisto , "absXSElectronMuonHisto", 7, 3.5, 10.5, "Differential cross section in lepton+jets channel", "Jet multiplicity", "pb"); } /// Per-event analysis void analyze(const Event& event) { // Select ttbar -> lepton+jets const SpecialDressedLeptons& dressedleptons = applyProjection(event, "DressedLeptons"); vector selleptons; for (const DressedLepton& dressedlepton : dressedleptons.dressedLeptons()) { // Select good leptons if (dressedlepton.pT() > 30*GeV && dressedlepton.abseta() < 2.4) selleptons += dressedlepton.mom(); // Veto loose leptons else if (dressedlepton.pT() > 15*GeV && dressedlepton.abseta() < 2.5) vetoEvent; } if (selleptons.size() != 1) vetoEvent; // Identify hardest tight lepton const FourMomentum lepton = selleptons[0]; // Jets const FastJets& jets = applyProjection(event, "Jets"); const Jets jets30 = jets.jetsByPt(30*GeV); int nJets = 0, nBJets = 0; for (const Jet& jet : jets30) { if (jet.abseta() > 2.5) continue; if (deltaR(jet.momentum(), lepton) < 0.5) continue; nJets += 1; if (jet.bTagged(Cuts::pT > 5*GeV)) nBJets += 1; } // Require >= 4 resolved jets, of which two must be b-tagged if (nJets < 4 || nBJets < 2) vetoEvent; // Fill histograms _normedElectronMuonHisto->fill(min(nJets, 10)); _absXSElectronMuonHisto ->fill(min(nJets, 10)); } void finalize() { const double ttbarXS = !std::isnan(crossSectionPerEvent()) ? 
crossSection() : 252.89*picobarn; if (std::isnan(crossSectionPerEvent())) MSG_INFO("No valid cross-section given, using NNLO (arXiv:1303.6254; sqrt(s)=8 TeV, m_t=172.5 GeV): " << ttbarXS/picobarn << " pb"); const double xsPerWeight = ttbarXS/picobarn / sumOfWeights(); scale(_absXSElectronMuonHisto, xsPerWeight); normalize(_normedElectronMuonHisto); } //@} private: /// Histograms Histo1DPtr _normedElectronMuonHisto, _absXSElectronMuonHisto; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_PAS_TOP_15_006); } diff --git a/analyses/pluginCMS/CMS_2017_I1467451.cc b/analyses/pluginCMS/CMS_2017_I1467451.cc --- a/analyses/pluginCMS/CMS_2017_I1467451.cc +++ b/analyses/pluginCMS/CMS_2017_I1467451.cc @@ -1,103 +1,103 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedLeptons.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// Higgs -> WW -> emu + MET in 8 TeV pp collisions class CMS_2017_I1467451 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1467451); /// Book histograms and initialise projections before the run void init() { const double lepConeSize = 0.1; const double lepMaxEta = 2.5; const Cut lepton_cut = (Cuts::abseta < lepMaxEta); // Initialise and register projections FinalState fs(-2.5,2.5,0.0*GeV); FinalState fsm(-5,5,0.0*GeV); - addProjection(fs, "FS"); - addProjection(fsm, "FSM"); + declare(fs, "FS"); + declare(fsm, "FSM"); ChargedLeptons charged_leptons(fs); IdentifiedFinalState photons(fs); photons.acceptIdPair(PID::PHOTON); PromptFinalState prompt_leptons(charged_leptons); prompt_leptons.acceptMuonDecays(true); prompt_leptons.acceptTauDecays(false); PromptFinalState prompt_photons(photons); prompt_photons.acceptMuonDecays(true); prompt_photons.acceptTauDecays(false); DressedLeptons dressed_leptons = DressedLeptons(prompt_photons, prompt_leptons, lepConeSize, lepton_cut, true); - addProjection(dressed_leptons, "DressedLeptons"); + declare(dressed_leptons, "DressedLeptons"); MissingMomentum Met(fsm); - addProjection(Met, "MET"); + declare(Met, "MET"); // Book histograms book(histoPtH , 1,1,1); book(histoXsec, 2,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; Particles leptons = applyProjection(event, "DressedLeptons").particlesByPt(10.0*GeV); if (leptons.size() < 2) vetoEvent; if (leptons[0].pT() < 20*GeV || leptons[1].pT() < 10*GeV) vetoEvent; if (leptons[0].charge() == leptons[1].charge()) vetoEvent; if (leptons[0].abspid() == leptons[1].abspid()) vetoEvent; FourMomentum LL = (leptons[0].momentum() + leptons[1].momentum()); if (LL.mass() < 12*GeV) vetoEvent; if (LL.pT() < 30*GeV) vetoEvent; FourMomentum EtMiss = applyProjection(event,"MET").missingMomentum(); FourMomentum P4H = LL + EtMiss; double dphi = deltaPhi(LL, EtMiss); double mT = sqrt(2*LL.pT()*EtMiss.pT()*(1-cos(dphi))); if (mT < 50*GeV) vetoEvent; histoPtH->fill(min(P4H.pT()/GeV, 199.), weight); histoXsec->fill(8000, weight); ///< @todo Should probably be a Counter } /// Normalise histograms etc., after the run void finalize() { scale(histoPtH, crossSection()/sumOfWeights()); scale(histoXsec, (histoXsec->xMax()-histoXsec->xMin())*crossSection()/sumOfWeights()); } private: Histo1DPtr histoPtH, histoXsec; }; // The hook for the plugin system 
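// Sketch for the @todo note above (an assumption, not the published implementation): the single 8 TeV cross-section point filled via histoXsec->fill(8000, weight) could instead be booked as a Rivet Counter, which would remove the bin-width factor in finalize(). Hypothetical member name _cXsec:
//   CounterPtr _cXsec;                              // member, replacing histoXsec
//   book(_cXsec, 2, 1, 1);                          // in init(), against the same reference entry (if it supports a Counter)
//   _cXsec->fill();                                 // in analyze()
//   scale(_cXsec, crossSection()/sumOfWeights());   // in finalize()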
DECLARE_RIVET_PLUGIN(CMS_2017_I1467451); } diff --git a/analyses/pluginCMS/CMS_2017_I1499471.cc b/analyses/pluginCMS/CMS_2017_I1499471.cc --- a/analyses/pluginCMS/CMS_2017_I1499471.cc +++ b/analyses/pluginCMS/CMS_2017_I1499471.cc @@ -1,295 +1,295 @@ #include "Rivet/Analysis.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" //#define DebugLog namespace Rivet { class CMS_2017_I1499471 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1499471); /// Book histograms and initialise projections before the run void init() { #ifdef DebugLog // set optionally the verbosity for the internal Rivet message system getLog().setLevel(0); #endif FinalState fs; ///< @todo No cuts? VisibleFinalState visfs(fs); ZFinder zeeFinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::ELECTRON, 71.0*GeV, 111.0*GeV, 0.1 ); - addProjection(zeeFinder, "ZeeFinder"); + declare(zeeFinder, "ZeeFinder"); ZFinder zmumuFinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::MUON, 71.0*GeV, 111.0*GeV, 0.1 ); - addProjection(zmumuFinder, "ZmumuFinder"); + declare(zmumuFinder, "ZmumuFinder"); VetoedFinalState jetConstits(visfs); jetConstits.addVetoOnThisFinalState(zeeFinder); jetConstits.addVetoOnThisFinalState(zmumuFinder); FastJets akt05Jets(jetConstits, FastJets::ANTIKT, 0.5); - addProjection(akt05Jets, "AntiKt05Jets"); + declare(akt05Jets, "AntiKt05Jets"); //Histograms booking book(_h_first_bjet_pt_b ,1,1,1); book(_h_first_bjet_abseta_b ,3,1,1); book(_h_Z_pt_b ,5,1,1); book(_h_HT_b ,7,1,1); book(_h_Dphi_Zb_b ,9,1,1); book(_h_first_jet_pt_ratio ,2,1,1); book(_h_first_jet_abseta_ratio ,4,1,1); book(_h_Z_pt_ratio ,6,1,1); book(_h_HT_ratio ,8,1,1); book(_h_Dphi_Zj_ratio ,10,1,1); book(_h_first_jet_pt, "first_jet_pt", refData(1,1,1) ); // (*_h_first_bjet_pt_b); book(_h_first_jet_abseta, "first_jet_abseta", refData(3,1,1) ); // (*_h_first_bjet_abseta_b); book(_h_Z_pt, "Z_pt", refData(5,1,1) ); // (*_h_Z_pt_b); book(_h_HT, "HT", refData(7,1,1) ); // (*_h_HT_b); book(_h_Dphi_Zj, "Dphi_Zj", refData(9,1,1) ); // (*_h_Dphi_Zb_b); book(_h_first_bjet_pt_bb ,11,1,1); book(_h_second_bjet_pt_bb ,12,1,1); book(_h_Z_pt_bb ,13,1,1); book(_h_bb_mass_bb ,14,1,1); book(_h_Zbb_mass_bb ,15,1,1); book(_h_Dphi_bb ,16,1,1); book(_h_DR_bb ,17,1,1); book(_h_DR_Zbmin_bb ,18,1,1); book(_h_A_DR_Zb_bb ,19,1,1); book(_h_bjet_multiplicity ,20,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zeeFS = applyProjection(event, "ZeeFinder"); const ZFinder& zmumuFS = applyProjection(event, "ZmumuFinder"); const Particles& zees = zeeFS.bosons(); const Particles& zmumus = zmumuFS.bosons(); // We did not find exactly one Z. No good. if (zees.size() + zmumus.size() != 1) { MSG_DEBUG("Did not find exactly one good Z candidate"); vetoEvent; } //event identification depending on mass window bool ee_event=false; bool mm_event=false; if (zees.size() == 1) { ee_event = true; } if (zmumus.size() == 1) { mm_event = true; } const Particles& theLeptons = zees.size() ? zeeFS.constituents() : zmumuFS.constituents(); // Cluster jets // NB. 
Veto has already been applied on leptons and photons used for dressing const FastJets& fj = applyProjection(event, "AntiKt05Jets"); const Jets& jets = fj.jetsByPt(Cuts::abseta < 2.4 && Cuts::pT > 30*GeV); // Perform lepton-jet overlap and HT calculation double Ht = 0; Jets goodjets; for (const Jet& j : jets) { // Decide if this jet is "good", i.e. isolated from the leptons /// @todo Nice use-case for any() and a C++11 lambda bool overlap = false; for (const Particle& l : theLeptons) { if (Rivet::deltaR(j, l) < 0.5) { overlap = true; break; } } // Fill HT and good-jets collection if (overlap) continue; goodjets.push_back(j); Ht += j.pT(); } // We don't care about events with no isolated jets if (goodjets.empty()) { MSG_DEBUG("No jets in event"); vetoEvent; } Jets jb_final; //identification of bjets for (const Jet& j : goodjets) { if ( j.bTagged() ) { jb_final.push_back(j); } } //Event weight const double w = 0.5; //histogram filling if ((ee_event || mm_event) && goodjets.size() > 0) { FourMomentum j1(goodjets[0].momentum()); _h_first_jet_pt->fill(j1.pt(),w); _h_first_jet_abseta->fill(fabs(j1.eta()),w); if ( ee_event ) _h_Z_pt->fill(zees[0].pt(),w); if ( mm_event ) _h_Z_pt->fill(zmumus[0].pt(),w); _h_HT->fill(Ht,w); if ( ee_event ) _h_Dphi_Zj->fill(deltaPhi(zees[0], j1),w); if ( mm_event ) _h_Dphi_Zj->fill(deltaPhi(zmumus[0], j1),w); if ( jb_final.size() > 0 ) { FourMomentum b1(jb_final[0].momentum()); _h_bjet_multiplicity->fill(1.,w); _h_first_bjet_pt_b->fill(b1.pt(),w); _h_first_bjet_abseta_b->fill(fabs(b1.eta()),w); if ( ee_event ) _h_Z_pt_b->fill(zees[0].pt(),w); if ( mm_event ) _h_Z_pt_b->fill(zmumus[0].pt(),w); _h_HT_b->fill(Ht,w); if ( ee_event ) _h_Dphi_Zb_b->fill(deltaPhi(zees[0], b1.phi()),w); if ( mm_event ) _h_Dphi_Zb_b->fill(deltaPhi(zmumus[0], b1.phi()),w); if ( jb_final.size() > 1 ) { FourMomentum b2(jb_final[1].momentum()); _h_bjet_multiplicity->fill(2.,w); _h_first_bjet_pt_bb->fill(b1.pt(),w); _h_second_bjet_pt_bb->fill(b2.pt(),w); if ( ee_event ) _h_Z_pt_bb->fill(zees[0].pt(),w); if ( mm_event ) _h_Z_pt_bb->fill(zmumus[0].pt(),w); FourMomentum bb = add(b1,b2); FourMomentum Zbb; if (ee_event) Zbb = add(zees[0],bb); if (mm_event) Zbb = add(zmumus[0],bb); _h_bb_mass_bb->fill(bb.mass(),w); _h_Zbb_mass_bb->fill(Zbb.mass(),w); _h_Dphi_bb->fill(deltaPhi(b1,b2),w); _h_DR_bb->fill(deltaR(b1,b2),w); double DR_Z_b1(0.), DR_Z_b2(0.); if ( ee_event ) { DR_Z_b1 = deltaR(zees[0],b1); DR_Z_b2 = deltaR(zees[0],b2); } if ( mm_event ) { DR_Z_b1 = deltaR(zmumus[0],b1); DR_Z_b2 = deltaR(zmumus[0],b2); } double DR_Zb_min = DR_Z_b1; double DR_Zb_max = DR_Z_b2; if ( DR_Zb_min > DR_Zb_max ) { DR_Zb_min = DR_Z_b2; DR_Zb_max = DR_Z_b1; } double A_Zbb = (DR_Zb_max - DR_Zb_min)/(DR_Zb_max + DR_Zb_min); _h_DR_Zbmin_bb->fill(DR_Zb_min,w); _h_A_DR_Zb_bb->fill(A_Zbb,w); } } } } /// Normalise histograms etc., after the run void finalize() { const double norm = (sumOfWeights() != 0) ? crossSection()/picobarn/sumOfWeights() : 1.0; MSG_INFO("Cross section = " << std::setfill(' ') << std::setw(14) << std::fixed << std::setprecision(3) << crossSection() << " pb"); MSG_INFO("# Events = " << std::setfill(' ') << std::setw(14) << std::fixed << std::setprecision(3) << numEvents() ); MSG_INFO("SumW = " << std::setfill(' ') << std::setw(14) << std::fixed << std::setprecision(3) << sumOfWeights()); MSG_INFO("Norm factor = " << std::setfill(' ') << std::setw(14) << std::fixed << std::setprecision(6) << norm); scale( _h_first_bjet_pt_b, 100. ); scale( _h_first_bjet_abseta_b, 100. ); scale( _h_Z_pt_b, 100. 
); scale( _h_HT_b, 100. ); scale( _h_Dphi_Zb_b, 100. ); divide( _h_first_bjet_pt_b , _h_first_jet_pt , _h_first_jet_pt_ratio ); divide( _h_first_bjet_abseta_b , _h_first_jet_abseta , _h_first_jet_abseta_ratio ); divide( _h_Z_pt_b , _h_Z_pt , _h_Z_pt_ratio ); divide( _h_HT_b , _h_HT , _h_HT_ratio ); divide( _h_Dphi_Zb_b , _h_Dphi_Zj , _h_Dphi_Zj_ratio ); scale( _h_first_bjet_pt_b, norm/100. ); scale( _h_first_bjet_abseta_b, norm/100. ); scale( _h_Z_pt_b, norm/100. ); scale( _h_HT_b, norm/100. ); scale( _h_Dphi_Zb_b, norm/100. ); scale( _h_first_bjet_pt_bb, norm); scale( _h_second_bjet_pt_bb, norm); scale( _h_Z_pt_bb, norm); scale( _h_bb_mass_bb, norm); scale( _h_Zbb_mass_bb, norm); scale( _h_Dphi_bb, norm); scale( _h_DR_bb, norm); scale( _h_DR_Zbmin_bb, norm); scale( _h_A_DR_Zb_bb, norm); scale( _h_bjet_multiplicity, norm ); } private: /// @name Histograms Histo1DPtr _h_first_jet_pt, _h_first_bjet_pt_b; Histo1DPtr _h_first_jet_abseta, _h_first_bjet_abseta_b; Histo1DPtr _h_Z_pt, _h_Z_pt_b; Histo1DPtr _h_HT, _h_HT_b; Histo1DPtr _h_Dphi_Zj, _h_Dphi_Zb_b; Scatter2DPtr _h_first_jet_pt_ratio; Scatter2DPtr _h_first_jet_abseta_ratio; Scatter2DPtr _h_Z_pt_ratio; Scatter2DPtr _h_HT_ratio; Scatter2DPtr _h_Dphi_Zj_ratio; Histo1DPtr _h_first_bjet_pt_bb, _h_second_bjet_pt_bb; Histo1DPtr _h_Z_pt_bb; Histo1DPtr _h_bb_mass_bb, _h_Zbb_mass_bb; Histo1DPtr _h_Dphi_bb, _h_DR_bb, _h_DR_Zbmin_bb, _h_A_DR_Zb_bb; Histo1DPtr _h_bjet_multiplicity; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1499471); } diff --git a/analyses/pluginCMS/CMS_2017_I1605749.cc b/analyses/pluginCMS/CMS_2017_I1605749.cc --- a/analyses/pluginCMS/CMS_2017_I1605749.cc +++ b/analyses/pluginCMS/CMS_2017_I1605749.cc @@ -1,145 +1,145 @@ // -*- C++ -*- // Rivet framework #include "Rivet/Analysis.hh" // Projections #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { using namespace Cuts; class CMS_2017_I1605749 : public Analysis { public: // Constructor CMS_2017_I1605749() : Analysis("CMS_2017_I1605749") { } // Book histograms and initialise projections before the run void init() { // Projections const FinalState fs(-5.0, 5.0, 0.0*GeV); - addProjection(FastJets(fs, FastJets::ANTIKT, 0.5), "Jets"); + declare(FastJets(fs, FastJets::ANTIKT, 0.5), "Jets"); // Jet Charge Histos for (int i = 1; i <= 18; i++) { book(_h_Charge[i - 1], i, 1, 1); } } // Perform the per-event analysis void analyze(const Event& event) { const Jets& jets = applyProjection(event, "Jets").jetsByPt(10.0*GeV); if (jets.size() < 2) vetoEvent; double leadingpt = jets[0].pt()/GeV; double subleadingpt = jets[1].pt()/GeV; if (jets.size() < 2 || jets[0].abseta() >= 1.5 || jets[1].abseta() >= 1.5 || leadingpt < 400.0 || subleadingpt < 100.0) { vetoEvent; } vector constituents1 = jets[0].constituents(); std::vector numerator(9, 0), denominator(9, 0); double t_jetcharge1, t_jetcharge1k6, t_jetcharge1k3; double t_jetchargeL1, t_jetchargeL1k6, t_jetchargeL1k3; double t_jetchargeT1, t_jetchargeT1k6, t_jetchargeT1k3; denominator[0] = leadingpt; denominator[1] = std::pow(leadingpt, 0.6); denominator[2] = std::pow(leadingpt, 0.3); if (constituents1.size() > 0) { for (unsigned j = 0; j < constituents1.size(); j++) { if (std::abs(constituents1[j].pdgId()) > 9 && std::abs(constituents1[j].pdgId())!= 21) { if (constituents1[j].pt() > 1*GeV) { double charge = constituents1[j].charge(); double mom = constituents1[j].pt(); double dotproduct = constituents1[j].p3().dot(jets[0].p3()) / jets[0].p(); double crossproduct = 
constituents1[j].p3().cross(jets[0].p3()).mod() / jets[0].p(); numerator[0] += (mom * charge); numerator[1] += ((std::pow(mom, 0.6)) * charge); numerator[2] += ((std::pow(mom, 0.3)) * charge); numerator[3] += (dotproduct * charge); numerator[4] += ((std::pow(dotproduct, 0.6)) * charge); numerator[5] += ((std::pow(dotproduct, 0.3)) * charge); denominator[3] += dotproduct; denominator[4] += (std::pow(dotproduct, 0.6)); denominator[5] += (std::pow(dotproduct, 0.3)); numerator[6] += (crossproduct * charge); numerator[7] += ((std::pow(crossproduct, 0.6)) * charge); numerator[8] += ((std::pow(crossproduct, 0.3)) * charge); denominator[6] += crossproduct; denominator[7] += (std::pow(crossproduct, 0.6)); denominator[8] += (std::pow(crossproduct, 0.3)); } } } } t_jetcharge1 = (denominator[0] > 0) ? numerator[0] / denominator[0] : 0; t_jetcharge1k6 = (denominator[1] > 0) ? numerator[1] / denominator[1] : 0; t_jetcharge1k3 = (denominator[2] > 0) ? numerator[2] / denominator[2] : 0; t_jetchargeL1 = (denominator[3] > 0) ? numerator[3] / denominator[3] : 0; t_jetchargeL1k6 = (denominator[4] > 0) ? numerator[4] / denominator[4] : 0; t_jetchargeL1k3 = (denominator[5] > 0) ? numerator[5] / denominator[5] : 0; t_jetchargeT1 = (denominator[6] > 0) ? numerator[6] / denominator[6] : 0; t_jetchargeT1k6 = (denominator[7] > 0) ? numerator[7] / denominator[7] : 0; t_jetchargeT1k3 = (denominator[8] > 0) ? numerator[8] / denominator[8] : 0; _h_Charge[0]->fill(t_jetcharge1); _h_Charge[1]->fill(t_jetcharge1k6); _h_Charge[2]->fill(t_jetcharge1k3); _h_Charge[3]->fill(t_jetchargeL1); _h_Charge[4]->fill(t_jetchargeL1k6); _h_Charge[5]->fill(t_jetchargeL1k3); _h_Charge[6]->fill(t_jetchargeT1); _h_Charge[7]->fill(t_jetchargeT1k6); _h_Charge[8]->fill(t_jetchargeT1k3); if (leadingpt > 400 && leadingpt < 700) { _h_Charge[9]->fill(t_jetcharge1k6); _h_Charge[12]->fill(t_jetchargeL1k6); _h_Charge[15]->fill(t_jetchargeT1k6); } else if (leadingpt > 700 && leadingpt < 1000) { _h_Charge[10]->fill(t_jetcharge1k6); _h_Charge[13]->fill(t_jetchargeL1k6); _h_Charge[16]->fill(t_jetchargeT1k6); } else if (leadingpt > 1000 && leadingpt < 1800) { _h_Charge[11]->fill(t_jetcharge1k6); _h_Charge[14]->fill(t_jetchargeL1k6); _h_Charge[17]->fill(t_jetchargeT1k6); } } // Normalise histograms etc., after the run void finalize() { for (int j = 0; j < 18; j++) { normalize(_h_Charge[j]); for (size_t i = 0; i < _h_Charge[j]-> numBins(); i++) { _h_Charge[j]->bin(i).scaleW(1.0 / _h_Charge[j]->bin(i).width()); } } } private: Histo1DPtr _h_Charge[18]; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1605749); } diff --git a/analyses/pluginCMS/CMS_2017_I1610623.cc b/analyses/pluginCMS/CMS_2017_I1610623.cc --- a/analyses/pluginCMS/CMS_2017_I1610623.cc +++ b/analyses/pluginCMS/CMS_2017_I1610623.cc @@ -1,261 +1,261 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/AnalysisLoader.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Tools/RivetYODA.hh" #include namespace Rivet { /// @brief Add a short analysis description here class CMS_2017_I1610623 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1610623); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections FinalState fs; WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && 
Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::YES, WFinder::MassWindow::MT); //WFinder wfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 0*GeV, PID::MUON, 0*GeV, 1000000*GeV, 0*GeV, 0.1, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); - addProjection(wfinder_mu, "WFinder_mu"); + declare(wfinder_mu, "WFinder_mu"); // Define veto FS VetoedFinalState vfs; vfs.addVetoOnThisFinalState(wfinder_mu); vfs.addVetoPairId(PID::MUON); vfs.vetoNeutrinos(); FastJets fastjets(vfs, FastJets::ANTIKT, 0.4); - addProjection(fastjets, "Jets"); + declare(fastjets, "Jets"); //------------- book(_hist_Mult_exc ,"d01-x01-y01"); book(_hist_inc_WJetMult ,"d02-x01-y01"); //------------- book(_hist_JetPt1j ,"d03-x01-y01"); book(_hist_JetPt2j ,"d04-x01-y01"); book(_hist_JetPt3j ,"d05-x01-y01"); book(_hist_JetPt4j ,"d06-x01-y01"); //------------- book(_hist_JetRap1j ,"d07-x01-y01"); book(_hist_JetRap2j ,"d08-x01-y01"); book(_hist_JetRap3j ,"d09-x01-y01"); book(_hist_JetRap4j ,"d10-x01-y01"); //------------- book(_hist_Ht_1j ,"d11-x01-y01"); book(_hist_Ht_2j ,"d12-x01-y01"); book(_hist_Ht_3j ,"d13-x01-y01"); book(_hist_Ht_4j ,"d14-x01-y01"); //------------- book(_hist_dphij1mu_1j , "d15-x01-y01"); book(_hist_dphij2mu_2j , "d16-x01-y01"); book(_hist_dphij3mu_3j , "d17-x01-y01"); book(_hist_dphij4mu_4j , "d18-x01-y01"); //------------- book(_hist_dRmuj_1j , "d19-x01-y01"); } // Define function used for filling the inclusive Njets histogram void Fill(Histo1DPtr& _histJetMult, std::vector<FourMomentum>& finaljet_list){ _histJetMult->fill(0); for (size_t i=0 ; i<finaljet_list.size() ; ++i) { if (i == 7) break; _histJetMult->fill(i+1); // inclusive multiplicity } } /// Perform the per-event analysis void analyze(const Event& event) { /// @todo Do the event by event analysis here const WFinder& wfinder_mu = applyProjection<WFinder>(event, "WFinder_mu"); if (wfinder_mu.bosons().size() != 1) { vetoEvent; } if (wfinder_mu.bosons().size() == 1) { const FourMomentum lepton0 = wfinder_mu.constituentLepton().momentum(); const FourMomentum neutrino = wfinder_mu.constituentNeutrino().momentum(); double WmT = wfinder_mu.mT(); if (WmT < 50.0*GeV) vetoEvent; double pt0 = lepton0.pT(); double eta0 = lepton0.eta(); if ( (fabs(eta0) > 2.4) || (pt0 < 25.0*GeV) ) vetoEvent; // Obtain the jets. vector<FourMomentum> finaljet_list; vector<FourMomentum> jet100_list; double HT = 0.0; // loop over jets in an event, pushback in finaljet_list collection for (const Jet& j : applyProjection<FastJets>(event, "Jets").jetsByPt(30.0*GeV)) { const double jrap = j.momentum().rap(); const double jpt = j.momentum().pT(); if ( (fabs(jrap) < 2.4) && (deltaR(lepton0, j.momentum()) > 0.4) ) { if(jpt > 30.0*GeV) { finaljet_list.push_back(j.momentum()); HT += j.momentum().pT(); } if(jpt > 100.0*GeV) { jet100_list.push_back(j.momentum()); } } } // end looping over jets //---------------------- FILL HISTOGRAMS ------------------ // Multiplicity exc plot. _hist_Mult_exc->fill(finaljet_list.size()); // Multiplicity inc plot. Fill(_hist_inc_WJetMult, finaljet_list); // dRmuj plot.
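// dRmuj is the minimum DeltaR between the muon and any jet with pT > 100 GeV; the histogram is filled only when the leading such jet has pT > 300 GeV.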
double mindR(99999); if(jet100_list.size()>=1) { for (unsigned ji = 0; ji < jet100_list.size(); ji++){ double dr_(9999); dr_ = fabs(deltaR(lepton0, jet100_list[ji])); if (dr_ < mindR){ mindR = dr_; } } if(jet100_list[0].pT() > 300.0*GeV){ _hist_dRmuj_1j->fill(mindR); } } if(finaljet_list.size()>=1) { _hist_JetPt1j->fill(finaljet_list[0].pT()); _hist_JetRap1j->fill(fabs(finaljet_list[0].rap())); _hist_Ht_1j->fill(HT); _hist_dphij1mu_1j->fill(deltaPhi(finaljet_list[0].phi(), lepton0.phi())); } if(finaljet_list.size()>=2) { _hist_JetPt2j->fill(finaljet_list[1].pT()); _hist_JetRap2j->fill(fabs(finaljet_list[1].rap())); _hist_Ht_2j->fill(HT); _hist_dphij2mu_2j->fill(deltaPhi(finaljet_list[1].phi(), lepton0.phi())); } if(finaljet_list.size()>=3) { _hist_JetPt3j->fill(finaljet_list[2].pT()); _hist_JetRap3j->fill(fabs(finaljet_list[2].rap())); _hist_Ht_3j->fill(HT); _hist_dphij3mu_3j->fill(deltaPhi(finaljet_list[2].phi(), lepton0.phi())); } if(finaljet_list.size()>=4) { _hist_JetPt4j->fill(finaljet_list[3].pT()); _hist_JetRap4j->fill(fabs(finaljet_list[3].rap())); _hist_Ht_4j->fill(HT); _hist_dphij4mu_4j->fill(deltaPhi(finaljet_list[3].phi(), lepton0.phi())); } } // close the Wboson loop } //void loop /// Normalise histograms etc., after the run void finalize() { const double crossec = !std::isnan(crossSectionPerEvent()) ? crossSection() : 61526.7*picobarn; if (std::isnan(crossSectionPerEvent())){ MSG_INFO("No valid cross-section given, using NNLO xsec calculated by FEWZ " << crossec/picobarn << " pb"); } scale(_hist_Mult_exc, crossec/picobarn/sumOfWeights()); scale(_hist_inc_WJetMult, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt1j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt2j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt3j, crossec/picobarn/sumOfWeights()); scale(_hist_JetPt4j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap1j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap2j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap3j, crossec/picobarn/sumOfWeights()); scale(_hist_JetRap4j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_1j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_2j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_3j, crossec/picobarn/sumOfWeights()); scale(_hist_Ht_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij1mu_1j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij2mu_2j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij3mu_3j, crossec/picobarn/sumOfWeights()); scale(_hist_dphij4mu_4j, crossec/picobarn/sumOfWeights()); scale(_hist_dRmuj_1j, crossec/picobarn/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _hist_Mult_exc; Histo1DPtr _hist_inc_WJetMult; Histo1DPtr _hist_JetPt1j; Histo1DPtr _hist_JetPt2j; Histo1DPtr _hist_JetPt3j; Histo1DPtr _hist_JetPt4j; Histo1DPtr _hist_JetRap1j; Histo1DPtr _hist_JetRap2j; Histo1DPtr _hist_JetRap3j; Histo1DPtr _hist_JetRap4j; Histo1DPtr _hist_Ht_1j; Histo1DPtr _hist_Ht_2j; Histo1DPtr _hist_Ht_3j; Histo1DPtr _hist_Ht_4j; Histo1DPtr _hist_dphij1mu_1j; Histo1DPtr _hist_dphij2mu_2j; Histo1DPtr _hist_dphij3mu_3j; Histo1DPtr _hist_dphij4mu_4j; Histo1DPtr _hist_dRmuj_1j; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1610623); } diff --git a/analyses/pluginCMS/CMS_2017_I1635889.cc b/analyses/pluginCMS/CMS_2017_I1635889.cc --- a/analyses/pluginCMS/CMS_2017_I1635889.cc +++ b/analyses/pluginCMS/CMS_2017_I1635889.cc @@ -1,119 +1,119 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include 
"Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" //#include "Rivet/ParticleName.hh" namespace Rivet { /// Underlying event activity in the Drell-Yan process at 13 TeV class CMS_2017_I1635889 : public Analysis { public: /// Constructor CMS_2017_I1635889() : Analysis("CMS_2017_I1635889") { } /// Initialization void init() { /// @note Using a bare muon Z (but with a clustering radius!?) Cut cut = Cuts::abseta < 2.4 && Cuts::pT > 10*GeV; ZFinder zfinder(FinalState(), cut, PID::MUON, 81*GeV, 101*GeV, 0.2, ZFinder::ClusterPhotons::NONE); - addProjection(zfinder, "ZFinder"); + declare(zfinder, "ZFinder"); ChargedFinalState cfs(zfinder.remainingFinalState()); - addProjection(cfs, "cfs"); + declare(cfs, "cfs"); book(_h_Nchg_towards_pTmumu , 1, 1, 1); book(_h_Nchg_transverse_pTmumu , 2, 1, 1); book(_h_Nchg_away_pTmumu , 3, 1, 1); book(_h_pTsum_towards_pTmumu , 4, 1, 1); book(_h_pTsum_transverse_pTmumu , 5, 1, 1); book(_h_pTsum_away_pTmumu , 6, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = applyProjection(event, "ZFinder"); if (zfinder.bosons().size() != 1) vetoEvent; if (zfinder.constituents()[0].pT()<20 && zfinder.constituents()[1].pT()<20)vetoEvent; //std::cout<<"pt[0] = "<(event, "cfs").particlesByPt(Cuts::pT > 0.5*GeV && Cuts::abseta <2.0); int nTowards = 0; int nTransverse = 0; int nAway = 0; double ptSumTowards = 0; double ptSumTransverse = 0; double ptSumAway = 0; for (const Particle& p : particles) { double dphi = fabs(deltaPhi(Zphi, p.phi())); double pT = p.pT(); if ( dphi < M_PI/3 ) { nTowards++; ptSumTowards += pT; } else if ( dphi < 2.*M_PI/3 ) { nTransverse++; ptSumTransverse += pT; } else { nAway++; ptSumAway += pT; } } // Loop over particles const double area = 8./3.*M_PI; _h_Nchg_towards_pTmumu-> fill(Zpt, 1./area * nTowards); _h_Nchg_transverse_pTmumu-> fill(Zpt, 1./area * nTransverse); _h_Nchg_away_pTmumu-> fill(Zpt, 1./area * nAway); _h_pTsum_towards_pTmumu-> fill(Zpt, 1./area * ptSumTowards); _h_pTsum_transverse_pTmumu-> fill(Zpt, 1./area * ptSumTransverse); _h_pTsum_away_pTmumu-> fill(Zpt, 1./area * ptSumAway); } /// Normalise histograms etc., after the run void finalize() { } private: /// @name Histogram objects //@{ Profile1DPtr _h_Nchg_towards_pTmumu; Profile1DPtr _h_Nchg_transverse_pTmumu; Profile1DPtr _h_Nchg_away_pTmumu; Profile1DPtr _h_pTsum_towards_pTmumu; Profile1DPtr _h_pTsum_transverse_pTmumu; Profile1DPtr _h_pTsum_away_pTmumu; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2017_I1635889); } diff --git a/analyses/pluginCMS/CMS_2018_I1663452.cc b/analyses/pluginCMS/CMS_2018_I1663452.cc --- a/analyses/pluginCMS/CMS_2018_I1663452.cc +++ b/analyses/pluginCMS/CMS_2018_I1663452.cc @@ -1,62 +1,62 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Tools/BinnedHistogram.hh" namespace Rivet { class CMS_2018_I1663452 : public Analysis { public: CMS_2018_I1663452() : Analysis("CMS_2018_I1663452") { } void init() { FinalState fs; FastJets antikt(fs, FastJets::ANTIKT, 0.4); - addProjection(antikt, "ANTIKT"); + declare(antikt, "ANTIKT"); {Histo1DPtr tmp; _h_chi_dijet.add(6000., 13000., book(tmp,1, 1, 1));} {Histo1DPtr tmp; _h_chi_dijet.add(5400., 6000., book(tmp,2, 1, 1));} {Histo1DPtr tmp; _h_chi_dijet.add(4800., 5400., book(tmp,3, 1, 1));} {Histo1DPtr tmp; _h_chi_dijet.add(4200., 4800., book(tmp,4, 1, 1));} {Histo1DPtr tmp; 
_h_chi_dijet.add(3600., 4200., book(tmp,5, 1, 1));} {Histo1DPtr tmp; _h_chi_dijet.add(3000., 3600., book(tmp,6, 1, 1));} {Histo1DPtr tmp; _h_chi_dijet.add(2400., 3000., book(tmp,7, 1, 1));} } void analyze(const Event& event) { const Jets& jets = applyProjection(event, "ANTIKT").jetsByPt(); if (jets.size() < 2) vetoEvent; FourMomentum j0(jets[0].momentum()); FourMomentum j1(jets[1].momentum()); double y0 = j0.rapidity(); double y1 = j1.rapidity(); if (fabs(y0+y1)/2. > 1.11) vetoEvent; double mjj = FourMomentum(j0+j1).mass(); double chi = exp(fabs(y0-y1)); if(chi<16.) _h_chi_dijet.fill(mjj, chi); } void finalize() { for (Histo1DPtr hist : _h_chi_dijet.histos()) { normalize(hist); } } private: BinnedHistogram _h_chi_dijet; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2018_I1663452); } diff --git a/analyses/pluginLEP/ALEPH_2016_I1492968.cc b/analyses/pluginLEP/ALEPH_2016_I1492968.cc --- a/analyses/pluginLEP/ALEPH_2016_I1492968.cc +++ b/analyses/pluginLEP/ALEPH_2016_I1492968.cc @@ -1,136 +1,136 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { // TODO this calculation needs checked! double impact(const FourMomentum& a, const FourMomentum& b) { const Vector3 a3 = a.vector3(); const Vector3 b3 = b.vector3(); double impact = 0; if (b3.polarRadius() !=0) { impact = (a3).cross((a3-b3)).polarRadius() / (b3).polarRadius(); } return impact; } /// @brief Add a short analysis description here class ALEPH_2016_I1492968 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALEPH_2016_I1492968); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections const FinalState fs; - addProjection(fs, "FS"); + declare(fs, "FS"); FastJets jets(fs, FastJets::GENKTEE, 0.5, JetAlg::Muons::NONE, JetAlg::Invisibles::ALL); //FastJets jets(fs, FastJets::ANTIKT, 0.5, JetAlg::Muons::NONE, JetAlg::Invisibles::ALL); - addProjection(jets, "Jets"); + declare(jets, "Jets"); IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); - addProjection(mu_id, "MUONS"); + declare(mu_id, "MUONS"); - addProjection(MissingMomentum(fs), "MissingMomenta"); + declare(MissingMomentum(fs), "MissingMomenta"); // Book histograms //_h_costheta = bookHisto1D(2, 1, 1); book(_h_m_OS, 3, 1, 1); book(_h_m_SS, 5, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // B-jets const Jets jets = apply(event, "Jets").jetsByPt(Cuts::pT > 5*GeV); // tODO jet eta? 
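// Keep only the b-tagged jets: at least two b-jets (and, below, at least two muon candidates) are required, otherwise the event is vetoed.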
const Jets bjets = filter_select(jets, [](const Jet& j) { return j.bTagged(); }); if (bjets.size()<2) vetoEvent; // Muons const Particles all_muons = applyProjection<IdentifiedFinalState>(event, "MUONS").particles(Cuts::pT > 2.5*GeV, cmpMomByE); const Particles b_muons = filter_select(all_muons, [](const Particle& m) {return cos(m.theta()) < 0.7; }); if (b_muons.size()<2) vetoEvent; // Missing energy cut const MissingMomentum& met = applyProjection<MissingMomentum>(event, "MissingMomenta"); double Pmiss = met.missingMomentum().p(); if (Pmiss/GeV>18) vetoEvent; // Impact parameter considerations double b_muon_0_impactdistance = min(impact(b_muons[0].origin(), bjets[0].momentum()),impact(b_muons[0].origin(), bjets[1].momentum())); double b_muon_1_impactdistance = min(impact(b_muons[1].origin(), bjets[0].momentum()),impact(b_muons[1].origin(), bjets[1].momentum())); // Impact parameter cut if ((b_muon_0_impactdistance > 0.1) || (b_muon_1_impactdistance > 0.1)) vetoEvent; FourMomentum dimuon = b_muons[0].momentum() + b_muons[1].momentum(); // Same sign if (b_muons[0].charge()*b_muons[1].charge()>0) { _h_m_SS->fill( dimuon.mass()/GeV); } // Opposite sign else { _h_m_OS->fill( dimuon.mass()/GeV); // //FourMomentum muonminus; //if (b_muons[0].charge() < 0) muonminus = b_muons[0].momentum(); //else muonminus = b_muons[1].momentum(); //const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(-dimuon.betaVec()); //FourMomentum boostedmuon = cms_boost.transform(muonminus); //double cosmuonboosted = boostedmuon.vector3().dot(cms_boost.betaVec()) /// (boostedmuon.vector3().mod()*cms_boost.betaVec().mod()); //_h_costheta->fill( cosmuonboosted); } } /// Normalise histograms etc., after the run void finalize() { //normalize(_h_costheta); // Normalize to data according to Arno. normalize(_h_m_OS, 1387); normalize(_h_m_SS, 1047); } //@} /// @name Histograms //@{ //Histo1DPtr _h_costheta; Histo1DPtr _h_m_OS; Histo1DPtr _h_m_SS; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALEPH_2016_I1492968); } diff --git a/analyses/pluginLEP/OPAL_2004_I631361_qq.cc b/analyses/pluginLEP/OPAL_2004_I631361_qq.cc --- a/analyses/pluginLEP/OPAL_2004_I631361_qq.cc +++ b/analyses/pluginLEP/OPAL_2004_I631361_qq.cc @@ -1,241 +1,241 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HadronicFinalState.hh" #include "Rivet/Jet.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "fastjet/JetDefinition.hh" namespace fastjet { class P_scheme : public JetDefinition::Recombiner { public: std::string description() const {return "";} void recombine(const PseudoJet & pa, const PseudoJet & pb, PseudoJet & pab) const { PseudoJet tmp = pa + pb; double E = sqrt(tmp.px()*tmp.px() + tmp.py()*tmp.py() + tmp.pz()*tmp.pz()); pab.reset_momentum(tmp.px(), tmp.py(), tmp.pz(), E); } void preprocess(PseudoJet & p) const { double E = sqrt(p.px()*p.px() + p.py()*p.py() + p.pz()*p.pz()); p.reset_momentum(p.px(), p.py(), p.pz(), E); } ~P_scheme() { } }; } namespace Rivet { class OPAL_2004_I631361_qq : public Analysis { public: /// Constructor OPAL_2004_I631361_qq() : Analysis("OPAL_2004_I631361_qq"), _sumWEbin(7,0.)
{ } /// @name Analysis methods //@{ int getEbin(double E_glue) { int ih = -1; if (inRange(E_glue/GeV, 5.0, 5.5)) { ih = 0; } else if (inRange(E_glue/GeV, 5.5, 6.5)) { ih = 1; } else if (inRange(E_glue/GeV, 6.5, 7.5)) { ih = 2; } else if (inRange(E_glue/GeV, 7.5, 9.5)) { ih = 3; } else if (inRange(E_glue/GeV, 9.5, 13.0)) { ih = 4; } else if (inRange(E_glue/GeV, 13.0, 16.0)) { ih = 5; } else if (inRange(E_glue/GeV, 16.0, 20.0)) { ih = 6; } assert(ih >= 0); return ih; } /// Book histograms and initialise projections before the run void init() { const FinalState fs; - addProjection(fs, "FS"); - addProjection(HadronicFinalState(fs), "HFS"); + declare(fs, "FS"); + declare(HadronicFinalState(fs), "HFS"); const ChargedFinalState cfs; - addProjection(cfs, "CFS"); - addProjection(HadronicFinalState(cfs), "HCFS"); + declare(cfs, "CFS"); + declare(HadronicFinalState(cfs), "HCFS"); {Histo1DPtr tmp; _h_chMult.add( 5.0, 5.5, book(tmp, 1,1,1));} {Histo1DPtr tmp; _h_chMult.add( 5.5, 6.5, book(tmp, 1,1,2));} {Histo1DPtr tmp; _h_chMult.add( 6.5, 7.5, book(tmp, 1,1,3));} {Histo1DPtr tmp; _h_chMult.add( 7.5, 9.5, book(tmp, 2,1,1));} {Histo1DPtr tmp; _h_chMult.add( 9.5, 13.0, book(tmp, 2,1,2));} {Histo1DPtr tmp; _h_chMult.add(13.0, 16.0, book(tmp, 3,1,1));} {Histo1DPtr tmp; _h_chMult.add(16.0, 20.0, book(tmp, 3,1,2));} {Histo1DPtr tmp; _h_chFragFunc.add(13.0, 16.0, book(tmp, 5,1,1));} {Histo1DPtr tmp; _h_chFragFunc.add(16.0, 20.0, book(tmp, 5,1,2));} } /// Perform the per-event analysis void analyze(const Event& event) { // cut on the number of charged particles const Particles& chParticles = applyProjection<ChargedFinalState>(event, "CFS").particles(); if(chParticles.size() < 5) vetoEvent; // cluster the jets const Particles& particles = applyProjection<FinalState>(event, "FS").particles(); fastjet::JetDefinition ee_kt_def(fastjet::ee_kt_algorithm, &p_scheme); PseudoJets pParticles; for(Particle p : particles) { PseudoJet temp = p.pseudojet(); if(p.fromBottom()) { temp.set_user_index(5); } pParticles.push_back(temp); } fastjet::ClusterSequence cluster(pParticles, ee_kt_def); // rescale energies to just keep the directions of the jets // and keep track of b tags PseudoJets pJets = sorted_by_E(cluster.exclusive_jets_up_to(3)); if(pJets.size() < 3) vetoEvent; array<Vector3,3> dirs; for(int i=0; i<3; i++) { dirs[i] = Vector3(pJets[i].px(),pJets[i].py(),pJets[i].pz()).unit(); } array<bool,3> bTagged; Jets jets; for(int i=0; i<3; i++) { double Ejet = sqrtS()*sin(angle(dirs[(i+1)%3],dirs[(i+2)%3])) / (sin(angle(dirs[i],dirs[(i+1)%3])) + sin(angle(dirs[i],dirs[(i+2)%3])) + sin(angle(dirs[(i+1)%3],dirs[(i+2)%3]))); jets.push_back(FourMomentum(Ejet,Ejet*dirs[i].x(),Ejet*dirs[i].y(),Ejet*dirs[i].z())); bTagged[i] = false; for(PseudoJet particle : pJets[i].constituents()) { if(particle.user_index() > 1 and !bTagged[i]) { bTagged[i] = true; } } } int QUARK1 = 0, QUARK2 = 1, GLUON = 2; if(jets[QUARK2].E() > jets[QUARK1].E()) swap(QUARK1, QUARK2); if(jets[GLUON].E() > jets[QUARK1].E()) swap(QUARK1, GLUON); if(!bTagged[QUARK2]) { if(!bTagged[GLUON]) vetoEvent; else swap(QUARK2, GLUON); } if(bTagged[GLUON]) vetoEvent; // exclude collinear or soft jets double k1 = jets[QUARK1].E()*min(angle(jets[QUARK1].momentum(),jets[QUARK2].momentum()), angle(jets[QUARK1].momentum(),jets[GLUON].momentum())); double k2 = jets[QUARK2].E()*min(angle(jets[QUARK2].momentum(),jets[QUARK1].momentum()), angle(jets[QUARK2].momentum(),jets[GLUON].momentum())); if(k1<8.0*GeV || k2<8.0*GeV) vetoEvent; double sqg = (jets[QUARK1].momentum()+jets[GLUON].momentum()).mass2(); double sgq =
(jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); double s = (jets[QUARK1].momentum()+jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); double Eg = 0.5*sqrt(sqg*sgq/s); if(Eg < 5.0 || Eg > 20.0) { vetoEvent; } else if(Eg > 9.5) { // requirements for experimental reconstructability rise as the energy rises if(!bTagged[QUARK1]) { vetoEvent; } } // all cuts applied, increment sum of weights const double weight = 1.0; _sumWEbin[getEbin(Eg)] += weight; // transform to frame with event in y-z and glue jet in z direction Matrix3 glueTOz(jets[GLUON].momentum().vector3(), Vector3(0,0,1)); Vector3 transQuark = glueTOz*jets[QUARK2].momentum().vector3(); Matrix3 quarksTOyz(Vector3(transQuark.x(), transQuark.y(), 0), Vector3(0,1,0)); // work out transformation to symmetric frame array<double,3> x_cm; array<double,3> x_cm_y; array<double,3> x_cm_z; array<double,3> x_pr; for(int i=0; i<3; i++) { x_cm[i] = 2*jets[i].E()/sqrt(s); Vector3 p_transf = quarksTOyz*glueTOz*jets[i].p3(); x_cm_y[i] = 2*p_transf.y()/sqrt(s); x_cm_z[i] = 2*p_transf.z()/sqrt(s); } x_pr[GLUON] = sqrt(4*(1-x_cm[QUARK1])*(1-x_cm[QUARK2])/(3+x_cm[GLUON])); x_pr[QUARK1] = x_pr[GLUON]/(1-x_cm[QUARK1]); x_pr[QUARK2] = x_pr[GLUON]/(1-x_cm[QUARK2]); double gamma = (x_pr[QUARK1] + x_pr[GLUON] + x_pr[QUARK2])/2; double beta_z = x_pr[GLUON]/(gamma*x_cm[GLUON]) - 1; double beta_y = (x_pr[QUARK2]/gamma - x_cm[QUARK2] - beta_z*x_cm_z[QUARK2])/x_cm_y[QUARK2]; LorentzTransform toSymmetric = LorentzTransform::mkObjTransformFromBeta(Vector3(0.,beta_y,beta_z)). postMult(quarksTOyz*glueTOz); FourMomentum transGlue = toSymmetric.transform(jets[GLUON].momentum()); double cutAngle = angle(toSymmetric.transform(jets[QUARK2].momentum()), transGlue)/2; int nCh = 0; for(const Particle& chP : chParticles ) { FourMomentum pSymmFrame = toSymmetric.transform(FourMomentum(chP.p3().mod(), chP.px(), chP.py(), chP.pz())); if(angle(pSymmFrame, transGlue) < cutAngle) { _h_chFragFunc.fill(Eg, pSymmFrame.E()*sin(cutAngle)/Eg, weight); nCh++; } } _h_chMult.fill(Eg, nCh, weight); } /// Normalise histograms etc., after the run void finalize() { for (Histo1DPtr hist : _h_chMult.histos()) { normalize(hist); } for (int i=0; i<2; i++) { if(!isZero(_sumWEbin[i+5])) { scale(_h_chFragFunc.histos()[i], 1./_sumWEbin[i+5]); } } } //@} private: // Data members like post-cuts event weight counters go here vector<double> _sumWEbin; // p scheme jet definition fastjet::P_scheme p_scheme; /// @name Histograms //@{ BinnedHistogram _h_chMult; BinnedHistogram _h_chFragFunc; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_I631361_qq); } diff --git a/analyses/pluginLHCf/LHCF_2015_I1351909.cc b/analyses/pluginLHCf/LHCF_2015_I1351909.cc --- a/analyses/pluginLHCf/LHCF_2015_I1351909.cc +++ b/analyses/pluginLHCf/LHCF_2015_I1351909.cc @@ -1,303 +1,303 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class LHCF_2015_I1351909 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(LHCF_2015_I1351909); static constexpr bool lhcf_like = true; static constexpr int ndecay = 1; static constexpr int nbeam = 2; static constexpr double D1_begin = 82000.; //mm 60000.; //mm static constexpr double D1_end = 82000; //mm 90000.; //mm static constexpr double IPtoLHCf = 141050.; //mm /// @name Analysis methods bool isParticleFromCollision(Particle p, vector<Particle> parents) { bool beam[nbeam]={false}; if(parents.size()==nbeam) { for ( int ipar=0; ipar < nbeam; ++ipar ) beam[ipar] =
parents[ipar].genParticle()->is_beam(); if(beam[0] && beam[1]) return true; } return false; } bool isParticleFromDecay(Particle p, vector parents) { if(parents.size()==ndecay) return true; else return false; } bool isDeviated(Particle p, Particle parent) { //Select/Remove particles decayed between IP and LHCf GenVertex* pv = p.genParticle()->production_vertex(); assert(pv != NULL); const double decay_vertex = pv->position().z()/mm; const double parent_charge = PID::charge(parent.pid()); const double descendant_charge = PID::charge(p.pid()); if(parent_charge == 0) { //Particles produced by neutral parent decay if(descendant_charge == 0) { return false; } else { if(decay_vertex >= D1_end) return false; else return true; //Remove charged descendants produced from decay before end of D1 } } else { //Particles produced by charged parent decay if(decay_vertex <= D1_begin) { if(descendant_charge == 0) return false; else return true; //Remove charged descendants produced from decay before end of D1 } else { return true; //Remove particles produced by charged parent decay after begin of D1 } } return false; } bool isSameParticle(Particle p1, Particle p2) { if(p1.pid() == p2.pid() && mom(p1).t() == mom(p2).t() && mom(p1).x() == mom(p2).x() && mom(p1).y() == mom(p2).y() && mom(p1).z() == mom(p2).z()) return true; else return false; } bool isAlreadyProcessed(Particle p, vector list) { for(unsigned int ipar=0; iparproduction_vertex(); const double x0 = pv->position().x()/mm; const double y0 = pv->position().y()/mm; const double z0 = pv->position().z()/mm; const double px = p.px()/MeV; const double py = p.py()/MeV; const double pz = abs(p.pz()/MeV); const double dist_to_lhcf = IPtoLHCf - z0; const double x1 = x0 + (dist_to_lhcf * px/pz); const double y1 = y0 + (dist_to_lhcf * py/pz); const double r = sqrt(pow(x1, 2.)+pow(y1, 2.)); const double theta = atan(abs(r / IPtoLHCf)); const double pseudorapidity = - log (tan (theta/2.) ); return pseudorapidity; } /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // declare(FinalState("FS"); - addProjection(FinalState(), "FS"); + declare(FinalState(), "FS"); // Book histograms book(_h_n_en_eta1, 1, 1, 1); book(_h_n_en_eta2, 1, 1, 2); book(_h_n_en_eta3, 1, 1, 3); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState &fs = applyProjection (event, "FS"); Particles fs_particles = fs.particles(); vector processed_parents; processed_parents.clear(); for (Particle& p: fs_particles ) { if(p.pz()/GeV<0.) 
continue; double eta = 0.; double en = 0.; if(lhcf_like) { //====================================================================== //========== LHCf-like analysis ======================================== //====================================================================== vector parents = p.parents(); if(isParticleFromCollision(p, parents)) { //Particles directly produced in collisions if(!PID::isHadron(p.pid())) continue; //Remove non-hadron particles if(PID::charge(p.pid()) != 0) continue; //Remove charged particles eta = p.eta(); en = p.E()/GeV; } else if(isParticleFromDecay(p, parents)) { //Particles produced from decay GenVertex* pv = p.genParticle()->production_vertex(); assert(pv != NULL); const double decay_vertex = pv->position().z()/mm; Particle parent = parents[0]; if(decay_vertex < IPtoLHCf) { //If decay happens before LHCf we consider descendants if(!PID::isHadron(p.pid())) continue; //Remove non-hadron descendants if(isDeviated(p, parent)) continue; //Remove descendants deviated by D1 eta = RecomputeEta(p); en = p.E()/GeV; } else {//If decay happens after LHCf we consider parents vector ancestors; ancestors.clear(); int ngeneration=0; bool isValid=true; bool isEnded=false; while(!isEnded) //Loop over all generations in the decay { vector temp_part; temp_part.clear(); if(ngeneration==0) { parent = parents[0]; temp_part = parent.parents(); } else { parent = ancestors[0]; temp_part = parent.parents(); } ancestors.clear(); ancestors = temp_part; Particle ancestor = ancestors[0]; if(isParticleFromCollision(parent, ancestors)) { //if we found first particles produced in collisions we consider them isEnded=true; if(!PID::isHadron(parent.pid())) isValid=false; //Remove non-hadron ancestors/parents if(PID::charge(parent.pid()) != 0) isValid=false; //Remove charged ancestors/parents if(isAlreadyProcessed(parent, processed_parents)) isValid=false; //Remove already processed ancestors/parents when looping other descendants else processed_parents.push_back(parent); //Fill ancestors/parents in the list eta = parent.eta(); en = parent.E()/GeV; } else if (isParticleFromDecay(parent, ancestors)) { //if we found first particles produced entering LHCf we consider them GenVertex* pv_prev = parent.genParticle()->production_vertex(); assert(pv_prev != NULL); const double previous_decay_vertex = pv_prev->position().z()/mm; if(previous_decay_vertex < IPtoLHCf) { isEnded=true; if(!PID::isHadron(parent.pid())) isValid=false; //Remove non-hadron ancestors/parents if(isDeviated(parent, ancestor)) isValid=false; //Remove ancestors/parents deviated by D1 if(isAlreadyProcessed(parent, processed_parents)) isValid=false; //Remove already processed ancestors/parents when looping other descendants else processed_parents.push_back(parent); //Fill ancestors/parents in the list eta = RecomputeEta(parent); en = parent.E()/GeV; } } else { //This condition should never happen cout << "Looping over particles generation ended without match : Exit..." << endl; exit(EXIT_FAILURE); } ++ngeneration; } if(!isValid) continue; } } else { //This condition should never happen cout << "Particle seems not to be produced in collision or decay : Exit..." 
<< endl; exit(EXIT_FAILURE); } } else { //====================================================================== //========== Only neutrons at IP ======================================= //====================================================================== vector parents = p.parents(); //if(isParticleFromCollision(p, parents)) { //Particles directly produced in collisions if(p.pid() != 2112 ) continue; eta = p.eta(); en = p.E()/GeV; //} } // Fill histograms if( eta > 10.76 ){ _h_n_en_eta1->fill( en ); }else if(eta > 8.99 && eta < 9.22){ _h_n_en_eta2->fill( en ); }else if(eta > 8.81 && eta < 8.99){ _h_n_en_eta3->fill( en ); } } } /// Normalise histograms etc., after the run void finalize() { scale(_h_n_en_eta1, crossSection()/millibarn/sumOfWeights()); // norm to cross section scale(_h_n_en_eta2, crossSection()/millibarn/sumOfWeights()); // norm to cross section scale(_h_n_en_eta3, crossSection()/millibarn/sumOfWeights()); // norm to cross section } //@} private: /// @name Histograms //@{ Histo1DPtr _h_n_en_eta1; Histo1DPtr _h_n_en_eta2; Histo1DPtr _h_n_en_eta3; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCF_2015_I1351909); } diff --git a/analyses/pluginLHCf/LHCF_2016_I1385877.cc b/analyses/pluginLHCf/LHCF_2016_I1385877.cc --- a/analyses/pluginLHCf/LHCF_2016_I1385877.cc +++ b/analyses/pluginLHCf/LHCF_2016_I1385877.cc @@ -1,230 +1,230 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Projections/UnstableFinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class LHCF_2016_I1385877 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(LHCF_2016_I1385877); //In case of some models there can be very small value pT but greater than 0. //In order to avoid unphysical behavior in the first bin a cutoff is needed //If you are sure the model does not have this problem you can set pt_cutoff to 0. 
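// A worked example of the cutoff correction applied in finalize() below (assuming the affected first pT bin of the reference data spans 0 to 0.2 GeV): entries below pt_cutoff = 0.01 GeV are discarded, so that bin effectively collects over only 0.19 GeV; finalize() therefore rescales it by pt_bin_width/(pt_bin_width - pt_cutoff) = 0.2/0.19, i.e. about 1.053, restoring the per-GeV normalisation.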
const double pt_cutoff = 0.01; /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - addProjection(UnstableFinalState(), "UFS"); - addProjection(Beam(), "Beam"); + declare(UnstableFinalState(), "UFS"); + declare(Beam(), "Beam"); // calculate beam rapidity const Particle bm1 = beams().first; const Particle bm2 = beams().second; _beam_rap_1 = bm1.rap(); _beam_rap_2 = bm2.rap(); MSG_INFO("Beam 1 : momentum " << bm1.pz() << " PID " << bm1.pid() << " rapidity " << bm1.rap() ); MSG_INFO("Beam 2 : momentum " << bm2.pz() << " PID " << bm2.pid() << " rapidity " << bm2.rap() ); const double _sqrts = sqrtS(); MSG_INFO("CM energy: " << _sqrts ); _beam_rap = _beam_rap_1; if(bm1.pid()==2212 && bm2.pid()==2212) { //p-p _pp_Pb = true; if( fuzzyEquals( _sqrts/GeV, 7000., 1E-3) ) { book(_p_pi0_rap_apT, 1, 1, 2); {Histo1DPtr tmp; _h_pi0_rap_pT.add( 8.8, 9.0, book(tmp, 2, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.0, 9.2, book(tmp, 3, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.2, 9.4, book(tmp, 4, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.4, 9.6, book(tmp, 5, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.6, 9.8, book(tmp, 6, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.8, 10.0, book(tmp, 7, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 10.0, 10.2, book(tmp, 8, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 10.2, 10.4, book(tmp, 9, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 10.4, 10.6, book(tmp, 10, 1, 2));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 10.6, 10.8, book(tmp, 11, 1, 2));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.0, 0.2, book(tmp, 12, 1, 2));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.2, 0.4, book(tmp, 13, 1, 2));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.4, 0.6, book(tmp, 14, 1, 2));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.6, 0.8, book(tmp, 15, 1, 2));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.8, 1.0, book(tmp, 16, 1, 2));} book(_h_pi0_rap, 21, 1, 2); book(_p_pi0_raploss_apT, 22, 1, 2); book(_h_pi0_raploss, 23, 1, 2); } else if(fuzzyEquals( _sqrts/GeV, 2760., 1E-3) ){ book(_p_pi0_rap_apT, 1, 1, 1); {Histo1DPtr tmp; _h_pi0_rap_pT.add( 8.8, 9.0, book(tmp, 2, 1, 1));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.0, 9.2, book(tmp, 3, 1, 1));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.2, 9.4, book(tmp, 4, 1, 1));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.4, 9.6, book(tmp, 5, 1, 1));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.6, 9.8, book(tmp, 6, 1, 1));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.0, 0.2, book(tmp, 12, 1, 1));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.2, 0.4, book(tmp, 13, 1, 1));} book(_h_pi0_rap, 21, 1, 1); book(_p_pi0_raploss_apT, 22, 1, 1); book(_h_pi0_raploss, 23, 1, 1); }else{ MSG_INFO("p-p collisions : energy out of range!"); } } else if (bm1.pid()==2212 && bm2.pid()==1000822080){ //p-Pb _pp_Pb = false; if( fuzzyEquals( _sqrts/sqrt(208.)/GeV, 5020., 1E-3) ) { book(_p_pi0_rap_apT, 1, 1, 3); {Histo1DPtr tmp; _h_pi0_rap_pT.add( 8.8, 9.0, book(tmp, 2, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.0, 9.2, book(tmp, 3, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.2, 9.4, book(tmp, 4, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.4, 9.6, book(tmp, 5, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.6, 9.8, book(tmp, 6, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 9.8, 10.0, book(tmp, 7, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 10.0, 10.2, book(tmp, 8, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 10.2, 10.4, book(tmp, 9, 1, 3));} {Histo1DPtr tmp; _h_pi0_rap_pT.add( 10.4, 10.6, book(tmp, 10, 1, 3));} {Histo1DPtr tmp; 
_h_pi0_rap_pT.add( 10.6, 10.8, book(tmp, 11, 1, 3));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.0, 0.2, book(tmp, 12, 1, 3));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.2, 0.4, book(tmp, 13, 1, 3));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.4, 0.6, book(tmp, 14, 1, 3));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.6, 0.8, book(tmp, 15, 1, 3));} {Histo1DPtr tmp; _h_pi0_pT_pZ.add( 0.8, 1.0, book(tmp, 16, 1, 3));} //_h_pi0_rap = book(tmp, 21, 1, 3); book(_p_pi0_raploss_apT, 22, 1, 3); //_h_pi0_raploss = book(tmp, 23, 1, 3); }else{ MSG_INFO("p-Pb collisions : energy out of range!"); } } else { MSG_INFO("Beam PDGID out of range!"); } _nevt = 0.; } /// Perform the per-event analysis void analyze(const Event& event) { _nevt = _nevt + 1.; const UnstableFinalState &ufs = applyProjection (event, "UFS"); Particles ufs_particles = ufs.particles(); for (Particle& p: ufs_particles ) { // select neutral pion if(p.abspid() != 111) continue; if(p.pz()/GeV<0.) continue; if(p.pT()/GeVfill( rap , pT_MeV , 1.0 ); _h_pi0_rap_pT.fill( rap, pT , 1.0 / pT ); _h_pi0_pT_pZ.fill( pT, pZ , en / pT); _h_pi0_rap->fill( rap, 1.0 ); _p_pi0_raploss_apT->fill( raploss , pT_MeV , 1.0 ); _h_pi0_raploss->fill( raploss, 1.0 ); } else {//pPb collisions const double pZ = p.pz()/GeV; const double pT = p.pT()/GeV; const double pT_MeV = p.pT()/MeV; const double en = p.E()/GeV; const double rap = p.rap(); const double raploss = _beam_rap_1 - p.rap(); //mitsuka-like _p_pi0_rap_apT->fill( rap , pT_MeV , 1.0 ); _h_pi0_rap_pT.fill( rap, pT , 1.0 / pT ); _h_pi0_pT_pZ.fill( pT, pZ , en / pT); //_h_pi0_rap->fill( rap, 1.0 ); _p_pi0_raploss_apT->fill( raploss , pT_MeV , 1.0 ); //_h_pi0_raploss->fill( raploss, 1.0 ); } } } /// Normalise histograms etc., after the run void finalize() { const double inv_scale_factor = 1. / _nevt / (2.*PI); const double pt_bin_width = 0.2; for (Histo1DPtr h: _h_pi0_pT_pZ.histos()){ if(h->path() == "/LHCF_2016_I1385877/d12-x01-y01" || h->path() == "/LHCF_2016_I1385877/d12-x01-y02" || h->path() == "/LHCF_2016_I1385877/d12-x01-y03") h->scaleW( inv_scale_factor / (pt_bin_width-pt_cutoff) ); else h->scaleW( inv_scale_factor / pt_bin_width ); } const double scale_factor = 1. / _nevt / (2.*PI); const double rap_bin_width = 0.2; for (Histo1DPtr h: _h_pi0_rap_pT.histos()) { const int cutoff_bin = h->binIndexAt(pt_cutoff); if(cutoff_bin>=0) { // for(unsigned int ibin=0; ibinnumBins(); ++ibin) // cout << ibin << " " << h->bin(ibin).area() << endl; const double cutoff_wdt = h->bin(cutoff_bin).xMax()-h->bin(cutoff_bin).xMin(); h->bin(cutoff_bin).scaleW((cutoff_wdt)/(cutoff_wdt-pt_cutoff)); // for(unsigned int ibin=0; ibinnumBins(); ++ibin) // cout << ibin << " " << h->bin(ibin).area() << endl; } h->scaleW( scale_factor / rap_bin_width ); } if(_pp_Pb) { scale( _h_pi0_rap , 1. / _nevt ); scale( _h_pi0_raploss , 1. 
/ _nevt ); } } //@} private: /// @name Histograms //@{ bool _pp_Pb; double _nevt; double _beam_rap; double _beam_rap_1; double _beam_rap_2; BinnedHistogram _h_pi0_pT_pZ; BinnedHistogram _h_pi0_rap_pT; Profile1DPtr _p_pi0_rap_apT; Histo1DPtr _h_pi0_rap; Profile1DPtr _p_pi0_raploss_apT; Histo1DPtr _h_pi0_raploss; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCF_2016_I1385877); } diff --git a/include/Rivet/ProjectionApplier.hh b/include/Rivet/ProjectionApplier.hh --- a/include/Rivet/ProjectionApplier.hh +++ b/include/Rivet/ProjectionApplier.hh @@ -1,205 +1,199 @@ // -*- C++ -*- #ifndef RIVET_ProjectionApplier_HH #define RIVET_ProjectionApplier_HH #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Projection.fhh" #include "Rivet/ProjectionHandler.hh" #include "Rivet/Tools/Logging.hh" namespace Rivet { // Forward declarations class Event; /// @brief Common base class for Projection and Analysis, used for internal polymorphism /// /// Empty interface used for storing Projection and Analysis pointers in the /// same container (used by the ProjectionHandler) class ProjectionApplier { public: // The proj handler needs access to reset the _allowProjReg flag before calling a.init() // friend class ProjectionHandler; /// Constructor ProjectionApplier(); // Virtual destructor: ensure that inheritance is possible. virtual ~ProjectionApplier(); /// @name Metadata functions //@{ /// Get the name of this Projection or Analysis class virtual std::string name() const = 0; //@} /// @name Projection "getting" functions //@{ /// Get the contained projections, including recursion. std::set getProjections() const { return getProjHandler().getChildProjections(*this, ProjectionHandler::DEEP); } /// Does this applier have a projection registered under the name @a name? bool hasProjection(const std::string& name) const { return getProjHandler().hasProjection(*this, name); } /// Get the named projection, specifying return type via a template argument. /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& getProjection(const std::string& name) const { const Projection& p = getProjHandler().getProjection(*this, name); return pcast(p); } /// Get the named projection, specifying return type via a template argument (user-facing alias). /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& get(const std::string& name) const { return getProjection(name); } /// Get the named projection (non-templated, so returns as a reference to a /// Projection base class). const Projection& getProjection(const std::string& name) const { return getProjHandler().getProjection(*this, name); } //@} /// @name Projection applying functions //@{ /// Apply the supplied projection on event @a evt. template /// @todo Add SFINAE to require that PROJ inherit from Projection const PROJ& applyProjection(const Event& evt, const Projection& proj) const { return pcast(_applyProjection(evt, proj)); } /// Apply the supplied projection on event @a evt (user-facing alias). /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& apply(const Event& evt, const Projection& proj) const { return applyProjection(evt, proj); } /// Apply the supplied projection on event @a evt. template /// @todo Add SFINAE to require that PROJ inherit from Projection const PROJ& applyProjection(const Event& evt, const PROJ& proj) const { return pcast(_applyProjection(evt, proj)); } /// Apply the supplied projection on event @a evt (user-facing alias). 
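/// A minimal usage sketch (illustrative only, assuming a projection registered in init() under the name "FS"):
///   declare(FinalState(), "FS");                              // in MyAnalysis::init()
///   const FinalState& fs = apply<FinalState>(event, "FS");    // in MyAnalysis::analyze()
/// Here declare() stands in for the removed addProjection() registration alias, and apply() is the user-facing shorthand for applyProjection().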
/// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& apply(const Event& evt, const PROJ& proj) const { return applyProjection(evt, proj); } /// Apply the named projection on event @a evt. /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& applyProjection(const Event& evt, const std::string& name) const { return pcast(_applyProjection(evt, name)); } /// Apply the supplied projection on event @a evt (user-facing alias). /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& apply(const Event& evt, const std::string& name) const { return applyProjection(evt, name); } /// Apply the supplied projection on event @a evt (convenience arg-reordering alias). /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& apply(const std::string& name, const Event& evt) const { return applyProjection(evt, name); } //@} /// Mark this object as owned by a proj-handler void markAsOwned() const { _owned = true; } protected: Log& getLog() const { return Log::getLog("Rivet.ProjectionHandler"); } /// Get a reference to the ProjectionHandler for this thread. ProjectionHandler& getProjHandler() const { return _projhandler; } /// @name Projection registration functions //@{ /// @brief Register a contained projection /// /// The type of the argument is used to instantiate a new projection /// internally: this new object is applied to events rather than the /// argument object. Hence you are advised to only use locally-scoped /// Projection objects in your Projection and Analysis constructors, and to /// avoid polymorphism (e.g. handling @c ConcreteProjection via a pointer or /// reference to type @c Projection) since this will screw up the internal /// type management. /// /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& declareProjection(const PROJ& proj, const std::string& name) { const Projection& reg = _declareProjection(proj, name); const PROJ& rtn = dynamic_cast(reg); return rtn; } /// @brief Register a contained projection (user-facing version) /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& declare(const PROJ& proj, const std::string& name) { return declareProjection(proj, name); } /// @brief Register a contained projection (user-facing, arg-reordered version) /// @todo Add SFINAE to require that PROJ inherit from Projection template const PROJ& declare(const std::string& name, const PROJ& proj) { return declareProjection(proj, name); } - /// @brief Register a contained projection (user-facing version) - /// @deprecated Use declareProjection() or declare() - /// @todo Add SFINAE to require that PROJ inherit from Projection - template - const PROJ& addProjection(const PROJ& proj, const std::string& name) { return declareProjection(proj, name); } - /// Untemplated function to do the work... const Projection& _declareProjection(const Projection& proj, const std::string& name); //@} /// Non-templated version of string-based applyProjection, to work around /// header dependency issue. const Projection& _applyProjection(const Event& evt, const std::string& name) const; /// Non-templated version of proj-based applyProjection, to work around /// header dependency issue. 
const Projection& _applyProjection(const Event& evt, const Projection& proj) const; /// Flag to forbid projection registration in analyses until the init phase bool _allowProjReg; private: /// Mark object as owned by the _projhandler mutable bool _owned; /// Pointer to projection handler. ProjectionHandler& _projhandler; }; } #endif diff --git a/include/Rivet/Projections/BeamThrust.hh b/include/Rivet/Projections/BeamThrust.hh --- a/include/Rivet/Projections/BeamThrust.hh +++ b/include/Rivet/Projections/BeamThrust.hh @@ -1,78 +1,78 @@ // -*- C++ -*- #ifndef RIVET_BeamThrust_HH #define RIVET_BeamThrust_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Event.hh" namespace Rivet { class BeamThrust : public Projection { public: /// Constructor. BeamThrust() {} BeamThrust(const FinalState& fsp) { setName("BeamThrust"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(BeamThrust); protected: /// Perform the projection on the Event void project(const Event& e) { const vector ps = applyProjection(e, "FS").particles(); calc(ps); } /// Compare projections CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); } public: double beamthrust() const { return _beamthrust; } public: /// @name Direct methods /// Ways to do the calculation directly, without engaging the caching system //@{ /// Manually calculate the beamthrust, without engaging the caching system void calc(const FinalState& fs); /// Manually calculate the beamthrust, without engaging the caching system void calc(const vector& fsparticles); /// Manually calculate the beamthrust, without engaging the caching system void calc(const vector& fsmomenta); //@} private: /// The beamthrust scalar. double _beamthrust; private: /// Explicitly calculate the beamthrust values. void _calcBeamThrust(const vector& fsmomenta); }; } #endif diff --git a/include/Rivet/Projections/CentralEtHCM.hh b/include/Rivet/Projections/CentralEtHCM.hh --- a/include/Rivet/Projections/CentralEtHCM.hh +++ b/include/Rivet/Projections/CentralEtHCM.hh @@ -1,58 +1,58 @@ // -*- C++ -*- #ifndef RIVET_CentralEtHCM_HH #define RIVET_CentralEtHCM_HH #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projections/DISFinalState.hh" namespace Rivet { /// @brief Summed \f$ E_\perp \f$ of central particles in HCM system. /// /// Sum up \f$ E_\perp \f$ of all particles in the hadronic final state in the /// central rapidity bin of the HCM system. class CentralEtHCM : public Projection { public: /// The default constructor. Must specify a FinalStateHCM projection /// object which is guaranteed to live throughout the run. CentralEtHCM(const DISFinalState& fs) { setName("CentralEtHCM"); - addProjection(fs, "FS"); + declare(fs, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(CentralEtHCM); protected: /// Apply the projection on to the Event. void project(const Event& e); /// Compare with other projections CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); } public: /// The sum of the Et in the central rapidity bin. double sumEt() const { return _sumet; } private: /// The sum of the Et in the central rapidity bin. 
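// Minimal usage sketch (illustrative only): an analysis could register this projection on top of a DISFinalState and read the summed central transverse energy per event, e.g.
//   declare(CentralEtHCM(dishcm), "EtHCM");    // dishcm: a DISFinalState in the HCM frame, assumed to be defined elsewhere
//   const double etCentral = apply<CentralEtHCM>(event, "EtHCM").sumEt();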
double _sumet; }; } #endif diff --git a/include/Rivet/Projections/ChargedLeptons.hh b/include/Rivet/Projections/ChargedLeptons.hh --- a/include/Rivet/Projections/ChargedLeptons.hh +++ b/include/Rivet/Projections/ChargedLeptons.hh @@ -1,55 +1,55 @@ // -*- C++ -*- #ifndef RIVET_ChargedLeptons_HH #define RIVET_ChargedLeptons_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Get charged final-state leptons /// /// @todo This is just electrons and muons, unless you set taus stable! class ChargedLeptons : public FinalState { public: /// Constructor ChargedLeptons(const FinalState& fsp=FinalState()) { setName("ChargedLeptons"); - addProjection(ChargedFinalState(fsp), "ChFS"); + declare(ChargedFinalState(fsp), "ChFS"); } /// Constructor via Cut ChargedLeptons(const Cut& c) : ChargedLeptons(FinalState(c)) { } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(ChargedLeptons); protected: /// Apply the projection to the event. void project(const Event& evt); /// Compare projections. CmpState compare(const Projection& other) const; public: /// Access the projected leptons. const Particles& chargedLeptons() const { return _theParticles; } }; } #endif diff --git a/include/Rivet/Projections/DISKinematics.hh b/include/Rivet/Projections/DISKinematics.hh --- a/include/Rivet/Projections/DISKinematics.hh +++ b/include/Rivet/Projections/DISKinematics.hh @@ -1,124 +1,124 @@ // -*- C++ -*- #ifndef RIVET_DISKinematics_HH #define RIVET_DISKinematics_HH #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/DISLepton.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief Get the DIS kinematic variables and relevant boosts for an event. class DISKinematics : public Projection { public: /// The default constructor. DISKinematics() : _theQ2(-1.0), _theW2(-1.0), _theX(-1.0), _theY(-1.0), _theS(-1.0) { setName("DISKinematics"); //addPdgIdPair(ANY, hadid); - addProjection(Beam(), "Beam"); - addProjection(DISLepton(), "Lepton"); + declare(Beam(), "Beam"); + declare(DISLepton(), "Lepton"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(DISKinematics); protected: /// Perform the projection operation on the supplied event. virtual void project(const Event& e); /// Compare with other projections. virtual CmpState compare(const Projection& p) const; public: /// The \f$Q^2\f$. double Q2() const { return _theQ2; } /// The \f$W^2\f$. double W2() const { return _theW2; } /// The Bjorken \f$x\f$. double x() const { return _theX; } /// The inelasticity \f$y\f$ double y() const { return _theY; } /// The centre of mass energy \f$s\f$ double s() const { return _theS; } /// The LorentzRotation needed to boost a particle to the hadronic CM frame. const LorentzTransform& boostHCM() const { return _hcm; } /// The LorentzRotation needed to boost a particle to the hadronic Breit frame. 
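// Note: neglecting lepton and hadron masses the accessors above satisfy Q^2 = x*y*s, and with the proton mass kept W^2 = m_p^2 + Q^2*(1-x)/x; this gives a quick consistency check, e.g. (the projection name "Kinematics" is illustrative)
//   const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
//   MSG_DEBUG("Q2 = " << dk.Q2() << " vs x*y*s = " << dk.x()*dk.y()*dk.s());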
const LorentzTransform& boostBreit() const { return _breit; } /// The incoming hadron beam particle const Particle& beamHadron() const { return _inHadron; } /// The incoming lepton beam particle const Particle& beamLepton() const { return _inLepton; } /// The scattered DIS lepton const Particle& scatteredLepton() const { return _outLepton; } /// @brief 1/-1 multiplier indicating (respectively) whether the event has conventional orientation or not /// /// Conventional DIS orientation has the hadron travelling in the +z direction const int orientation() const { return sign(_inHadron.pz()); } private: /// The \f$Q^2\f$. double _theQ2; /// The \f$W^2\f$. double _theW2; /// The Bjorken \f$x\f$. double _theX; /// The Inelasticity \f$y\f$ double _theY; /// The centre of mass energy \f$s\f$ double _theS; /// Incoming and outgoing DIS particles Particle _inHadron, _inLepton, _outLepton; /// The LorentzRotation needed to boost a particle to the hadronic CM frame. LorentzTransform _hcm; /// The LorentzRotation needed to boost a particle to the hadronic Breit frame. LorentzTransform _breit; }; } #endif diff --git a/include/Rivet/Projections/DISLepton.hh b/include/Rivet/Projections/DISLepton.hh --- a/include/Rivet/Projections/DISLepton.hh +++ b/include/Rivet/Projections/DISLepton.hh @@ -1,69 +1,69 @@ // -*- C++ -*- #ifndef RIVET_DISLepton_HH #define RIVET_DISLepton_HH #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Get the incoming and outgoing leptons in a DIS event. class DISLepton : public Projection { public: /// @name Constructors. //@{ DISLepton(){ setName("DISLepton"); - addProjection(Beam(), "Beam"); - addProjection(PromptFinalState(), "FS"); + declare(Beam(), "Beam"); + declare(PromptFinalState(), "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(DISLepton); //@} protected: /// Perform the projection operation on the supplied event. virtual void project(const Event& e); /// Compare with other projections. virtual CmpState compare(const Projection& p) const; public: /// The incoming lepton const Particle& in() const { return _incoming; } /// The outgoing lepton const Particle& out() const { return _outgoing; } /// Sign of the incoming lepton pz component int pzSign() const { return sign(_incoming.pz()); } private: /// The incoming lepton Particle _incoming; /// The outgoing lepton Particle _outgoing; // /// The charge sign of the DIS current // double _charge; }; } #endif diff --git a/include/Rivet/Projections/FoxWolframMoments.hh b/include/Rivet/Projections/FoxWolframMoments.hh --- a/include/Rivet/Projections/FoxWolframMoments.hh +++ b/include/Rivet/Projections/FoxWolframMoments.hh @@ -1,72 +1,72 @@ // -*- C++ -*- #ifndef RIVET_FoxWolframMoments_HH #define RIVET_FoxWolframMoments_HH #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include #define MAXMOMENT 5 namespace Rivet { /// @brief Calculate Fox-Wolfram moments class FoxWolframMoments : public Projection { public: /// Constructor. FoxWolframMoments(const FinalState& fsp) { setName("FoxWolframMoments"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); /// @todo Let the user supply any projection they like? 
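// Note: the VisibleFinalState declared below is registered as "VFS"; presumably the moment sums in project() run over this visible copy, so that invisible particles (e.g. neutrinos) do not contribute to the Fox-Wolfram moments.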
VisibleFinalState vfs(fsp); - addProjection(vfs, "VFS"); + declare(vfs, "VFS"); // Initialize moments vector for (int i = 0; i < MAXMOMENT ; ++i) { _fwmoments.push_back(0.0); } } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(FoxWolframMoments); /// The projected Fox-Wolfram Moment of order l double getFoxWolframMoment(unsigned int l) const { if (l < MAXMOMENT) { return _fwmoments[l]; } /// @todo What?!? return -666.0; } protected: /// Apply the projection to the event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; private: vector _fwmoments; }; } #endif diff --git a/include/Rivet/Projections/HadronicFinalState.hh b/include/Rivet/Projections/HadronicFinalState.hh --- a/include/Rivet/Projections/HadronicFinalState.hh +++ b/include/Rivet/Projections/HadronicFinalState.hh @@ -1,51 +1,51 @@ // -*- C++ -*- #ifndef RIVET_HadronicFinalState_HH #define RIVET_HadronicFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Project only hadronic final state particles. class HadronicFinalState : public FinalState { public: /// Constructor: the supplied FinalState projection is assumed to live through the run. HadronicFinalState(const FinalState& fsp) { setName("HadronicFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } HadronicFinalState(double mineta = -MAXDOUBLE, double maxeta = MAXDOUBLE, double minpt = 0.0*GeV) { setName("HadronicFinalState"); - addProjection(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(mineta, maxeta, minpt), "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(HadronicFinalState); protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; }; } #endif diff --git a/include/Rivet/Projections/HeavyHadrons.hh b/include/Rivet/Projections/HeavyHadrons.hh --- a/include/Rivet/Projections/HeavyHadrons.hh +++ b/include/Rivet/Projections/HeavyHadrons.hh @@ -1,112 +1,112 @@ // -*- C++ -*- #ifndef RIVET_HeavyHadrons_HH #define RIVET_HeavyHadrons_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Project out the last pre-decay b and c hadrons. /// /// This currently defines a c-hadron as one which contains a @a c quark and /// @a{not} a @a b quark. /// /// @todo This assumes that the heavy hadrons are unstable... should we also look for stable ones in case the decays are disabled? class HeavyHadrons : public FinalState { public: /// @name Constructors and destructors. //@{ /// Constructor with specification of the minimum and maximum pseudorapidity /// \f$ \eta \f$ and the min \f$ p_T \f$ (in GeV). HeavyHadrons(const Cut& c=Cuts::open()) { setName("HeavyHadrons"); - addProjection(UnstableFinalState(c), "UFS"); + declare(UnstableFinalState(c), "UFS"); } /// Clone on the heap. 
DEFAULT_RIVET_PROJ_CLONE(HeavyHadrons); //@} /// @name b hadron accessors //@{ /// Get all weakly decaying b hadrons (return by reference) const Particles& bHadrons() const { return _theBs; } /// Get weakly decaying b hadrons with a Cut applied (return by value) Particles bHadrons(const Cut& c) const { return filter_select(bHadrons(), c); } /// Get weakly decaying b hadrons with a pTmin cut (return by value) /// @deprecated Prefer bHadrons(Cuts::pT > x) Particles bHadrons(double ptmin) const { return bHadrons(Cuts::pT > ptmin); } /// Get weakly decaying b hadrons with a general filter function applied (return by value) Particles bHadrons(const ParticleSelector& s) const { return filter_select(bHadrons(), s); } //@} /// @name b hadron accessors //@{ /// Get all weakly decaying c hadrons (return by reference) const Particles& cHadrons() const { return _theCs; } /// Get weakly decaying c hadrons with a Cut applied (return by value) Particles cHadrons(const Cut& c) const { return filter_select(cHadrons(), c); } /// Get weakly decaying c hadrons with a pTmin cut (return by value) /// @deprecated Prefer cHadrons(Cuts::pT > x) Particles cHadrons(double ptmin) const { return cHadrons(Cuts::pT > ptmin); } /// Get weakly decaying c hadrons with a general filter function applied (return by value) Particles cHadrons(const ParticleSelector& s) const { return filter_select(cHadrons(), s); } //@} protected: /// Apply the projection to the event. virtual void project(const Event& e); /// Compare projections (only difference is in UFS definition) virtual CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "UFS"); } /// b and c hadron containers Particles _theBs, _theCs; }; } #endif diff --git a/include/Rivet/Projections/Hemispheres.hh b/include/Rivet/Projections/Hemispheres.hh --- a/include/Rivet/Projections/Hemispheres.hh +++ b/include/Rivet/Projections/Hemispheres.hh @@ -1,175 +1,175 @@ // -*- C++ -*- #ifndef RIVET_Hemispheres_HH #define RIVET_Hemispheres_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/AxesDefinition.hh" namespace Rivet { /// @brief Calculate the hemisphere masses and broadenings. /// /// Calculate the hemisphere masses and broadenings, with event hemispheres /// defined by the plane normal to the thrust vector, \f$ \vec{n}_\mathrm{T} \f$. /// /// The "high" hemisphere mass, /// \f$ M^2_\mathrm{high} / E^2_\mathrm{vis} \f$, is defined as /// \f[ /// \frac{M^2_\mathrm{high}}{E^2_\mathrm{vis}} = /// \frac{1}{E^2_\mathrm{vis}} \max /// \left( /// \left| \sum_{\vec{p}_k \cdot \vec{n}_\mathrm{T} > 0} p_k \right|^2 , /// \left| \sum_{\vec{p}_k \cdot \vec{n}_\mathrm{T} < 0} p_k \right|^2 /// \right) /// \f] /// and the corresponding "low" hemisphere mass, /// \f$ M^2_\mathrm{low} / E^2_\mathrm{vis} \f$, /// is the sum of momentum vectors in the opposite hemisphere, i.e. /// \f$ \max \rightarrow \min \f$ in the formula above. /// /// Finally, we define a hemisphere mass difference: /// \f[ /// \frac{M^2_\mathrm{diff} }{ E^2_\mathrm{vis}} = /// \frac{ M^2_\mathrm{high} - M^2_\mathrm{low} }{ E^2_\mathrm{vis}} . 
/// \f] /// /// Similarly to the masses, we also define hemisphere broadenings, using the /// momenta transverse to the thrust axis: /// \f[ /// B_\pm = /// \frac{ /// \sum{\pm \vec{p}_i \cdot \vec{n}_\mathrm{T} > 0} /// |\vec{p}_i \times \vec{n}_\mathrm{T} | /// }{ /// 2 \sum_i | \vec{p}_i | /// } /// \f] /// and then a set of the broadening maximum, minimum, sum and difference as follows: /// \f[ B_\mathrm{max} = \max(B_+, B_-) \f] /// \f[ B_\mathrm{min} = \min(B_+, B_-) \f] /// \f[ B_\mathrm{sum} = B_+ + B_- \f] /// \f[ B_\mathrm{diff} = |B_+ - B_-| \f] /// /// Internally, this projection uses a Thrust or Sphericity projection to /// determine the hemisphere orientation. class Hemispheres : public Projection { public: /// Constructor. Hemispheres(const AxesDefinition& ax) { setName("Hemispheres"); - addProjection(ax, "Axes"); + declare(ax, "Axes"); clear(); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(Hemispheres); /// Reset the projection void clear() { _E2vis = -1; _M2high = -1; _M2low = -1; _Bmax = -1; _Bmin = -1; _highMassEqMaxBroad = true; } /// Use the projection manually (i.e. outside the projection mechanism) with raw 4-momentum inputs. void calc(const Vector3& n, const std::vector& p4s); /// Use the projection manually (i.e. outside the projection mechanism) with particle inputs. void calc(const Vector3& n, const Particles& particles); /// Use the projection manually (i.e. outside the projection mechanism) with jet inputs. void calc(const Vector3& n, const Jets& jets); protected: /// Perform the projection on the Event. void project(const Event& e); /// Compare with other projections. CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "Axes"); } public: /// @name Hemisphere masses (scaled by \f$ 1 / E^2_\mathrm{vis} \f$). //@{ double E2vis() const { return _E2vis; } double Evis() const { return sqrt(_E2vis); } double M2high() const { return _M2high; } double Mhigh() const { return sqrt(M2high()); } double M2low() const { return _M2low; } double Mlow() const { return sqrt(M2low()); } double M2diff() const { return _M2high -_M2low; } double Mdiff() const { return sqrt(M2diff()); } double M2sum() const { return _M2high +_M2low; } double Msum() const { return sqrt(M2sum()); } double scaledM2high() const { if (isZero(_M2high)) return 0.0; if (!isZero(_E2vis)) return _M2high/_E2vis; else return std::numeric_limits::max(); } double scaledMhigh() const { return sqrt(scaledM2high()); } double scaledM2low() const { if (isZero(_M2low)) return 0.0; if (!isZero(_E2vis)) return _M2low/_E2vis; else return std::numeric_limits::max(); } double scaledMlow() const { return sqrt(scaledM2low()); } double scaledM2diff() const { if (M2diff() == 0.0) return 0.0; if (_E2vis != 0.0) return M2diff()/_E2vis; else return std::numeric_limits::max(); } double scaledMdiff() const { return sqrt(scaledM2diff()); } //@} /// @name Hemisphere broadenings. //@{ double Bmax() const { return _Bmax; } double Bmin() const { return _Bmin; } double Bsum() const { return _Bmax + _Bmin; } double Bdiff() const { return fabs(_Bmax - _Bmin); } // <- fabs(), just in case... //@} /// Is the hemisphere with the max mass the same as the one with the max broadening? bool massMatchesBroadening() { return _highMassEqMaxBroad; } private: /// Visible energy-squared, \f$ E^2_\mathrm{vis} \f$. double _E2vis; /// Hemisphere mass variables. double _M2high, _M2low; /// Hemisphere broadening variables. double _Bmax, _Bmin; /// Is the hemisphere with the max mass the same as the one with the max broadening? 
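// Minimal usage sketch (illustrative only): the axes are supplied through any AxesDefinition, typically a Thrust projection, e.g.
//   declare(Hemispheres(Thrust(FinalState())), "Hemis");             // in init()
//   const Hemispheres& hemi = apply<Hemispheres>(event, "Hemis");    // in analyze()
//   const double mh = hemi.scaledMhigh(), bmax = hemi.Bmax();        // then fill (hypothetical) histograms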
bool _highMassEqMaxBroad; }; } #endif diff --git a/include/Rivet/Projections/LeadingParticlesFinalState.hh b/include/Rivet/Projections/LeadingParticlesFinalState.hh --- a/include/Rivet/Projections/LeadingParticlesFinalState.hh +++ b/include/Rivet/Projections/LeadingParticlesFinalState.hh @@ -1,75 +1,75 @@ // -*- C++ -*- #ifndef RIVET_LeadingParticlesFinalState_HH #define RIVET_LeadingParticlesFinalState_HH #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Get the highest-pT occurrences of FS particles with the specified PDG IDs. class LeadingParticlesFinalState : public FinalState { public: /// Constructor: the supplied FinalState projection is assumed to live through the run. LeadingParticlesFinalState(const FinalState& fsp) : FinalState(), _leading_only(false) { setName("LeadingParticlesFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(LeadingParticlesFinalState); /// Add a particle ID to the list of leading particles selected LeadingParticlesFinalState& addParticleId(long id) { _ids.insert(id); return *this; } /// Add a particle ID to the list of leading particles selected LeadingParticlesFinalState& addParticleIdPair(long id) { _ids.insert(id); _ids.insert(-id); return *this; } /// Toggle whether to keep track only of the leading particle of any ID, /// or the leading particle of all IDs separately /// Default is the latter (=false) void setLeadingOnly(const bool& leadingonly) { _leading_only = leadingonly; } // /// Check if a particle of a particular ID was found in the current event // bool hasParticleId(const PdgId pid) const; // /// Get a particle of a particular ID (check it exists first) // bool get(const PdgId pid) const; /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; private: /// Check if the particle's ID is in the list bool inList(const Particle& particle) const; /// IDs of the leading particles to be selected std::set_ids; bool _leading_only; }; } #endif diff --git a/include/Rivet/Projections/LossyFinalState.hh b/include/Rivet/Projections/LossyFinalState.hh --- a/include/Rivet/Projections/LossyFinalState.hh +++ b/include/Rivet/Projections/LossyFinalState.hh @@ -1,81 +1,81 @@ // -*- C++ -*- #ifndef RIVET_LossyFinalState_HH #define RIVET_LossyFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Templated FS projection which can lose some of the supplied particles. template class LossyFinalState : public FinalState { public: /// @name Constructors //@{ /// Constructor from FinalState. LossyFinalState(const FinalState& fsp, FILTER filter) : _filter(filter) { setName("LossyFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Stand-alone constructor. Initialises the base FinalState projection. LossyFinalState(FILTER filter, double mineta = -MAXDOUBLE, double maxeta = MAXDOUBLE, double minpt = 0.0) : _filter(filter) { setName("LossyFinalState"); - addProjection(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(mineta, maxeta, minpt), "FS"); } /// Virtual destructor, to allow subclassing virtual ~LossyFinalState() { } /// Clone on the heap. 
DEFAULT_RIVET_PROJ_CLONE(LossyFinalState); //@} /// Apply the projection on the supplied event. void project(const Event& e) { const FinalState& fs = applyProjection(e, "FS"); getLog() << Log::DEBUG << "Pre-loss number of FS particles = " << fs.particles().size() << '\n'; _theParticles.clear(); std::remove_copy_if(fs.particles().begin(), fs.particles().end(), std::back_inserter(_theParticles), _filter); getLog() << Log::DEBUG << "Filtered number of FS particles = " << _theParticles.size() << '\n'; } /// Compare projections. CmpState compare(const Projection& p) const { const LossyFinalState& other = pcast< LossyFinalState >(p); const CmpState fscmp = mkNamedPCmp(other, "FS"); if (fscmp != CmpState::EQ) return fscmp; return _filter.compare(other._filter); } protected: /// Filtering object: must support operator(const Particle&) and compare(const Filter&) FILTER _filter; }; } #endif diff --git a/include/Rivet/Projections/MergedFinalState.hh b/include/Rivet/Projections/MergedFinalState.hh --- a/include/Rivet/Projections/MergedFinalState.hh +++ b/include/Rivet/Projections/MergedFinalState.hh @@ -1,46 +1,46 @@ // -*- C++ -*- #ifndef RIVET_MergedFinalState_HH #define RIVET_MergedFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Get final state particles merged from two FinalState projections. class MergedFinalState : public FinalState { public: /// @name Constructors //@{ MergedFinalState(const FinalState& fspa, const FinalState& fspb) { setName("MergedFinalState"); - addProjection(fspa, "FSA"); - addProjection(fspb, "FSB"); + declare(fspa, "FSA"); + declare(fspb, "FSB"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(MergedFinalState); //@} protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; }; } #endif diff --git a/include/Rivet/Projections/MissingMomentum.hh b/include/Rivet/Projections/MissingMomentum.hh --- a/include/Rivet/Projections/MissingMomentum.hh +++ b/include/Rivet/Projections/MissingMomentum.hh @@ -1,154 +1,154 @@ // -*- C++ -*- #ifndef RIVET_MissingMomentum_HH #define RIVET_MissingMomentum_HH #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Calculate missing \f$ E \f$, \f$ E_\perp \f$ etc. /// /// Project out the total visible energy vector, allowing missing /// \f$ E \f$, \f$ E_\perp \f$ etc. to be calculated. Final state /// visibility restrictions are automatic. class MissingMomentum : public Projection { public: /// Default constructor with optional cut. MissingMomentum(const Cut& c=Cuts::open()) { setName("MissingMomentum"); FinalState fs(c); - addProjection(fs, "FS"); - addProjection(VisibleFinalState(fs), "VisibleFS"); + declare(fs, "FS"); + declare(VisibleFinalState(fs), "VisibleFS"); } /// Constructor. MissingMomentum(const FinalState& fs) { setName("MissingMomentum"); - addProjection(fs, "FS"); - addProjection(VisibleFinalState(fs), "VisibleFS"); + declare(fs, "FS"); + declare(VisibleFinalState(fs), "VisibleFS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(MissingMomentum); /// @name Visible/missing four-momentum functions //@{ /// The vector-summed visible four-momentum in the event. 
/// /// @note Reverse this vector with .reverse() to get the missing momentum vector. /// /// @note The optional @a mass argument is used to set a mass on the 4-vector. By /// default it is zero (since missing momentum is really a 3-momentum quantity: /// adding the E components of visible momenta just gives a huge mass) const FourMomentum visibleMomentum(double mass=0*GeV) const; /// Alias for visibleMomentum const FourMomentum visibleMom(double mass=0*GeV) const { return visibleMomentum(mass); } /// The missing four-momentum in the event, required to balance the final state. /// /// @note The optional @a mass argument is used to set a mass on the 4-vector. By /// default it is zero (since missing momentum is really a 3-momentum quantity: /// adding the E components of visible momenta just gives a huge mass) const FourMomentum missingMomentum(double mass=0*GeV) const { return visibleMomentum(mass).reverse(); } /// Alias for missingMomentum const FourMomentum missingMom(double mass=0*GeV) const { return missingMomentum(mass); } //@} /// @name Transverse momentum functions /// @note This may be what you want, even if the paper calls it "missing Et"! /// @todo Move into a common base class for MissingMomentum and SmearedMET -- MomentumBalance, METFinder? //@{ /// The vector-summed visible transverse momentum in the event, as a 3-vector with z=0 /// @note Reverse this vector with operator- to get the missing pT vector. const Vector3& vectorPt() const { return _vpt; } /// Convenience vector MPT function const Vector3 vectorMissingPt() const { return -vectorPt(); } // Alias const Vector3 vectorMPT() const { return vectorMissingPt(); } /// The vector-summed missing transverse momentum in the event. double missingPt() const { return vectorPt().mod(); } // /// Alias for missingPt // double mpt() const { return missingPt(); } /// The scalar-summed visible transverse momentum in the event. double scalarPt() const { return _spt; } // /// Alias for scalarPt // double spt() const { return scalarPt(); } //@} /// @name Transverse energy functions /// @warning Despite the common names "MET" and "SET", what's often meant is the pT functions above! /// @todo Move into a common base class for MissingMomentum and SmearedMET -- MomentumBalance, METFinder? //@{ /// The vector-summed visible transverse energy in the event, as a 3-vector with z=0 /// @note Reverse this vector with operator- to get the missing ET vector. const Vector3& vectorEt() const { return _vet; } /// Convenience vector MET function const Vector3 vectorMissingEt() const { return -vectorEt(); } // Alias const Vector3 vectorMET() const { return vectorMissingEt(); } /// The vector-summed missing transverse energy in the event. double missingEt() const { return vectorEt().mod(); } /// Alias for missingEt double met() const { return missingEt(); } /// The scalar-summed visible transverse energy in the event. double scalarEt() const { return _set; } /// Alias for scalarEt double set() const { return scalarEt(); } //@} public: /// Clear the projection results. void clear(); protected: /// Apply the projection to the event. void project(const Event& e); /// Compare projections. 
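// Usage sketch (illustrative, not part of this header): declaring MissingMomentum with a
// detector-acceptance cut and reading back the missing-momentum observables; the eta range
// is an arbitrary example.
//
//   declare(MissingMomentum(Cuts::etaIn(-4.9, 4.9)), "MET");
//   // ... in analyze():
//   const MissingMomentum& mm = apply<MissingMomentum>(event, "MET");
//   const double met   = mm.met();              // |vector-summed missing ET|
//   const double mpt   = mm.missingPt();        // |vector-summed missing pT| (often what "MET" means)
//   const Vector3 vmpt = mm.vectorMissingPt();  // missing-pT three-vector with z = 0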
CmpState compare(const Projection& p) const; private: /// The total visible momentum FourMomentum _momentum; /// Scalar transverse energy double _set, _spt; /// Vector transverse energy Vector3 _vet, _vpt; }; } #endif diff --git a/include/Rivet/Projections/NeutralFinalState.hh b/include/Rivet/Projections/NeutralFinalState.hh --- a/include/Rivet/Projections/NeutralFinalState.hh +++ b/include/Rivet/Projections/NeutralFinalState.hh @@ -1,65 +1,65 @@ // -*- C++ -*- #ifndef RIVET_NeutralFinalState_HH #define RIVET_NeutralFinalState_HH #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Project only neutral final state particles. class NeutralFinalState : public FinalState { public: /// @name Constructors //@{ /// Construction from another FinalState NeutralFinalState(const FinalState& fsp, double etmin=0*GeV) : _Etmin(etmin) { setName("NeutralFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Construction using Cuts object NeutralFinalState(const Cut& c=Cuts::open()) : _Etmin(0.0*GeV) { setName("NeutralFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); } /// Construction from explicit eta range and min ET cut values NeutralFinalState(double mineta, double maxeta, double etmin=0*GeV) : _Etmin(etmin) { setName("NeutralFinalState"); - addProjection(FinalState(mineta, maxeta, 0.0*GeV), "FS"); + declare(FinalState(mineta, maxeta, 0.0*GeV), "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(NeutralFinalState); //@} /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; protected: /// The minimum allowed transverse energy. /// @todo Remove in favour of a Cut double _Etmin; }; } #endif diff --git a/include/Rivet/Projections/NonHadronicFinalState.hh b/include/Rivet/Projections/NonHadronicFinalState.hh --- a/include/Rivet/Projections/NonHadronicFinalState.hh +++ b/include/Rivet/Projections/NonHadronicFinalState.hh @@ -1,50 +1,50 @@ // -*- C++ -*- #ifndef RIVET_NonHadronicFinalState_HH #define RIVET_NonHadronicFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Project only hadronic final state particles. class NonHadronicFinalState : public FinalState { public: /// Constructor: the supplied FinalState projection is assumed to live through the run. NonHadronicFinalState(FinalState& fsp) { setName("NonHadronicFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } NonHadronicFinalState(double mineta = -MAXDOUBLE, double maxeta = MAXDOUBLE, double minpt = 0.0*GeV) { setName("NonHadronicFinalState"); - addProjection(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(mineta, maxeta, minpt), "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(NonHadronicFinalState); /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. 
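// Usage sketch (illustrative, not part of this header): the neutral-only and non-hadronic
// wrappers above are declared like any other FinalState modifier; the cut and ET values are
// arbitrary examples.
//
//   FinalState fs(Cuts::etaIn(-2.5, 2.5));
//   declare(NeutralFinalState(fs, 0.5*GeV), "NeutralFS");   // neutral particles with ET > 0.5 GeV
//   declare(NonHadronicFinalState(fs), "NonHadronicFS");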
CmpState compare(const Projection& p) const; }; } #endif diff --git a/include/Rivet/Projections/ParisiTensor.hh b/include/Rivet/Projections/ParisiTensor.hh --- a/include/Rivet/Projections/ParisiTensor.hh +++ b/include/Rivet/Projections/ParisiTensor.hh @@ -1,100 +1,100 @@ // -*- C++ -*- #ifndef RIVET_ParisiTensor_HH #define RIVET_ParisiTensor_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Calculate the Parisi event shape tensor (or linear momentum tensor). /// /// The Parisi event shape C and D variables are derived from the eigenvalues of /// the linear momentum tensor /// \f[ /// \theta^{\alpha \beta} = /// \frac{\sum_i \frac{p_i^\alpha p_i^\beta}{|\mathbf{p}_i|}} /// {\sum_i |\mathbf{p}_i|} /// \f] /// which is actually a linearized (and hence infra-red safe) version of the /// {@link Sphericity} tensor. /// /// Defining the three eigenvalues of \f$\theta\f$ /// \f$ \lambda_1 \ge \lambda_2 \ge \lambda_3 \f$, with \f$ \lambda_1 + \lambda_2 + \lambda_3 = 1 \f$, /// the C and D parameters are defined as /// \f[ /// C = 3(\lambda_1\lambda_2 + \lambda_1\lambda_3 + \lambda_2\lambda_3) /// \f] /// and /// \f[ /// D = 27 \lambda_1\lambda_2\lambda_3 /// \f] /// /// Internally, this Projection uses the Sphericity projection with the generalising /// \f$r\f$ parameter set to 1. /// class ParisiTensor : public Projection { public: /// Constructor. The provided FinalState projection must live throughout the run. ParisiTensor(const FinalState& fsp) { setName("ParisiTensor"); - addProjection(fsp, "FS"); - addProjection(Sphericity(fsp, 1.0), "Sphericity"); + declare(fsp, "FS"); + declare(Sphericity(fsp, 1.0), "Sphericity"); clear(); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(ParisiTensor); protected: /// Perform the projection on the Event. void project(const Event& e); /// Compare with other projections. CmpState compare(const Projection& p) const; public: /// Clear the projection. void clear(); public: /// @name Access the C and D params. ///@{ double C() const { return _C; } double D() const { return _D; } ///@} /// @name Access the eigenvalues of \f$\theta\f$. ///@{ double lambda1() const { return _lambda[0]; } double lambda2() const { return _lambda[1]; } double lambda3() const { return _lambda[2]; } ///@} private: /// The Parisi event shape variables. double _C, _D; /// Eigenvalues. double _lambda[3]; }; } #endif diff --git a/include/Rivet/Projections/PrimaryHadrons.hh b/include/Rivet/Projections/PrimaryHadrons.hh --- a/include/Rivet/Projections/PrimaryHadrons.hh +++ b/include/Rivet/Projections/PrimaryHadrons.hh @@ -1,55 +1,55 @@ // -*- C++ -*- #ifndef RIVET_PrimaryHadrons_HH #define RIVET_PrimaryHadrons_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Project out the first hadrons from hadronisation. /// /// @todo Also be able to return taus? Prefer a separate tau finder. /// @todo This assumes that the primary hadrons are unstable... should we also look for stable primary hadrons? class PrimaryHadrons : public FinalState { public: /// @name Constructors and destructors. 
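// Worked example (illustrative, not part of this header): for eigenvalues
// lambda = (0.5, 0.3, 0.2), which satisfy lambda1 + lambda2 + lambda3 = 1,
//   C = 3*(0.5*0.3 + 0.5*0.2 + 0.3*0.2) = 3*0.31 = 0.93
//   D = 27*0.5*0.3*0.2 = 0.81
// and in an analysis the projection is used as:
//
//   FinalState fs;
//   declare(ParisiTensor(fs), "Parisi");
//   // ... in analyze():
//   const ParisiTensor& parisi = apply<ParisiTensor>(event, "Parisi");
//   const double C = parisi.C(), D = parisi.D();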
//@{ /// Constructor with cuts argument PrimaryHadrons(const Cut& c=Cuts::open()) { setName("PrimaryHadrons"); - addProjection(UnstableFinalState(c), "UFS"); + declare(UnstableFinalState(c), "UFS"); } /// Constructor with specification of the minimum and maximum pseudorapidity /// \f$ \eta \f$ and the min \f$ p_T \f$ (in GeV). PrimaryHadrons(double mineta, double maxeta, double minpt=0.0*GeV) { setName("PrimaryHadrons"); - addProjection(UnstableFinalState(Cuts::etaIn(mineta, maxeta) && Cuts::pT > minpt), "UFS"); + declare(UnstableFinalState(Cuts::etaIn(mineta, maxeta) && Cuts::pT > minpt), "UFS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(PrimaryHadrons); //@} /// Apply the projection to the event. virtual void project(const Event& e); // /// Compare projections. // CmpState compare(const Projection& p) const; }; } #endif diff --git a/include/Rivet/Projections/SmearedJets.hh b/include/Rivet/Projections/SmearedJets.hh --- a/include/Rivet/Projections/SmearedJets.hh +++ b/include/Rivet/Projections/SmearedJets.hh @@ -1,196 +1,196 @@ // -*- C++ -*- #ifndef RIVET_SmearedJets_HH #define RIVET_SmearedJets_HH #include "Rivet/Jet.hh" #include "Rivet/Particle.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/JetAlg.hh" #include "Rivet/Tools/SmearingFunctions.hh" #include namespace Rivet { // // Recursive variadic template arg decoding // namespace { // template // vector& toEffSmearFns(vector& v, const T& t) { // v.push_back(JetEffSmearFn(t)); // return v; // } // template // vector& toEffSmearFns(vector& v, const T& first, ARGS... args) { // v.push_back(JetEffSmearFn(first)); // toEffSmearFns(v, args...); // return v; // } // } /// Wrapper projection for smearing {@link Jet}s with detector resolutions and efficiencies class SmearedJets : public JetAlg { public: /// @name Constructors etc. 
//@{ /// @brief Constructor with a reco efficiency and optional tagging efficiencies SmearedJets(const JetAlg& ja, const JetSmearFn& smearFn, const JetEffFn& bTagEffFn=JET_BTAG_PERFECT, const JetEffFn& cTagEffFn=JET_CTAG_PERFECT) : SmearedJets(ja, vector{smearFn}, bTagEffFn, cTagEffFn) { } /// @brief Constructor with tagging efficiencies, plus an ordered init-list of efficiency and smearing functions SmearedJets(const JetAlg& ja, const JetEffFn& bTagEffFn=JET_BTAG_PERFECT, const JetEffFn& cTagEffFn=JET_CTAG_PERFECT, const initializer_list& effSmearFns={}) : SmearedJets(ja, vector{effSmearFns}, bTagEffFn, cTagEffFn) { } /// @brief Constructor with tagging efficiencies, plus an ordered vector of efficiency and smearing functions SmearedJets(const JetAlg& ja, const JetEffFn& bTagEffFn=JET_BTAG_PERFECT, const JetEffFn& cTagEffFn=JET_CTAG_PERFECT, const vector& effSmearFns={}) : SmearedJets(ja, effSmearFns, bTagEffFn, cTagEffFn) { } /// @brief Constructor with an ordered init-list of efficiency and smearing functions, plus optional tagging efficiencies SmearedJets(const JetAlg& ja, const initializer_list& effSmearFns, const JetEffFn& bTagEffFn=JET_BTAG_PERFECT, const JetEffFn& cTagEffFn=JET_CTAG_PERFECT) : SmearedJets(ja, vector{effSmearFns}, bTagEffFn, cTagEffFn) { } /// @brief Constructor with an ordered vector of efficiency and smearing functions, plus optional tagging efficiencies SmearedJets(const JetAlg& ja, const vector& effSmearFns, const JetEffFn& bTagEffFn=JET_BTAG_PERFECT, const JetEffFn& cTagEffFn=JET_CTAG_PERFECT) : _detFns(effSmearFns), _bTagEffFn(bTagEffFn), _cTagEffFn(cTagEffFn) { setName("SmearedJets"); - addProjection(ja, "TruthJets"); + declare(ja, "TruthJets"); } /// @brief Constructor with trailing efficiency arg /// @deprecated Use the version with pair-smearing list as 2nd argument SmearedJets(const JetAlg& ja, const JetSmearFn& smearFn, const JetEffFn& bTagEffFn, const JetEffFn& cTagEffFn, const JetEffFn& jetEffFn) : SmearedJets(ja, {jetEffFn,smearFn}, bTagEffFn, cTagEffFn) { } /// @todo How to include tagging effs? /// @todo Variadic eff/smear fn list? /// @todo Add a trailing Cut arg cf. SmearedParticles? -- wrap into an eff function /// Clone on the heap. 
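// Usage sketch (illustrative, not part of this header): wrapping a truth-level jet finder with
// an identity "smearing" and a flat b-tag efficiency. The lambdas are arbitrary stand-ins,
// assuming JetSmearFn maps Jet -> Jet and JetEffFn maps Jet -> double; a real analysis would
// normally use resolution/efficiency functions from SmearingFunctions.hh. The FastJets
// constructor signature below is also assumed (input FinalState, algorithm enum, R parameter).
//
//   FastJets truthJets(FinalState(Cuts::etaIn(-5, 5)), FastJets::ANTIKT, 0.4);
//   SmearedJets recoJets(truthJets,
//                        [](const Jet& j) { return j; },    // smearing: identity placeholder
//                        [](const Jet&) { return 0.77; },   // b-tag efficiency: flat 77%
//                        JET_CTAG_PERFECT);
//   declare(recoJets, "RecoJets");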
DEFAULT_RIVET_PROJ_CLONE(SmearedJets); //@} /// Compare to another SmearedJets CmpState compare(const Projection& p) const { // Compare truth jets definitions const CmpState teq = mkPCmp(p, "TruthJets"); if (teq != CmpState::EQ) return teq; // Compare lists of detector functions const SmearedJets& other = dynamic_cast(p); const CmpState nfeq = cmp(_detFns.size(), other._detFns.size()); if (nfeq != CmpState::EQ) return nfeq; for (size_t i = 0; i < _detFns.size(); ++i) { const CmpState feq = _detFns[i].cmp(other._detFns[i]); if (feq != CmpState::EQ) return feq; } // If we got this far, we're equal return CmpState::EQ; } /// Perform the jet finding & smearing calculation void project(const Event& e) { // Copying and filtering const Jets& truthjets = apply(e, "TruthJets").jetsByPt(); _recojets.clear(); _recojets.reserve(truthjets.size()); // Apply jet smearing and efficiency transforms for (const Jet& j : truthjets) { Jet jdet = j; bool keep = true; MSG_DEBUG("Truth jet: " << "mom=" << jdet.mom()/GeV << " GeV, pT=" << jdet.pT()/GeV << ", eta=" << jdet.eta()); for (const JetEffSmearFn& fn : _detFns) { double jeff = -1; std::tie(jdet, jeff) = fn(jdet); // smear & eff // Re-add constituents & tags if (we assume accidentally) they were lost by the smearing function if (jdet.particles().empty() && !j.particles().empty()) jdet.particles() = j.particles(); if (jdet.tags().empty() && !j.tags().empty()) jdet.tags() = j.tags(); MSG_DEBUG(" ->" << "mom=" << jdet.mom()/GeV << " GeV, pT=" << jdet.pT()/GeV << ", eta=" << jdet.eta()); // MSG_DEBUG("New det jet: " // << "mom=" << jdet.mom()/GeV << " GeV, pT=" << jdet.pT()/GeV << ", eta=" << jdet.eta() // << ", b-tag=" << boolalpha << jdet.bTagged() // << ", c-tag=" << boolalpha << jdet.cTagged() // << " : eff=" << 100*jeff << "%"); if (jeff <= 0) { keep = false; break; } //< no need to roll expensive dice (and we deal with -ve probabilities, just in case) if (jeff < 1 && rand01() > jeff) { keep = false; break; } //< roll dice (and deal with >1 probabilities, just in case) } if (keep) _recojets.push_back(jdet); } // Apply tagging efficiencies, using smeared kinematics as input to the tag eff functions for (Jet& j : _recojets) { // Decide whether or not there should be a b-tag on this jet const double beff = _bTagEffFn ? _bTagEffFn(j) : j.bTagged(); const bool btag = beff == 1 || (beff != 0 && rand01() < beff); // Remove b-tags if needed, and add a dummy one if needed if (!btag && j.bTagged()) j.tags().erase(std::remove_if(j.tags().begin(), j.tags().end(), hasBottom), j.tags().end()); if (btag && !j.bTagged()) j.tags().push_back(Particle(PID::BQUARK, j.mom())); ///< @todo Or could use the/an actual clustered b-quark momentum? // Decide whether or not there should be a c-tag on this jet const double ceff = _cTagEffFn ? _cTagEffFn(j) : j.cTagged(); const bool ctag = ceff == 1 || (ceff != 0 && rand01() < beff); // Remove c-tags if needed, and add a dummy one if needed if (!ctag && j.cTagged()) j.tags().erase(std::remove_if(j.tags().begin(), j.tags().end(), hasCharm), j.tags().end()); if (ctag && !j.cTagged()) j.tags().push_back(Particle(PID::CQUARK, j.mom())); ///< @todo As above... ? } } /// Return the full jet list for the JetAlg methods to use Jets _jets() const { return _recojets; } /// Reset the projection. Smearing functions will be unchanged. 
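// Analyze-side sketch (illustrative): the smeared collection is read back through the usual
// JetAlg interface, with the tagging decisions already applied by the efficiency functions above.
//
//   const Jets& jets = apply<JetAlg>(event, "RecoJets").jetsByPt();
//   for (const Jet& j : jets) {
//     if (j.bTagged()) { /* fill b-jet observables */ }
//   }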
void reset() { _recojets.clear(); } private: /// Smeared jets Jets _recojets; /// Stored efficiency & smearing functions vector _detFns; /// Stored efficiency functions JetEffFn _bTagEffFn, _cTagEffFn; }; } #endif diff --git a/include/Rivet/Projections/SmearedMET.hh b/include/Rivet/Projections/SmearedMET.hh --- a/include/Rivet/Projections/SmearedMET.hh +++ b/include/Rivet/Projections/SmearedMET.hh @@ -1,130 +1,130 @@ // -*- C++ -*- #ifndef RIVET_SmearedMET_HH #define RIVET_SmearedMET_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Tools/SmearingFunctions.hh" #include namespace Rivet { /// Wrapper projection for smearing missing (transverse) energy/momentum with detector resolutions class SmearedMET : public Projection { public: /// @name Constructors etc. //@{ /// @brief Constructor from a MissingMomentum projection and a smearing function /// /// Smearing function maps a 3-vector MET and scalar SET to a new MET 3-vector: f(V3, double) -> V3 template SmearedMET(const MissingMomentum& mm, const V2VFN& metSmearFn) : _metSmearFn(metSmearFn) { setName("SmearedMET"); - addProjection(mm, "TruthMET"); + declare(mm, "TruthMET"); } /// @brief Constructor from a Cut (on the particles used to determine missing momentum) and a smearing function template SmearedMET(const V2VFN& metSmearFn, const Cut& cut) : _metSmearFn(metSmearFn) { setName("SmearedMET"); - addProjection(MissingMomentum(cut), "TruthMET"); + declare(MissingMomentum(cut), "TruthMET"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(SmearedMET); //@} /// Compare to another SmearedMET CmpState compare(const Projection& p) const { const SmearedMET& other = dynamic_cast(p); if (get_address(_metSmearFn) == 0) return cmp((size_t)this, (size_t)&p); MSG_TRACE("Smear hashes = " << get_address(_metSmearFn) << "," << get_address(other._metSmearFn)); return mkPCmp(other, "TruthMET") || cmp(get_address(_metSmearFn), get_address(other._metSmearFn)); } /// Perform the MET finding & smearing calculation void project(const Event& e) { const auto& mm = apply(e, "TruthMET"); _vet = mm.vectorEt(); if (_metSmearFn) _vet = _metSmearFn(_vet, mm.scalarEt()); //< smearing } /// @name Transverse momentum functions /// @note This may be what you want, even if the paper calls it "missing Et"! /// @todo Move into a common base class for MissingMomentum and SmearedMET -- MomentumBalance, METFinder? //@{ /// The vector-summed visible transverse momentum in the event, as a 3-vector with z=0 /// @note Reverse this vector with operator- to get the missing pT vector. /// @todo Currently equivalent to vectorEt const Vector3& vectorPt() const { return vectorEt(); } /// Convenience vector MPT function const Vector3 vectorMissingPt() const { return -vectorPt(); } // Alias const Vector3 vectorMPT() const { return vectorMissingPt(); } /// The vector-summed missing transverse momentum in the event. double missingPt() const { return vectorPt().mod(); } // /// Alias for missingPt // double mpt() const { return missingPt(); } // /// The scalar-summed visible transverse momentum in the event. // double scalarPt() const { return _spt; } // // /// Alias for scalarPt // // double spt() const { return scalarPt(); } //@} /// @name Transverse energy functions /// @warning Despite the common names "MET" and "SET", what's often meant is the pT functions above! /// @todo Move into a common base class for MissingMomentum and SmearedMET -- MomentumBalance, METFinder? 
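// Usage sketch (illustrative, not part of this header): wrapping a truth-level MissingMomentum
// with a smearing function of signature (Vector3 MET, double SET) -> Vector3, as described
// above. The identity lambda is only a placeholder for a real resolution function.
//
//   MissingMomentum truthMET(Cuts::etaIn(-4.9, 4.9));
//   SmearedMET recoMET(truthMET, [](const Vector3& vet, double) { return vet; });
//   declare(recoMET, "RecoMET");
//   // ... in analyze():  const double met = apply<SmearedMET>(event, "RecoMET").met();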
//@{ /// The vector-summed visible transverse energy in the event, as a 3-vector with z=0 /// @note Reverse this vector with operator- to get the missing ET vector. const Vector3& vectorEt() const { return _vet; } /// Convenience vector MET function const Vector3 vectorMissingEt() const { return -vectorEt(); } // Alias const Vector3 vectorMET() const { return vectorMissingEt(); } /// The vector-summed missing transverse energy in the event. double missingEt() const { return vectorEt().mod(); } /// Alias for missingEt double met() const { return missingEt(); } //@} /// Reset the projection. Smearing functions will be unchanged. void reset() { } private: Vector3 _vet; /// Stored smearing function std::function _metSmearFn; }; } #endif diff --git a/include/Rivet/Projections/SmearedParticles.hh b/include/Rivet/Projections/SmearedParticles.hh --- a/include/Rivet/Projections/SmearedParticles.hh +++ b/include/Rivet/Projections/SmearedParticles.hh @@ -1,173 +1,173 @@ // -*- C++ -*- #ifndef RIVET_SmearedParticles_HH #define RIVET_SmearedParticles_HH #include "Rivet/Particle.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/ParticleFinder.hh" #include "Rivet/Tools/SmearingFunctions.hh" namespace Rivet { // Recursive variadic template arg decoding namespace { template vector& toEffSmearFns(vector& v, const T& t) { v.push_back(ParticleEffSmearFn(t)); return v; } template vector& toEffSmearFns(vector& v, const T& first, ARGS... args) { v.push_back(ParticleEffSmearFn(first)); toEffSmearFns(v, args...); return v; } } /// Wrapper projection for smearing {@link Jet}s with detector resolutions and efficiencies class SmearedParticles : public ParticleFinder { public: /// @name Constructors etc. //@{ /// @brief Constructor with const efficiency SmearedParticles(const ParticleFinder& pf, double eff, const Cut& c=Cuts::open()) : SmearedParticles(pf, {{eff}}, c) { } /// @brief Constructor with an efficiency function SmearedParticles(const ParticleFinder& pf, const ParticleEffFn& effFn, const Cut& c=Cuts::open()) : SmearedParticles(pf, {{effFn}}, c) { } /// @brief Constructor with const efficiency followed by a smearing function SmearedParticles(const ParticleFinder& pf, double eff, const ParticleSmearFn& smearFn, const Cut& c=Cuts::open()) : SmearedParticles(pf, {eff, smearFn}, c) { } /// @brief Constructor with a smearing function followed by const efficiency SmearedParticles(const ParticleFinder& pf, const ParticleSmearFn& smearFn, double eff, const Cut& c=Cuts::open()) : SmearedParticles(pf, {smearFn, eff}, c) { } /// @brief Constructor with an efficiency function followed by a smearing function SmearedParticles(const ParticleFinder& pf, const ParticleEffFn& effFn, const ParticleSmearFn& smearFn, const Cut& c=Cuts::open()) : SmearedParticles(pf, {effFn, smearFn}, c) { } /// @brief Constructor with a smearing function followed by an efficiency function SmearedParticles(const ParticleFinder& pf, const ParticleSmearFn& smearFn, const ParticleEffFn& effFn, const Cut& c=Cuts::open()) : SmearedParticles(pf, {smearFn, effFn}, c) { } /// @brief Constructor with an ordered list of efficiency and/or smearing functions SmearedParticles(const ParticleFinder& pf, const vector& effSmearFns, const Cut& c=Cuts::open()) : ParticleFinder(c), _detFns(effSmearFns) { setName("SmearedParticles"); - addProjection(pf, "TruthParticles"); + declare(pf, "TruthParticles"); } /// @brief Constructor with an ordered list of efficiency and/or smearing functions SmearedParticles(const ParticleFinder& pf, const initializer_list& 
effSmearFns, const Cut& c=Cuts::open()) : SmearedParticles(pf, vector{effSmearFns}, c) { } /// @brief Constructor with a variadic ordered list of efficiency and smearing function args /// @note The Cut must be provided *before* the eff/smearing functions /// @todo Wouldn't it be nice if the Cut could also go *after* the parameter pack? template SmearedParticles(const ParticleFinder& pf, const Cut& c, ARGS... effSmearFns) : SmearedParticles(pf, toEffSmearFns(_detFns, effSmearFns...), c) { } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(SmearedParticles); //@} /// Compare to another SmearedParticles CmpState compare(const Projection& p) const { const SmearedParticles& other = dynamic_cast(p); // Compare truth particles definitions const CmpState teq = mkPCmp(other, "TruthParticles"); if (teq != CmpState::EQ) return teq; // Compare lists of detector functions const CmpState nfeq = cmp(_detFns.size(), other._detFns.size()); if (nfeq != CmpState::EQ) return nfeq; for (size_t i = 0; i < _detFns.size(); ++i) { const CmpState feq = _detFns[i].cmp(other._detFns[i]); if (feq != CmpState::EQ) return feq; } // If we got this far, we're equal return CmpState::EQ; } /// Perform the particle finding & smearing calculation void project(const Event& e) { // Copying and filtering const Particles& truthparticles = apply(e, "TruthParticles").particlesByPt(); _theParticles.clear(); _theParticles.reserve(truthparticles.size()); for (const Particle& p : truthparticles) { Particle pdet = p; double peff = -1; bool keep = true; for (const ParticleEffSmearFn& fn : _detFns) { std::tie(pdet, peff) = fn(pdet); // smear & eff MSG_DEBUG("New det particle: pid=" << pdet.pid() << ", mom=" << pdet.mom()/GeV << " GeV, " << "pT=" << pdet.pT()/GeV << ", eta=" << pdet.eta() << " : eff=" << 100*peff << "%"); if (peff <= 0) { keep = false; break; } //< no need to roll expensive dice (and we deal with -ve probabilities, just in case) if (peff < 1 && rand01() > peff) { keep = false; break; } //< roll dice (and deal with >1 probabilities, just in case) } if (keep) { pdet.addConstituent(p); //< record where the smearing was built from _theParticles.push_back(pdet); } } } /// Reset the projection. Smearing functions will be unchanged. void reset() { _theParticles.clear(); } private: /// Stored efficiency & smearing functions vector _detFns; }; } #endif diff --git a/include/Rivet/Projections/Spherocity.hh b/include/Rivet/Projections/Spherocity.hh --- a/include/Rivet/Projections/Spherocity.hh +++ b/include/Rivet/Projections/Spherocity.hh @@ -1,134 +1,134 @@ // -*- C++ -*- #ifndef RIVET_Spherocity_HH #define RIVET_Spherocity_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/AxesDefinition.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Get the transverse spherocity scalars for hadron-colliders. /// /// @author Holger Schulz /// /// The scalar (minimum) transverse spherocity is defined as /// \f[ /// S = \frac{\pi^2}{4} \mathrm{min}_{\vec{n}_\perp} \left( \frac{\sum_i \left|\vec{p}_{\perp,i} \times \vec{n}_\perp \right|}{\sum_i |\vec{p}_{\perp,i}|} \right)^2 /// \f], /// with the direction of the unit vector \f$ \vec{n_\perp} \f$ which minimises \f$ T \f$ /// being identified as the spherocity axis. The unit vector which maximises the spherocity /// scalar in the plane perpendicular to \f$ \vec{n} \f$ is the "spherocity major" /// direction, and the vector perpendicular to both the spherocity and spherocity major directions /// is the spherocity minor. 
Both the major and minor directions have associated spherocity /// scalars. /// /// Care must be taken in the case of Drell-Yan processes - there we should use the /// newly proposed observable \f$ a_T \f$. class Spherocity : public AxesDefinition { public: // Default Constructor Spherocity() {} /// Constructor. Spherocity(const FinalState& fsp) : _calculatedSpherocity(false) { setName("Spherocity"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(Spherocity); protected: /// Perform the projection on the Event void project(const Event& e) { const vector ps = applyProjection(e, "FS").particles(); calc(ps); } /// Compare projections CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); } public: /// @name Spherocity scalar accessors //@{ /// The spherocity scalar, \f$ S \f$, (minimum spherocity). double spherocity() const { return _spherocities[0]; } //@} /// @name Spherocity axis accessors //@{ /// The spherocity axis. const Vector3& spherocityAxis() const { return _spherocityAxes[0]; } /// The spherocity major axis (axis of max spherocity perpendicular to spherocity axis). const Vector3& spherocityMajorAxis() const { return _spherocityAxes[1]; } /// The spherocity minor axis (axis perpendicular to spherocity and spherocity major). const Vector3& spherocityMinorAxis() const { return _spherocityAxes[2]; } //@} /// @name AxesDefinition axis accessors. //@{ const Vector3& axis1() const { return spherocityAxis(); } const Vector3& axis2() const { return spherocityMajorAxis(); } const Vector3& axis3() const { return spherocityMinorAxis(); } ///@} public: /// @name Direct methods /// Ways to do the calculation directly, without engaging the caching system //@{ /// Manually calculate the spherocity, without engaging the caching system void calc(const FinalState& fs); /// Manually calculate the spherocity, without engaging the caching system void calc(const vector& fsparticles); /// Manually calculate the spherocity, without engaging the caching system void calc(const vector& fsmomenta); /// Manually calculate the spherocity, without engaging the caching system void calc(const vector& threeMomenta); //@} private: /// The spherocity scalars. vector _spherocities; /// The spherocity axes. vector _spherocityAxes; /// Caching flag to avoid costly recalculations. bool _calculatedSpherocity; private: /// Explicitly calculate the spherocity values. void _calcSpherocity(const vector& fsmomenta); }; } #endif diff --git a/include/Rivet/Projections/TauFinder.hh b/include/Rivet/Projections/TauFinder.hh --- a/include/Rivet/Projections/TauFinder.hh +++ b/include/Rivet/Projections/TauFinder.hh @@ -1,72 +1,72 @@ #ifndef RIVET_TauFinder_HH #define RIVET_TauFinder_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" namespace Rivet { /// @brief Convenience finder of unstable taus /// /// @todo Convert to a general ParticleFinder, since it's not a true final state? Needs some care... class TauFinder : public FinalState { public: enum class DecayMode { ANY = 0, ALL = 0, LEPTONIC, HADRONIC }; static bool isHadronic(const Particle& tau) { assert(tau.abspid() == PID::TAU); return any(tau.stableDescendants(), isHadron); } static bool isLeptonic(const Particle& tau) { return !isHadronic(tau); } TauFinder(DecayMode decaymode, const Cut& cut=Cuts::open()) { /// @todo What about directness/promptness? 
setName("TauFinder"); _decmode = decaymode; - addProjection(UnstableFinalState(cut), "UFS"); + declare(UnstableFinalState(cut), "UFS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(TauFinder); const Particles& taus() const { return _theParticles; } protected: /// Apply the projection on the supplied event. void project(const Event& e); /// Compare with other projections. virtual CmpState compare(const Projection& p) const; private: /// The decaymode enum DecayMode _decmode; }; /// @todo Make this the canonical name in future using Taus = TauFinder; } #endif diff --git a/include/Rivet/Projections/Thrust.hh b/include/Rivet/Projections/Thrust.hh --- a/include/Rivet/Projections/Thrust.hh +++ b/include/Rivet/Projections/Thrust.hh @@ -1,140 +1,140 @@ // -*- C++ -*- #ifndef RIVET_Thrust_HH #define RIVET_Thrust_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/AxesDefinition.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Event.hh" namespace Rivet { /** @brief Get the e+ e- thrust basis and the thrust, thrust major and thrust minor scalars. @author Andy Buckley The scalar (maximum) thrust is defined as \f[ T = \mathrm{max}_{\vec{n}} \frac{\sum_i \left|\vec{p}_i \cdot \vec{n} \right|}{\sum_i |\vec{p}_i|} \f], with the direction of the unit vector \f$ \vec{n} \f$ which maximises \f$ T \f$ being identified as the thrust axis. The unit vector which maximises the thrust scalar in the plane perpendicular to \f$ \vec{n} \f$ is the "thrust major" direction, and the vector perpendicular to both the thrust and thrust major directions is the thrust minor. Both the major and minor directions have associated thrust scalars. Thrust calculations have particularly simple forms for less than 4 particles, and in those cases this projection is computationally minimal. For 4 or more particles, a more general calculation must be carried out, based on the Brandt/Dahmen method from Z. Phys. C1 (1978). While a polynomial improvement on the exponential scaling of the naive method, this algorithm scales asymptotically as \f$ \mathcal{O}\left( n^3 \right) \f$. Be aware that the thrust may easily be the most computationally demanding projection in Rivet for large events! The Rivet implementation of thrust is based heavily on Stefan Gieseke's Herwig++ re-coding of the 'tasso' code from HERWIG. NB. special case with >= 4 coplanar particles will still fail. NB. Thrust assumes all momenta are in the CoM system: no explicit boost is performed. This can be dealt with by appropriate choice of the supplied FinalState. */ class Thrust : public AxesDefinition { public: /// Constructor. Thrust() {} Thrust(const FinalState& fsp) { setName("Thrust"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(Thrust); protected: /// Perform the projection on the Event void project(const Event& e) { const vector ps = applyProjection(e, "FS").particles(); calc(ps); } /// Compare projections CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); } public: ///@{ Thrust scalar accessors /// The thrust scalar, \f$ T \f$, (maximum thrust). double thrust() const { return _thrusts[0]; } /// The thrust major scalar, \f$ M \f$, (thrust along thrust major axis). double thrustMajor() const { return _thrusts[1]; } /// The thrust minor scalar, \f$ m \f$, (thrust along thrust minor axis). double thrustMinor() const { return _thrusts[2]; } /// The oblateness, \f$ O = M - m \f$ . 
double oblateness() const { return _thrusts[1] - _thrusts[2]; } ///@} ///@{ Thrust axis accessors /// The thrust axis. const Vector3& thrustAxis() const { return _thrustAxes[0]; } /// The thrust major axis (axis of max thrust perpendicular to thrust axis). const Vector3& thrustMajorAxis() const { return _thrustAxes[1]; } /// The thrust minor axis (axis perpendicular to thrust and thrust major). const Vector3& thrustMinorAxis() const { return _thrustAxes[2]; } ///@} ///@{ AxesDefinition axis accessors. const Vector3& axis1() const { return thrustAxis(); } const Vector3& axis2() const { return thrustMajorAxis(); } const Vector3& axis3() const { return thrustMinorAxis(); } ///@} public: /// @name Direct methods /// Ways to do the calculation directly, without engaging the caching system //@{ /// Manually calculate the thrust, without engaging the caching system void calc(const FinalState& fs); /// Manually calculate the thrust, without engaging the caching system void calc(const vector& fsparticles); /// Manually calculate the thrust, without engaging the caching system void calc(const vector& fsmomenta); /// Manually calculate the thrust, without engaging the caching system void calc(const vector& threeMomenta); //@} private: /// The thrust scalars. vector _thrusts; /// The thrust axes. vector _thrustAxes; private: /// Explicitly calculate the thrust values. void _calcThrust(const vector& fsmomenta); }; } #endif diff --git a/include/Rivet/Projections/TriggerCDFRun0Run1.hh b/include/Rivet/Projections/TriggerCDFRun0Run1.hh --- a/include/Rivet/Projections/TriggerCDFRun0Run1.hh +++ b/include/Rivet/Projections/TriggerCDFRun0Run1.hh @@ -1,55 +1,55 @@ // -*- C++ -*- #ifndef RIVET_TriggerCDFRun0Run1_HH #define RIVET_TriggerCDFRun0Run1_HH #include "Rivet/Projection.hh" #include "Rivet/Event.hh" #include "Rivet/Particle.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief Access to the min bias triggers used by CDF in Run 0 and Run 1 class TriggerCDFRun0Run1 : public Projection { public: /// Default constructor. TriggerCDFRun0Run1() { setName("TriggerCDFRun0Run1"); - addProjection(ChargedFinalState(-5.9, 5.9), "CFS"); + declare(ChargedFinalState(-5.9, 5.9), "CFS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(TriggerCDFRun0Run1); /// The trigger result bool minBiasDecision() const { return _decision_mb; } /// Project on to the Event void project(const Event& evt); protected: /// Compare with other projections. virtual CmpState compare(const Projection&) const { return CmpState::EQ; } private: /// The min bias trigger decision bool _decision_mb; }; } #endif diff --git a/include/Rivet/Projections/TriggerCDFRun2.hh b/include/Rivet/Projections/TriggerCDFRun2.hh --- a/include/Rivet/Projections/TriggerCDFRun2.hh +++ b/include/Rivet/Projections/TriggerCDFRun2.hh @@ -1,55 +1,55 @@ // -*- C++ -*- #ifndef RIVET_TriggerCDFRun2_HH #define RIVET_TriggerCDFRun2_HH #include "Rivet/Projection.hh" #include "Rivet/Event.hh" #include "Rivet/Particle.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief Access to the min bias triggers used by CDF in Run 0 and Run 1 class TriggerCDFRun2 : public Projection { public: /// Default constructor. TriggerCDFRun2() { setName("TriggerCDFRun2"); - addProjection(ChargedFinalState(-4.7, 4.7), "CFS"); + declare(ChargedFinalState(-4.7, 4.7), "CFS"); } /// Clone on the heap. 
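// Usage sketch (illustrative, not part of this header): thrust in an e+e--style analysis.
// Note the caveat in the Thrust documentation above: the projection assumes the input momenta
// are already in the CoM frame, so choose the supplied FinalState accordingly.
//
//   FinalState fs;
//   declare(Thrust(fs), "Thrust");
//   // ... in analyze():
//   const Thrust& thrust = apply<Thrust>(event, "Thrust");
//   const double T = thrust.thrust();
//   const double O = thrust.oblateness();        // M - m
//   const Vector3& axis = thrust.thrustAxis();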
DEFAULT_RIVET_PROJ_CLONE(TriggerCDFRun2); /// The trigger result bool minBiasDecision() const { return _decision_mb; } /// Project on to the Event void project(const Event& evt); protected: /// Compare with other projections. virtual CmpState compare(const Projection&) const { return CmpState::EQ; } private: /// The min bias trigger decision bool _decision_mb; }; } #endif diff --git a/include/Rivet/Projections/VetoedFinalState.hh b/include/Rivet/Projections/VetoedFinalState.hh --- a/include/Rivet/Projections/VetoedFinalState.hh +++ b/include/Rivet/Projections/VetoedFinalState.hh @@ -1,181 +1,181 @@ // -*- C++ -*- #ifndef RIVET_VetoedFinalState_HH #define RIVET_VetoedFinalState_HH #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief FS modifier to exclude classes of particles from the final state. class VetoedFinalState : public FinalState { public: /// Typedef for a pair of back-to-back cuts. typedef pair BinaryCut; /// Typedef for a vetoing entry. typedef map VetoDetails; /// Typedef for a veto on a composite particle mass. typedef multimap CompositeVeto; /// @name Constructors //@{ /// Default constructor. VetoedFinalState() { setName("VetoedFinalState"); - addProjection(FinalState(), "FS"); + declare(FinalState(), "FS"); } /// Constructor with specific FinalState. VetoedFinalState(const FinalState& fsp) { setName("VetoedFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// You can add a map of ID plus a pair containing \f$ p_{Tmin} \f$ and /// \f$ p_{Tmax} \f$ - these define the range of particles to be vetoed. VetoedFinalState(const VetoDetails& vetocodes) : _vetoCodes(vetocodes) { setName("VetoedFinalState"); - addProjection(FinalState(), "FS"); + declare(FinalState(), "FS"); } /// You can add a map of ID plus a pair containing \f$ p_{Tmin} \f$ and /// \f$ p_{Tmax} \f$ - these define the range of particles to be vetoed. /// This version also supplies a specific FinalState to be used. VetoedFinalState(const FinalState& fsp, const VetoDetails& vetocodes) : _vetoCodes(vetocodes) { setName("VetoedFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(VetoedFinalState); //@} /// Get the list of particle IDs and \f$ p_T \f$ ranges to veto. const VetoDetails& vetoDetails() const { return _vetoCodes; } /// Add a particle ID and \f$ p_T \f$ range to veto. Particles with \f$ p_T \f$ /// IN the given range will be rejected. VetoedFinalState& addVetoDetail(const long id, const double ptmin, const double ptmax) { BinaryCut ptrange(ptmin, ptmax); _vetoCodes.insert(make_pair(id, ptrange)); return *this; } /// Add a particle/antiparticle pair to veto in a given \f$ p_T \f$ range. Given a single ID, both /// the particle and its conjugate antiparticle will be rejected if their \f$ p_T \f$ is IN the given range. VetoedFinalState& addVetoPairDetail(const long id, const double ptmin, const double ptmax) { addVetoDetail(id, ptmin, ptmax); addVetoDetail(-id, ptmin, ptmax); return *this; } /// Add a particle/antiparticle pair to veto. Given a single ID, both the particle and its corresponding /// antiparticle (for all \f$ p_T \f$ values) will be vetoed. VetoedFinalState& addVetoPairId(const long id) { addVetoId(id); addVetoId(-id); return *this; } /// Add a particle ID to veto (all \f$ p_T \f$ range will be vetoed). 
VetoedFinalState& addVetoId(const long id) { BinaryCut ptrange(0.0, std::numeric_limits::max()); _vetoCodes.insert(make_pair(id, ptrange)); return *this; } /// Veto all neutrinos (convenience method) VetoedFinalState& vetoNeutrinos() { addVetoPairId(PID::NU_E); addVetoPairId(PID::NU_MU); addVetoPairId(PID::NU_TAU); return *this; } /// Add a veto on composite masses within a given width. /// The composite mass is composed of nProducts decay products /// @ todo might we want to specify a range of pdg ids for the decay products? VetoedFinalState& addCompositeMassVeto(const double &mass, const double &width, int nProducts=2){ double halfWidth = 0.5*width; BinaryCut massRange(mass - halfWidth, mass + halfWidth); _compositeVetoes.insert(make_pair(nProducts, massRange)); _nCompositeDecays.insert(nProducts); return *this; } /// Veto the decay products of particle with pdg id /// @todo Need HepMC to sort themselves out and keep vector bosons from /// the hard vtx in the event record before this will work reliably for all pdg ids VetoedFinalState& addDecayProductsVeto(const long id) { _parentVetoes.insert(id); return *this; } /// Set the list of particle IDs and \f$ p_T \f$ ranges to veto. VetoedFinalState& setVetoDetails(const VetoDetails& ids) { _vetoCodes = ids; return *this; } /// Clear the list of particle IDs and ranges to veto. VetoedFinalState& reset() { _vetoCodes.clear(); return *this; } /// Veto particles from a supplied final state VetoedFinalState& addVetoOnThisFinalState(const ParticleFinder& fs) { const string name = "FS_" + to_str(_vetofsnames.size()); - addProjection(fs, name); + declare(fs, name); _vetofsnames.insert(name); return *this; } /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. CmpState compare(const Projection& p) const; private: /// The final-state particles. VetoDetails _vetoCodes; /// Composite particle masses to veto CompositeVeto _compositeVetoes; set _nCompositeDecays; typedef set ParentVetos; /// Set of decaying particle IDs to veto ParentVetos _parentVetoes; /// Set of finalstate to be vetoed set _vetofsnames; }; } #endif diff --git a/include/Rivet/Projections/VisibleFinalState.hh b/include/Rivet/Projections/VisibleFinalState.hh --- a/include/Rivet/Projections/VisibleFinalState.hh +++ b/include/Rivet/Projections/VisibleFinalState.hh @@ -1,55 +1,55 @@ // -*- C++ -*- #ifndef RIVET_VisibleFinalState_HH #define RIVET_VisibleFinalState_HH #include "Rivet/Tools/Logging.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Final state modifier excluding particles which are not experimentally visible class VisibleFinalState : public FinalState { public: /// @name Constructors //@{ /// Constructor with min and max pseudorapidity \f$ \eta \f$ and min \f$ p_T \f$ (in GeV). VisibleFinalState(double mineta = -MAXDOUBLE, double maxeta = MAXDOUBLE, double minpt = 0.0*GeV) { setName("VisibleFinalState"); - addProjection(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(mineta, maxeta, minpt), "FS"); } /// Constructor with specific FinalState. VisibleFinalState(const FinalState& fsp) { setName("VisibleFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(VisibleFinalState); //@} /// Apply the projection on the supplied event. void project(const Event& e); /// Compare projections. 
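// Usage sketch (illustrative, not part of this header): building a vetoed final state, e.g. for
// a visible-recoil definition; the particle IDs and pT window are arbitrary examples.
//
//   FinalState fs(Cuts::etaIn(-5, 5));
//   VetoedFinalState vfs(fs);
//   vfs.vetoNeutrinos();                     // drop all neutrino species
//   vfs.addVetoPairId(PID::MUON);            // drop mu+ and mu- at all pT
//   vfs.addVetoDetail(22, 0.0, 10*GeV);      // drop photons with pT in [0, 10] GeV
//   vfs.addDecayProductsVeto(23);            // drop Z decay products (see the @todo caveat above)
//   declare(vfs, "VetoedFS");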
CmpState compare(const Projection& p) const; }; } #endif diff --git a/src/Projections/ChargedFinalState.cc b/src/Projections/ChargedFinalState.cc --- a/src/Projections/ChargedFinalState.cc +++ b/src/Projections/ChargedFinalState.cc @@ -1,48 +1,48 @@ // -*- C++ -*- #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { ChargedFinalState::ChargedFinalState(const FinalState& fsp) { setName("ChargedFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } ChargedFinalState::ChargedFinalState(const Cut& c) { setName("ChargedFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); } ChargedFinalState::ChargedFinalState(double mineta, double maxeta, double minpt) { setName("ChargedFinalState"); - addProjection(FinalState(mineta, maxeta, minpt), "FS"); + declare(FinalState(mineta, maxeta, minpt), "FS"); } CmpState ChargedFinalState::compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); } } namespace { inline bool chargedParticleFilter(const Rivet::Particle& p) { return Rivet::PID::threeCharge(p.pdgId()) == 0; } } namespace Rivet { void ChargedFinalState::project(const Event& e) { const FinalState& fs = applyProjection(e, "FS"); _theParticles.clear(); std::remove_copy_if(fs.particles().begin(), fs.particles().end(), std::back_inserter(_theParticles), chargedParticleFilter); MSG_DEBUG("Number of charged final-state particles = " << _theParticles.size()); if (getLog().isActive(Log::TRACE)) { for (vector::iterator p = _theParticles.begin(); p != _theParticles.end(); ++p) { MSG_TRACE("Selected: " << p->pdgId() << ", charge = " << PID::threeCharge(p->pdgId())/3.0); } } } } diff --git a/src/Projections/DressedLeptons.cc b/src/Projections/DressedLeptons.cc --- a/src/Projections/DressedLeptons.cc +++ b/src/Projections/DressedLeptons.cc @@ -1,137 +1,137 @@ // -*- C++ -*- #include "Rivet/Projections/DressedLeptons.hh" namespace Rivet { // On DressedLepton helper class //{ DressedLepton::DressedLepton(const Particle& dlepton) : Particle(dlepton) { setConstituents({{dlepton}}); //< bare lepton is first constituent } DressedLepton::DressedLepton(const Particle& lepton, const Particles& photons, bool momsum) : Particle(lepton.pid(), lepton.momentum()) { setConstituents({{lepton}}); //< bare lepton is first constituent addConstituents(photons, momsum); } void DressedLepton::addPhoton(const Particle& p, bool momsum) { if (p.pid() != PID::PHOTON) throw Error("Clustering a non-photon on to a DressedLepton:"+to_string(p.pid())); addConstituent(p, momsum); } const Particle& DressedLepton::bareLepton() const { const Particle& l = constituents().front(); if (!l.isChargedLepton()) throw Error("First constituent of a DressedLepton is not a bare lepton: oops"); return l; } //} // Separate-FS version DressedLeptons::DressedLeptons(const FinalState& photons, const FinalState& bareleptons, double dRmax, const Cut& cut, bool useDecayPhotons) : FinalState(cut), _dRmax(dRmax), _fromDecay(useDecayPhotons) { setName("DressedLeptons"); IdentifiedFinalState photonfs(photons, PID::PHOTON); - addProjection(photonfs, "Photons"); + declare(photonfs, "Photons"); IdentifiedFinalState leptonfs(bareleptons); leptonfs.acceptIdPairs({PID::ELECTRON, PID::MUON, PID::TAU}); - addProjection(leptonfs, "Leptons"); + declare(leptonfs, "Leptons"); } // Single-FS version DressedLeptons::DressedLeptons(const FinalState& barefs, double dRmax, const Cut& cut, bool useDecayPhotons) : DressedLeptons(barefs, barefs, dRmax, cut, useDecayPhotons) { } CmpState DressedLeptons::compare(const Projection& 
p) const { // Compare the two as final states (for pT and eta cuts) const DressedLeptons& other = dynamic_cast(p); CmpState fscmp = FinalState::compare(other); if (fscmp != CmpState::EQ) return fscmp; const PCmp phcmp = mkNamedPCmp(p, "Photons"); if (phcmp != CmpState::EQ) return phcmp; const PCmp sigcmp = mkNamedPCmp(p, "Leptons"); if (sigcmp != CmpState::EQ) return sigcmp; return (cmp(_dRmax, other._dRmax) || cmp(_fromDecay, other._fromDecay)); } void DressedLeptons::project(const Event& e) { _theParticles.clear(); // Get bare leptons const FinalState& signal = applyProjection(e, "Leptons"); Particles bareleptons = signal.particles(); if (bareleptons.empty()) return; // Initialise DL collection with bare leptons vector allClusteredLeptons; allClusteredLeptons.reserve(bareleptons.size()); for (const Particle& bl : bareleptons) { Particle dl(bl.pid(), bl.momentum()); dl.setConstituents({bl}); allClusteredLeptons += dl; } // If the radius is 0 or negative, don't even attempt to cluster if (_dRmax > 0) { // Match each photon to its closest charged lepton within the dR cone const FinalState& photons = applyProjection(e, "Photons"); for (const Particle& photon : photons.particles()) { // Ignore photon if it's from a hadron/tau decay and we're avoiding those if (!_fromDecay && photon.fromDecay()) continue; const FourMomentum& p_P = photon.momentum(); double dRmin = _dRmax; int idx = -1; for (size_t i = 0; i < bareleptons.size(); ++i) { const Particle& bl = bareleptons[i]; // Only cluster photons around *charged* signal particles if (bl.charge3() == 0) continue; // Find the closest lepton double dR = deltaR(bl, p_P); if (dR < dRmin) { dRmin = dR; idx = i; } } if (idx > -1) allClusteredLeptons[idx].addConstituent(photon, true); } } // Fill the canonical particles collection with the composite DL Particles for (const Particle& lepton : allClusteredLeptons) { const bool acc = accept(lepton); MSG_TRACE("Clustered lepton " << lepton << " with constituents = " << lepton.constituents() << ", cut-pass = " << std::boolalpha << acc); if (acc) _theParticles.push_back(lepton); } MSG_DEBUG("#dressed leptons = " << allClusteredLeptons.size() << " -> " << _theParticles.size() << " after cuts"); } } diff --git a/src/Projections/FParameter.cc b/src/Projections/FParameter.cc --- a/src/Projections/FParameter.cc +++ b/src/Projections/FParameter.cc @@ -1,110 +1,110 @@ // -*- C++ -*- #include "Rivet/Projections/FParameter.hh" namespace Rivet { FParameter::FParameter(const FinalState& fsp) { setName("FParameter"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); clear(); } void FParameter::clear() { _lambdas = vector(2, 0); } void FParameter::project(const Event& e) { const Particles prts = applyProjection(e, "FS").particles(); calc(prts); } void FParameter::calc(const FinalState& fs) { calc(fs.particles()); } void FParameter::calc(const vector& fsparticles) { vector threeMomenta; threeMomenta.reserve(fsparticles.size()); for (const Particle& p : fsparticles) { const Vector3 p3 = p.momentum().vector3(); threeMomenta.push_back(p3); } _calcFParameter(threeMomenta); } void FParameter::calc(const vector& fsmomenta) { vector threeMomenta; threeMomenta.reserve(fsmomenta.size()); for (const FourMomentum& v : fsmomenta) { threeMomenta.push_back(v.vector3()); } _calcFParameter(threeMomenta); } void FParameter::calc(const vector& fsmomenta) { _calcFParameter(fsmomenta); } // Actually do the calculation void FParameter::_calcFParameter(const vector& fsmomenta) { // Return (with "safe nonsense" sphericity params) if there are no 
final state particles. if (fsmomenta.empty()) { MSG_DEBUG("No particles in final state..."); clear(); return; } // A small iteration over full momenta but set z-coord. to 0.0 to get transverse momenta vector fsperpmomenta; for (const Vector3& p : fsmomenta) { fsperpmomenta.push_back(Vector3(p.x(), p.y(), 0.0)); } // Iterate over all the final state particles. Matrix<2> mMom; MSG_DEBUG("Number of particles = " << fsperpmomenta.size()); for (const Vector3& p3 : fsperpmomenta) { double prefactor = 1.0/p3.mod(); Matrix<2> mMomPart; for (size_t i = 0; i < 2; ++i) { for (size_t j = 0; j < 2; ++j) { mMomPart.set(i,j, p3[i]*p3[j]); } } mMom += prefactor * mMomPart; } MSG_DEBUG("Linearised transverse momentum tensor = " << mMom); // Check that the matrix is symmetric. const bool isSymm = mMom.isSymm(); if (!isSymm) { MSG_ERROR("Error: momentum tensor not symmetric:"); MSG_ERROR("[0,1] vs. [1,0]: " << mMom.get(0,1) << ", " << mMom.get(1,0)); } // If not symmetric, something's wrong (we made sure the error msg appeared first). assert(isSymm); const double a = mMom.get(0,0); const double b = mMom.get(1,1); const double c = mMom.get(1,0); const double l1 = 0.5*(a+b+sqrt( (a-b)*(a-b) + 4 *c*c)); const double l2 = 0.5*(a+b-sqrt( (a-b)*(a-b) + 4 *c*c)); _lambdas = {l1, l2}; // Debug output. MSG_DEBUG("Lambdas = (" << lambda1() << ", " << lambda2() << ")"); MSG_DEBUG("Sum of lambdas = " << lambda1() + lambda2()); MSG_DEBUG("F-Parameter = " << F()); } } diff --git a/src/Projections/FastJets.cc b/src/Projections/FastJets.cc --- a/src/Projections/FastJets.cc +++ b/src/Projections/FastJets.cc @@ -1,214 +1,214 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" #include "Rivet/Projections/TauFinder.hh" namespace Rivet { void FastJets::_initBase() { setName("FastJets"); - addProjection(HeavyHadrons(), "HFHadrons"); - addProjection(TauFinder(TauFinder::DecayMode::HADRONIC), "Taus"); + declare(HeavyHadrons(), "HFHadrons"); + declare(TauFinder(TauFinder::DecayMode::HADRONIC), "Taus"); } void FastJets::_initJdef(Algo alg, double rparameter, double seed_threshold) { MSG_DEBUG("JetAlg = " << static_cast(alg)); MSG_DEBUG("R parameter = " << rparameter); MSG_DEBUG("Seed threshold = " << seed_threshold); if (alg == KT) { _jdef = fastjet::JetDefinition(fastjet::kt_algorithm, rparameter, fastjet::E_scheme); } else if (alg == CAM) { _jdef = fastjet::JetDefinition(fastjet::cambridge_algorithm, rparameter, fastjet::E_scheme); } else if (alg == ANTIKT) { _jdef = fastjet::JetDefinition(fastjet::antikt_algorithm, rparameter, fastjet::E_scheme); } else if (alg == DURHAM) { _jdef = fastjet::JetDefinition(fastjet::ee_kt_algorithm, fastjet::E_scheme); } else if (alg == GENKTEE) { _jdef = fastjet::JetDefinition(fastjet::ee_genkt_algorithm, rparameter, -1); } else { // Plugins: if (alg == SISCONE) { const double OVERLAP_THRESHOLD = 0.75; _plugin.reset(new fastjet::SISConePlugin(rparameter, OVERLAP_THRESHOLD)); // } else if (alg == PXCONE) { // string msg = "PxCone currently not supported, since FastJet doesn't install it by default. 
"; // msg += "Please notify the Rivet authors if this behaviour should be changed."; // throw Error(msg); // _plugin.reset(new fastjet::PxConePlugin(rparameter)); } else if (alg == ATLASCONE) { const double OVERLAP_THRESHOLD = 0.5; _plugin.reset(new fastjet::ATLASConePlugin(rparameter, seed_threshold, OVERLAP_THRESHOLD)); } else if (alg == CMSCONE) { _plugin.reset(new fastjet::CMSIterativeConePlugin(rparameter, seed_threshold)); } else if (alg == CDFJETCLU) { const double OVERLAP_THRESHOLD = 0.75; _plugin.reset(new fastjet::CDFJetCluPlugin(rparameter, OVERLAP_THRESHOLD, seed_threshold)); } else if (alg == CDFMIDPOINT) { const double OVERLAP_THRESHOLD = 0.5; _plugin.reset(new fastjet::CDFMidPointPlugin(rparameter, OVERLAP_THRESHOLD, seed_threshold)); } else if (alg == D0ILCONE) { const double min_jet_Et = 6.0; _plugin.reset(new fastjet::D0RunIIConePlugin(rparameter, min_jet_Et)); } else if (alg == JADE) { _plugin.reset(new fastjet::JadePlugin()); } else if (alg == TRACKJET) { _plugin.reset(new fastjet::TrackJetPlugin(rparameter)); } _jdef = fastjet::JetDefinition(_plugin.get()); } } CmpState FastJets::compare(const Projection& p) const { const FastJets& other = dynamic_cast(p); return \ cmp(_useMuons, other._useMuons) || cmp(_useInvisibles, other._useInvisibles) || mkNamedPCmp(other, "FS") || cmp(_jdef.jet_algorithm(), other._jdef.jet_algorithm()) || cmp(_jdef.recombination_scheme(), other._jdef.recombination_scheme()) || cmp(_jdef.plugin(), other._jdef.plugin()) || cmp(_jdef.R(), other._jdef.R()) || cmp(_adef, other._adef); } // STATIC PseudoJets FastJets::mkClusterInputs(const Particles& fsparticles, const Particles& tagparticles) { PseudoJets pjs; /// @todo Use FastJet3's UserInfo system to store Particle pointers directly? // Store 4 vector data about each particle into FastJet's PseudoJets for (size_t i = 0; i < fsparticles.size(); ++i) { fastjet::PseudoJet pj = fsparticles[i]; pj.set_user_index(i+1); pjs.push_back(pj); } // And the same for ghost tagging particles (with negative user indices) for (size_t i = 0; i < tagparticles.size(); ++i) { fastjet::PseudoJet pj = tagparticles[i]; pj *= 1e-20; ///< Ghostify the momentum pj.set_user_index(-i-1); pjs.push_back(pj); } return pjs; } // STATIC Jet FastJets::mkJet(const PseudoJet& pj, const Particles& fsparticles, const Particles& tagparticles) { const PseudoJets pjconstituents = pj.constituents(); Particles constituents, tags; constituents.reserve(pjconstituents.size()); for (const fastjet::PseudoJet& pjc : pjconstituents) { // Pure ghosts don't have corresponding particles if (pjc.has_area() && pjc.is_pure_ghost()) continue; // Default user index = 0 doesn't give valid particle lookup if (pjc.user_index() == 0) continue; // Split by index sign into constituent & tag lookup if (pjc.user_index() > 0) { // Find constituents if index > 0 const size_t i = pjc.user_index() - 1; if (i >= fsparticles.size()) throw RangeError("FS particle lookup failed in jet construction"); constituents.push_back(fsparticles.at(i)); } else if (!tagparticles.empty()) { // Find tags if index < 0 const size_t i = abs(pjc.user_index()) - 1; if (i >= tagparticles.size()) throw RangeError("Tag particle lookup failed in jet construction"); tags.push_back(tagparticles.at(i)); } } return Jet(pj, constituents, tags); } // STATIC Jets FastJets::mkJets(const PseudoJets& pjs, const Particles& fsparticles, const Particles& tagparticles) { Jets rtn; rtn.reserve(pjs.size()); for (const PseudoJet pj : pjs) { rtn.push_back(FastJets::mkJet(pj, fsparticles, tagparticles)); } 
return rtn; } void FastJets::project(const Event& e) { // Assemble final state particles const string fskey = (_useInvisibles == JetAlg::Invisibles::NONE) ? "VFS" : "FS"; Particles fsparticles = applyProjection(e, fskey).particles(); // Remove prompt invisibles if needed (already done by VFS if using NO_INVISIBLES) if (_useInvisibles == JetAlg::Invisibles::DECAY) { ifilter_discard(fsparticles, [](const Particle& p) { return !(p.isVisible() || p.fromDecay()); }); } // Remove prompt/all muons if needed if (_useMuons == JetAlg::Muons::DECAY) { ifilter_discard(fsparticles, [](const Particle& p) { return isMuon(p) && !p.fromDecay(); }); } else if (_useMuons == JetAlg::Muons::NONE) { ifilter_discard(fsparticles, isMuon); } // Tagging particles const Particles chadrons = applyProjection(e, "HFHadrons").cHadrons(); const Particles bhadrons = applyProjection(e, "HFHadrons").bHadrons(); const Particles taus = applyProjection(e, "Taus").particles(); calc(fsparticles, chadrons+bhadrons+taus); } void FastJets::calc(const Particles& fsparticles, const Particles& tagparticles) { MSG_DEBUG("Finding jets from " << fsparticles.size() << " input particles + " << tagparticles.size() << " tagging particles"); _fsparticles = fsparticles; _tagparticles = tagparticles; // Make pseudojets, with mapping info to Rivet FS and tag particles PseudoJets pjs = mkClusterInputs(_fsparticles, _tagparticles); // Run either basic or area-calculating cluster sequence as reqd. if (_adef) { _cseq.reset(new fastjet::ClusterSequenceArea(pjs, _jdef, *_adef)); } else { _cseq.reset(new fastjet::ClusterSequence(pjs, _jdef)); } MSG_DEBUG("ClusterSequence constructed; Njets_tot = " << _cseq->inclusive_jets().size() << ", Njets(pT > 10 GeV) = " << _cseq->inclusive_jets(10*GeV).size()); } void FastJets::reset() { _yscales.clear(); _fsparticles.clear(); _tagparticles.clear(); /// @todo _cseq = fastjet::ClusterSequence(); } Jets FastJets::_jets() const { /// @todo Cache? return mkJets(pseudojets(), _fsparticles, _tagparticles); } Jet FastJets::trimJet(const Jet& input, const fastjet::Filter& trimmer) const { if (input.pseudojet().associated_cluster_sequence() != clusterSeq().get()) throw Error("To trim a Rivet::Jet, its associated PseudoJet must have come from this FastJets' ClusterSequence"); PseudoJet pj = trimmer(input); return mkJet(pj, _fsparticles, _tagparticles); } PseudoJets FastJets::pseudoJets(double ptmin) const { return clusterSeq() ? clusterSeq()->inclusive_jets(ptmin) : PseudoJets(); } } diff --git a/src/Projections/FinalState.cc b/src/Projections/FinalState.cc --- a/src/Projections/FinalState.cc +++ b/src/Projections/FinalState.cc @@ -1,101 +1,101 @@ // -*- C++ -*- #include "Rivet/Projections/FinalState.hh" namespace Rivet { FinalState::FinalState(const Cut& c) : ParticleFinder(c) { setName("FinalState"); const bool isopen = (c == Cuts::open()); MSG_TRACE("Check for open FS conditions: " << std::boolalpha << isopen); - if (!isopen) addProjection(FinalState(), "OpenFS"); + if (!isopen) declare(FinalState(), "OpenFS"); } FinalState::FinalState(const FinalState& fsp, const Cut& c) : ParticleFinder(c) { setName("FinalState"); MSG_TRACE("Registering base FSP as 'PrevFS'"); - addProjection(fsp, "PrevFS"); + declare(fsp, "PrevFS"); } /// @deprecated, keep for backwards compatibility for now. 
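/// @note (editorial) The explicit eta/pT arguments below are simply translated into the equivalent
/// Cut expression, i.e. for non-open values this constructor behaves like the purely illustrative call
///   FinalState fs(Cuts::etaIn(mineta, maxeta) && Cuts::pT >= minpt);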
FinalState::FinalState(double mineta, double maxeta, double minpt) { setName("FinalState"); const bool openpt = isZero(minpt); const bool openeta = (mineta <= -MAXDOUBLE && maxeta >= MAXDOUBLE); MSG_TRACE("Check for open FS conditions:" << std::boolalpha << " eta=" << openeta << ", pt=" << openpt); if (openpt && openeta) { _cuts = Cuts::open(); } else { - addProjection(FinalState(), "OpenFS"); + declare(FinalState(), "OpenFS"); if (openeta) _cuts = (Cuts::pT >= minpt); else if ( openpt ) _cuts = Cuts::etaIn(mineta, maxeta); else _cuts = (Cuts::etaIn(mineta, maxeta) && Cuts::pT >= minpt); } } CmpState FinalState::compare(const Projection& p) const { const FinalState& other = dynamic_cast<const FinalState&>(p); // First check if there is a PrevFS and if it matches if (hasProjection("PrevFS") != other.hasProjection("PrevFS")) return CmpState::UNDEF; if (hasProjection("PrevFS")) { const PCmp prevcmp = mkPCmp(other, "PrevFS"); if (prevcmp != CmpState::EQ) return prevcmp; } // Then check the extra cuts const bool cutcmp = _cuts == other._cuts; MSG_TRACE(_cuts << " VS " << other._cuts << " -> EQ == " << std::boolalpha << cutcmp); if (!cutcmp) return CmpState::UNDEF; // Checks all passed: these FSes are equivalent return CmpState::EQ; } void FinalState::project(const Event& e) { _theParticles.clear(); // Handle "open FS" special case, which should not/cannot recurse if (_cuts == Cuts::OPEN) { MSG_TRACE("Open FS processing: should only see this once per event (" << e.genEvent()->event_number() << ")"); for (const GenParticle* p : Rivet::particles(e.genEvent())) { if (p->status() == 1) { MSG_TRACE("FS GV = " << p->production_vertex()); _theParticles.push_back(Particle(*p)); } } return; } // Base the calculation on PrevFS if available, otherwise OpenFS /// @todo In general, we'd like to calculate a restrictive FS based on the most restricted superset FS. const Particles& allstable = applyProjection<FinalState>(e, (hasProjection("PrevFS") ? "PrevFS" : "OpenFS")).particles(); for (const Particle& p : allstable) { const bool passed = accept(p); MSG_TRACE("Choosing: ID = " << p.pid() << ", pT = " << p.pT()/GeV << " GeV" << ", eta = " << p.eta() << ": result = " << std::boolalpha << passed); if (passed) _theParticles.push_back(p); } MSG_TRACE("Number of final-state particles = " << _theParticles.size()); } /// Decide if a particle is to be accepted or not. bool FinalState::accept(const Particle& p) const { // Not having status == 1 should never happen!
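// (Editorial note) FinalState::project() above only ever stores status == 1 particles, so the
// assert below is just a consistency check on particles handed to accept() from outside.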
assert(p.genParticle() == NULL || p.genParticle()->status() == 1); return _cuts->accept(p); } } diff --git a/src/Projections/IdentifiedFinalState.cc b/src/Projections/IdentifiedFinalState.cc --- a/src/Projections/IdentifiedFinalState.cc +++ b/src/Projections/IdentifiedFinalState.cc @@ -1,72 +1,72 @@ // -*- C++ -*- #include "Rivet/Projections/IdentifiedFinalState.hh" namespace Rivet { IdentifiedFinalState::IdentifiedFinalState(const FinalState& fsp, const vector& pids) { setName("IdentifiedFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); acceptIds(pids); } IdentifiedFinalState::IdentifiedFinalState(const FinalState& fsp, PdgId pid) { setName("IdentifiedFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); acceptId(pid); } IdentifiedFinalState::IdentifiedFinalState(const Cut& c, const vector& pids) { setName("IdentifiedFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); acceptIds(pids); } IdentifiedFinalState::IdentifiedFinalState(const vector& pids, const Cut& c) { setName("IdentifiedFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); acceptIds(pids); } IdentifiedFinalState::IdentifiedFinalState(const Cut& c, PdgId pid) { setName("IdentifiedFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); acceptId(pid); } IdentifiedFinalState::IdentifiedFinalState(PdgId pid, const Cut& c) { setName("IdentifiedFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); acceptId(pid); } CmpState IdentifiedFinalState::compare(const Projection& p) const { const PCmp fscmp = mkNamedPCmp(p, "FS"); if (fscmp != CmpState::EQ) return fscmp; const IdentifiedFinalState& other = dynamic_cast(p); CmpState pidssize = cmp(_pids.size(), other._pids.size()); if (pidssize != CmpState::EQ) return pidssize; return cmp(_pids, other._pids); } void IdentifiedFinalState::project(const Event& e) { const FinalState& fs = applyProjection(e, "FS"); _theParticles.clear(); _theParticles.reserve(fs.particles().size()); _remainingParticles.clear(); _remainingParticles.reserve(fs.particles().size()); for (const Particle& p : fs.particles()) { if (acceptedIds().find(p.pid()) != acceptedIds().end()) { _theParticles.push_back(p); // Identified } else { _remainingParticles.push_back(p); // Remaining } } } } diff --git a/src/Projections/InvMassFinalState.cc b/src/Projections/InvMassFinalState.cc --- a/src/Projections/InvMassFinalState.cc +++ b/src/Projections/InvMassFinalState.cc @@ -1,185 +1,185 @@ // -*- C++ -*- #include "Rivet/Projections/InvMassFinalState.hh" namespace Rivet { InvMassFinalState::InvMassFinalState(const FinalState& fsp, const pair& idpair, // pair of decay products double minmass, // min inv mass double maxmass, // max inv mass double masstarget) : _minmass(minmass), _maxmass(maxmass), _masstarget(masstarget), _useTransverseMass(false) { setName("InvMassFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); _decayids.push_back(idpair); } InvMassFinalState::InvMassFinalState(const FinalState& fsp, const vector >& idpairs, // vector of pairs of decay products double minmass, // min inv mass double maxmass, // max inv mass double masstarget) : _decayids(idpairs), _minmass(minmass), _maxmass(maxmass), _masstarget(masstarget), _useTransverseMass(false) { setName("InvMassFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } InvMassFinalState::InvMassFinalState(const pair& idpair, // pair of decay products double minmass, // min inv mass double maxmass, // max inv 
mass double masstarget) : _minmass(minmass), _maxmass(maxmass), _masstarget(masstarget), _useTransverseMass(false) { setName("InvMassFinalState"); _decayids.push_back(idpair); } InvMassFinalState::InvMassFinalState(const vector<pair<PdgId, PdgId> >& idpairs, // vector of pairs of decay products double minmass, // min inv mass double maxmass, // max inv mass double masstarget) : _decayids(idpairs), _minmass(minmass), _maxmass(maxmass), _masstarget(masstarget), _useTransverseMass(false) { setName("InvMassFinalState"); } CmpState InvMassFinalState::compare(const Projection& p) const { // First compare the final states we are running on CmpState fscmp = mkNamedPCmp(p, "FS"); if (fscmp != CmpState::EQ) return fscmp; // Then compare the two as final states const InvMassFinalState& other = dynamic_cast<const InvMassFinalState&>(p); fscmp = FinalState::compare(other); if (fscmp != CmpState::EQ) return fscmp; // Compare the mass limits CmpState masstypecmp = cmp(_useTransverseMass, other._useTransverseMass); if (masstypecmp != CmpState::EQ) return masstypecmp; CmpState massllimcmp = cmp(_minmass, other._minmass); if (massllimcmp != CmpState::EQ) return massllimcmp; CmpState masshlimcmp = cmp(_maxmass, other._maxmass); if (masshlimcmp != CmpState::EQ) return masshlimcmp; // Compare the decay species CmpState decaycmp = cmp(_decayids, other._decayids); if (decaycmp != CmpState::EQ) return decaycmp; // Finally compare them as final states return FinalState::compare(other); } void InvMassFinalState::project(const Event& e) { const FinalState& fs = applyProjection<FinalState>(e, "FS"); calc(fs.particles()); } void InvMassFinalState::calc(const Particles& inparticles) { _theParticles.clear(); _particlePairs.clear(); // Containers for the particles of type specified in the pair vector<const Particle*> type1, type2; // Get all the particles of the type specified in the pair from the particle list for (const Particle& ipart : inparticles) { // Loop around possible particle pairs for (const PdgIdPair& ipair : _decayids) { if (ipart.pid() == ipair.first) { if (accept(ipart)) type1 += &ipart; } else if (ipart.pid() == ipair.second) { if (accept(ipart)) type2 += &ipart; } } } if (type1.empty() || type2.empty()) return; // Temporary container of selected particle pointers // Useful to compare pointers and avoid double occurrences of the same // particle in case it matches with more than one other particle vector<const Particle*> tmp; // Now calculate the inv mass pair<double, pair<Particle, Particle> > closestPair; closestPair.first = 1e30; for (const Particle* i1 : type1) { for (const Particle* i2 : type2) { // Check this is actually a pair // (if more than one pair is in the vector, particles can be unrelated) bool found = false; for (const PdgIdPair& ipair : _decayids) { if (i1->pid() == ipair.first && i2->pid() == ipair.second) { found = true; break; } } if (!found) continue; FourMomentum v4 = i1->momentum() + i2->momentum(); if (v4.mass2() < 0) { MSG_DEBUG("Constructed negative inv mass2: skipping!"); continue; } bool passedMassCut = false; if (_useTransverseMass) { passedMassCut = inRange(mT(i1->momentum(), i2->momentum()), _minmass, _maxmass); } else { passedMassCut = inRange(v4.mass(), _minmass, _maxmass); } if (passedMassCut) { MSG_DEBUG("Selecting particles with IDs " << i1->pid() << " & " << i2->pid() << " and mass = " << v4.mass()/GeV << " GeV"); // Store accepted particles, avoiding duplicates if (find(tmp.begin(), tmp.end(), i1) == tmp.end()) { tmp.push_back(i1); _theParticles += *i1; } if (find(tmp.begin(), tmp.end(), i2) == tmp.end()) { tmp.push_back(i2); _theParticles += *i2; } // Store accepted particle pairs _particlePairs += make_pair(*i1, *i2); if (_masstarget>0.0) { double diff = fabs(v4.mass()-_masstarget); if (diff < closestPair.first) { closestPair.first = diff; closestPair.second = make_pair(*i1, *i2); } } } } } // If a mass target was given, keep only the pair whose mass is closest to it if (_masstarget > 0.0 && closestPair.first < 1e30) { _theParticles.clear(); _particlePairs.clear(); _theParticles += closestPair.second.first; _theParticles += closestPair.second.second; _particlePairs += closestPair.second; } MSG_DEBUG("Selected " << _theParticles.size() << " particles " << "(" << _particlePairs.size() << " pairs)"); if (getLog().isActive(Log::TRACE)) { for (const Particle& p : _theParticles) { MSG_TRACE("ID: " << p.pid() << ", barcode: " << p.genParticle()->barcode()); } } } /// Constituent pairs const std::vector<std::pair<Particle, Particle> >& InvMassFinalState::particlePairs() const { return _particlePairs; } } diff --git a/src/Projections/JetAlg.cc b/src/Projections/JetAlg.cc --- a/src/Projections/JetAlg.cc +++ b/src/Projections/JetAlg.cc @@ -1,18 +1,18 @@ // -*- C++ -*- #include "Rivet/Projections/JetAlg.hh" namespace Rivet { JetAlg::JetAlg(const FinalState& fs, Muons usemuons, Invisibles useinvis) : _useMuons(usemuons), _useInvisibles(useinvis) { setName("JetAlg"); - addProjection(fs, "FS"); + declare(fs, "FS"); VisibleFinalState vfs(fs); // MSG_DEBUG("Making visible final state from provided FS"); - addProjection(vfs, "VFS"); + declare(vfs, "VFS"); } } diff --git a/src/Projections/JetShape.cc b/src/Projections/JetShape.cc --- a/src/Projections/JetShape.cc +++ b/src/Projections/JetShape.cc @@ -1,114 +1,114 @@ // -*- C++ -*- #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/JetShape.hh" namespace Rivet { // Constructor. JetShape::JetShape(const JetAlg& jetalg, double rmin, double rmax, size_t nbins, double ptmin, double ptmax, double absrapmin, double absrapmax, RapScheme rapscheme) : _rapscheme(rapscheme) { setName("JetShape"); _binedges = linspace(nbins, rmin, rmax); _ptcuts = make_pair(ptmin, ptmax); _rapcuts = make_pair(absrapmin, absrapmax); - addProjection(jetalg, "Jets"); + declare(jetalg, "Jets"); } // Constructor.
JetShape::JetShape(const JetAlg& jetalg, vector binedges, double ptmin, double ptmax, double absrapmin, double absrapmax, RapScheme rapscheme) : _binedges(binedges), _rapscheme(rapscheme) { setName("JetShape"); _ptcuts = make_pair(ptmin, ptmax); _rapcuts = make_pair(absrapmin, absrapmax); - addProjection(jetalg, "Jets"); + declare(jetalg, "Jets"); } CmpState JetShape::compare(const Projection& p) const { const CmpState jcmp = mkNamedPCmp(p, "Jets"); if (jcmp != CmpState::EQ) return jcmp; const JetShape& other = pcast(p); const CmpState ptcmp = cmp(ptMin(), other.ptMin()) || cmp(ptMax(), other.ptMax()); if (ptcmp != CmpState::EQ) return ptcmp; const CmpState rapcmp = cmp(_rapcuts.first, other._rapcuts.first) || cmp(_rapcuts.second, other._rapcuts.second); if (rapcmp != CmpState::EQ) return rapcmp; CmpState bincmp = cmp(numBins(), other.numBins()); if (bincmp != CmpState::EQ) return bincmp; for (size_t i = 0; i < _binedges.size(); ++i) { bincmp = cmp(_binedges[i], other._binedges[i]); if (bincmp != CmpState::EQ) return bincmp; } return CmpState::EQ; } void JetShape::clear() { _diffjetshapes.clear(); } void JetShape::calc(const Jets& jets) { clear(); for (const Jet& j : jets) { // Apply jet cuts const FourMomentum& pj = j.momentum(); if (!inRange(pj.pT(), _ptcuts)) continue; /// @todo Use Cut for better eta/y selection if (_rapscheme == PSEUDORAPIDITY && !inRange(fabs(pj.eta()), _rapcuts)) continue; if (_rapscheme == RAPIDITY && !inRange(fabs(pj.rapidity()), _rapcuts)) continue; // Fill bins vector bins(numBins(), 0.0); for (const Particle& p : j.particles()) { const double dR = deltaR(pj, p.momentum(), _rapscheme); const int dRindex = binIndex(dR, _binedges); if (dRindex == -1) continue; ///< Out of histo range bins[dRindex] += p.pT(); } // Add bin vector for this jet to the diffjetshapes container _diffjetshapes += bins; } // Normalize to total pT for (vector& binsref : _diffjetshapes) { double integral = 0.0; for (size_t i = 0; i < numBins(); ++i) { integral += binsref[i]; } if (integral > 0) { for (size_t i = 0; i < numBins(); ++i) { binsref[i] /= integral; } } else { // It's just-about conceivable that a jet would have no particles in the given Delta(r) range... MSG_DEBUG("No pT contributions in jet Delta(r) range: weird!"); } } } void JetShape::project(const Event& e) { const Jets jets = applyProjection(e, "Jets").jets(Cuts::ptIn(_ptcuts.first, _ptcuts.second) & ((_rapscheme == PSEUDORAPIDITY) ? 
Cuts::etaIn(-_rapcuts.second, _rapcuts.second) : Cuts::rapIn(-_rapcuts.second, _rapcuts.second)) ); calc(jets); } } diff --git a/src/Projections/NonPromptFinalState.cc b/src/Projections/NonPromptFinalState.cc --- a/src/Projections/NonPromptFinalState.cc +++ b/src/Projections/NonPromptFinalState.cc @@ -1,46 +1,46 @@ // -*- C++ -*- #include "Rivet/Projections/NonPromptFinalState.hh" namespace Rivet { NonPromptFinalState::NonPromptFinalState(const FinalState& fsp, bool accepttaudecays, bool acceptmudecays) : _acceptMuDecays(acceptmudecays), _acceptTauDecays(accepttaudecays) { setName("NonPromptFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } NonPromptFinalState::NonPromptFinalState(const Cut& c, bool accepttaudecays, bool acceptmudecays) : _acceptMuDecays(acceptmudecays), _acceptTauDecays(accepttaudecays) { setName("NonPromptFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); } CmpState NonPromptFinalState::compare(const Projection& p) const { const PCmp fscmp = mkNamedPCmp(p, "FS"); if (fscmp != CmpState::EQ) return fscmp; const NonPromptFinalState& other = dynamic_cast(p); return cmp(_acceptMuDecays, other._acceptMuDecays) || cmp(_acceptTauDecays, other._acceptTauDecays); } void NonPromptFinalState::project(const Event& e) { _theParticles.clear(); const Particles& particles = applyProjection(e, "FS").particles(); for (const Particle& p : particles) if (!isPrompt(p, !_acceptTauDecays, !_acceptMuDecays)) _theParticles.push_back(p); MSG_DEBUG("Number of final state particles from hadron decays = " << _theParticles.size()); if (getLog().isActive(Log::TRACE)) { for (const Particle& p : _theParticles) MSG_TRACE("Selected: " << p.pid() << ", charge = " << p.charge()); } } } diff --git a/src/Projections/PromptFinalState.cc b/src/Projections/PromptFinalState.cc --- a/src/Projections/PromptFinalState.cc +++ b/src/Projections/PromptFinalState.cc @@ -1,53 +1,53 @@ // -*- C++ -*- #include "Rivet/Projections/PromptFinalState.hh" namespace Rivet { PromptFinalState::PromptFinalState(bool accepttaudecays, bool acceptmudecays) : _acceptMuDecays(acceptmudecays), _acceptTauDecays(accepttaudecays) { setName("PromptFinalState"); - addProjection(FinalState(), "FS"); + declare(FinalState(), "FS"); } PromptFinalState::PromptFinalState(const Cut& c, bool accepttaudecays, bool acceptmudecays) : _acceptMuDecays(acceptmudecays), _acceptTauDecays(accepttaudecays) { setName("PromptFinalState"); - addProjection(FinalState(c), "FS"); + declare(FinalState(c), "FS"); } PromptFinalState::PromptFinalState(const FinalState& fsp, bool accepttaudecays, bool acceptmudecays) : _acceptMuDecays(acceptmudecays), _acceptTauDecays(accepttaudecays) { setName("PromptFinalState"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); } CmpState PromptFinalState::compare(const Projection& p) const { const PCmp fscmp = mkNamedPCmp(p, "FS"); if (fscmp != CmpState::EQ) return fscmp; const PromptFinalState& other = dynamic_cast(p); return cmp(_acceptMuDecays, other._acceptMuDecays) || cmp(_acceptTauDecays, other._acceptTauDecays); } void PromptFinalState::project(const Event& e) { _theParticles.clear(); const Particles& particles = applyProjection(e, "FS").particles(); for (const Particle& p : particles) if (isPrompt(p, _acceptTauDecays, _acceptMuDecays)) _theParticles.push_back(p); MSG_DEBUG("Number of final state particles not from hadron decays = " << _theParticles.size()); if (getLog().isActive(Log::TRACE)) { for (const Particle& p : _theParticles) MSG_TRACE("Selected: " << p.pid() << ", charge = 
" << p.charge()); } } } diff --git a/src/Projections/Sphericity.cc b/src/Projections/Sphericity.cc --- a/src/Projections/Sphericity.cc +++ b/src/Projections/Sphericity.cc @@ -1,178 +1,178 @@ // -*- C++ -*- #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Jet.hh" namespace Rivet { Sphericity::Sphericity(const FinalState& fsp, double rparam) : _regparam(rparam) { setName("Sphericity"); - addProjection(fsp, "FS"); + declare(fsp, "FS"); clear(); } void Sphericity::clear() { _lambdas = vector(3, 0); _sphAxes = vector(3, Vector3()); } CmpState Sphericity::compare(const Projection& p) const { PCmp fscmp = mkNamedPCmp(p, "FS"); if (fscmp != CmpState::EQ) return fscmp; const Sphericity& other = dynamic_cast(p); if (fuzzyEquals(_regparam, other._regparam)) return CmpState::EQ; return cmp(_regparam, other._regparam); } void Sphericity::project(const Event& e) { const Particles prts = applyProjection(e, "FS").particles(); calc(prts); } void Sphericity::calc(const FinalState& fs) { calc(fs.particles()); } void Sphericity::calc(const Particles& particles) { vector threeMomenta; transform(particles, threeMomenta, p3); calc(threeMomenta); } void Sphericity::calc(const Jets& jets) { vector threeMomenta; transform(jets, threeMomenta, p3); calc(threeMomenta); } void Sphericity::calc(const vector& momenta) { vector threeMomenta; transform(momenta, threeMomenta, [](const FourMomentum& p4){return p4.vector3();}); calc(threeMomenta); } Vector3 Sphericity::mkEigenVector(Matrix3 A, const double &lambda) { const double b = A.get(0,1); const double c = A.get(0,2); const double d = A.get(1,1); const double e = A.get(1,2); const double f = A.get(2,2); const double denom = b*e -c*d + c*lambda; double x = e*(b*f -c*e - b*lambda)/denom/c + (lambda -f)/c; double y = (c*e -b*f +b*lambda)/denom; Vector3 E(x,y,1); return E.unit(); } void Sphericity::calc(const vector& momenta) { MSG_DEBUG("Calculating sphericity with r = " << _regparam); // Return (with "safe nonsense" sphericity params) if there are no final state particles if (momenta.empty()) { MSG_DEBUG("Not enough momenta given..."); clear(); return; } // Iterate over all the final state particles. Matrix3 mMom; double totalMomentum = 0.0; MSG_DEBUG("Number of particles = " << momenta.size()); for (const Vector3& p3 : momenta) { // Build the (regulated) normalising factor. totalMomentum += pow(p3.mod(), _regparam); // Build (regulated) quadratic momentum components. const double regfactor = pow(p3.mod(), _regparam-2); if (!fuzzyEquals(regfactor, 1.0)) { MSG_TRACE("Regfactor (r=" << _regparam << ") = " << regfactor); } Matrix3 mMomPart; for (size_t i = 0; i < 3; ++i) { for (size_t j = 0; j < 3; ++j) { mMomPart.set(i,j, p3[i]*p3[j]); } } mMom += regfactor * mMomPart; } if (mMom.get(2,0) == 0 && mMom.get(2,1) == 0 && mMom.get(2,2) == 0) { MSG_DEBUG("No longitudinal momenta given..."); clear(); return; } // Normalise to total (regulated) momentum. mMom /= totalMomentum; MSG_DEBUG("Momentum tensor = " << "\n" << mMom); // Check that the matrix is symmetric. const bool isSymm = mMom.isSymm(); if (!isSymm) { MSG_ERROR("Error: momentum tensor not symmetric (r=" << _regparam << ")"); MSG_ERROR("[0,1] vs. [1,0]: " << mMom.get(0,1) << ", " << mMom.get(1,0)); MSG_ERROR("[0,2] vs. [2,0]: " << mMom.get(0,2) << ", " << mMom.get(2,0)); MSG_ERROR("[1,2] vs. [2,1]: " << mMom.get(1,2) << ", " << mMom.get(2,1)); } // If not symmetric, something's wrong (we made sure the error msg appeared first). 
assert(isSymm); // Eigenvalues const double q = mMom.trace()/3.; const double p1 = mMom.get(0,1)*mMom.get(0,1) + mMom.get(0,2)*mMom.get(0,2) + mMom.get(1,2)*mMom.get(1,2); const double p2 = (mMom.get(0,0) - q)*(mMom.get(0,0) - q) + (mMom.get(1,1) - q)*(mMom.get(1,1) - q) + (mMom.get(2,2) - q)*(mMom.get(2,2) - q) + 2.*p1; const double p = sqrt(p2/6.); Matrix3 I3 = Matrix3::mkIdentity(); const double r = ( 1./p * (mMom - q*I3)).det()/2.; double phi(0); if (r <= -1) phi = M_PI / 3.; else if (r >= 1) phi = 0; else phi = acos(r) / 3.; const double l1 = q + 2 * p * cos(phi); const double l3 = q + 2 * p * cos(phi + (2*M_PI/3.)); const double l2 = 3 * q - l1 - l3; if (l1 == 0 || l2 == 0 || l3 == 0) { MSG_DEBUG("Zero eigenvalue..."); clear(); return; } _lambdas.clear(); _sphAxes.clear(); _sphAxes.push_back(mkEigenVector(mMom, l1)); _sphAxes.push_back(mkEigenVector(mMom, l2)); _sphAxes.push_back(mkEigenVector(mMom, l3)); _lambdas.push_back(l1); _lambdas.push_back(l2); _lambdas.push_back(l3); // Debug output. MSG_DEBUG("Lambdas = (" << lambda1() << ", " << lambda2() << ", " << lambda3() << ")"); MSG_DEBUG("Sum of lambdas = " << lambda1() + lambda2() + lambda3()); MSG_DEBUG("Vectors = " << sphericityAxis() << ", " << sphericityMajorAxis() << ", " << sphericityMinorAxis() << ")"); } } diff --git a/src/Projections/TriggerUA5.cc b/src/Projections/TriggerUA5.cc --- a/src/Projections/TriggerUA5.cc +++ b/src/Projections/TriggerUA5.cc @@ -1,52 +1,52 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/TriggerUA5.hh" namespace Rivet { TriggerUA5::TriggerUA5() { setName("TriggerUA5"); - addProjection(Beam(), "Beam"); - addProjection(ChargedFinalState(-5.6, 5.6), "CFS"); + declare(Beam(), "Beam"); + declare(ChargedFinalState(-5.6, 5.6), "CFS"); } void TriggerUA5::project(const Event& evt) { _n_plus = 0; _n_minus = 0; // Start with the assumption that the trigger fails _decision_sd = false; _decision_nsd_1 = false; _decision_nsd_2 = false; // Triggers can be different for pp and ppbar running const Beam& b = applyProjection(evt, "Beam"); _samebeams = (b.beams().first.pid() == b.beams().second.pid()); // Count hodoscope hits const ChargedFinalState& cfs = applyProjection(evt, "CFS"); for (const Particle& p : cfs.particles()) { if (inRange(p.eta(), -5.6, -2.0)) _n_minus++; else if (inRange(p.eta(), 2.0, 5.6)) _n_plus++; } MSG_DEBUG("Trigger -: " << _n_minus << ", Trigger +: " << _n_plus); // Common SD/NSD trigger requirement: must activate at least one hodoscope if (_n_minus == 0 && _n_plus == 0) return; _decision_sd = true; // Extra NSD trigger requirements if (_n_minus == 0 || _n_plus == 0) return; _decision_nsd_1 = true; if (_n_minus < 2 || _n_plus < 2) return; _decision_nsd_2 = true; } } diff --git a/src/Projections/WFinder.cc b/src/Projections/WFinder.cc --- a/src/Projections/WFinder.cc +++ b/src/Projections/WFinder.cc @@ -1,163 +1,163 @@ // -*- C++ -*- #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { WFinder::WFinder(const FinalState& inputfs, const Cut& leptoncuts, PdgId pid, double minmass, double maxmass, double 
missingET, double dRmax, ChargedLeptons chLeptons, ClusterPhotons clusterPhotons, AddPhotons trackPhotons, MassWindow masstype, double masstarget) { setName("WFinder"); _etMissMin = missingET; _minmass = minmass; _maxmass = maxmass; _masstarget = masstarget; _pid = abs(pid); _trackPhotons = trackPhotons; _useTransverseMass = (masstype == MassWindow::MT); // Check that the arguments are legal if (_pid != PID::ELECTRON && _pid != PID::MUON) throw Error("Invalid charged lepton PID given to WFinder"); // Identify bare leptons for dressing // Bit of a code nightmare -- FS projection copy constructors don't work? /// @todo Fix FS copy constructors!! if (chLeptons == ChargedLeptons::PROMPT) { PromptFinalState inputfs_prompt(inputfs); IdentifiedFinalState bareleptons(inputfs_prompt); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } else { IdentifiedFinalState bareleptons(inputfs); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } // Dress the bare leptons const bool doClustering = (clusterPhotons != ClusterPhotons::NONE); const bool useDecayPhotons = (clusterPhotons == ClusterPhotons::ALL); DressedLeptons leptons(inputfs, get("BareLeptons"), (doClustering ? dRmax : -1.), leptoncuts, useDecayPhotons); - addProjection(leptons, "DressedLeptons"); + declare(leptons, "DressedLeptons"); // Add MissingMomentum proj to calc MET MissingMomentum vismom(inputfs); - addProjection(vismom, "MissingET"); + declare(vismom, "MissingET"); // Identify the non-Z part of the event VetoedFinalState remainingFS; remainingFS.addVetoOnThisFinalState(*this); - addProjection(remainingFS, "RFS"); + declare(remainingFS, "RFS"); } ///////////////////////////////////////////////////// const VetoedFinalState& WFinder::remainingFinalState() const { return getProjection("RFS"); } const MissingMomentum& WFinder::missingMom() const { return getProjection("MissingET"); } CmpState WFinder::compare(const Projection& p) const { PCmp dlcmp = mkNamedPCmp(p, "DressedLeptons"); if (dlcmp != CmpState::EQ) return dlcmp; const WFinder& other = dynamic_cast(p); return (cmp(_minmass, other._minmass) || cmp(_maxmass, other._maxmass) || cmp(_useTransverseMass, other._useTransverseMass) || cmp(_etMissMin, other._etMissMin) || cmp(_pid, other._pid) || cmp(_trackPhotons, other._trackPhotons)); } void WFinder::project(const Event& e) { clear(); _leptons.clear(); _neutrinos.clear(); // Check missing ET const MissingMomentum& missmom = applyProjection(e, "MissingET"); const double met = missmom.vectorEt().mod(); MSG_TRACE("MET = " << met/GeV << " GeV vs. required > " << _etMissMin/GeV << " GeV"); if (met < _etMissMin) { MSG_DEBUG("Not enough missing ET: " << met/GeV << " GeV vs. 
required > " << _etMissMin/GeV << " GeV"); return; } // Get lepton const DressedLeptons& leptons = applyProjection(e, "DressedLeptons"); if ( leptons.dressedLeptons().empty() ) { MSG_DEBUG("No dressed leptons"); return; } MSG_DEBUG("Found at least one dressed lepton: " << leptons.dressedLeptons().front().momentum() ); // Get missing momentum 4-vector, assuming a massless invisible particle const FourMomentum pmiss = missmom.missingMomentum(0*GeV); MSG_DEBUG("Found missing 4-momentum: " << pmiss); // Compute an invariant mass final state for the W decay leptons (using pseudo-neutrinos from ETmiss) PdgId _nu_pid = _pid + 1; assert(_nu_pid == PID::NU_E || _nu_pid == PID::NU_MU); vector > l_nu_ids; l_nu_ids += make_pair(_pid, -_nu_pid); l_nu_ids += make_pair(-_pid, _nu_pid); InvMassFinalState imfs(l_nu_ids, _minmass, _maxmass, _masstarget); imfs.useTransverseMass(_useTransverseMass); Particles tmp = leptons.particles(); tmp += { Particle( _nu_pid, pmiss), Particle(-_nu_pid, pmiss) }; // fake (anti)neutrinos from ETmiss vector imfs.calc(tmp); if (imfs.particlePairs().size() < 1) return; // Assemble a pseudo-W particle const ParticlePair Wconstituents = imfs.particlePairs().front(); const Particle& p1(Wconstituents.first), p2(Wconstituents.second); const FourMomentum pW = p1.momentum() + p2.momentum(); const int wcharge3 = p1.charge3() + p2.charge3(); assert(abs(wcharge3) == 3); const int wcharge = wcharge3/3; const PdgId wpid = (wcharge == 1) ? PID::WPLUSBOSON : PID::WMINUSBOSON; Particle w(wpid, pW); MSG_DEBUG(w << " reconstructed from: " << p1 << " + " << p2); // Add (dressed) lepton constituents to the W (skipping photons if requested) /// @todo Do we need to add all used invisibles to _theParticles ? const Particle l = p1.isChargedLepton() ? p1 : p2; _leptons += (_trackPhotons == AddPhotons::YES) ? l : l.constituents().front(); w.addConstituent(_leptons.back()); const Particle nu = p1.isNeutrino() ? p1 : p2; _neutrinos += nu; w.addConstituent(nu); // Register the completed W _theParticles.push_back(w); } } diff --git a/src/Projections/ZFinder.cc b/src/Projections/ZFinder.cc --- a/src/Projections/ZFinder.cc +++ b/src/Projections/ZFinder.cc @@ -1,118 +1,118 @@ // -*- C++ -*- #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/InvMassFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { ZFinder::ZFinder(const FinalState& inputfs, const Cut & fsCut, PdgId pid, double minmass, double maxmass, double dRmax, ChargedLeptons chLeptons, ClusterPhotons clusterPhotons, AddPhotons trackPhotons, double masstarget) { setName("ZFinder"); _minmass = minmass; _maxmass = maxmass; _masstarget = masstarget; _pid = abs(pid); _trackPhotons = trackPhotons; // Identify bare leptons for dressing // Bit of a code nightmare -- FS projection copy constructors don't work? /// @todo Fix FS copy constructors!! if (chLeptons == ChargedLeptons::PROMPT) { PromptFinalState inputfs_prompt(inputfs); IdentifiedFinalState bareleptons = IdentifiedFinalState(inputfs_prompt); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } else { IdentifiedFinalState bareleptons = IdentifiedFinalState(inputfs); bareleptons.acceptIdPair(_pid); declare(bareleptons, "BareLeptons"); } // Dress the bare leptons const bool doClustering = (clusterPhotons != ClusterPhotons::NONE); const bool useDecayPhotons = (clusterPhotons == ClusterPhotons::ALL); DressedLeptons leptons(inputfs, get("BareLeptons"), (doClustering ? 
dRmax : -1.0), fsCut, useDecayPhotons); - addProjection(leptons, "DressedLeptons"); + declare(leptons, "DressedLeptons"); // Identify the non-Z part of the event VetoedFinalState remainingFS; remainingFS.addVetoOnThisFinalState(*this); - addProjection(remainingFS, "RFS"); + declare(remainingFS, "RFS"); } ///////////////////////////////////////////////////// const Particles & ZFinder::constituentLeptons() const { static const Particles none; if (empty()) return none; return boson().constituents(); } const VetoedFinalState& ZFinder::remainingFinalState() const { return getProjection("RFS"); } CmpState ZFinder::compare(const Projection& p) const { PCmp LCcmp = mkNamedPCmp(p, "DressedLeptons"); if (LCcmp != CmpState::EQ) return LCcmp; const ZFinder& other = dynamic_cast(p); return (cmp(_minmass, other._minmass) || cmp(_maxmass, other._maxmass) || cmp(_pid, other._pid) || cmp(_trackPhotons, other._trackPhotons)); } void ZFinder::project(const Event& e) { clear(); // Get leptons and find an acceptable invariant mass OSSF pair const DressedLeptons& leptons = applyProjection(e, "DressedLeptons"); InvMassFinalState imfs({_pid, -_pid}, _minmass, _maxmass, _masstarget); imfs.calc(leptons.particles()); if (imfs.particlePairs().empty()) { MSG_TRACE("No acceptable inv-mass lepton/antilepton pairs found"); return; } // Assemble a pseudo-Z particle const ParticlePair& Zconstituents = imfs.particlePairs().front(); const Particle& p1(Zconstituents.first), p2(Zconstituents.second); const FourMomentum pZ = p1.momentum() + p2.momentum(); assert(p1.charge3() + p2.charge3() == 0); Particle z(PID::Z0BOSON, pZ); MSG_DEBUG(z << " reconstructed from: " << p1 << " + " << p2); // Add (dressed) lepton constituents to the Z (skipping photons if requested) // Keep the DressedLeptons found by the ZFinder const Particle& l1 = p1.charge() > 0 ? p1 : p2; const Particle& l2 = p2.charge() < 0 ? p2 : p1; MSG_TRACE("l1 = " << l1.constituents()); MSG_TRACE("l2 = " << l2.constituents()); z.addConstituent(_trackPhotons == AddPhotons::YES ? l1 : l1.constituents().front()); z.addConstituent(_trackPhotons == AddPhotons::YES ? l2 : l2.constituents().front()); MSG_DEBUG("Number of stored raw Z constituents = " << z.rawConstituents().size() << " " << z.rawConstituents()); // Register the completed Z _theParticles.push_back(z); } }
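// ------------------------------------------------------------------------------------------------
// Illustrative usage sketch (editorial, hedged): the declare()/apply() idiom that these projection
// changes standardise, as it would typically appear in an analysis using ZFinder. The projection
// name "ZF" and the kinematic cuts are illustrative assumptions, and the constructor's trailing
// arguments are assumed to take their header defaults.
//
//   void init() {
//     const FinalState fs;
//     ZFinder zfinder(fs, Cuts::abseta < 2.5 && Cuts::pT > 25*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1);
//     declare(zfinder, "ZF");
//   }
//
//   void analyze(const Event& event) {
//     const ZFinder& zf = apply<ZFinder>(event, "ZF");
//     if (zf.bosons().empty()) vetoEvent;
//     const Particle& z = zf.bosons().front();
//     // ... e.g. fill histograms with z.pT()/GeV and z.rapidity() ...
//   }
// ------------------------------------------------------------------------------------------------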